annotate src/share/vm/runtime/sharedRuntime.cpp @ 465:dc16daa0329d
6739363: Xcheck jni doesn't check native function arguments
Summary: Fix adds support for verifying arguments with -Xcheck:jni.
Reviewed-by: coleenp
author: poonam
date: Thu, 04 Dec 2008 17:29:56 -0800
parents: 1ee8caae33af
children: 6d8fc951eb25 70998f2e05ef
/*
 * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_sharedRuntime.cpp.incl"
#include <math.h>

HS_DTRACE_PROBE_DECL4(hotspot, object__alloc, Thread*, char*, int, size_t);
HS_DTRACE_PROBE_DECL7(hotspot, method__entry, int,
                      char*, int, char*, int, char*, int);
HS_DTRACE_PROBE_DECL7(hotspot, method__return, int,
                      char*, int, char*, int, char*, int);
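// The declarations above define the hotspot provider's object-alloc,
// method-entry and method-return DTrace probes; they are fired from
// dtrace_object_alloc_base, dtrace_method_entry and dtrace_method_exit
// further down in this file.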
// Implementation of SharedRuntime

#ifndef PRODUCT
// For statistics
int SharedRuntime::_ic_miss_ctr = 0;
int SharedRuntime::_wrong_method_ctr = 0;
int SharedRuntime::_resolve_static_ctr = 0;
int SharedRuntime::_resolve_virtual_ctr = 0;
int SharedRuntime::_resolve_opt_virtual_ctr = 0;
int SharedRuntime::_implicit_null_throws = 0;
int SharedRuntime::_implicit_div0_throws = 0;
int SharedRuntime::_throw_null_ctr = 0;

int SharedRuntime::_nof_normal_calls = 0;
int SharedRuntime::_nof_optimized_calls = 0;
int SharedRuntime::_nof_inlined_calls = 0;
int SharedRuntime::_nof_megamorphic_calls = 0;
int SharedRuntime::_nof_static_calls = 0;
int SharedRuntime::_nof_inlined_static_calls = 0;
int SharedRuntime::_nof_interface_calls = 0;
int SharedRuntime::_nof_optimized_interface_calls = 0;
int SharedRuntime::_nof_inlined_interface_calls = 0;
int SharedRuntime::_nof_megamorphic_interface_calls = 0;
int SharedRuntime::_nof_removable_exceptions = 0;

int SharedRuntime::_new_instance_ctr = 0;
int SharedRuntime::_new_array_ctr = 0;
int SharedRuntime::_multi1_ctr = 0;
int SharedRuntime::_multi2_ctr = 0;
int SharedRuntime::_multi3_ctr = 0;
int SharedRuntime::_multi4_ctr = 0;
int SharedRuntime::_multi5_ctr = 0;
int SharedRuntime::_mon_enter_stub_ctr = 0;
int SharedRuntime::_mon_exit_stub_ctr = 0;
int SharedRuntime::_mon_enter_ctr = 0;
int SharedRuntime::_mon_exit_ctr = 0;
int SharedRuntime::_partial_subtype_ctr = 0;
int SharedRuntime::_jbyte_array_copy_ctr = 0;
int SharedRuntime::_jshort_array_copy_ctr = 0;
int SharedRuntime::_jint_array_copy_ctr = 0;
int SharedRuntime::_jlong_array_copy_ctr = 0;
int SharedRuntime::_oop_array_copy_ctr = 0;
int SharedRuntime::_checkcast_array_copy_ctr = 0;
int SharedRuntime::_unsafe_array_copy_ctr = 0;
int SharedRuntime::_generic_array_copy_ctr = 0;
int SharedRuntime::_slow_array_copy_ctr = 0;
int SharedRuntime::_find_handler_ctr = 0;
int SharedRuntime::_rethrow_ctr = 0;

int     SharedRuntime::_ICmiss_index = 0;
int     SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];

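// Record an inline-cache miss at the given call-site pc.  Misses are
// tallied in a small fixed-size table; once the table fills up, any
// further miss sites all fall into the last slot.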
void SharedRuntime::trace_ic_miss(address at) {
  for (int i = 0; i < _ICmiss_index; i++) {
    if (_ICmiss_at[i] == at) {
      _ICmiss_count[i]++;
      return;
    }
  }
  int index = _ICmiss_index++;
  if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
  _ICmiss_at[index] = at;
  _ICmiss_count[index] = 1;
}

void SharedRuntime::print_ic_miss_histogram() {
  if (ICMissHistogram) {
    tty->print_cr("IC Miss Histogram:");
    int tot_misses = 0;
    for (int i = 0; i < _ICmiss_index; i++) {
      tty->print_cr("  at: " INTPTR_FORMAT "  nof: %d", _ICmiss_at[i], _ICmiss_count[i]);
      tot_misses += _ICmiss_count[i];
    }
    tty->print_cr("Total IC misses: %7d", tot_misses);
  }
}
#endif // PRODUCT

#ifndef SERIALGC

// G1 write-barrier pre: executed before a pointer store.
JRT_LEAF(void, SharedRuntime::g1_wb_pre(oopDesc* orig, JavaThread *thread))
  if (orig == NULL) {
    assert(false, "should be optimized out");
    return;
  }
  // store the original value that was in the field reference
  thread->satb_mark_queue().enqueue(orig);
JRT_END

// G1 write-barrier post: executed after a pointer store.
JRT_LEAF(void, SharedRuntime::g1_wb_post(void* card_addr, JavaThread* thread))
  thread->dirty_card_queue().enqueue(card_addr);
JRT_END

#endif // !SERIALGC


JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
  return x * y;
JRT_END

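// Java mandates that min_jlong / -1 == min_jlong (and min_jlong % -1 == 0),
// while native 64-bit division overflows on that operand pair (undefined
// behavior in C++, a hardware trap on some platforms), so the two helpers
// below filter that case out explicitly.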
JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
  if (x == min_jlong && y == CONST64(-1)) {
    return x;
  } else {
    return x / y;
  }
JRT_END


JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
  if (x == min_jlong && y == CONST64(-1)) {
    return 0;
  } else {
    return x % y;
  }
JRT_END


const juint  float_sign_mask  = 0x7FFFFFFF;
const juint  float_infinity   = 0x7F800000;
const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF);
const julong double_infinity  = CONST64(0x7FF0000000000000);

JRT_LEAF(jfloat, SharedRuntime::frem(jfloat x, jfloat y))
#ifdef _WIN64
  // 64-bit Windows on amd64 returns the wrong values for
  // infinity operands.
  union { jfloat f; juint i; } xbits, ybits;
  xbits.f = x;
  ybits.f = y;
  // x Mod Infinity == x unless x is infinity
  if ( ((xbits.i & float_sign_mask) != float_infinity) &&
       ((ybits.i & float_sign_mask) == float_infinity) ) {
    return x;
  }
#endif
  return ((jfloat)fmod((double)x, (double)y));
JRT_END


JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y))
#ifdef _WIN64
  union { jdouble d; julong l; } xbits, ybits;
  xbits.d = x;
  ybits.d = y;
  // x Mod Infinity == x unless x is infinity
  if ( ((xbits.l & double_sign_mask) != double_infinity) &&
       ((ybits.l & double_sign_mask) == double_infinity) ) {
    return x;
  }
#endif
  return ((jdouble)fmod((double)x, (double)y));
JRT_END

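// The four helpers below implement the Java-defined saturating
// floating-point-to-integral conversions: NaN converts to 0, and values
// outside the target range clamp to the target type's min or max value.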
JRT_LEAF(jint, SharedRuntime::f2i(jfloat x))
  if (g_isnan(x)) { return 0; }
  jlong lltmp = (jlong)x;
  jint  ltmp  = (jint)lltmp;
  if (ltmp == lltmp) {
    return ltmp;
  } else {
    if (x < 0) {
      return min_jint;
    } else {
      return max_jint;
    }
  }
JRT_END


JRT_LEAF(jlong, SharedRuntime::f2l(jfloat x))
  if (g_isnan(x)) { return 0; }
  jlong lltmp = (jlong)x;
  if (lltmp != min_jlong) {
    return lltmp;
  } else {
    if (x < 0) {
      return min_jlong;
    } else {
      return max_jlong;
    }
  }
JRT_END


JRT_LEAF(jint, SharedRuntime::d2i(jdouble x))
  if (g_isnan(x)) { return 0; }
  jlong lltmp = (jlong)x;
  jint  ltmp  = (jint)lltmp;
  if (ltmp == lltmp) {
    return ltmp;
  } else {
    if (x < 0) {
      return min_jint;
    } else {
      return max_jint;
    }
  }
JRT_END


JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x))
  if (g_isnan(x)) { return 0; }
  jlong lltmp = (jlong)x;
  if (lltmp != min_jlong) {
    return lltmp;
  } else {
    if (x < 0) {
      return min_jlong;
    } else {
      return max_jlong;
    }
  }
JRT_END


JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
  return (jfloat)x;
JRT_END


JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
  return (jfloat)x;
JRT_END


JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
  return (jdouble)x;
JRT_END

// Exception handling across interpreter/compiler boundaries
//
// exception_handler_for_return_address(...) returns the continuation address.
// The continuation address is the entry point of the exception handler of the
// previous frame, depending on the return address.

address SharedRuntime::raw_exception_handler_for_return_address(address return_address) {
  assert(frame::verify_return_pc(return_address), "must be a return pc");

  // the fastest case first
  CodeBlob* blob = CodeCache::find_blob(return_address);
  if (blob != NULL && blob->is_nmethod()) {
    nmethod* code = (nmethod*)blob;
    assert(code != NULL, "nmethod must be present");
    // native nmethods don't have exception handlers
    assert(!code->is_native_method(), "no exception handler");
    assert(code->header_begin() != code->exception_begin(), "no exception handler");
    if (code->is_deopt_pc(return_address)) {
      return SharedRuntime::deopt_blob()->unpack_with_exception();
    } else {
      return code->exception_begin();
    }
  }

  // Entry code
  if (StubRoutines::returns_to_call_stub(return_address)) {
    return StubRoutines::catch_exception_entry();
  }
  // Interpreted code
  if (Interpreter::contains(return_address)) {
    return Interpreter::rethrow_exception_entry();
  }

  // Compiled code
  if (CodeCache::contains(return_address)) {
    CodeBlob* blob = CodeCache::find_blob(return_address);
    if (blob->is_nmethod()) {
      nmethod* code = (nmethod*)blob;
      assert(code != NULL, "nmethod must be present");
      assert(code->header_begin() != code->exception_begin(), "no exception handler");
      return code->exception_begin();
    }
    if (blob->is_runtime_stub()) {
      ShouldNotReachHere();  // callers are responsible for skipping runtime stub frames
    }
  }
  guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
#ifndef PRODUCT
  { ResourceMark rm;
    tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", return_address);
    tty->print_cr("a) exception happened in (new?) code stubs/buffers that are not handled here");
    tty->print_cr("b) other problem");
  }
#endif // PRODUCT
  ShouldNotReachHere();
  return NULL;
}


JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(address return_address))
  return raw_exception_handler_for_return_address(return_address);
JRT_END

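// Maps the pc of a faulting safepoint poll in compiled code to the
// matching handler stub: one stub serves polls at method returns, the
// other serves polls inside loops.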
address SharedRuntime::get_poll_stub(address pc) {
  address stub;
  // Look up the code blob
  CodeBlob *cb = CodeCache::find_blob(pc);

  // Should be an nmethod
  assert(cb && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod");

  // Look up the relocation information
  assert(((nmethod*)cb)->is_at_poll_or_poll_return(pc),
         "safepoint polling: type must be poll");

  assert(((NativeInstruction*)pc)->is_safepoint_poll(),
         "Only polling locations are used for safepoint");

  bool at_poll_return = ((nmethod*)cb)->is_at_poll_return(pc);
  if (at_poll_return) {
    assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
           "polling page return stub not created yet");
    stub = SharedRuntime::polling_page_return_handler_blob()->instructions_begin();
  } else {
    assert(SharedRuntime::polling_page_safepoint_handler_blob() != NULL,
           "polling page safepoint stub not created yet");
    stub = SharedRuntime::polling_page_safepoint_handler_blob()->instructions_begin();
  }
#ifndef PRODUCT
  if (TraceSafepoint) {
    char buf[256];
    jio_snprintf(buf, sizeof(buf),
                 "... found polling page %s exception at pc = "
                 INTPTR_FORMAT ", stub =" INTPTR_FORMAT,
                 at_poll_return ? "return" : "loop",
                 (intptr_t)pc, (intptr_t)stub);
    tty->print_raw_cr(buf);
  }
#endif // PRODUCT
  return stub;
}


oop SharedRuntime::retrieve_receiver(symbolHandle sig, frame caller) {
  assert(caller.is_interpreted_frame(), "");
  int args_size = ArgumentSizeComputer(sig).size() + 1;
  assert(args_size <= caller.interpreter_frame_expression_stack_size(), "receiver must be on interpreter stack");
  oop result = (oop) *caller.interpreter_frame_tos_at(args_size - 1);
  assert(Universe::heap()->is_in(result) && result->is_oop(), "receiver must be an oop");
  return result;
}


void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, Handle h_exception) {
  if (JvmtiExport::can_post_exceptions()) {
    vframeStream vfst(thread, true);
    methodHandle method = methodHandle(thread, vfst.method());
    address bcp = method()->bcp_from(vfst.bci());
    JvmtiExport::post_exception_throw(thread, method(), bcp, h_exception());
  }
  Exceptions::_throw(thread, __FILE__, __LINE__, h_exception);
}

void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, symbolOop name, const char *message) {
  Handle h_exception = Exceptions::new_exception(thread, name, message);
  throw_and_post_jvmti_exception(thread, h_exception);
}

// ret_pc points into the caller; we are returning the caller's exception handler
// for the given exception
address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
                                                    bool force_unwind, bool top_frame_only) {
  assert(nm != NULL, "must exist");
  ResourceMark rm;

  ScopeDesc* sd = nm->scope_desc_at(ret_pc);
  // determine handler bci, if any
  EXCEPTION_MARK;

  int handler_bci = -1;
  int scope_depth = 0;
  if (!force_unwind) {
    int bci = sd->bci();
    do {
      bool skip_scope_increment = false;
      // exception handler lookup
      KlassHandle ek(THREAD, exception->klass());
      handler_bci = sd->method()->fast_exception_handler_bci_for(ek, bci, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        // We threw an exception while trying to find the exception handler.
        // Transfer the new exception to the exception handle which will
        // be set into thread local storage, and do another lookup for an
        // exception handler for this exception, this time starting at the
        // BCI of the exception handler which caused the exception to be
        // thrown (bugs 4307310 and 4546590).  Set the "exception" reference
        // argument to ensure that the correct exception is thrown (4870175).
        exception = Handle(THREAD, PENDING_EXCEPTION);
        CLEAR_PENDING_EXCEPTION;
        if (handler_bci >= 0) {
          bci = handler_bci;
          handler_bci = -1;
          skip_scope_increment = true;
        }
      }
      if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
        sd = sd->sender();
        if (sd != NULL) {
          bci = sd->bci();
        }
        ++scope_depth;
      }
    } while (!top_frame_only && handler_bci < 0 && sd != NULL);
  }

  // found handling method => lookup exception handler
  int catch_pco = ret_pc - nm->instructions_begin();

  ExceptionHandlerTable table(nm);
  HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
  if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) {
    // Allow abbreviated catch tables.  The idea is to allow a method
    // to materialize its exceptions without committing to the exact
    // routing of exceptions.  In particular this is needed for adding
    // a synthetic handler to unlock monitors when inlining
    // synchronized methods, since the unlock path isn't represented in
    // the bytecodes.
    t = table.entry_for(catch_pco, -1, 0);
  }

#ifdef COMPILER1
  if (nm->is_compiled_by_c1() && t == NULL && handler_bci == -1) {
    // Exception is not handled by this frame, so unwind.  Note that
    // this is not the same as how C2 does this.  C2 emits a table
    // entry that dispatches to the unwind code in the nmethod.
    return NULL;
  }
#endif /* COMPILER1 */


  if (t == NULL) {
    tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", ret_pc, handler_bci);
    tty->print_cr("   Exception:");
    exception->print();
    tty->cr();
    tty->print_cr(" Compiled exception table:");
    table.print();
    nm->print_code();
    guarantee(false, "missing exception handler");
    return NULL;
  }

  return nm->instructions_begin() + t->pco();
}

JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread))
  // These errors occur only at call sites
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_AbstractMethodError());
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* thread))
  // These errors occur only at call sites
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* thread))
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* thread))
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* thread))
  // This entry point is effectively only used for NullPointerExceptions which occur at inline
  // cache sites (when the callee activation is not yet set up), so we are at a call site
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* thread))
  // We avoid using the normal exception construction in this case because
  // it performs an upcall to Java, and we're already out of stack space.
  klassOop k = SystemDictionary::StackOverflowError_klass();
  oop exception_oop = instanceKlass::cast(k)->allocate_instance(CHECK);
  Handle exception(thread, exception_oop);
  if (StackTraceInThrowable) {
    java_lang_Throwable::fill_in_stack_trace(exception);
  }
  throw_and_post_jvmti_exception(thread, exception);
JRT_END

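// Called from the signal handler: given the pc at which an implicit
// exception (null dereference, integer division by zero, or stack
// overflow) was raised, compute the pc at which execution should
// continue, or return NULL so the signal handler reports the fault
// as a plain SEGV.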
address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
                                                           address pc,
                                                           SharedRuntime::ImplicitExceptionKind exception_kind)
{
  address target_pc = NULL;

  if (Interpreter::contains(pc)) {
#ifdef CC_INTERP
    // C++ interpreter doesn't throw implicit exceptions
    ShouldNotReachHere();
#else
    switch (exception_kind) {
      case IMPLICIT_NULL:           return Interpreter::throw_NullPointerException_entry();
      case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry();
      case STACK_OVERFLOW:          return Interpreter::throw_StackOverflowError_entry();
      default:                      ShouldNotReachHere();
    }
#endif // !CC_INTERP
  } else {
    switch (exception_kind) {
      case STACK_OVERFLOW: {
        // Stack overflow only occurs upon frame setup; the callee is
        // going to be unwound.  Dispatch to a shared runtime stub
        // which will cause the StackOverflowError to be fabricated
        // and processed.
        // For stack overflow in deoptimization blob, cleanup thread.
        if (thread->deopt_mark() != NULL) {
          Deoptimization::cleanup_deopt_info(thread, NULL);
        }
        return StubRoutines::throw_StackOverflowError_entry();
      }

      case IMPLICIT_NULL: {
        if (VtableStubs::contains(pc)) {
          // We haven't yet entered the callee frame.  Fabricate an
          // exception and begin dispatching it in the caller.  Since
          // the caller was at a call site, it's safe to destroy all
          // caller-saved registers, as these entry points do.
          VtableStub* vt_stub = VtableStubs::stub_containing(pc);

          // If vt_stub is NULL, then return NULL to signal handler to report the SEGV error.
          if (vt_stub == NULL) return NULL;

          if (vt_stub->is_abstract_method_error(pc)) {
            assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
            return StubRoutines::throw_AbstractMethodError_entry();
          } else {
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }
        } else {
          CodeBlob* cb = CodeCache::find_blob(pc);

          // If code blob is NULL, then return NULL to signal handler to report the SEGV error.
          if (cb == NULL) return NULL;

          // Exception happened in CodeCache.  Must be either:
          // 1. Inline-cache check in C2I handler blob,
          // 2. Inline-cache check in nmethod, or
          // 3. Implicit null exception in nmethod

          if (!cb->is_nmethod()) {
            guarantee(cb->is_adapter_blob(),
                      "exception happened outside interpreter, nmethods and vtable stubs (1)");
            // There is no handler here, so we will simply unwind.
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }

          // Otherwise, it's an nmethod.  Consult its exception handlers.
          nmethod* nm = (nmethod*)cb;
          if (nm->inlinecache_check_contains(pc)) {
            // exception happened inside inline-cache check code
            // => the nmethod is not yet active (i.e., the frame
            // is not set up yet) => use return address pushed by
            // caller => don't push another return address
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }

#ifndef PRODUCT
          _implicit_null_throws++;
#endif
          target_pc = nm->continuation_for_implicit_exception(pc);
          guarantee(target_pc != 0, "must have a continuation point");
        }

        break; // fall through
      }


      case IMPLICIT_DIVIDE_BY_ZERO: {
        nmethod* nm = CodeCache::find_nmethod(pc);
        guarantee(nm != NULL, "must have containing nmethod for implicit division-by-zero exceptions");
#ifndef PRODUCT
        _implicit_div0_throws++;
#endif
        target_pc = nm->continuation_for_implicit_exception(pc);
        guarantee(target_pc != 0, "must have a continuation point");
        break; // fall through
      }

      default: ShouldNotReachHere();
    }

    guarantee(target_pc != NULL, "must have computed destination PC for implicit exception");
    assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");

    // for AbortVMOnException flag
    NOT_PRODUCT(Exceptions::debug_check_abort("java.lang.NullPointerException"));
    if (exception_kind == IMPLICIT_NULL) {
      Events::log("Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
    } else {
      Events::log("Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
    }
    return target_pc;
  }

  ShouldNotReachHere();
  return NULL;
}


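// Entry point installed for native methods whose implementation could
// not be linked; it simply throws UnsatisfiedLinkError when invoked.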
JNI_ENTRY(void, throw_unsatisfied_link_error(JNIEnv* env, ...))
{
  THROW(vmSymbols::java_lang_UnsatisfiedLinkError());
}
JNI_END


address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
  return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
}

#ifndef PRODUCT
JRT_ENTRY(intptr_t, SharedRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2))
  const frame f = thread->last_frame();
  assert(f.is_interpreted_frame(), "must be an interpreted frame");
#ifndef PRODUCT
  methodHandle mh(THREAD, f.interpreter_frame_method());
  BytecodeTracer::trace(mh, f.interpreter_frame_bcp(), tos, tos2);
#endif // !PRODUCT
  return preserve_this_value;
JRT_END
#endif // !PRODUCT


JRT_ENTRY(void, SharedRuntime::yield_all(JavaThread* thread, int attempts))
  os::yield_all(attempts);
JRT_END

// ---------------------------------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

void SharedRuntime::verify_caller_frame(frame caller_frame, methodHandle callee_method) {
  ResourceMark rm;
  assert(caller_frame.is_interpreted_frame(), "sanity check");
  assert(callee_method->has_compiled_code(), "callee must be compiled");
  methodHandle caller_method(Thread::current(), caller_frame.interpreter_frame_method());
  jint bci = caller_frame.interpreter_frame_bci();
  methodHandle method = find_callee_method_inside_interpreter(caller_frame, caller_method, bci);
  assert(callee_method == method, "incorrect method");
}

methodHandle SharedRuntime::find_callee_method_inside_interpreter(frame caller_frame, methodHandle caller_method, int bci) {
  EXCEPTION_MARK;
  Bytecode_invoke* bytecode = Bytecode_invoke_at(caller_method, bci);
  methodHandle staticCallee = bytecode->static_target(CATCH); // Non-product code

  bytecode = Bytecode_invoke_at(caller_method, bci);
  int bytecode_index = bytecode->index();
  Bytecodes::Code bc = bytecode->adjusted_invoke_code();

  Handle receiver;
  if (bc == Bytecodes::_invokeinterface ||
      bc == Bytecodes::_invokevirtual ||
      bc == Bytecodes::_invokespecial) {
    symbolHandle signature(THREAD, staticCallee->signature());
    receiver = Handle(THREAD, retrieve_receiver(signature, caller_frame));
  } else {
    receiver = Handle();
  }
  CallInfo result;
  constantPoolHandle constants(THREAD, caller_method->constants());
  LinkResolver::resolve_invoke(result, receiver, constants, bytecode_index, bc, CATCH); // Non-product code
  methodHandle calleeMethod = result.selected_method();
  return calleeMethod;
}

#endif // PRODUCT

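// Runtime entry used by compiled code when an object whose class has a
// finalizer is allocated; registers the object for finalization.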
JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
  assert(obj->is_oop(), "must be a valid oop");
  assert(obj->klass()->klass_part()->has_finalizer(), "shouldn't be here otherwise");
  instanceKlass::register_finalizer(instanceOop(obj), CHECK);
JRT_END


jlong SharedRuntime::get_java_tid(Thread* thread) {
  if (thread != NULL) {
    if (thread->is_Java_thread()) {
      oop obj = ((JavaThread*)thread)->threadObj();
      return (obj == NULL) ? 0 : java_lang_Thread::thread_id(obj);
    }
  }
  return 0;
}

/**
 * This function ought to be a void function, but cannot be because
 * it gets turned into a tail-call on sparc, which runs into dtrace bug
 * 6254741.  Once that is fixed we can remove the dummy return value.
 */
int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
  return dtrace_object_alloc_base(Thread::current(), o);
}

int SharedRuntime::dtrace_object_alloc_base(Thread* thread, oopDesc* o) {
  assert(DTraceAllocProbes, "wrong call");
  Klass* klass = o->blueprint();
  int size = o->size();
  symbolOop name = klass->name();
  HS_DTRACE_PROBE4(hotspot, object__alloc, get_java_tid(thread),
                   name->bytes(), name->utf8_length(), size * HeapWordSize);
  return 0;
}

JRT_LEAF(int, SharedRuntime::dtrace_method_entry(
    JavaThread* thread, methodOopDesc* method))
  assert(DTraceMethodProbes, "wrong call");
  symbolOop kname = method->klass_name();
  symbolOop name  = method->name();
  symbolOop sig   = method->signature();
  HS_DTRACE_PROBE7(hotspot, method__entry, get_java_tid(thread),
                   kname->bytes(), kname->utf8_length(),
                   name->bytes(), name->utf8_length(),
                   sig->bytes(), sig->utf8_length());
  return 0;
JRT_END

JRT_LEAF(int, SharedRuntime::dtrace_method_exit(
    JavaThread* thread, methodOopDesc* method))
  assert(DTraceMethodProbes, "wrong call");
  symbolOop kname = method->klass_name();
  symbolOop name  = method->name();
  symbolOop sig   = method->signature();
  HS_DTRACE_PROBE7(hotspot, method__return, get_java_tid(thread),
                   kname->bytes(), kname->utf8_length(),
                   name->bytes(), name->utf8_length(),
                   sig->bytes(), sig->utf8_length());
  return 0;
JRT_END


// Finds the receiver, the CallInfo (i.e. receiver method), and the calling
// bytecode for a call currently in progress, i.e., the arguments have been
// pushed on the stack but the callee has not been invoked yet.  Used by:
// resolve virtual/static, vtable updates, etc.  Caller frame must be compiled.
Handle SharedRuntime::find_callee_info(JavaThread* thread, Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) {
  ResourceMark rm(THREAD);

  // last java frame on stack (which includes native call frames)
  vframeStream vfst(thread, true);  // Do not skip any javaCalls

  return find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(Handle()));
}


// Finds the receiver, the CallInfo (i.e. receiver method), and the calling
// bytecode for a call currently in progress, i.e., the arguments have been
// pushed on the stack but the callee has not been invoked yet.  Caller frame
// must be compiled.
Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
                                              vframeStream& vfst,
                                              Bytecodes::Code& bc,
                                              CallInfo& callinfo, TRAPS) {
  Handle receiver;
  Handle nullHandle;  // create a handy null handle for exception returns

  assert(!vfst.at_end(), "Java frame must exist");

  // Find caller and bci from vframe
  methodHandle caller(THREAD, vfst.method());
  int bci = vfst.bci();

  // Find bytecode
  Bytecode_invoke* bytecode = Bytecode_invoke_at(caller, bci);
  bc = bytecode->adjusted_invoke_code();
  int bytecode_index = bytecode->index();

  // Find receiver for non-static call
  if (bc != Bytecodes::_invokestatic) {
    // This register map must be updated since we need to find the receiver for
    // compiled frames.  The receiver might be in a register.
    RegisterMap reg_map2(thread);
    frame stubFrame = thread->last_frame();
    // Caller-frame is a compiled frame
    frame callerFrame = stubFrame.sender(&reg_map2);

    methodHandle callee = bytecode->static_target(CHECK_(nullHandle));
    if (callee.is_null()) {
      THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
    }
    // Retrieve from a compiled argument list
    receiver = Handle(THREAD, callerFrame.retrieve_receiver(&reg_map2));

    if (receiver.is_null()) {
      THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
    }
  }

  // Resolve method.  This is parameterized by bytecode.
  constantPoolHandle constants(THREAD, caller->constants());
  assert(receiver.is_null() || receiver->is_oop(), "wrong receiver");
  LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_(nullHandle));

#ifdef ASSERT
  // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
  if (bc != Bytecodes::_invokestatic) {
    assert(receiver.not_null(), "should have thrown exception");
    KlassHandle receiver_klass(THREAD, receiver->klass());
    klassOop rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
    // klass is already loaded
    KlassHandle static_receiver_klass(THREAD, rk);
    assert(receiver_klass->is_subtype_of(static_receiver_klass()), "actual receiver must be subclass of static receiver klass");
    if (receiver_klass->oop_is_instance()) {
      if (instanceKlass::cast(receiver_klass())->is_not_initialized()) {
        tty->print_cr("ERROR: Klass not yet initialized!!");
        receiver_klass.print();
      }
      assert(!instanceKlass::cast(receiver_klass())->is_not_initialized(), "receiver_klass must be initialized");
    }
  }
#endif

  return receiver;
}

methodHandle SharedRuntime::find_callee_method(JavaThread* thread, TRAPS) {
  ResourceMark rm(THREAD);
  // We first need to check if any Java activations (compiled, interpreted)
  // exist on the stack since the last JavaCall.  If not, we need
  // to get the target method from the JavaCall wrapper.
  vframeStream vfst(thread, true);  // Do not skip any javaCalls
  methodHandle callee_method;
  if (vfst.at_end()) {
    // No Java frames were found on the stack since we did the JavaCall.
    // Hence the stack can only contain an entry_frame.  We need to
    // find the target method from the stub frame.
    RegisterMap reg_map(thread, false);
    frame fr = thread->last_frame();
    assert(fr.is_runtime_frame(), "must be a runtimeStub");
    fr = fr.sender(&reg_map);
    assert(fr.is_entry_frame(), "must be");
    // fr is now pointing to the entry frame.
    callee_method = methodHandle(THREAD, fr.entry_frame_call_wrapper()->callee_method());
    assert(fr.entry_frame_call_wrapper()->receiver() == NULL || !callee_method->is_static(), "non-null receiver for static call??");
  } else {
    Bytecodes::Code bc;
    CallInfo callinfo;
    find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(methodHandle()));
    callee_method = callinfo.selected_method();
  }
  assert(callee_method()->is_method(), "must be");
  return callee_method;
}

// Resolves a call.
methodHandle SharedRuntime::resolve_helper(JavaThread *thread,
                                           bool is_virtual,
                                           bool is_optimized, TRAPS) {
  methodHandle callee_method;
  callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
  if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
    int retry_count = 0;
    while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
           callee_method->method_holder() != SystemDictionary::object_klass()) {
      // If there is a pending exception then there is no need to re-try to
      // resolve this method.
      // If the method has been redefined, we need to try again.
      // Hack: we have no way to update the vtables of arrays, so don't
      // require that java.lang.Object has been updated.

      // It is very unlikely that a method is redefined more than 100 times
      // in the middle of resolve.  If it loops here more than 100 times,
      // there could be a bug.
      guarantee((retry_count++ < 100),
                "Could not resolve to latest version of redefined method");
      // method is redefined in the middle of resolve, so re-try.
      callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
    }
  }
  return callee_method;
}

// Resolves a call.  The compilers generate code for calls that go here
// and are patched with the real destination of the call.
methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
                                               bool is_virtual,
                                               bool is_optimized, TRAPS) {

  ResourceMark rm(thread);
  RegisterMap cbl_map(thread, false);
  frame caller_frame = thread->last_frame().sender(&cbl_map);

  CodeBlob* cb = caller_frame.cb();
  guarantee(cb != NULL && cb->is_nmethod(), "must be called from nmethod");
  // make sure caller is not getting deoptimized
  // and removed before we are done with it.
  // CLEANUP - with lazy deopt shouldn't need this lock
  nmethodLocker caller_lock((nmethod*)cb);


  // determine call info & receiver
  // note: a) receiver is NULL for static calls
  //       b) an exception is thrown if receiver is NULL for non-static calls
  CallInfo call_info;
  Bytecodes::Code invoke_code = Bytecodes::_illegal;
  Handle receiver = find_callee_info(thread, invoke_code,
                                     call_info, CHECK_(methodHandle()));
  methodHandle callee_method = call_info.selected_method();

  assert((!is_virtual && invoke_code == Bytecodes::_invokestatic) ||
         ( is_virtual && invoke_code != Bytecodes::_invokestatic), "inconsistent bytecode");

#ifndef PRODUCT
  // tracing/debugging/statistics
  int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
              (is_virtual)   ? (&_resolve_virtual_ctr) :
                               (&_resolve_static_ctr);
  Atomic::inc(addr);

  if (TraceCallFixup) {
    ResourceMark rm(thread);
    tty->print("resolving %s%s (%s) call to",
               (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
               Bytecodes::name(invoke_code));
    callee_method->print_short_name(tty);
    tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  }
#endif

  // Compute entry points.  This might require generation of C2I converter
  // frames, so we cannot be holding any locks here.  Furthermore, the
  // computation of the entry points is independent of patching the call.  We
  // always return the entry-point, but we only patch the stub if the call has
  // not been deoptimized.  Return values: For a virtual call this is a
  // (cached_oop, destination address) pair.  For a static call/optimized
  // virtual this is just a destination address.

  StaticCallInfo static_call_info;
  CompiledICInfo virtual_call_info;


  // Make sure the callee nmethod does not get deoptimized and removed before
  // we are done patching the code.
  nmethod* nm = callee_method->code();
  nmethodLocker nl_callee(nm);
#ifdef ASSERT
  address dest_entry_point = nm == NULL ? 0 : nm->entry_point();  // used below
#endif

  if (is_virtual) {
    assert(receiver.not_null(), "sanity check");
    bool static_bound = call_info.resolved_method()->can_be_statically_bound();
    KlassHandle h_klass(THREAD, receiver->klass());
    CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
                                          is_optimized, static_bound, virtual_call_info,
                                          CHECK_(methodHandle()));
  } else {
    // static call
    CompiledStaticCall::compute_entry(callee_method, static_call_info);
  }

  // grab lock, check for deoptimization and potentially patch caller
  {
    MutexLocker ml_patch(CompiledIC_lock);

    // Now that we are ready to patch: if the methodOop was redefined, then
    // don't update the call site; let the caller retry.

    if (!callee_method->is_old()) {
#ifdef ASSERT
      // We must not try to patch to jump to an already unloaded method.
      if (dest_entry_point != 0) {
        assert(CodeCache::find_blob(dest_entry_point) != NULL,
               "should not unload nmethod while locked");
      }
#endif
      if (is_virtual) {
        CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
        if (inline_cache->is_clean()) {
          inline_cache->set_to_monomorphic(virtual_call_info);
        }
      } else {
        CompiledStaticCall* ssc = compiledStaticCall_before(caller_frame.pc());
        if (ssc->is_clean()) ssc->set(static_call_info);
      }
    }

  } // unlock CompiledIC_lock

  return callee_method;
}


// Inline caches exist only in compiled code
JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread))
#ifdef ASSERT
  RegisterMap reg_map(thread, false);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame(), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
#endif /* ASSERT */

  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::handle_ic_miss_helper(thread, CHECK_NULL);
    // Return methodOop through TLS
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, "Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END


// Handle a call site that has been made non-entrant
JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread))
  // 6243940 We might end up in here if the callee is deoptimized
  // as we race to call it.  We don't want to take a safepoint if
  // the caller was interpreted, because the caller frame will look
  // interpreted to the stack walkers and the arguments are now
  // "compiled", so it is much better to make this transition
  // invisible to the stack walking code.  The i2c path will
  // place the callee method in the callee_target.  It is stashed
  // there because if we tried to find the callee by normal means a
  // safepoint would be possible and we would have trouble gc'ing the
  // compiled args.
  RegisterMap reg_map(thread, false);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame(), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  if (caller_frame.is_interpreted_frame() || caller_frame.is_entry_frame()) {
    methodOop callee = thread->callee_target();
    guarantee(callee != NULL && callee->is_method(), "bad handshake");
    thread->set_vm_result(callee);
    thread->set_callee_target(NULL);
    return callee->get_c2i_entry();
  }

  // Must be compiled-to-compiled path, which is safe to stackwalk
  methodHandle callee_method;
  JRT_BLOCK
    // Force resolving of caller (if we called from compiled frame)
    callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_NULL);
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, "Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END


// resolve a static call and patch code
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(thread, false, false, CHECK_NULL);
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, "Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END


// resolve virtual call and update inline cache to monomorphic
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread *thread))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(thread, true, false, CHECK_NULL);
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, "Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END


// Resolve a virtual call that can be statically bound (e.g., always
// monomorphic, so it has no inline cache).  Patch code to resolved target.
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL);
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, "Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END


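// Handle an inline-cache miss: re-resolve the call and, under
// CompiledIC_lock, either point the cache at a monomorphic target or
// transition it to the megamorphic state.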
methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
  ResourceMark rm(thread);
  CallInfo call_info;
  Bytecodes::Code bc;

  // receiver is NULL for static calls.  An exception is thrown for NULL
  // receivers for non-static calls
  Handle receiver = find_callee_info(thread, bc, call_info,
                                     CHECK_(methodHandle()));
  // Compiler1 can produce virtual call sites that can actually be statically bound.
  // If we fell through to below, we would think that the site was going megamorphic
  // when in fact the site can never miss.  Worse, because we'd think it was megamorphic
  // we'd try to do a vtable dispatch; however, methods that can be statically bound
  // don't have vtable entries (vtable_index < 0) and we'd blow up.  So we force a
  // reresolution of the call site (as if we did a handle_wrong_method and not a
  // plain ic_miss) and the site will be converted to an optimized virtual call site
  // never to miss again.  I don't believe C2 will produce code like this, but if it
  // did this would still be the correct thing to do for it too, hence no ifdef.
  //
  if (call_info.resolved_method()->can_be_statically_bound()) {
    methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_(methodHandle()));
    if (TraceCallFixup) {
      RegisterMap reg_map(thread, false);
      frame caller_frame = thread->last_frame().sender(&reg_map);
      ResourceMark rm(thread);
      tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
      callee_method->print_short_name(tty);
      tty->print_cr(" from pc: " INTPTR_FORMAT, caller_frame.pc());
      tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
    }
    return callee_method;
  }

  methodHandle callee_method = call_info.selected_method();

  bool should_be_mono = false;

#ifndef PRODUCT
  Atomic::inc(&_ic_miss_ctr);

  // Statistics & Tracing
  if (TraceCallFixup) {
    ResourceMark rm(thread);
    tty->print("IC miss (%s) call to", Bytecodes::name(bc));
    callee_method->print_short_name(tty);
    tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  }

  if (ICMissHistogram) {
    MutexLocker m(VMStatistic_lock);
    RegisterMap reg_map(thread, false);
    frame f = thread->last_frame().real_sender(&reg_map);  // skip runtime stub
    // produce statistics under the lock
    trace_ic_miss(f.pc());
  }
#endif

  // install an event collector so that when a vtable stub is created the
  // profiler can be notified via a DYNAMIC_CODE_GENERATED event.  The
  // event can't be posted when the stub is created as locks are held
  // - instead the event will be deferred until the event collector goes
  // out of scope.
  JvmtiDynamicCodeEventCollector event_collector;

  // Update inline cache to megamorphic.  Skip update if caller has been
  // made non-entrant or we are called from interpreted.
  { MutexLocker ml_patch(CompiledIC_lock);
    RegisterMap reg_map(thread, false);
    frame caller_frame = thread->last_frame().sender(&reg_map);
    CodeBlob* cb = caller_frame.cb();
    if (cb->is_nmethod() && ((nmethod*)cb)->is_in_use()) {
      // Not a non-entrant nmethod, so find inline_cache
      CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
      bool should_be_mono = false;
      if (inline_cache->is_optimized()) {
        if (TraceCallFixup) {
          ResourceMark rm(thread);
          tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
          callee_method->print_short_name(tty);
          tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
        }
        should_be_mono = true;
      } else {
        compiledICHolderOop ic_oop = (compiledICHolderOop) inline_cache->cached_oop();
        if (ic_oop != NULL && ic_oop->is_compiledICHolder()) {

          if (receiver()->klass() == ic_oop->holder_klass()) {
            // This isn't a real miss.  We must have seen that compiled code
            // is now available and we want the call site converted to a
            // monomorphic compiled call site.
            // We can't assert for callee_method->code() != NULL because it
            // could have been deoptimized in the meantime
            if (TraceCallFixup) {
              ResourceMark rm(thread);
              tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
              callee_method->print_short_name(tty);
              tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
            }
            should_be_mono = true;
          }
        }
      }

      if (should_be_mono) {

        // We have a path that was monomorphic but was going interpreted
        // and now we have (or had) a compiled entry.  We correct the IC
        // by using a new icBuffer.
        CompiledICInfo info;
        KlassHandle receiver_klass(THREAD, receiver()->klass());
        inline_cache->compute_monomorphic_entry(callee_method,
                                                receiver_klass,
                                                inline_cache->is_optimized(),
                                                false,
                                                info, CHECK_(methodHandle()));
        inline_cache->set_to_monomorphic(info);
      } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
        // Change to megamorphic
        inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
      } else {
        // Either clean or megamorphic
      }
    }
  } // Release CompiledIC_lock

  return callee_method;
}

1255 // | |
1256 // Resets a call-site in compiled code so it will get resolved again. | |
1257 // This routine handles virtual call sites, optimized virtual call | |
1258 // sites, and static call sites. Typically used to change a call site's | |
1259 // destination from compiled to interpreted. | |
1260 // | |
1261 methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) { | |
1262 ResourceMark rm(thread); | |
1263 RegisterMap reg_map(thread, false); | |
1264 frame stub_frame = thread->last_frame(); | |
1265 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub"); | |
1266 frame caller = stub_frame.sender(&reg_map); | |
1267 | |
1268 // Do nothing if the frame isn't a live compiled frame. | |
1269 // nmethod could be deoptimized by the time we get here | |
1270 // so no update to the caller is needed. | |
1271 | |
1272 if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) { | |
1273 | |
1274 address pc = caller.pc(); | |
1275 Events::log("update call-site at pc " INTPTR_FORMAT, pc); | |
1276 | |
1277 // Default call_addr is the location of the "basic" call. | |
1278 // Determine the address of the call we are reresolving. With | |
1279 // Inline Caches we will always find a recognizable call. | |
1280 // With Inline Caches disabled we may or may not find a | |
1281 // recognizable call. We will always find a call for static | |
1282 // calls and for optimized virtual calls. For vanilla virtual | |
1283 // calls it depends on the state of the UseInlineCaches switch. | |
1284 // | |
1285 // With Inline Caches disabled we can get here for a virtual call | |
1286 // for two reasons: | |
1287 // 1 - calling an abstract method. The vtable for abstract methods | |
1288 // will run us thru handle_wrong_method and we will eventually | |
1289 // end up in the interpreter to throw the AbstractMethodError. | |
1290 // 2 - a racing deoptimization. We could be doing a vanilla vtable | |
1291 // call and between the time we fetch the entry address and | |
1292 // we jump to it the target gets deoptimized. Similar to 1 | |
1293 // we will wind up in the interpreter (thru a c2i with c2). | |
1294 // | |
1295 address call_addr = NULL; | |
1296 { | |
1297 // Get call instruction under lock because another thread may be | |
1298 // busy patching it. | |
1299 MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag); | |
1300 // Location of call instruction | |
1301 if (NativeCall::is_call_before(pc)) { | |
1302 NativeCall *ncall = nativeCall_before(pc); | |
1303 call_addr = ncall->instruction_address(); | |
1304 } | |
1305 } | |
1306 | |
1307 // Check for static or virtual call | |
1308 bool is_static_call = false; | |
1309 nmethod* caller_nm = CodeCache::find_nmethod(pc); | |
1310 // Make sure nmethod doesn't get deoptimized and removed until | |
1311 // this is done with it. | |
1312 // CLEANUP - with lazy deopt shouldn't need this lock | |
1313 nmethodLocker nmlock(caller_nm); | |
1314 | |
1315 if (call_addr != NULL) { | |
1316 RelocIterator iter(caller_nm, call_addr, call_addr+1); | |
1317 int ret = iter.next(); // Get item | |
1318 if (ret) { | |
1319 assert(iter.addr() == call_addr, "must find call"); | |
1320 if (iter.type() == relocInfo::static_call_type) { | |
1321 is_static_call = true; | |
1322 } else { | |
1323 assert(iter.type() == relocInfo::virtual_call_type || | |
1324 iter.type() == relocInfo::opt_virtual_call_type | |
1325 , "unexpected relocInfo. type"); | |
1326 } | |
1327 } else { | |
1328 assert(!UseInlineCaches, "relocation info must exist for this address"); | |
1329 } | |
1330 | |
1331 // Cleaning the inline cache will force a new resolve. This is more robust | |
1332 // than directly setting it to the new destination, since resolving of calls | |
1333 // is always done through the same code path. (Experience shows that it | |
1334 // leads to very-hard-to-track-down bugs if an inline cache gets updated | |
1335 // to a wrong method.) It should not be performance critical, since the | |
1336 // resolve is only done once. | |
1337 | |
1338 MutexLocker ml(CompiledIC_lock); | |
1339 // | |
1340 // We do not patch the call site if the nmethod has been made non-entrant | |
1341 // as it is a waste of time | |
1342 // | |
1343 if (caller_nm->is_in_use()) { | |
1344 if (is_static_call) { | |
1345 CompiledStaticCall* ssc= compiledStaticCall_at(call_addr); | |
1346 ssc->set_to_clean(); | |
1347 } else { | |
1348 // compiled, dispatched call (which used to call an interpreted method) | |
1349 CompiledIC* inline_cache = CompiledIC_at(call_addr); | |
1350 inline_cache->set_to_clean(); | |
1351 } | |
1352 } | |
1353 } | |
1354 | |
1355 } | |
1356 | |
1357 methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle())); | |
1358 | |
1359 | |
1360 #ifndef PRODUCT | |
1361 Atomic::inc(&_wrong_method_ctr); | |
1362 | |
1363 if (TraceCallFixup) { | |
1364 ResourceMark rm(thread); | |
1365 tty->print("handle_wrong_method reresolving call to"); | |
1366 callee_method->print_short_name(tty); | |
1367 tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code()); | |
1368 } | |
1369 #endif | |
1370 | |
1371 return callee_method; | |
1372 } | |
1373 | |
1374 // --------------------------------------------------------------------------- | |
1375 // We are calling the interpreter via a c2i. Normally this would mean that | |
1376 // we were called by a compiled method. However we could have lost a race | |
1377 // where we went int -> i2c -> c2i and so the caller could in fact be | |
1378 // interpreted. If the caller is compiled we attempt to patch the caller | |
1379 // so it no longer calls into the interpreter. | |
1380 IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(methodOopDesc* method, address caller_pc)) | |
1381 methodOop moop(method); | |
1382 | |
1383 address entry_point = moop->from_compiled_entry(); | |
1384 | |
1385 // It's possible that deoptimization can occur at a call site which hasn't | |
1386 // been resolved yet, in which case this function will be called from | |
1387 // an nmethod that has been patched for deopt and we can ignore the | |
1388 // request for a fixup. | |
1389 // Also it is possible that we lost a race where from_compiled_entry | |
1390 // is now back to the i2c entry. In that case we don't need to patch, and if | |
1391 // we did we'd leap into space because the call site needs to use the | |
1392 // "to interpreter" stub in order to load up the methodOop. Don't | |
1393 // ask me how I know this... | |
1394 // | |
1395 | |
1396 CodeBlob* cb = CodeCache::find_blob(caller_pc); | |
1397 if ( !cb->is_nmethod() || entry_point == moop->get_c2i_entry()) { | |
1398 return; | |
1399 } | |
1400 | |
1401 // There is a benign race here. We could be attempting to patch to a compiled | |
1402 // entry point at the same time the callee is being deoptimized. If that is | |
1403 // the case then entry_point may in fact point to a c2i and we'd patch the | |
1404 // call site with the same old data. clear_code will set code() to NULL | |
1405 // at the end of it. If we happen to see that NULL then we can skip trying | |
1406 // to patch. If we hit the window where the callee has a c2i in the | |
1407 // from_compiled_entry and the NULL isn't present yet then we lose the race | |
1408 // and patch the code with the same old data. Asi es la vida. | |
1409 | |
1410 if (moop->code() == NULL) return; | |
1411 | |
1412 if (((nmethod*)cb)->is_in_use()) { | |
1413 | |
1414 // Expect to find a native call there (unless it was a no-inline-cache vtable dispatch) | |
1415 MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag); | |
1416 if (NativeCall::is_call_before(caller_pc + frame::pc_return_offset)) { | |
1417 NativeCall *call = nativeCall_before(caller_pc + frame::pc_return_offset); | |
1418 // | |
1419 // bug 6281185. We might get here after resolving a call site to a vanilla | |
1420 // virtual call. Because the resolvee uses the verified entry it may then | |
1421 // see compiled code and attempt to patch the site by calling us. This would | |
1422 // then incorrectly convert the call site to optimized and it's downhill from | |
1423 // there. If you're lucky you'll get the assert in the bugid, if not you've | |
1424 // just made a call site that could be megamorphic into a monomorphic site | |
1425 // for the rest of its life! Just another racing bug in the life of | |
1426 // fixup_callers_callsite ... | |
1427 // | |
1428 RelocIterator iter(cb, call->instruction_address(), call->next_instruction_address()); | |
1429 iter.next(); | |
1430 assert(iter.has_current(), "must have a reloc at java call site"); | |
1431 relocInfo::relocType typ = iter.reloc()->type(); | |
1432 if ( typ != relocInfo::static_call_type && | |
1433 typ != relocInfo::opt_virtual_call_type && | |
1434 typ != relocInfo::static_stub_type) { | |
1435 return; | |
1436 } | |
1437 address destination = call->destination(); | |
1438 if (destination != entry_point) { | |
1439 CodeBlob* callee = CodeCache::find_blob(destination); | |
1440 // callee == cb seems weird. It means calling the interpreter thru a stub. | |
1441 if (callee == cb || callee->is_adapter_blob()) { | |
1442 // static call or optimized virtual | |
1443 if (TraceCallFixup) { | |
1444 tty->print("fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc); | |
1445 moop->print_short_name(tty); | |
1446 tty->print_cr(" to " INTPTR_FORMAT, entry_point); | |
1447 } | |
1448 call->set_destination_mt_safe(entry_point); | |
1449 } else { | |
1450 if (TraceCallFixup) { | |
1451 tty->print("failed to fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc); | |
1452 moop->print_short_name(tty); | |
1453 tty->print_cr(" to " INTPTR_FORMAT, entry_point); | |
1454 } | |
1455 // The assert below is too strong: the destination could also be a resolve stub. | |
1456 // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be"); | |
1457 } | |
1458 } else { | |
1459 if (TraceCallFixup) { | |
1460 tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", caller_pc); | |
1461 moop->print_short_name(tty); | |
1462 tty->print_cr(" to " INTPTR_FORMAT, entry_point); | |
1463 } | |
1464 } | |
1465 } | |
1466 } | |
1467 | |
1468 IRT_END | |
1469 | |
1470 | |
1471 // same as JVM_Arraycopy, but called directly from compiled code | |
1472 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos, | |
1473 oopDesc* dest, jint dest_pos, | |
1474 jint length, | |
1475 JavaThread* thread)) { | |
1476 #ifndef PRODUCT | |
1477 _slow_array_copy_ctr++; | |
1478 #endif | |
1479 // Check if we have null pointers | |
1480 if (src == NULL || dest == NULL) { | |
1481 THROW(vmSymbols::java_lang_NullPointerException()); | |
1482 } | |
1483 // Do the copy. The casts to arrayOop are necessary to the copy_array API, | |
1484 // even though the copy_array API also performs dynamic checks to ensure | |
1485 // that src and dest are truly arrays (and are conformable). | |
1486 // The copy_array mechanism is awkward and could be removed, but | |
1487 // the compilers don't call this function except as a last resort, | |
1488 // so it probably doesn't matter. | |
1489 Klass::cast(src->klass())->copy_array((arrayOopDesc*)src, src_pos, | |
1490 (arrayOopDesc*)dest, dest_pos, | |
1491 length, thread); | |
1492 } | |
1493 JRT_END | |
1494 | |
1495 char* SharedRuntime::generate_class_cast_message( | |
1496 JavaThread* thread, const char* objName) { | |
1497 | |
1498 // Get target class name from the checkcast instruction | |
1499 vframeStream vfst(thread, true); | |
1500 assert(!vfst.at_end(), "Java frame must exist"); | |
1501 Bytecode_checkcast* cc = Bytecode_checkcast_at( | |
1502 vfst.method()->bcp_from(vfst.bci())); | |
1503 Klass* targetKlass = Klass::cast(vfst.method()->constants()->klass_at( | |
1504 cc->index(), thread)); | |
1505 return generate_class_cast_message(objName, targetKlass->external_name()); | |
1506 } | |
1507 | |
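// For example, objName "java.lang.String" and targetKlassName "java.lang.Integer"
// produce the message "java.lang.String cannot be cast to java.lang.Integer".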
1508 char* SharedRuntime::generate_class_cast_message( | |
1509 const char* objName, const char* targetKlassName) { | |
1510 const char* desc = " cannot be cast to "; | |
1511 size_t msglen = strlen(objName) + strlen(desc) + strlen(targetKlassName) + 1; | |
1512 | |
53 | 1513 char* message = NEW_RESOURCE_ARRAY(char, msglen); |
0 | 1514 if (NULL == message) { |
53 | 1515 // Shouldn't happen, but don't cause even more problems if it does |
0 | 1516 message = const_cast<char*>(objName); |
1517 } else { | |
1518 jio_snprintf(message, msglen, "%s%s%s", objName, desc, targetKlassName); | |
1519 } | |
1520 return message; | |
1521 } | |
1522 | |
1523 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages()) | |
1524 (void) JavaThread::current()->reguard_stack(); | |
1525 JRT_END | |
1526 | |
1527 | |
1528 // Handles the uncommon case in locking, i.e., contention or an inflated lock. | |
1529 #ifndef PRODUCT | |
1530 int SharedRuntime::_monitor_enter_ctr=0; | |
1531 #endif | |
1532 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::complete_monitor_locking_C(oopDesc* _obj, BasicLock* lock, JavaThread* thread)) | |
1533 oop obj(_obj); | |
1534 #ifndef PRODUCT | |
1535 _monitor_enter_ctr++; // monitor enter slow | |
1536 #endif | |
1537 if (PrintBiasedLockingStatistics) { | |
1538 Atomic::inc(BiasedLocking::slow_path_entry_count_addr()); | |
1539 } | |
1540 Handle h_obj(THREAD, obj); | |
1541 if (UseBiasedLocking) { | |
1542 // Retry fast entry if bias is revoked to avoid unnecessary inflation | |
1543 ObjectSynchronizer::fast_enter(h_obj, lock, true, CHECK); | |
1544 } else { | |
1545 ObjectSynchronizer::slow_enter(h_obj, lock, CHECK); | |
1546 } | |
1547 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here"); | |
1548 JRT_END | |
1549 | |
1550 #ifndef PRODUCT | |
1551 int SharedRuntime::_monitor_exit_ctr=0; | |
1552 #endif | |
1553 // Handles the uncommon cases of monitor unlocking in compiled code | |
1554 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* _obj, BasicLock* lock)) | |
1555 oop obj(_obj); | |
1556 #ifndef PRODUCT | |
1557 _monitor_exit_ctr++; // monitor exit slow | |
1558 #endif | |
1559 Thread* THREAD = JavaThread::current(); | |
1560 // I'm not convinced we need the code contained by MIGHT_HAVE_PENDING anymore; | |
1561 // testing was never able to fire the assert that guarded it, so I have removed it. | |
1562 assert(!HAS_PENDING_EXCEPTION, "Do we need code below anymore?"); | |
1563 #undef MIGHT_HAVE_PENDING | |
1564 #ifdef MIGHT_HAVE_PENDING | |
1565 // Save and restore any pending_exception around the exception mark. | |
1566 // While the slow_exit must not throw an exception, we could come into | |
1567 // this routine with one set. | |
1568 oop pending_excep = NULL; | |
1569 const char* pending_file; | |
1570 int pending_line; | |
1571 if (HAS_PENDING_EXCEPTION) { | |
1572 pending_excep = PENDING_EXCEPTION; | |
1573 pending_file = THREAD->exception_file(); | |
1574 pending_line = THREAD->exception_line(); | |
1575 CLEAR_PENDING_EXCEPTION; | |
1576 } | |
1577 #endif /* MIGHT_HAVE_PENDING */ | |
1578 | |
1579 { | |
1580 // Exit must be non-blocking, and therefore no exceptions can be thrown. | |
1581 EXCEPTION_MARK; | |
1582 ObjectSynchronizer::slow_exit(obj, lock, THREAD); | |
1583 } | |
1584 | |
1585 #ifdef MIGHT_HAVE_PENDING | |
1586 if (pending_excep != NULL) { | |
1587 THREAD->set_pending_exception(pending_excep, pending_file, pending_line); | |
1588 } | |
1589 #endif /* MIGHT_HAVE_PENDING */ | |
1590 JRT_END | |
1591 | |
1592 #ifndef PRODUCT | |
1593 | |
1594 void SharedRuntime::print_statistics() { | |
1595 ttyLocker ttyl; | |
1596 if (xtty != NULL) xtty->head("statistics type='SharedRuntime'"); | |
1597 | |
1598 if (_monitor_enter_ctr ) tty->print_cr("%5d monitor enter slow", _monitor_enter_ctr); | |
1599 if (_monitor_exit_ctr ) tty->print_cr("%5d monitor exit slow", _monitor_exit_ctr); | |
1600 if (_throw_null_ctr) tty->print_cr("%5d implicit null throw", _throw_null_ctr); | |
1601 | |
1602 SharedRuntime::print_ic_miss_histogram(); | |
1603 | |
1604 if (CountRemovableExceptions) { | |
1605 if (_nof_removable_exceptions > 0) { | |
1606 Unimplemented(); // this counter is not yet incremented | |
1607 tty->print_cr("Removable exceptions: %d", _nof_removable_exceptions); | |
1608 } | |
1609 } | |
1610 | |
1611 // Dump the JRT_ENTRY counters | |
1612 if( _new_instance_ctr ) tty->print_cr("%5d new instance requires GC", _new_instance_ctr); | |
1613 if( _new_array_ctr ) tty->print_cr("%5d new array requires GC", _new_array_ctr); | |
1614 if( _multi1_ctr ) tty->print_cr("%5d multianewarray 1 dim", _multi1_ctr); | |
1615 if( _multi2_ctr ) tty->print_cr("%5d multianewarray 2 dim", _multi2_ctr); | |
1616 if( _multi3_ctr ) tty->print_cr("%5d multianewarray 3 dim", _multi3_ctr); | |
1617 if( _multi4_ctr ) tty->print_cr("%5d multianewarray 4 dim", _multi4_ctr); | |
1618 if( _multi5_ctr ) tty->print_cr("%5d multianewarray 5 dim", _multi5_ctr); | |
1619 | |
1620 tty->print_cr("%5d inline cache miss in compiled", _ic_miss_ctr ); | |
1621 tty->print_cr("%5d wrong method", _wrong_method_ctr ); | |
1622 tty->print_cr("%5d unresolved static call site", _resolve_static_ctr ); | |
1623 tty->print_cr("%5d unresolved virtual call site", _resolve_virtual_ctr ); | |
1624 tty->print_cr("%5d unresolved opt virtual call site", _resolve_opt_virtual_ctr ); | |
1625 | |
1626 if( _mon_enter_stub_ctr ) tty->print_cr("%5d monitor enter stub", _mon_enter_stub_ctr ); | |
1627 if( _mon_exit_stub_ctr ) tty->print_cr("%5d monitor exit stub", _mon_exit_stub_ctr ); | |
1628 if( _mon_enter_ctr ) tty->print_cr("%5d monitor enter slow", _mon_enter_ctr ); | |
1629 if( _mon_exit_ctr ) tty->print_cr("%5d monitor exit slow", _mon_exit_ctr ); | |
1630 if( _partial_subtype_ctr) tty->print_cr("%5d slow partial subtype", _partial_subtype_ctr ); | |
1631 if( _jbyte_array_copy_ctr ) tty->print_cr("%5d byte array copies", _jbyte_array_copy_ctr ); | |
1632 if( _jshort_array_copy_ctr ) tty->print_cr("%5d short array copies", _jshort_array_copy_ctr ); | |
1633 if( _jint_array_copy_ctr ) tty->print_cr("%5d int array copies", _jint_array_copy_ctr ); | |
1634 if( _jlong_array_copy_ctr ) tty->print_cr("%5d long array copies", _jlong_array_copy_ctr ); | |
1635 if( _oop_array_copy_ctr ) tty->print_cr("%5d oop array copies", _oop_array_copy_ctr ); | |
1636 if( _checkcast_array_copy_ctr ) tty->print_cr("%5d checkcast array copies", _checkcast_array_copy_ctr ); | |
1637 if( _unsafe_array_copy_ctr ) tty->print_cr("%5d unsafe array copies", _unsafe_array_copy_ctr ); | |
1638 if( _generic_array_copy_ctr ) tty->print_cr("%5d generic array copies", _generic_array_copy_ctr ); | |
1639 if( _slow_array_copy_ctr ) tty->print_cr("%5d slow array copies", _slow_array_copy_ctr ); | |
1640 if( _find_handler_ctr ) tty->print_cr("%5d find exception handler", _find_handler_ctr ); | |
1641 if( _rethrow_ctr ) tty->print_cr("%5d rethrow handler", _rethrow_ctr ); | |
1642 | |
1643 if (xtty != NULL) xtty->tail("statistics"); | |
1644 } | |
1645 | |
1646 inline double percent(int x, int y) { | |
1647 return 100.0 * x / MAX2(y, 1); | |
1648 } | |
1649 | |
1650 class MethodArityHistogram { | |
1651 public: | |
1652 enum { MAX_ARITY = 256 }; | |
1653 private: | |
1654 static int _arity_histogram[MAX_ARITY]; // histogram of #args | |
1655 static int _size_histogram[MAX_ARITY]; // histogram of arg size in words | |
1656 static int _max_arity; // max. arity seen | |
1657 static int _max_size; // max. arg size seen | |
1658 | |
1659 static void add_method_to_histogram(nmethod* nm) { | |
1660 methodOop m = nm->method(); | |
1661 ArgumentCount args(m->signature()); | |
1662 int arity = args.size() + (m->is_static() ? 0 : 1); | |
1663 int argsize = m->size_of_parameters(); | |
1664 arity = MIN2(arity, MAX_ARITY-1); | |
1665 argsize = MIN2(argsize, MAX_ARITY-1); | |
1666 int count = nm->method()->compiled_invocation_count(); | |
1667 _arity_histogram[arity] += count; | |
1668 _size_histogram[argsize] += count; | |
1669 _max_arity = MAX2(_max_arity, arity); | |
1670 _max_size = MAX2(_max_size, argsize); | |
1671 } | |
1672 | |
1673 void print_histogram_helper(int n, int* histo, const char* name) { | |
1674 const int N = MIN2(5, n); | |
1675 tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):"); | |
1676 double sum = 0; | |
1677 double weighted_sum = 0; | |
1678 int i; | |
1679 for (i = 0; i <= n; i++) { sum += histo[i]; weighted_sum += i*histo[i]; } | |
1680 double rest = sum; | |
1681 double percent = sum / 100; | |
1682 for (i = 0; i <= N; i++) { | |
1683 rest -= histo[i]; | |
1684 tty->print_cr("%4d: %7d (%5.1f%%)", i, histo[i], histo[i] / percent); | |
1685 } | |
1686 tty->print_cr("rest: %7d (%5.1f%%))", (int)rest, rest / percent); | |
1687 tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n); | |
1688 } | |
1689 | |
1690 void print_histogram() { | |
1691 tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):"); | |
1692 print_histogram_helper(_max_arity, _arity_histogram, "arity"); | |
1693 tty->print_cr("\nSame for parameter size (in words):"); | |
1694 print_histogram_helper(_max_size, _size_histogram, "size"); | |
1695 tty->cr(); | |
1696 } | |
1697 | |
1698 public: | |
1699 MethodArityHistogram() { | |
1700 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); | |
1701 _max_arity = _max_size = 0; | |
1702 for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram [i] = 0; | |
1703 CodeCache::nmethods_do(add_method_to_histogram); | |
1704 print_histogram(); | |
1705 } | |
1706 }; | |
1707 | |
1708 int MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY]; | |
1709 int MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY]; | |
1710 int MethodArityHistogram::_max_arity; | |
1711 int MethodArityHistogram::_max_size; | |
1712 | |
1713 void SharedRuntime::print_call_statistics(int comp_total) { | |
1714 tty->print_cr("Calls from compiled code:"); | |
1715 int total = _nof_normal_calls + _nof_interface_calls + _nof_static_calls; | |
1716 int mono_c = _nof_normal_calls - _nof_optimized_calls - _nof_megamorphic_calls; | |
1717 int mono_i = _nof_interface_calls - _nof_optimized_interface_calls - _nof_megamorphic_interface_calls; | |
1718 tty->print_cr("\t%9d (%4.1f%%) total non-inlined ", total, percent(total, total)); | |
1719 tty->print_cr("\t%9d (%4.1f%%) virtual calls ", _nof_normal_calls, percent(_nof_normal_calls, total)); | |
1720 tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls)); | |
1721 tty->print_cr("\t %9d (%3.0f%%) optimized ", _nof_optimized_calls, percent(_nof_optimized_calls, _nof_normal_calls)); | |
1722 tty->print_cr("\t %9d (%3.0f%%) monomorphic ", mono_c, percent(mono_c, _nof_normal_calls)); | |
1723 tty->print_cr("\t %9d (%3.0f%%) megamorphic ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls)); | |
1724 tty->print_cr("\t%9d (%4.1f%%) interface calls ", _nof_interface_calls, percent(_nof_interface_calls, total)); | |
1725 tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls)); | |
1726 tty->print_cr("\t %9d (%3.0f%%) optimized ", _nof_optimized_interface_calls, percent(_nof_optimized_interface_calls, _nof_interface_calls)); | |
1727 tty->print_cr("\t %9d (%3.0f%%) monomorphic ", mono_i, percent(mono_i, _nof_interface_calls)); | |
1728 tty->print_cr("\t %9d (%3.0f%%) megamorphic ", _nof_megamorphic_interface_calls, percent(_nof_megamorphic_interface_calls, _nof_interface_calls)); | |
1729 tty->print_cr("\t%9d (%4.1f%%) static/special calls", _nof_static_calls, percent(_nof_static_calls, total)); | |
1730 tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls)); | |
1731 tty->cr(); | |
1732 tty->print_cr("Note 1: counter updates are not MT-safe."); | |
1733 tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;"); | |
1734 tty->print_cr(" %% in nested categories are relative to their category"); | |
1735 tty->print_cr(" (and thus add up to more than 100%% with inlining)"); | |
1736 tty->cr(); | |
1737 | |
1738 MethodArityHistogram h; | |
1739 } | |
1740 #endif | |
1741 | |
1742 | |
1743 // --------------------------------------------------------------------------- | |
1744 // Implementation of AdapterHandlerLibrary | |
1745 const char* AdapterHandlerEntry::name = "I2C/C2I adapters"; | |
1746 GrowableArray<uint64_t>* AdapterHandlerLibrary::_fingerprints = NULL; | |
1747 GrowableArray<AdapterHandlerEntry* >* AdapterHandlerLibrary::_handlers = NULL; | |
1748 const int AdapterHandlerLibrary_size = 16*K; | |
1749 u_char AdapterHandlerLibrary::_buffer[AdapterHandlerLibrary_size + 32]; | |
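// The extra 32 bytes presumably leave slack so the buffer start can be
// rounded up to a CodeEntryAlignment boundary in get_create_adapter_index.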
1750 | |
1751 void AdapterHandlerLibrary::initialize() { | |
1752 if (_fingerprints != NULL) return; | |
1753 _fingerprints = new(ResourceObj::C_HEAP)GrowableArray<uint64_t>(32, true); | |
1754 _handlers = new(ResourceObj::C_HEAP)GrowableArray<AdapterHandlerEntry*>(32, true); | |
1755 // Index 0 reserved for the slow path handler | |
1756 _fingerprints->append(0/*the never-allowed 0 fingerprint*/); | |
1757 _handlers->append(NULL); | |
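// Index 0 doubles as the failure value: get_create_adapter_index returns 0
// when the CodeCache is full, and _handlers[0] stays NULL for that case.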
1758 | |
1759 // Create a special handler for abstract methods. Abstract methods | |
1760 // are never compiled so an i2c entry is somewhat meaningless, but | |
1761 // fill it in with something appropriate just in case. Pass the | |
1762 // handle-wrong-method stub for the c2i transitions. | |
1763 address wrong_method = SharedRuntime::get_handle_wrong_method_stub(); | |
1764 _fingerprints->append(0/*the never-allowed 0 fingerprint*/); | |
1765 assert(_handlers->length() == AbstractMethodHandler, "in wrong slot"); | |
1766 _handlers->append(new AdapterHandlerEntry(StubRoutines::throw_AbstractMethodError_entry(), | |
1767 wrong_method, wrong_method)); | |
1768 } | |
1769 | |
1770 int AdapterHandlerLibrary::get_create_adapter_index(methodHandle method) { | |
1771 // Use customized signature handler. Need to lock around updates to the | |
1772 // _fingerprints array (it is not safe for concurrent readers and a single | |
1773 // writer: this can be fixed if it becomes a problem). | |
1774 | |
1775 // Get the address of the ic_miss handlers before we grab the | |
1776 // AdapterHandlerLibrary_lock. This fixes bug 6236259 which | |
1777 // was caused by the initialization of the stubs happening | |
1778 // while we held the lock and then notifying jvmti while | |
1779 // holding it. This just forces the initialization to be a little | |
1780 // earlier. | |
1781 address ic_miss = SharedRuntime::get_ic_miss_stub(); | |
1782 assert(ic_miss != NULL, "must have handler"); | |
1783 | |
1784 int result; | |
1785 BufferBlob *B = NULL; | |
1786 uint64_t fingerprint; | |
1787 { | |
1788 MutexLocker mu(AdapterHandlerLibrary_lock); | |
1789 // make sure data structure is initialized | |
1790 initialize(); | |
1791 | |
1792 if (method->is_abstract()) { | |
1793 return AbstractMethodHandler; | |
1794 } | |
1795 | |
1796 // Lookup method signature's fingerprint | |
1797 fingerprint = Fingerprinter(method).fingerprint(); | |
1798 assert( fingerprint != CONST64( 0), "no zero fingerprints allowed" ); | |
1799 // Fingerprints are small fixed-size condensed representations of | |
1800 // signatures. If the signature is too large, it won't fit in a | |
1801 // fingerprint. Signatures which cannot support a fingerprint get a new i2c | |
1802 // adapter gen'd each time, instead of searching the cache for one. This -1 | |
1803 // game can be avoided if I compared signatures instead of using | |
1804 // fingerprints. However, -1 fingerprints are very rare. | |
1805 if( fingerprint != UCONST64(-1) ) { // If this is a cache-able fingerprint | |
1806 // Turns out i2c adapters do not care what the return value is. Mask it | |
1807 // out so signatures that only differ in return type will share the same | |
1808 // adapter. | |
1809 fingerprint &= ~(SignatureIterator::result_feature_mask << SignatureIterator::static_feature_size); | |
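// For example, (I)V and (I)I differ only in the result bits, so after the
// masking above they share one fingerprint and hence one adapter.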
1810 // Search for a prior existing i2c/c2i adapter | |
1811 int index = _fingerprints->find(fingerprint); | |
1812 if( index >= 0 ) return index; // Found existing handlers? | |
1813 } else { | |
1814 // Annoyingly, I end up adding -1 fingerprints to the array of handlers, | |
1815 // because I need a unique handler index. It cannot be scanned for | |
1816 // because all -1's look alike. Instead, the matching index is passed out | |
1817 // and immediately used to collect the 2 return values (the c2i and i2c | |
1818 // adapters). | |
1819 } | |
1820 | |
1821 // Create I2C & C2I handlers | |
1822 ResourceMark rm; | |
1823 // Improve alignment slightly | |
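// (rounds buf up to the next CodeEntryAlignment boundary; the mask trick
// assumes CodeEntryAlignment is a power of two)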
1824 u_char *buf = (u_char*)(((intptr_t)_buffer + CodeEntryAlignment-1) & ~(CodeEntryAlignment-1)); | |
1825 CodeBuffer buffer(buf, AdapterHandlerLibrary_size); | |
1826 short buffer_locs[20]; | |
1827 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs, | |
1828 sizeof(buffer_locs)/sizeof(relocInfo)); | |
1829 MacroAssembler _masm(&buffer); | |
1830 | |
1831 // Fill in the signature array, for the calling-convention call. | |
1832 int total_args_passed = method->size_of_parameters(); // All args on stack | |
1833 | |
1834 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType,total_args_passed); | |
1835 VMRegPair * regs = NEW_RESOURCE_ARRAY(VMRegPair ,total_args_passed); | |
1836 int i=0; | |
1837 if( !method->is_static() ) // Pass in receiver first | |
1838 sig_bt[i++] = T_OBJECT; | |
1839 for( SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) { | |
1840 sig_bt[i++] = ss.type(); // Collect remaining bits of signature | |
1841 if( ss.type() == T_LONG || ss.type() == T_DOUBLE ) | |
1842 sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots | |
1843 } | |
1844 assert( i==total_args_passed, "" ); | |
1845 | |
1846 // Now get the re-packed compiled-Java layout. | |
1847 int comp_args_on_stack; | |
1848 | |
1849 // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage | |
1850 comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false); | |
1851 | |
1852 AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm, | |
1853 total_args_passed, | |
1854 comp_args_on_stack, | |
1855 sig_bt, | |
1856 regs); | |
1857 | |
1858 B = BufferBlob::create(AdapterHandlerEntry::name, &buffer); | |
28 | 1859 if (B == NULL) { |
1860 // CodeCache is full, disable compilation | |
1861 // Ought to log this but compile log is only per compile thread | |
1862 // and we're some nondescript Java thread. | |
1863 UseInterpreter = true; | |
1864 if (UseCompiler || AlwaysCompileLoopMethods ) { | |
1865 #ifndef PRODUCT | |
1866 warning("CodeCache is full. Compiler has been disabled"); | |
1867 if (CompileTheWorld || ExitOnFullCodeCache) { | |
1868 before_exit(JavaThread::current()); | |
1869 exit_globals(); // will delete tty | |
1870 vm_direct_exit(CompileTheWorld ? 0 : 1); | |
1871 } | |
1872 #endif | |
1873 UseCompiler = false; | |
1874 AlwaysCompileLoopMethods = false; | |
1875 } | |
1876 return 0; // Out of CodeCache space (_handlers[0] == NULL) | |
1877 } | |
0 | 1878 entry->relocate(B->instructions_begin()); |
1879 #ifndef PRODUCT | |
1880 // debugging support | |
1881 if (PrintAdapterHandlers) { | |
1882 tty->cr(); | |
1883 tty->print_cr("i2c argument handler #%d for: %s %s (fingerprint = 0x%llx, %d bytes generated)", | |
1884 _handlers->length(), (method->is_static() ? "static" : "receiver"), | |
1885 method->signature()->as_C_string(), fingerprint, buffer.code_size() ); | |
1886 tty->print_cr("c2i argument handler starts at %p",entry->get_c2i_entry()); | |
1887 Disassembler::decode(entry->get_i2c_entry(), entry->get_i2c_entry() + buffer.code_size()); | |
1888 } | |
1889 #endif | |
1890 | |
1891 // add handlers to library | |
1892 _fingerprints->append(fingerprint); | |
1893 _handlers->append(entry); | |
1894 // set handler index | |
1895 assert(_fingerprints->length() == _handlers->length(), "sanity check"); | |
1896 result = _fingerprints->length() - 1; | |
1897 } | |
1898 // Outside of the lock | |
1899 if (B != NULL) { | |
1900 char blob_id[256]; | |
1901 jio_snprintf(blob_id, | |
1902 sizeof(blob_id), | |
1903 "%s(" PTR64_FORMAT ")@" PTR_FORMAT, | |
1904 AdapterHandlerEntry::name, | |
1905 fingerprint, | |
1906 B->instructions_begin()); | |
1907 VTune::register_stub(blob_id, B->instructions_begin(), B->instructions_end()); | |
1908 Forte::register_stub(blob_id, B->instructions_begin(), B->instructions_end()); | |
1909 | |
1910 if (JvmtiExport::should_post_dynamic_code_generated()) { | |
1911 JvmtiExport::post_dynamic_code_generated(blob_id, | |
1912 B->instructions_begin(), | |
1913 B->instructions_end()); | |
1914 } | |
1915 } | |
1916 return result; | |
1917 } | |
1918 | |
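// Adapter code is generated into the shared _buffer and then copied into its
// final BufferBlob (see get_create_adapter_index); relocate() shifts the three
// entry points by the distance the code moved, measured from the old i2c entry.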
1919 void AdapterHandlerEntry::relocate(address new_base) { | |
1920 ptrdiff_t delta = new_base - _i2c_entry; | |
1921 _i2c_entry += delta; | |
1922 _c2i_entry += delta; | |
1923 _c2i_unverified_entry += delta; | |
1924 } | |
1925 | |
1926 // Create a native wrapper for this native method. The wrapper converts the | |
1927 // java compiled calling convention to the native convention, handlizes | |
1928 // arguments, and transitions to native. On return from native code we | |
1929 // transition back to Java, blocking if a safepoint is in progress. | |
1930 nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method) { | |
1931 ResourceMark rm; | |
1932 nmethod* nm = NULL; | |
1933 | |
1934 if (PrintCompilation) { | |
1935 ttyLocker ttyl; | |
1936 tty->print("--- n%s ", (method->is_synchronized() ? "s" : " ")); | |
1937 method->print_short_name(tty); | |
1938 if (method->is_static()) { | |
1939 tty->print(" (static)"); | |
1940 } | |
1941 tty->cr(); | |
1942 } | |
1943 | |
1944 assert(method->has_native_function(), "must have something valid to call!"); | |
1945 | |
1946 { | |
1947 // perform the work while holding the lock, but perform any printing outside the lock | |
1948 MutexLocker mu(AdapterHandlerLibrary_lock); | |
1949 // See if somebody beat us to it | |
1950 nm = method->code(); | |
1951 if (nm) { | |
1952 return nm; | |
1953 } | |
1954 | |
1955 // Improve alignment slightly | |
1956 u_char* buf = (u_char*)(((intptr_t)_buffer + CodeEntryAlignment-1) & ~(CodeEntryAlignment-1)); | |
1957 CodeBuffer buffer(buf, AdapterHandlerLibrary_size); | |
1958 // Need a few relocation entries | |
1959 double locs_buf[20]; | |
1960 buffer.insts()->initialize_shared_locs((relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo)); | |
1961 MacroAssembler _masm(&buffer); | |
1962 | |
1963 // Fill in the signature array, for the calling-convention call. | |
1964 int total_args_passed = method->size_of_parameters(); | |
1965 | |
1966 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType,total_args_passed); | |
1967 VMRegPair * regs = NEW_RESOURCE_ARRAY(VMRegPair ,total_args_passed); | |
1968 int i=0; | |
1969 if( !method->is_static() ) // Pass in receiver first | |
1970 sig_bt[i++] = T_OBJECT; | |
1971 SignatureStream ss(method->signature()); | |
1972 for( ; !ss.at_return_type(); ss.next()) { | |
1973 sig_bt[i++] = ss.type(); // Collect remaining bits of signature | |
1974 if( ss.type() == T_LONG || ss.type() == T_DOUBLE ) | |
1975 sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots | |
1976 } | |
1977 assert( i==total_args_passed, "" ); | |
1978 BasicType ret_type = ss.type(); | |
1979 | |
1980 // Now get the compiled-Java layout as input arguments | |
1981 int comp_args_on_stack; | |
1982 comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false); | |
1983 | |
1984 // Generate the compiled-to-native wrapper code | |
1985 nm = SharedRuntime::generate_native_wrapper(&_masm, | |
1986 method, | |
1987 total_args_passed, | |
1988 comp_args_on_stack, | |
1989 sig_bt,regs, | |
1990 ret_type); | |
1991 } | |
1992 | |
1993 // Must unlock before calling set_code | |
1994 // Install the generated code. | |
1995 if (nm != NULL) { | |
1996 method->set_code(method, nm); | |
1997 nm->post_compiled_method_load_event(); | |
1998 } else { | |
1999 // CodeCache is full, disable compilation | |
2000 // Ought to log this but compile log is only per compile thread | |
2001 // and we're some nondescript Java thread. | |
2002 UseInterpreter = true; | |
2003 if (UseCompiler || AlwaysCompileLoopMethods ) { | |
2004 #ifndef PRODUCT | |
2005 warning("CodeCache is full. Compiler has been disabled"); | |
2006 if (CompileTheWorld || ExitOnFullCodeCache) { | |
2007 before_exit(JavaThread::current()); | |
2008 exit_globals(); // will delete tty | |
2009 vm_direct_exit(CompileTheWorld ? 0 : 1); | |
2010 } | |
2011 #endif | |
2012 UseCompiler = false; | |
2013 AlwaysCompileLoopMethods = false; | |
2014 } | |
2015 } | |
2016 return nm; | |
2017 } | |
2018 | |
2019 #ifdef HAVE_DTRACE_H |
2020 // Create a dtrace nmethod for this method. The wrapper converts the |
2021 // java compiled calling convention to the native convention, makes a dummy call |
2022 // (actually nops for the size of the call instruction, which become a trap if |
2023 // the probe is enabled), and then returns to the caller. Since this all looks like a |
2024 // leaf no thread transition is needed. |
2025  |
2026 nmethod *AdapterHandlerLibrary::create_dtrace_nmethod(methodHandle method) { |
2027 ResourceMark rm; |
2028 nmethod* nm = NULL; |
2029  |
2030 if (PrintCompilation) { |
2031 ttyLocker ttyl; |
2032 tty->print("--- n%s ", (method->is_synchronized() ? "s" : " ")); |
2033 method->print_short_name(tty); |
2034 if (method->is_static()) { |
2035 tty->print(" (static)"); |
2036 } |
2037 tty->cr(); |
2038 } |
2039  |
2040 { |
2041 // perform the work while holding the lock, but perform any printing |
2042 // outside the lock |
2043 MutexLocker mu(AdapterHandlerLibrary_lock); |
2044 // See if somebody beat us to it |
2045 nm = method->code(); |
2046 if (nm) { |
2047 return nm; |
2048 } |
2049  |
2050 // Improve alignment slightly |
2051 u_char* buf = (u_char*) |
2052 (((intptr_t)_buffer + CodeEntryAlignment-1) & ~(CodeEntryAlignment-1)); |
2053 CodeBuffer buffer(buf, AdapterHandlerLibrary_size); |
2054 // Need a few relocation entries |
2055 double locs_buf[20]; |
2056 buffer.insts()->initialize_shared_locs( |
2057 (relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo)); |
2058 MacroAssembler _masm(&buffer); |
2059  |
2060 // Generate the compiled-to-native wrapper code |
2061 nm = SharedRuntime::generate_dtrace_nmethod(&_masm, method); |
2062 } |
2063 return nm; |
2064 } |
2065  |
2066 // The dtrace method needs to convert a java.lang.String to a UTF-8 string. |
2067 void SharedRuntime::get_utf(oopDesc* src, address dst) { |
2068 typeArrayOop jlsValue = java_lang_String::value(src); |
2069 int jlsOffset = java_lang_String::offset(src); |
2070 int jlsLen = java_lang_String::length(src); |
2071 jchar* jlsPos = (jlsLen == 0) ? NULL : |
2072 jlsValue->char_at_addr(jlsOffset); |
2073 (void) UNICODE::as_utf8(jlsPos, jlsLen, (char *)dst, max_dtrace_string_size); |
2074 } |
2075 #endif // HAVE_DTRACE_H |
2076  |
0 | 2077 // ------------------------------------------------------------------------- |
2078 // Java-Java calling convention | |
2079 // (what you use when Java calls Java) | |
2080 | |
2081 //------------------------------name_for_receiver---------------------------------- | |
2082 // For a given signature, return the VMReg for parameter 0. | |
2083 VMReg SharedRuntime::name_for_receiver() { | |
2084 VMRegPair regs; | |
2085 BasicType sig_bt = T_OBJECT; | |
2086 (void) java_calling_convention(&sig_bt, &regs, 1, true); | |
2087 // Return argument 0 register. In the LP64 build pointers | |
2088 // take 2 registers, but the VM wants only the 'main' name. | |
2089 return regs.first(); | |
2090 } | |
2091 | |
2092 VMRegPair *SharedRuntime::find_callee_arguments(symbolOop sig, bool is_static, int* arg_size) { | |
2093 // This method returns a data structure allocated as a | |
2094 // ResourceObject, so do not put any ResourceMarks in here. | |
2095 char *s = sig->as_C_string(); | |
2096 int len = (int)strlen(s); | |
2097 s++; len--; // Skip opening paren | |
2098 char *t = s+len; | |
2099 while( *(--t) != ')' ) ; // Find close paren | |
2100 | |
2101 BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, 256 ); | |
2102 VMRegPair *regs = NEW_RESOURCE_ARRAY( VMRegPair, 256 ); | |
2103 int cnt = 0; | |
2104 if (!is_static) { | |
2105 sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature | |
2106 } | |
2107 | |
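// Example walk over a hypothetical signature "(IJLjava/lang/String;[J)V":
// it emits T_INT, T_LONG, T_VOID, T_OBJECT, T_ARRAY (after any receiver slot);
// the return type after ')' is never scanned since the loop stops at t.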
2108 while( s < t ) { | |
2109 switch( *s++ ) { // Switch on signature character | |
2110 case 'B': sig_bt[cnt++] = T_BYTE; break; | |
2111 case 'C': sig_bt[cnt++] = T_CHAR; break; | |
2112 case 'D': sig_bt[cnt++] = T_DOUBLE; sig_bt[cnt++] = T_VOID; break; | |
2113 case 'F': sig_bt[cnt++] = T_FLOAT; break; | |
2114 case 'I': sig_bt[cnt++] = T_INT; break; | |
2115 case 'J': sig_bt[cnt++] = T_LONG; sig_bt[cnt++] = T_VOID; break; | |
2116 case 'S': sig_bt[cnt++] = T_SHORT; break; | |
2117 case 'Z': sig_bt[cnt++] = T_BOOLEAN; break; | |
2118 case 'V': sig_bt[cnt++] = T_VOID; break; | |
2119 case 'L': // Oop | |
2120 while( *s++ != ';' ) ; // Skip signature | |
2121 sig_bt[cnt++] = T_OBJECT; | |
2122 break; | |
2123 case '[': { // Array | |
2124 do { // Skip optional size | |
2125 while( *s >= '0' && *s <= '9' ) s++; | |
2126 } while( *s++ == '[' ); // Nested arrays? | |
2127 // Skip element type | |
2128 if( s[-1] == 'L' ) | |
2129 while( *s++ != ';' ) ; // Skip signature | |
2130 sig_bt[cnt++] = T_ARRAY; | |
2131 break; | |
2132 } | |
2133 default : ShouldNotReachHere(); | |
2134 } | |
2135 } | |
2136 assert( cnt < 256, "grow table size" ); | |
2137 | |
2138 int comp_args_on_stack; | |
2139 comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt, true); | |
2140 | |
2141 // the calling convention doesn't count out_preserve_stack_slots so | |
2142 // we must add that in to get "true" stack offsets. | |
2143 | |
2144 if (comp_args_on_stack) { | |
2145 for (int i = 0; i < cnt; i++) { | |
2146 VMReg reg1 = regs[i].first(); | |
2147 if( reg1->is_stack()) { | |
2148 // Yuck | |
2149 reg1 = reg1->bias(out_preserve_stack_slots()); | |
2150 } | |
2151 VMReg reg2 = regs[i].second(); | |
2152 if( reg2->is_stack()) { | |
2153 // Yuck | |
2154 reg2 = reg2->bias(out_preserve_stack_slots()); | |
2155 } | |
2156 regs[i].set_pair(reg2, reg1); | |
2157 } | |
2158 } | |
2159 | |
2160 // results | |
2161 *arg_size = cnt; | |
2162 return regs; | |
2163 } | |
2164 | |
2165 // OSR Migration Code | |
2166 // | |
2167 // This code is used to convert interpreter frames into compiled frames. It is | |
2168 // called from the very start of a compiled OSR nmethod. A temp array is | |
2169 // allocated to hold the interesting bits of the interpreter frame. All | |
2170 // active locks are inflated to allow them to move. The displaced headers and | |
2171 // active interpreter locals are copied into the temp buffer. Then we return | |
2172 // back to the compiled code. The compiled code then pops the current | |
2173 // interpreter frame off the stack and pushes a new compiled frame. Then it | |
2174 // copies the interpreter locals and displaced headers where it wants. | |
2175 // Finally it calls back to free the temp buffer. | |
2176 // | |
2177 // All of this is done NOT at any Safepoint, nor is any safepoint or GC allowed. | |
2178 | |
2179 JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *thread) ) | |
2180 | |
2181 #ifdef IA64 | |
2182 ShouldNotReachHere(); // NYI | |
2183 #endif /* IA64 */ | |
2184 | |
2185 // | |
2186 // This code is dependent on the memory layout of the interpreter local | |
2187 // array and the monitors. On all of our platforms the layout is identical | |
2188 // so this code is shared. If some platform lays its arrays out | |
2189 // differently then this code could move to platform specific code or | |
2190 // the code here could be modified to copy items one at a time using | |
2191 // frame accessor methods and be platform independent. | |
2192 | |
2193 frame fr = thread->last_frame(); | |
2194 assert( fr.is_interpreted_frame(), "" ); | |
2195 assert( fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks" ); | |
2196 | |
2197 // Figure out how many monitors are active. | |
2198 int active_monitor_count = 0; | |
2199 for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end(); | |
2200 kptr < fr.interpreter_frame_monitor_begin(); | |
2201 kptr = fr.next_monitor_in_interpreter_frame(kptr) ) { | |
2202 if( kptr->obj() != NULL ) active_monitor_count++; | |
2203 } | |
2204 | |
2205 // QQQ we could place number of active monitors in the array so that compiled code | |
2206 // could double check it. | |
2207 | |
2208 methodOop moop = fr.interpreter_frame_method(); | |
2209 int max_locals = moop->max_locals(); | |
2210 // Allocate temp buffer, 1 word per local & 2 per active monitor | |
2211 int buf_size_words = max_locals + active_monitor_count*2; | |
2212 intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words); | |
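// Buffer layout after the copies below:
//   buf[0 .. max_locals-1] : the interpreter locals
//   buf[max_locals .. end] : (displaced header, object) word pairs,
//                            one pair per active monitor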
2213 | |
2214 // Copy the locals. Order is preserved so that loading of longs works. | |
2215 // Since there's no GC I can copy the oops blindly. | |
2216 assert( sizeof(HeapWord)==sizeof(intptr_t), "fix this code"); | |
2217 if (TaggedStackInterpreter) { | |
2218 for (int i = 0; i < max_locals; i++) { | |
2219 // copy only each local separately to the buffer avoiding the tag | |
2220 buf[i] = *fr.interpreter_frame_local_at(max_locals-i-1); | |
2221 } | |
2222 } else { | |
2223 Copy::disjoint_words( | |
2224 (HeapWord*)fr.interpreter_frame_local_at(max_locals-1), | |
2225 (HeapWord*)&buf[0], | |
2226 max_locals); | |
2227 } | |
2228 | |
2229 // Inflate locks. Copy the displaced headers. Be careful, there can be holes. | |
2230 int i = max_locals; | |
2231 for( BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end(); | |
2232 kptr2 < fr.interpreter_frame_monitor_begin(); | |
2233 kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) { | |
2234 if( kptr2->obj() != NULL) { // Avoid 'holes' in the monitor array | |
2235 BasicLock *lock = kptr2->lock(); | |
2236 // Inflate so the displaced header becomes position-independent | |
2237 if (lock->displaced_header()->is_unlocked()) | |
2238 ObjectSynchronizer::inflate_helper(kptr2->obj()); | |
2239 // Now the displaced header is free to move | |
2240 buf[i++] = (intptr_t)lock->displaced_header(); | |
2241 buf[i++] = (intptr_t)kptr2->obj(); | |
2242 } | |
2243 } | |
2244 assert( i - max_locals == active_monitor_count*2, "found the expected number of monitors" ); | |
2245 | |
2246 return buf; | |
2247 JRT_END | |
2248 | |
2249 JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) ) | |
2250 FREE_C_HEAP_ARRAY(intptr_t,buf); | |
2251 JRT_END | |
2252 | |
2253 #ifndef PRODUCT | |
2254 bool AdapterHandlerLibrary::contains(CodeBlob* b) { | |
2255 | |
2256 if (_handlers == NULL) return false; |
2257  |
0 | 2258 for (int i = 0 ; i < _handlers->length() ; i++) { |
2259 AdapterHandlerEntry* a = get_entry(i); | |
2260 if ( a != NULL && b == CodeCache::find_blob(a->get_i2c_entry()) ) return true; | |
2261 } | |
2262 return false; | |
2263 } | |
2264 | |
2265 void AdapterHandlerLibrary::print_handler(CodeBlob* b) { | |
2266 | |
2267 for (int i = 0 ; i < _handlers->length() ; i++) { | |
2268 AdapterHandlerEntry* a = get_entry(i); | |
2269 if ( a != NULL && b == CodeCache::find_blob(a->get_i2c_entry()) ) { | |
2270 tty->print("Adapter for signature: "); | |
2271 // Fingerprinter::print(_fingerprints->at(i)); | |
2272 tty->print("0x%" FORMAT64_MODIFIER "x", _fingerprints->at(i)); | |
2273 tty->print_cr(" i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT, | |
2274 a->get_i2c_entry(), a->get_c2i_entry(), a->get_c2i_unverified_entry()); | |
2275 | |
2276 return; | |
2277 } | |
2278 } | |
2279 assert(false, "Should have found handler"); | |
2280 } | |
2281 #endif /* PRODUCT */ |