src/share/vm/runtime/sharedRuntime.cpp @ 0:a61af66fc99e jdk7-b24

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children f8236e79048a 9785f6d2dd97
/*
 * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_sharedRuntime.cpp.incl"
#include <math.h>

HS_DTRACE_PROBE_DECL4(hotspot, object__alloc, Thread*, char*, int, size_t);
HS_DTRACE_PROBE_DECL7(hotspot, method__entry, int,
                      char*, int, char*, int, char*, int);
HS_DTRACE_PROBE_DECL7(hotspot, method__return, int,
                      char*, int, char*, int, char*, int);

// Implementation of SharedRuntime

#ifndef PRODUCT
// For statistics
int SharedRuntime::_ic_miss_ctr = 0;
int SharedRuntime::_wrong_method_ctr = 0;
int SharedRuntime::_resolve_static_ctr = 0;
int SharedRuntime::_resolve_virtual_ctr = 0;
int SharedRuntime::_resolve_opt_virtual_ctr = 0;
int SharedRuntime::_implicit_null_throws = 0;
int SharedRuntime::_implicit_div0_throws = 0;
int SharedRuntime::_throw_null_ctr = 0;

int SharedRuntime::_nof_normal_calls = 0;
int SharedRuntime::_nof_optimized_calls = 0;
int SharedRuntime::_nof_inlined_calls = 0;
int SharedRuntime::_nof_megamorphic_calls = 0;
int SharedRuntime::_nof_static_calls = 0;
int SharedRuntime::_nof_inlined_static_calls = 0;
int SharedRuntime::_nof_interface_calls = 0;
int SharedRuntime::_nof_optimized_interface_calls = 0;
int SharedRuntime::_nof_inlined_interface_calls = 0;
int SharedRuntime::_nof_megamorphic_interface_calls = 0;
int SharedRuntime::_nof_removable_exceptions = 0;

int SharedRuntime::_new_instance_ctr=0;
int SharedRuntime::_new_array_ctr=0;
int SharedRuntime::_multi1_ctr=0;
int SharedRuntime::_multi2_ctr=0;
int SharedRuntime::_multi3_ctr=0;
int SharedRuntime::_multi4_ctr=0;
int SharedRuntime::_multi5_ctr=0;
int SharedRuntime::_mon_enter_stub_ctr=0;
int SharedRuntime::_mon_exit_stub_ctr=0;
int SharedRuntime::_mon_enter_ctr=0;
int SharedRuntime::_mon_exit_ctr=0;
int SharedRuntime::_partial_subtype_ctr=0;
int SharedRuntime::_jbyte_array_copy_ctr=0;
int SharedRuntime::_jshort_array_copy_ctr=0;
int SharedRuntime::_jint_array_copy_ctr=0;
int SharedRuntime::_jlong_array_copy_ctr=0;
int SharedRuntime::_oop_array_copy_ctr=0;
int SharedRuntime::_checkcast_array_copy_ctr=0;
int SharedRuntime::_unsafe_array_copy_ctr=0;
int SharedRuntime::_generic_array_copy_ctr=0;
int SharedRuntime::_slow_array_copy_ctr=0;
int SharedRuntime::_find_handler_ctr=0;
int SharedRuntime::_rethrow_ctr=0;

int SharedRuntime::_ICmiss_index = 0;
int SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];

void SharedRuntime::trace_ic_miss(address at) {
  for (int i = 0; i < _ICmiss_index; i++) {
    if (_ICmiss_at[i] == at) {
      _ICmiss_count[i]++;
      return;
    }
  }
  int index = _ICmiss_index++;
  if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
  _ICmiss_at[index] = at;
  _ICmiss_count[index] = 1;
}

void SharedRuntime::print_ic_miss_histogram() {
  if (ICMissHistogram) {
    tty->print_cr("IC Miss Histogram:");
    int tot_misses = 0;
    for (int i = 0; i < _ICmiss_index; i++) {
      tty->print_cr(" at: " INTPTR_FORMAT " nof: %d", _ICmiss_at[i], _ICmiss_count[i]);
      tot_misses += _ICmiss_count[i];
    }
    tty->print_cr("Total IC misses: %7d", tot_misses);
  }
}
#endif // PRODUCT

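// The JRT_LEAF helpers below supply Java's 64-bit long arithmetic for
// compiled code that calls into the runtime instead of emitting the
// operation inline. Note the (y, x) parameter order with the bodies
// computing x <op> y; presumably this mirrors the order in which the
// operands are passed by the compiled callers.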
JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
  return x * y;
JRT_END

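// Per the JLS, long division must not trap on overflow: min_jlong / -1
// wraps to min_jlong, and the matching remainder is 0. The explicit
// checks in ldiv and lrem below avoid the hardware fault this operand
// pair would raise on some architectures (e.g. #DE from idiv on x86).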
JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
  if (x == min_jlong && y == CONST64(-1)) {
    return x;
  } else {
    return x / y;
  }
JRT_END


JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
  if (x == min_jlong && y == CONST64(-1)) {
    return 0;
  } else {
    return x % y;
  }
JRT_END


const juint  float_sign_mask  = 0x7FFFFFFF;
const juint  float_infinity   = 0x7F800000;
const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF);
const julong double_infinity  = CONST64(0x7FF0000000000000);

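// IEEE-754 bit tricks used by frem/drem below: masking off the sign bit
// with *_sign_mask and comparing what remains against the *_infinity
// pattern (all exponent bits set, mantissa zero) tests for +/-infinity
// without any floating-point comparisons.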
JRT_LEAF(jfloat, SharedRuntime::frem(jfloat x, jfloat y))
#ifdef _WIN64
  // 64-bit Windows on amd64 returns the wrong values for
  // infinity operands.
  union { jfloat f; juint i; } xbits, ybits;
  xbits.f = x;
  ybits.f = y;
  // x Mod Infinity == x unless x is infinity
  if ( ((xbits.i & float_sign_mask) != float_infinity) &&
       ((ybits.i & float_sign_mask) == float_infinity) ) {
    return x;
  }
#endif
  return ((jfloat)fmod((double)x,(double)y));
JRT_END


JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y))
#ifdef _WIN64
  union { jdouble d; julong l; } xbits, ybits;
  xbits.d = x;
  ybits.d = y;
  // x Mod Infinity == x unless x is infinity
  if ( ((xbits.l & double_sign_mask) != double_infinity) &&
       ((ybits.l & double_sign_mask) == double_infinity) ) {
    return x;
  }
#endif
  return ((jdouble)fmod((double)x,(double)y));
JRT_END

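// The conversions below implement Java's saturating narrowing rules
// (JLS 5.1.3): NaN converts to 0, and out-of-range values convert to
// the target type's min/max. In f2i/d2i the jlong intermediate makes
// overflow cheap to detect: the value round-trips through jint exactly
// when no saturation is needed, so e.g. f2i(1e10f) yields max_jint.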
JRT_LEAF(jint, SharedRuntime::f2i(jfloat x))
  if (g_isnan(x)) {return 0;}
  jlong lltmp = (jlong)x;
  jint ltmp = (jint)lltmp;
  if (ltmp == lltmp) {
    return ltmp;
  } else {
    if (x < 0) {
      return min_jint;
    } else {
      return max_jint;
    }
  }
JRT_END


JRT_LEAF(jlong, SharedRuntime::f2l(jfloat x))
  if (g_isnan(x)) {return 0;}
  jlong lltmp = (jlong)x;
  if (lltmp != min_jlong) {
    return lltmp;
  } else {
    if (x < 0) {
      return min_jlong;
    } else {
      return max_jlong;
    }
  }
JRT_END


JRT_LEAF(jint, SharedRuntime::d2i(jdouble x))
  if (g_isnan(x)) {return 0;}
  jlong lltmp = (jlong)x;
  jint ltmp = (jint)lltmp;
  if (ltmp == lltmp) {
    return ltmp;
  } else {
    if (x < 0) {
      return min_jint;
    } else {
      return max_jint;
    }
  }
JRT_END


JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x))
  if (g_isnan(x)) {return 0;}
  jlong lltmp = (jlong)x;
  if (lltmp != min_jlong) {
    return lltmp;
  } else {
    if (x < 0) {
      return min_jlong;
    } else {
      return max_jlong;
    }
  }
JRT_END


JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
  return (jfloat)x;
JRT_END


JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
  return (jfloat)x;
JRT_END


JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
  return (jdouble)x;
JRT_END

// Exception handling across interpreter/compiler boundaries
//
// exception_handler_for_return_address(...) returns the continuation address.
// The continuation address is the entry point of the exception handler of the
// previous frame depending on the return address.

address SharedRuntime::raw_exception_handler_for_return_address(address return_address) {
  assert(frame::verify_return_pc(return_address), "must be a return pc");

  // the fastest case first
  CodeBlob* blob = CodeCache::find_blob(return_address);
  if (blob != NULL && blob->is_nmethod()) {
    nmethod* code = (nmethod*)blob;
    assert(code != NULL, "nmethod must be present");
    // native nmethods don't have exception handlers
    assert(!code->is_native_method(), "no exception handler");
    assert(code->header_begin() != code->exception_begin(), "no exception handler");
    if (code->is_deopt_pc(return_address)) {
      return SharedRuntime::deopt_blob()->unpack_with_exception();
    } else {
      return code->exception_begin();
    }
  }

  // Entry code
  if (StubRoutines::returns_to_call_stub(return_address)) {
    return StubRoutines::catch_exception_entry();
  }
  // Interpreted code
  if (Interpreter::contains(return_address)) {
    return Interpreter::rethrow_exception_entry();
  }

  // Compiled code
  if (CodeCache::contains(return_address)) {
    CodeBlob* blob = CodeCache::find_blob(return_address);
    if (blob->is_nmethod()) {
      nmethod* code = (nmethod*)blob;
      assert(code != NULL, "nmethod must be present");
      assert(code->header_begin() != code->exception_begin(), "no exception handler");
      return code->exception_begin();
    }
    if (blob->is_runtime_stub()) {
      ShouldNotReachHere();  // callers are responsible for skipping runtime stub frames
    }
  }
  guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
#ifndef PRODUCT
  { ResourceMark rm;
    tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", return_address);
    tty->print_cr("a) exception happened in (new?) code stubs/buffers that are not handled here");
    tty->print_cr("b) other problem");
  }
#endif // PRODUCT
  ShouldNotReachHere();
  return NULL;
}


JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(address return_address))
  return raw_exception_handler_for_return_address(return_address);
JRT_END

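// Called by the signal/trap handler when compiled code faults on the
// safepoint polling page. Returns the stub the faulting thread should
// resume at so it can save its state and block for the safepoint.
// Polls at returns use a separate handler blob, presumably because the
// outgoing (possibly oop) return value must be preserved there.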
address SharedRuntime::get_poll_stub(address pc) {
  address stub;
  // Look up the code blob
  CodeBlob *cb = CodeCache::find_blob(pc);

  // Should be an nmethod
  assert( cb && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod" );

  // Look up the relocation information
  assert( ((nmethod*)cb)->is_at_poll_or_poll_return(pc),
          "safepoint polling: type must be poll" );

  assert( ((NativeInstruction*)pc)->is_safepoint_poll(),
          "Only polling locations are used for safepoint");

  bool at_poll_return = ((nmethod*)cb)->is_at_poll_return(pc);
  if (at_poll_return) {
    assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
           "polling page return stub not created yet");
    stub = SharedRuntime::polling_page_return_handler_blob()->instructions_begin();
  } else {
    assert(SharedRuntime::polling_page_safepoint_handler_blob() != NULL,
           "polling page safepoint stub not created yet");
    stub = SharedRuntime::polling_page_safepoint_handler_blob()->instructions_begin();
  }
#ifndef PRODUCT
  if( TraceSafepoint ) {
    char buf[256];
    jio_snprintf(buf, sizeof(buf),
                 "... found polling page %s exception at pc = "
                 INTPTR_FORMAT ", stub =" INTPTR_FORMAT,
                 at_poll_return ? "return" : "loop",
                 (intptr_t)pc, (intptr_t)stub);
    tty->print_raw_cr(buf);
  }
#endif // PRODUCT
  return stub;
}


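// Digs the receiver of a call-in-progress out of an interpreter frame:
// ArgumentSizeComputer gives the explicit argument size in slots, the
// +1 accounts for the receiver itself, and tos_at(args_size - 1)
// reaches down the expression stack to the receiver slot.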
oop SharedRuntime::retrieve_receiver( symbolHandle sig, frame caller ) {
  assert(caller.is_interpreted_frame(), "");
  int args_size = ArgumentSizeComputer(sig).size() + 1;
  assert(args_size <= caller.interpreter_frame_expression_stack_size(), "receiver must be on interpreter stack");
  oop result = (oop) *caller.interpreter_frame_tos_at(args_size - 1);
  assert(Universe::heap()->is_in(result) && result->is_oop(), "receiver must be an oop");
  return result;
}


void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, Handle h_exception) {
  if (JvmtiExport::can_post_exceptions()) {
    vframeStream vfst(thread, true);
    methodHandle method = methodHandle(thread, vfst.method());
    address bcp = method()->bcp_from(vfst.bci());
    JvmtiExport::post_exception_throw(thread, method(), bcp, h_exception());
  }
  Exceptions::_throw(thread, __FILE__, __LINE__, h_exception);
}

void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, symbolOop name, const char *message) {
  Handle h_exception = Exceptions::new_exception(thread, name, message);
  throw_and_post_jvmti_exception(thread, h_exception);
}

// ret_pc points into caller; we are returning caller's exception handler
// for a given exception
address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
                                                    bool force_unwind, bool top_frame_only) {
  assert(nm != NULL, "must exist");
  ResourceMark rm;

  ScopeDesc* sd = nm->scope_desc_at(ret_pc);
  // determine handler bci, if any
  EXCEPTION_MARK;

  int handler_bci = -1;
  int scope_depth = 0;
  if (!force_unwind) {
    int bci = sd->bci();
    do {
      bool skip_scope_increment = false;
      // exception handler lookup
      KlassHandle ek (THREAD, exception->klass());
      handler_bci = sd->method()->fast_exception_handler_bci_for(ek, bci, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        // We threw an exception while trying to find the exception handler.
        // Transfer the new exception to the exception handle which will
        // be set into thread local storage, and do another lookup for an
        // exception handler for this exception, this time starting at the
        // BCI of the exception handler which caused the exception to be
        // thrown (bugs 4307310 and 4546590). Set "exception" reference
        // argument to ensure that the correct exception is thrown (4870175).
        exception = Handle(THREAD, PENDING_EXCEPTION);
        CLEAR_PENDING_EXCEPTION;
        if (handler_bci >= 0) {
          bci = handler_bci;
          handler_bci = -1;
          skip_scope_increment = true;
        }
      }
      if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
        sd = sd->sender();
        if (sd != NULL) {
          bci = sd->bci();
        }
        ++scope_depth;
      }
    } while (!top_frame_only && handler_bci < 0 && sd != NULL);
  }

  // found handling method => lookup exception handler
  int catch_pco = ret_pc - nm->instructions_begin();

  ExceptionHandlerTable table(nm);
  HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
  if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) {
    // Allow abbreviated catch tables. The idea is to allow a method
    // to materialize its exceptions without committing to the exact
    // routing of exceptions. In particular this is needed for adding
    // a synthetic handler to unlock monitors when inlining
    // synchronized methods since the unlock path isn't represented in
    // the bytecodes.
    t = table.entry_for(catch_pco, -1, 0);
  }

#ifdef COMPILER1
  if (nm->is_compiled_by_c1() && t == NULL && handler_bci == -1) {
    // Exception is not handled by this frame so unwind. Note that
    // this is not the same as how C2 does this. C2 emits a table
    // entry that dispatches to the unwind code in the nmethod.
    return NULL;
  }
#endif /* COMPILER1 */


  if (t == NULL) {
    tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", ret_pc, handler_bci);
    tty->print_cr(" Exception:");
    exception->print();
    tty->cr();
    tty->print_cr(" Compiled exception table :");
    table.print();
    nm->print_code();
    guarantee(false, "missing exception handler");
    return NULL;
  }

  return nm->instructions_begin() + t->pco();
}

JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread))
  // These errors occur only at call sites
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_AbstractMethodError());
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* thread))
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* thread))
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* thread))
  // This entry point is effectively only used for NullPointerExceptions which occur at inline
  // cache sites (when the callee activation is not yet set up) so we are at a call site
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* thread))
  // We avoid using the normal exception construction in this case because
  // it performs an upcall to Java, and we're already out of stack space.
  klassOop k = SystemDictionary::StackOverflowError_klass();
  oop exception_oop = instanceKlass::cast(k)->allocate_instance(CHECK);
  Handle exception (thread, exception_oop);
  if (StackTraceInThrowable) {
    java_lang_Throwable::fill_in_stack_trace(exception);
  }
  throw_and_post_jvmti_exception(thread, exception);
JRT_END

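// Maps a hardware trap taken by generated code (SEGV from an implicit
// null check, a divide fault, or a stack-bang overflow) to the address
// where execution should continue in order to dispatch the matching
// Java exception: either a shared stub entry or, for faults inside an
// nmethod, the nmethod's recorded implicit-exception continuation.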
address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
                                                           address pc,
                                                           SharedRuntime::ImplicitExceptionKind exception_kind)
{
  address target_pc = NULL;

  if (Interpreter::contains(pc)) {
#ifdef CC_INTERP
    // C++ interpreter doesn't throw implicit exceptions
    ShouldNotReachHere();
#else
    switch (exception_kind) {
      case IMPLICIT_NULL:           return Interpreter::throw_NullPointerException_entry();
      case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry();
      case STACK_OVERFLOW:          return Interpreter::throw_StackOverflowError_entry();
      default:                      ShouldNotReachHere();
    }
#endif // !CC_INTERP
  } else {
    switch (exception_kind) {
      case STACK_OVERFLOW: {
        // Stack overflow only occurs upon frame setup; the callee is
        // going to be unwound. Dispatch to a shared runtime stub
        // which will cause the StackOverflowError to be fabricated
        // and processed.
        // For stack overflow in deoptimization blob, cleanup thread.
        if (thread->deopt_mark() != NULL) {
          Deoptimization::cleanup_deopt_info(thread, NULL);
        }
        return StubRoutines::throw_StackOverflowError_entry();
      }

      case IMPLICIT_NULL: {
        if (VtableStubs::contains(pc)) {
          // We haven't yet entered the callee frame. Fabricate an
          // exception and begin dispatching it in the caller. Since
          // the caller was at a call site, it's safe to destroy all
          // caller-saved registers, as these entry points do.
          VtableStub* vt_stub = VtableStubs::stub_containing(pc);
          guarantee(vt_stub != NULL, "unable to find SEGVing vtable stub");
          if (vt_stub->is_abstract_method_error(pc)) {
            assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
            return StubRoutines::throw_AbstractMethodError_entry();
          } else {
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }
        } else {
          CodeBlob* cb = CodeCache::find_blob(pc);
          guarantee(cb != NULL, "exception happened outside interpreter, nmethods and vtable stubs (1)");

          // Exception happened in CodeCache. Must be either:
          //   1. Inline-cache check in C2I handler blob,
          //   2. Inline-cache check in nmethod, or
          //   3. Implicit null exception in nmethod

          if (!cb->is_nmethod()) {
            guarantee(cb->is_adapter_blob(),
                      "exception happened outside interpreter, nmethods and vtable stubs (2)");
            // There is no handler here, so we will simply unwind.
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }

          // Otherwise, it's an nmethod. Consult its exception handlers.
          nmethod* nm = (nmethod*)cb;
          if (nm->inlinecache_check_contains(pc)) {
            // exception happened inside inline-cache check code
            // => the nmethod is not yet active (i.e., the frame
            // is not set up yet) => use return address pushed by
            // caller => don't push another return address
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }

#ifndef PRODUCT
          _implicit_null_throws++;
#endif
          target_pc = nm->continuation_for_implicit_exception(pc);
          guarantee(target_pc != 0, "must have a continuation point");
        }

        break; // fall through
      }


      case IMPLICIT_DIVIDE_BY_ZERO: {
        nmethod* nm = CodeCache::find_nmethod(pc);
        guarantee(nm != NULL, "must have containing nmethod for implicit division-by-zero exceptions");
#ifndef PRODUCT
        _implicit_div0_throws++;
#endif
        target_pc = nm->continuation_for_implicit_exception(pc);
        guarantee(target_pc != 0, "must have a continuation point");
        break; // fall through
      }

      default: ShouldNotReachHere();
    }

    guarantee(target_pc != NULL, "must have computed destination PC for implicit exception");
    assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");

    // for AbortVMOnException flag
    NOT_PRODUCT(Exceptions::debug_check_abort("java.lang.NullPointerException"));
    if (exception_kind == IMPLICIT_NULL) {
      Events::log("Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
    } else {
      Events::log("Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
    }
    return target_pc;
  }

  ShouldNotReachHere();
  return NULL;
}


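// Used as the stand-in entry point for native methods whose
// implementation has not been linked yet (see the accessor below), so
// that invoking such a method throws UnsatisfiedLinkError.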
JNI_ENTRY(void, throw_unsatisfied_link_error(JNIEnv* env, ...))
{
  THROW(vmSymbols::java_lang_UnsatisfiedLinkError());
}
JNI_END


address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
  return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
}


#ifndef PRODUCT
JRT_ENTRY(intptr_t, SharedRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2))
  const frame f = thread->last_frame();
  assert(f.is_interpreted_frame(), "must be an interpreted frame");
#ifndef PRODUCT
  methodHandle mh(THREAD, f.interpreter_frame_method());
  BytecodeTracer::trace(mh, f.interpreter_frame_bcp(), tos, tos2);
#endif // !PRODUCT
  return preserve_this_value;
JRT_END
#endif // !PRODUCT


JRT_ENTRY(void, SharedRuntime::yield_all(JavaThread* thread, int attempts))
  os::yield_all(attempts);
JRT_END


// ---------------------------------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

void SharedRuntime::verify_caller_frame(frame caller_frame, methodHandle callee_method) {
  ResourceMark rm;
  assert (caller_frame.is_interpreted_frame(), "sanity check");
  assert (callee_method->has_compiled_code(), "callee must be compiled");
  methodHandle caller_method (Thread::current(), caller_frame.interpreter_frame_method());
  jint bci = caller_frame.interpreter_frame_bci();
  methodHandle method = find_callee_method_inside_interpreter(caller_frame, caller_method, bci);
  assert (callee_method == method, "incorrect method");
}

methodHandle SharedRuntime::find_callee_method_inside_interpreter(frame caller_frame, methodHandle caller_method, int bci) {
  EXCEPTION_MARK;
  Bytecode_invoke* bytecode = Bytecode_invoke_at(caller_method, bci);
  methodHandle staticCallee = bytecode->static_target(CATCH); // Non-product code

  bytecode = Bytecode_invoke_at(caller_method, bci);
  int bytecode_index = bytecode->index();
  Bytecodes::Code bc = bytecode->adjusted_invoke_code();

  Handle receiver;
  if (bc == Bytecodes::_invokeinterface ||
      bc == Bytecodes::_invokevirtual ||
      bc == Bytecodes::_invokespecial) {
    symbolHandle signature (THREAD, staticCallee->signature());
    receiver = Handle(THREAD, retrieve_receiver(signature, caller_frame));
  } else {
    receiver = Handle();
  }
  CallInfo result;
  constantPoolHandle constants (THREAD, caller_method->constants());
  LinkResolver::resolve_invoke(result, receiver, constants, bytecode_index, bc, CATCH); // Non-product code
  methodHandle calleeMethod = result.selected_method();
  return calleeMethod;
}

#endif // PRODUCT


JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
  assert(obj->is_oop(), "must be a valid oop");
  assert(obj->klass()->klass_part()->has_finalizer(), "shouldn't be here otherwise");
  instanceKlass::register_finalizer(instanceOop(obj), CHECK);
JRT_END


jlong SharedRuntime::get_java_tid(Thread* thread) {
  if (thread != NULL) {
    if (thread->is_Java_thread()) {
      oop obj = ((JavaThread*)thread)->threadObj();
      return (obj == NULL) ? 0 : java_lang_Thread::thread_id(obj);
    }
  }
  return 0;
}

/**
 * This function ought to be a void function, but cannot be because
 * it gets turned into a tail-call on sparc, which runs into dtrace bug
 * 6254741. Once that is fixed we can remove the dummy return value.
 */
int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
  return dtrace_object_alloc_base(Thread::current(), o);
}

int SharedRuntime::dtrace_object_alloc_base(Thread* thread, oopDesc* o) {
  assert(DTraceAllocProbes, "wrong call");
  Klass* klass = o->blueprint();
  int size = o->size();
  symbolOop name = klass->name();
  HS_DTRACE_PROBE4(hotspot, object__alloc, get_java_tid(thread),
                   name->bytes(), name->utf8_length(), size * HeapWordSize);
  return 0;
}

JRT_LEAF(int, SharedRuntime::dtrace_method_entry(
    JavaThread* thread, methodOopDesc* method))
  assert(DTraceMethodProbes, "wrong call");
  symbolOop kname = method->klass_name();
  symbolOop name = method->name();
  symbolOop sig = method->signature();
  HS_DTRACE_PROBE7(hotspot, method__entry, get_java_tid(thread),
                   kname->bytes(), kname->utf8_length(),
                   name->bytes(), name->utf8_length(),
                   sig->bytes(), sig->utf8_length());
  return 0;
JRT_END

JRT_LEAF(int, SharedRuntime::dtrace_method_exit(
    JavaThread* thread, methodOopDesc* method))
  assert(DTraceMethodProbes, "wrong call");
  symbolOop kname = method->klass_name();
  symbolOop name = method->name();
  symbolOop sig = method->signature();
  HS_DTRACE_PROBE7(hotspot, method__return, get_java_tid(thread),
                   kname->bytes(), kname->utf8_length(),
                   name->bytes(), name->utf8_length(),
                   sig->bytes(), sig->utf8_length());
  return 0;
JRT_END


// Finds the receiver, CallInfo (i.e. receiver method), and calling bytecode
// for a call currently in progress, i.e., arguments have been pushed on the
// stack but the callee has not been invoked yet. Used by: resolve
// virtual/static, vtable updates, etc. Caller frame must be compiled.
Handle SharedRuntime::find_callee_info(JavaThread* thread, Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) {
  ResourceMark rm(THREAD);

  // last java frame on stack (which includes native call frames)
  vframeStream vfst(thread, true);  // Do not skip any javaCalls

  return find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(Handle()));
}


// Finds the receiver, CallInfo (i.e. receiver method), and calling bytecode
// for a call currently in progress, i.e., arguments have been pushed on the
// stack but the callee has not been invoked yet. Caller frame must be compiled.
Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
                                              vframeStream& vfst,
                                              Bytecodes::Code& bc,
                                              CallInfo& callinfo, TRAPS) {
  Handle receiver;
  Handle nullHandle;  // create a handy null handle for exception returns

  assert(!vfst.at_end(), "Java frame must exist");

  // Find caller and bci from vframe
  methodHandle caller (THREAD, vfst.method());
  int bci = vfst.bci();

  // Find bytecode
  Bytecode_invoke* bytecode = Bytecode_invoke_at(caller, bci);
  bc = bytecode->adjusted_invoke_code();
  int bytecode_index = bytecode->index();

  // Find receiver for non-static call
  if (bc != Bytecodes::_invokestatic) {
    // This register map must be updated since we need to find the receiver for
    // compiled frames. The receiver might be in a register.
    RegisterMap reg_map2(thread);
    frame stubFrame = thread->last_frame();
    // Caller-frame is a compiled frame
    frame callerFrame = stubFrame.sender(&reg_map2);

    methodHandle callee = bytecode->static_target(CHECK_(nullHandle));
    if (callee.is_null()) {
      THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
    }
    // Retrieve from a compiled argument list
    receiver = Handle(THREAD, callerFrame.retrieve_receiver(&reg_map2));

    if (receiver.is_null()) {
      THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
    }
  }

  // Resolve method. This is parameterized by bytecode.
  constantPoolHandle constants (THREAD, caller->constants());
  assert (receiver.is_null() || receiver->is_oop(), "wrong receiver");
  LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_(nullHandle));

#ifdef ASSERT
  // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
  if (bc != Bytecodes::_invokestatic) {
    assert(receiver.not_null(), "should have thrown exception");
    KlassHandle receiver_klass (THREAD, receiver->klass());
    klassOop rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
    // klass is already loaded
    KlassHandle static_receiver_klass (THREAD, rk);
    assert(receiver_klass->is_subtype_of(static_receiver_klass()), "actual receiver must be subclass of static receiver klass");
    if (receiver_klass->oop_is_instance()) {
      if (instanceKlass::cast(receiver_klass())->is_not_initialized()) {
        tty->print_cr("ERROR: Klass not yet initialized!!");
        receiver_klass.print();
      }
      assert (!instanceKlass::cast(receiver_klass())->is_not_initialized(), "receiver_klass must be initialized");
    }
  }
#endif

  return receiver;
}

methodHandle SharedRuntime::find_callee_method(JavaThread* thread, TRAPS) {
  ResourceMark rm(THREAD);
  // We first need to check if any Java activations (compiled, interpreted)
  // exist on the stack since the last JavaCall. If not, we need
  // to get the target method from the JavaCall wrapper.
  vframeStream vfst(thread, true);  // Do not skip any javaCalls
  methodHandle callee_method;
  if (vfst.at_end()) {
    // No Java frames were found on stack since we did the JavaCall.
    // Hence the stack can only contain an entry_frame. We need to
    // find the target method from the stub frame.
    RegisterMap reg_map(thread, false);
    frame fr = thread->last_frame();
    assert(fr.is_runtime_frame(), "must be a runtimeStub");
    fr = fr.sender(&reg_map);
    assert(fr.is_entry_frame(), "must be");
    // fr is now pointing to the entry frame.
    callee_method = methodHandle(THREAD, fr.entry_frame_call_wrapper()->callee_method());
    assert(fr.entry_frame_call_wrapper()->receiver() == NULL || !callee_method->is_static(), "non-null receiver for static call??");
  } else {
    Bytecodes::Code bc;
    CallInfo callinfo;
    find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(methodHandle()));
    callee_method = callinfo.selected_method();
  }
  assert(callee_method()->is_method(), "must be");
  return callee_method;
}

// Resolves a call.
methodHandle SharedRuntime::resolve_helper(JavaThread *thread,
                                           bool is_virtual,
                                           bool is_optimized, TRAPS) {
  methodHandle callee_method;
  callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
  if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
    int retry_count = 0;
    while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
           callee_method->method_holder() != SystemDictionary::object_klass()) {
      // If there is a pending exception then there is no need to re-try to
      // resolve this method.
      // If the method has been redefined, we need to try again.
      // Hack: we have no way to update the vtables of arrays, so don't
      // require that java.lang.Object has been updated.

      // It is very unlikely that a method is redefined more than 100 times
      // in the middle of a resolve. If we loop here more than 100 times,
      // there could be a bug.
      guarantee((retry_count++ < 100),
                "Could not resolve to latest version of redefined method");
      // method is redefined in the middle of resolve so re-try.
      callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
    }
  }
  return callee_method;
}

// Resolves a call. The compilers generate code for calls that go here
// and are patched with the real destination of the call.
methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
                                               bool is_virtual,
                                               bool is_optimized, TRAPS) {

  ResourceMark rm(thread);
  RegisterMap cbl_map(thread, false);
  frame caller_frame = thread->last_frame().sender(&cbl_map);

  CodeBlob* cb = caller_frame.cb();
  guarantee(cb != NULL && cb->is_nmethod(), "must be called from nmethod");
  // make sure caller is not getting deoptimized
  // and removed before we are done with it.
  // CLEANUP - with lazy deopt shouldn't need this lock
  nmethodLocker caller_lock((nmethod*)cb);


  // determine call info & receiver
  // note: a) receiver is NULL for static calls
  //       b) an exception is thrown if receiver is NULL for non-static calls
  CallInfo call_info;
  Bytecodes::Code invoke_code = Bytecodes::_illegal;
  Handle receiver = find_callee_info(thread, invoke_code,
                                     call_info, CHECK_(methodHandle()));
  methodHandle callee_method = call_info.selected_method();

  assert((!is_virtual && invoke_code == Bytecodes::_invokestatic) ||
         ( is_virtual && invoke_code != Bytecodes::_invokestatic), "inconsistent bytecode");

#ifndef PRODUCT
  // tracing/debugging/statistics
  int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
              (is_virtual) ? (&_resolve_virtual_ctr) :
              (&_resolve_static_ctr);
  Atomic::inc(addr);

  if (TraceCallFixup) {
    ResourceMark rm(thread);
    tty->print("resolving %s%s (%s) call to",
               (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
               Bytecodes::name(invoke_code));
    callee_method->print_short_name(tty);
    tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  }
#endif

  // Compute entry points. This might require generation of C2I converter
  // frames, so we cannot be holding any locks here. Furthermore, the
  // computation of the entry points is independent of patching the call. We
  // always return the entry-point, but we only patch the stub if the call has
  // not been deoptimized. Return values: For a virtual call this is a
  // (cached_oop, destination address) pair. For a static call/optimized
  // virtual this is just a destination address.

  StaticCallInfo static_call_info;
  CompiledICInfo virtual_call_info;


  // Make sure the callee nmethod does not get deoptimized and removed before
  // we are done patching the code.
  nmethod* nm = callee_method->code();
  nmethodLocker nl_callee(nm);
#ifdef ASSERT
  address dest_entry_point = nm == NULL ? 0 : nm->entry_point(); // used below
#endif

  if (is_virtual) {
    assert(receiver.not_null(), "sanity check");
    bool static_bound = call_info.resolved_method()->can_be_statically_bound();
    KlassHandle h_klass(THREAD, receiver->klass());
    CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
                                          is_optimized, static_bound, virtual_call_info,
                                          CHECK_(methodHandle()));
  } else {
    // static call
    CompiledStaticCall::compute_entry(callee_method, static_call_info);
  }

  // grab lock, check for deoptimization and potentially patch caller
  {
    MutexLocker ml_patch(CompiledIC_lock);

    // Now that we are ready to patch: if the methodOop was redefined then
    // don't update the call site; let the caller retry.

    if (!callee_method->is_old()) {
#ifdef ASSERT
      // We must not try to patch to jump to an already unloaded method.
      if (dest_entry_point != 0) {
        assert(CodeCache::find_blob(dest_entry_point) != NULL,
               "should not unload nmethod while locked");
      }
#endif
      if (is_virtual) {
        CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
        if (inline_cache->is_clean()) {
          inline_cache->set_to_monomorphic(virtual_call_info);
        }
      } else {
        CompiledStaticCall* ssc = compiledStaticCall_before(caller_frame.pc());
        if (ssc->is_clean()) ssc->set(static_call_info);
      }
    }

  } // unlock CompiledIC_lock

  return callee_method;
}


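// The JRT_BLOCK_ENTRY resolution entries below share one pattern: the
// resolved methodOop is passed back through thread-local storage
// (set_vm_result) so it remains visible to GC across any safepoint
// taken inside JRT_BLOCK, and the address returned is the verified
// code entry that the calling stub then jumps to.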
// Inline caches exist only in compiled code
JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread))
#ifdef ASSERT
  RegisterMap reg_map(thread, false);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame(), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
#endif /* ASSERT */

  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::handle_ic_miss_helper(thread, CHECK_NULL);
    // Return methodOop through TLS
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END


// Handle call site that has been made non-entrant
JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread))
  // 6243940 We might end up in here if the callee is deoptimized
  // as we race to call it. We don't want to take a safepoint if
  // the caller was interpreted because the caller frame will look
  // interpreted to the stack walkers and arguments are now
  // "compiled", so it is much better to make this transition
  // invisible to the stack walking code. The i2c path will
  // place the callee method in the callee_target. It is stashed
  // there because if we try to find the callee by normal means a
  // safepoint is possible and we would have trouble GC'ing the
  // compiled args.
  RegisterMap reg_map(thread, false);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame(), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  if (caller_frame.is_interpreted_frame() || caller_frame.is_entry_frame() ) {
    methodOop callee = thread->callee_target();
    guarantee(callee != NULL && callee->is_method(), "bad handshake");
    thread->set_vm_result(callee);
    thread->set_callee_target(NULL);
    return callee->get_c2i_entry();
  }

  // Must be compiled to compiled path which is safe to stackwalk
  methodHandle callee_method;
  JRT_BLOCK
    // Force resolving of caller (if we called from compiled frame)
    callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_NULL);
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END


// resolve a static call and patch code
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread ))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(thread, false, false, CHECK_NULL);
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END


// resolve virtual call and update inline cache to monomorphic
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread *thread ))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(thread, true, false, CHECK_NULL);
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END


// Resolve a virtual call that can be statically bound (e.g., always
// monomorphic, so it has no inline cache). Patch code to resolved target.
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL);
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END


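// An inline cache is in one of three states: clean (unresolved),
// monomorphic (one cached receiver klass) or megamorphic
// (vtable/itable dispatch). This helper runs on a miss and either
// repairs a stale monomorphic cache, promotes the site to megamorphic,
// or (for statically bindable targets) forces a reresolve instead.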
methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
  ResourceMark rm(thread);
  CallInfo call_info;
  Bytecodes::Code bc;

  // receiver is NULL for static calls. An exception is thrown for NULL
  // receivers for non-static calls
  Handle receiver = find_callee_info(thread, bc, call_info,
                                     CHECK_(methodHandle()));
  // Compiler1 can produce virtual call sites that can actually be statically
  // bound. If we fell through to below we would think that the site was
  // going megamorphic when in fact the site can never miss. Worse, because
  // we'd think it was megamorphic we'd try to do a vtable dispatch, but
  // methods that can be statically bound don't have vtable entries
  // (vtable_index < 0) and we'd blow up. So we force a reresolution of the
  // call site (as if we did a handle_wrong_method and not a plain ic_miss),
  // and the site will be converted to an optimized virtual call site that
  // never misses again. I don't believe C2 will produce code like this, but
  // if it did this would still be the correct thing to do for it too, hence
  // no ifdef.
  //
  if (call_info.resolved_method()->can_be_statically_bound()) {
    methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_(methodHandle()));
    if (TraceCallFixup) {
      RegisterMap reg_map(thread, false);
      frame caller_frame = thread->last_frame().sender(&reg_map);
      ResourceMark rm(thread);
      tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
      callee_method->print_short_name(tty);
      tty->print_cr(" from pc: " INTPTR_FORMAT, caller_frame.pc());
      tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
    }
    return callee_method;
  }

  methodHandle callee_method = call_info.selected_method();

  bool should_be_mono = false;

#ifndef PRODUCT
  Atomic::inc(&_ic_miss_ctr);

  // Statistics & Tracing
  if (TraceCallFixup) {
    ResourceMark rm(thread);
    tty->print("IC miss (%s) call to", Bytecodes::name(bc));
    callee_method->print_short_name(tty);
    tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  }

  if (ICMissHistogram) {
    MutexLocker m(VMStatistic_lock);
    RegisterMap reg_map(thread, false);
    frame f = thread->last_frame().real_sender(&reg_map);  // skip runtime stub
    // produce statistics under the lock
    trace_ic_miss(f.pc());
  }
#endif

  // Install an event collector so that when a vtable stub is created the
  // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
  // event can't be posted when the stub is created as locks are held
  // - instead the event will be deferred until the event collector goes
  // out of scope.
  JvmtiDynamicCodeEventCollector event_collector;

  // Update inline cache to megamorphic. Skip update if caller has been
  // made non-entrant or we are called from interpreted.
  { MutexLocker ml_patch (CompiledIC_lock);
    RegisterMap reg_map(thread, false);
    frame caller_frame = thread->last_frame().sender(&reg_map);
    CodeBlob* cb = caller_frame.cb();
    if (cb->is_nmethod() && ((nmethod*)cb)->is_in_use()) {
      // Not a non-entrant nmethod, so find inline_cache
      CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
      if (inline_cache->is_optimized()) {
        if (TraceCallFixup) {
          ResourceMark rm(thread);
          tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
          callee_method->print_short_name(tty);
          tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
        }
        should_be_mono = true;
      } else {
        compiledICHolderOop ic_oop = (compiledICHolderOop) inline_cache->cached_oop();
        if ( ic_oop != NULL && ic_oop->is_compiledICHolder()) {

          if (receiver()->klass() == ic_oop->holder_klass()) {
            // This isn't a real miss. We must have seen that compiled code
            // is now available and we want the call site converted to a
            // monomorphic compiled call site.
            // We can't assert for callee_method->code() != NULL because it
            // could have been deoptimized in the meantime
            if (TraceCallFixup) {
              ResourceMark rm(thread);
              tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
              callee_method->print_short_name(tty);
              tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
            }
            should_be_mono = true;
          }
        }
      }

      if (should_be_mono) {

        // We have a path that was monomorphic but was going interpreted
        // and now we have (or had) a compiled entry. We correct the IC
        // by using a new icBuffer.
        CompiledICInfo info;
        KlassHandle receiver_klass(THREAD, receiver()->klass());
        inline_cache->compute_monomorphic_entry(callee_method,
                                                receiver_klass,
                                                inline_cache->is_optimized(),
                                                false,
                                                info, CHECK_(methodHandle()));
        inline_cache->set_to_monomorphic(info);
      } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
        // Change to megamorphic
        inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
      } else {
        // Either clean or megamorphic
      }
    }
  } // Release CompiledIC_lock

  return callee_method;
}

//
// Resets a call-site in compiled code so it will get resolved again.
// This routine handles virtual call sites, optimized virtual call
// sites, and static call sites. Typically used to change a call site's
// destination from compiled to interpreted.
//
methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
  ResourceMark rm(thread);
  RegisterMap reg_map(thread, false);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
  frame caller = stub_frame.sender(&reg_map);

  // Do nothing if the frame isn't a live compiled frame.
  // nmethod could be deoptimized by the time we get here
  // so no update to the caller is needed.

  if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {

    address pc = caller.pc();
    Events::log("update call-site at pc " INTPTR_FORMAT, pc);

    // Default call_addr is the location of the "basic" call.
    // Determine the address of the call we are reresolving. With
    // Inline Caches we will always find a recognizable call.
    // With Inline Caches disabled we may or may not find a
    // recognizable call. We will always find a call for static
    // calls and for optimized virtual calls. For vanilla virtual
    // calls it depends on the state of the UseInlineCaches switch.
    //
    // With Inline Caches disabled we can get here for a virtual call
    // for two reasons:
    //   1 - calling an abstract method. The vtable for abstract methods
    //       will run us through handle_wrong_method and we will eventually
    //       end up in the interpreter to throw the AbstractMethodError.
    //   2 - a racing deoptimization. We could be doing a vanilla vtable
    //       call and between the time we fetch the entry address and
    //       we jump to it the target gets deoptimized. Similar to 1
    //       we will wind up in the interpreter (through a c2i with c2).
    //
    address call_addr = NULL;
    {
      // Get call instruction under lock because another thread may be
      // busy patching it.
      MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
      // Location of call instruction
      if (NativeCall::is_call_before(pc)) {
        NativeCall *ncall = nativeCall_before(pc);
        call_addr = ncall->instruction_address();
      }
    }

    // Check for static or virtual call
    bool is_static_call = false;
    nmethod* caller_nm = CodeCache::find_nmethod(pc);
    // Make sure nmethod doesn't get deoptimized and removed until
    // this is done with it.
    // CLEANUP - with lazy deopt shouldn't need this lock
    nmethodLocker nmlock(caller_nm);

    if (call_addr != NULL) {
      RelocIterator iter(caller_nm, call_addr, call_addr+1);
      int ret = iter.next(); // Get item
      if (ret) {
        assert(iter.addr() == call_addr, "must find call");
        if (iter.type() == relocInfo::static_call_type) {
          is_static_call = true;
        } else {
          assert(iter.type() == relocInfo::virtual_call_type ||
                 iter.type() == relocInfo::opt_virtual_call_type
                 , "unexpected relocInfo. type");
        }
      } else {
        assert(!UseInlineCaches, "relocation info. must exist for this address");
      }

      // Cleaning the inline cache will force a new resolve. This is more robust
      // than directly setting it to the new destination, since resolving of calls
      // is always done through the same code path. (experience shows that it
      // leads to very hard to track down bugs if an inline cache gets updated
      // to a wrong method). It should not be performance critical, since the
      // resolve is only done once.

      MutexLocker ml(CompiledIC_lock);
      //
      // We do not patch the call site if the nmethod has been made non-entrant
      // as it is a waste of time
      //
      if (caller_nm->is_in_use()) {
        if (is_static_call) {
          CompiledStaticCall* ssc = compiledStaticCall_at(call_addr);
          ssc->set_to_clean();
        } else {
          // compiled, dispatched call (which used to call an interpreted method)
          CompiledIC* inline_cache = CompiledIC_at(call_addr);
          inline_cache->set_to_clean();
        }
      }
    }

  }

  methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));


#ifndef PRODUCT
  Atomic::inc(&_wrong_method_ctr);

  if (TraceCallFixup) {
    ResourceMark rm(thread);
    tty->print("handle_wrong_method reresolving call to");
    callee_method->print_short_name(tty);
    tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  }
#endif

  return callee_method;
}

// ---------------------------------------------------------------------------
// We are calling the interpreter via a c2i. Normally this would mean that
// we were called by a compiled method. However we could have lost a race
// where we went int -> i2c -> c2i and so the caller could in fact be
// interpreted. If the caller is compiled we attempt to patch the caller
// so it no longer calls into the interpreter.
IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(methodOopDesc* method, address caller_pc))
  methodOop moop(method);

  address entry_point = moop->from_compiled_entry();

  // It's possible that deoptimization can occur at a call site which hasn't
  // been resolved yet, in which case this function will be called from
  // an nmethod that has been patched for deopt and we can ignore the
  // request for a fixup.
  // Also it is possible that we lost a race in that from_compiled_entry
  // is now back to the i2c; in that case we don't need to patch, and if
  // we did we'd leap into space because the callsite needs to use the
  // "to interpreter" stub in order to load up the methodOop. Don't
  // ask me how I know this...
  //

  CodeBlob* cb = CodeCache::find_blob(caller_pc);
  if ( !cb->is_nmethod() || entry_point == moop->get_c2i_entry()) {
    return;
  }

  // There is a benign race here. We could be attempting to patch to a compiled
  // entry point at the same time the callee is being deoptimized. If that is
  // the case then entry_point may in fact point to a c2i and we'd patch the
  // call site with the same old data. clear_code will set code() to NULL
  // at the end of it. If we happen to see that NULL then we can skip trying
  // to patch. If we hit the window where the callee has a c2i in the
  // from_compiled_entry and the NULL isn't present yet then we lose the race
  // and patch the code with the same old data. Such is life.

  if (moop->code() == NULL) return;

  if (((nmethod*)cb)->is_in_use()) {

    // Expect to find a native call there (unless it was a no-inline-cache vtable dispatch)
    MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
    if (NativeCall::is_call_before(caller_pc + frame::pc_return_offset)) {
      NativeCall *call = nativeCall_before(caller_pc + frame::pc_return_offset);
      //
      // bug 6281185. We might get here after resolving a call site to a vanilla
      // virtual call. Because the resolvee uses the verified entry it may then
      // see compiled code and attempt to patch the site by calling us. This would
      // then incorrectly convert the call site to optimized and it's downhill from
      // there. If you're lucky you'll get the assert in the bugid, if not you've
      // just made a call site that could be megamorphic into a monomorphic site
      // for the rest of its life! Just another racing bug in the life of
      // fixup_callers_callsite ...
      //
      RelocIterator iter(cb, call->instruction_address(), call->next_instruction_address());
      iter.next();
      assert(iter.has_current(), "must have a reloc at java call site");
      relocInfo::relocType typ = iter.reloc()->type();
      if ( typ != relocInfo::static_call_type &&
           typ != relocInfo::opt_virtual_call_type &&
           typ != relocInfo::static_stub_type) {
        return;
      }
      address destination = call->destination();
      if (destination != entry_point) {
        CodeBlob* callee = CodeCache::find_blob(destination);
        // callee == cb seems weird. It means calling the interpreter through a stub.
        if (callee == cb || callee->is_adapter_blob()) {
          // static call or optimized virtual
          if (TraceCallFixup) {
            tty->print("fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
            moop->print_short_name(tty);
            tty->print_cr(" to " INTPTR_FORMAT, entry_point);
          }
          call->set_destination_mt_safe(entry_point);
        } else {
          if (TraceCallFixup) {
            tty->print("failed to fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
            moop->print_short_name(tty);
            tty->print_cr(" to " INTPTR_FORMAT, entry_point);
          }
          // The assert below is too strong; the destination could also be one of the resolve stubs.
          // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be");
        }
      } else {
        if (TraceCallFixup) {
          tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
          moop->print_short_name(tty);
          tty->print_cr(" to " INTPTR_FORMAT, entry_point);
        }
      }
    }
  }

IRT_END
1440
1441
1442 // Same as JVM_ArrayCopy, but called directly from compiled code
1443 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
1444 oopDesc* dest, jint dest_pos,
1445 jint length,
1446 JavaThread* thread)) {
1447 #ifndef PRODUCT
1448 _slow_array_copy_ctr++;
1449 #endif
1450 // Check if we have null pointers
1451 if (src == NULL || dest == NULL) {
1452 THROW(vmSymbols::java_lang_NullPointerException());
1453 }
1454 // Do the copy. The casts to arrayOop are required by the copy_array API,
1455 // even though the copy_array API also performs dynamic checks to ensure
1456 // that src and dest are truly arrays (and are conformable).
1457 // The copy_array mechanism is awkward and could be removed, but
1458 // the compilers don't call this function except as a last resort,
1459 // so it probably doesn't matter.
1460 Klass::cast(src->klass())->copy_array((arrayOopDesc*)src, src_pos,
1461 (arrayOopDesc*)dest, dest_pos,
1462 length, thread);
1463 }
1464 JRT_END
1465
1466 char* SharedRuntime::generate_class_cast_message(
1467 JavaThread* thread, const char* objName) {
1468
1469 // Get target class name from the checkcast instruction
1470 vframeStream vfst(thread, true);
1471 assert(!vfst.at_end(), "Java frame must exist");
1472 Bytecode_checkcast* cc = Bytecode_checkcast_at(
1473 vfst.method()->bcp_from(vfst.bci()));
1474 Klass* targetKlass = Klass::cast(vfst.method()->constants()->klass_at(
1475 cc->index(), thread));
1476 return generate_class_cast_message(objName, targetKlass->external_name());
1477 }
1478
1479 char* SharedRuntime::generate_class_cast_message(
1480 const char* objName, const char* targetKlassName) {
1481 const char* desc = " cannot be cast to ";
1482 size_t msglen = strlen(objName) + strlen(desc) + strlen(targetKlassName) + 1;
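// Worked example: casting a java.lang.String to a java.lang.Thread produces
// "java.lang.String cannot be cast to java.lang.Thread", so
// msglen = 16 + 19 + 16 + 1 = 52, counting the terminating NUL.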
1483
1484 char* message = NEW_C_HEAP_ARRAY(char, msglen);
1485 if (NULL == message) {
1486 // out of memory - can't use a detailed message. Since caller is
1487 // using a resource mark to free memory, returning this should be
1488 // safe (caller won't explicitly delete it).
1489 message = const_cast<char*>(objName);
1490 } else {
1491 jio_snprintf(message, msglen, "%s%s%s", objName, desc, targetKlassName);
1492 }
1493 return message;
1494 }
1495
1496 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
1497 (void) JavaThread::current()->reguard_stack();
1498 JRT_END
1499
1500
1501 // Handles the uncommon case in locking, i.e., contention or an inflated lock.
1502 #ifndef PRODUCT
1503 int SharedRuntime::_monitor_enter_ctr=0;
1504 #endif
1505 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::complete_monitor_locking_C(oopDesc* _obj, BasicLock* lock, JavaThread* thread))
1506 oop obj(_obj);
1507 #ifndef PRODUCT
1508 _monitor_enter_ctr++; // monitor enter slow
1509 #endif
1510 if (PrintBiasedLockingStatistics) {
1511 Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
1512 }
1513 Handle h_obj(THREAD, obj);
1514 if (UseBiasedLocking) {
1515 // Retry fast entry if bias is revoked to avoid unnecessary inflation
1516 ObjectSynchronizer::fast_enter(h_obj, lock, true, CHECK);
1517 } else {
1518 ObjectSynchronizer::slow_enter(h_obj, lock, CHECK);
1519 }
1520 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
1521 JRT_END
1522
1523 #ifndef PRODUCT
1524 int SharedRuntime::_monitor_exit_ctr=0;
1525 #endif
1526 // Handles the uncommon cases of monitor unlocking in compiled code
1527 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* _obj, BasicLock* lock))
1528 oop obj(_obj);
1529 #ifndef PRODUCT
1530 _monitor_exit_ctr++; // monitor exit slow
1531 #endif
1532 Thread* THREAD = JavaThread::current();
1533 // I'm not convinced we need the code guarded by MIGHT_HAVE_PENDING anymore;
1534 // testing was never able to fire the assert that guarded it, so it has been removed.
1535 assert(!HAS_PENDING_EXCEPTION, "Do we need code below anymore?");
1536 #undef MIGHT_HAVE_PENDING
1537 #ifdef MIGHT_HAVE_PENDING
1538 // Save and restore any pending_exception around the exception mark.
1539 // While the slow_exit must not throw an exception, we could come into
1540 // this routine with one set.
1541 oop pending_excep = NULL;
1542 const char* pending_file;
1543 int pending_line;
1544 if (HAS_PENDING_EXCEPTION) {
1545 pending_excep = PENDING_EXCEPTION;
1546 pending_file = THREAD->exception_file();
1547 pending_line = THREAD->exception_line();
1548 CLEAR_PENDING_EXCEPTION;
1549 }
1550 #endif /* MIGHT_HAVE_PENDING */
1551
1552 {
1553 // Exit must be non-blocking, and therefore no exceptions can be thrown.
1554 EXCEPTION_MARK;
1555 ObjectSynchronizer::slow_exit(obj, lock, THREAD);
1556 }
1557
1558 #ifdef MIGHT_HAVE_PENDING
1559 if (pending_excep != NULL) {
1560 THREAD->set_pending_exception(pending_excep, pending_file, pending_line);
1561 }
1562 #endif /* MIGHT_HAVE_PENDING */
1563 JRT_END
1564
1565 #ifndef PRODUCT
1566
1567 void SharedRuntime::print_statistics() {
1568 ttyLocker ttyl;
1569 if (xtty != NULL) xtty->head("statistics type='SharedRuntime'");
1570
1571 if (_monitor_enter_ctr ) tty->print_cr("%5d monitor enter slow", _monitor_enter_ctr);
1572 if (_monitor_exit_ctr ) tty->print_cr("%5d monitor exit slow", _monitor_exit_ctr);
1573 if (_throw_null_ctr) tty->print_cr("%5d implicit null throw", _throw_null_ctr);
1574
1575 SharedRuntime::print_ic_miss_histogram();
1576
1577 if (CountRemovableExceptions) {
1578 if (_nof_removable_exceptions > 0) {
1579 Unimplemented(); // this counter is not yet incremented
1580 tty->print_cr("Removable exceptions: %d", _nof_removable_exceptions);
1581 }
1582 }
1583
1584 // Dump the JRT_ENTRY counters
1585 if( _new_instance_ctr ) tty->print_cr("%5d new instance requires GC", _new_instance_ctr);
1586 if( _new_array_ctr ) tty->print_cr("%5d new array requires GC", _new_array_ctr);
1587 if( _multi1_ctr ) tty->print_cr("%5d multianewarray 1 dim", _multi1_ctr);
1588 if( _multi2_ctr ) tty->print_cr("%5d multianewarray 2 dim", _multi2_ctr);
1589 if( _multi3_ctr ) tty->print_cr("%5d multianewarray 3 dim", _multi3_ctr);
1590 if( _multi4_ctr ) tty->print_cr("%5d multianewarray 4 dim", _multi4_ctr);
1591 if( _multi5_ctr ) tty->print_cr("%5d multianewarray 5 dim", _multi5_ctr);
1592
1593 tty->print_cr("%5d inline cache miss in compiled", _ic_miss_ctr );
1594 tty->print_cr("%5d wrong method", _wrong_method_ctr );
1595 tty->print_cr("%5d unresolved static call site", _resolve_static_ctr );
1596 tty->print_cr("%5d unresolved virtual call site", _resolve_virtual_ctr );
1597 tty->print_cr("%5d unresolved opt virtual call site", _resolve_opt_virtual_ctr );
1598
1599 if( _mon_enter_stub_ctr ) tty->print_cr("%5d monitor enter stub", _mon_enter_stub_ctr );
1600 if( _mon_exit_stub_ctr ) tty->print_cr("%5d monitor exit stub", _mon_exit_stub_ctr );
1601 if( _mon_enter_ctr ) tty->print_cr("%5d monitor enter slow", _mon_enter_ctr );
1602 if( _mon_exit_ctr ) tty->print_cr("%5d monitor exit slow", _mon_exit_ctr );
1603 if( _partial_subtype_ctr) tty->print_cr("%5d slow partial subtype", _partial_subtype_ctr );
1604 if( _jbyte_array_copy_ctr ) tty->print_cr("%5d byte array copies", _jbyte_array_copy_ctr );
1605 if( _jshort_array_copy_ctr ) tty->print_cr("%5d short array copies", _jshort_array_copy_ctr );
1606 if( _jint_array_copy_ctr ) tty->print_cr("%5d int array copies", _jint_array_copy_ctr );
1607 if( _jlong_array_copy_ctr ) tty->print_cr("%5d long array copies", _jlong_array_copy_ctr );
1608 if( _oop_array_copy_ctr ) tty->print_cr("%5d oop array copies", _oop_array_copy_ctr );
1609 if( _checkcast_array_copy_ctr ) tty->print_cr("%5d checkcast array copies", _checkcast_array_copy_ctr );
1610 if( _unsafe_array_copy_ctr ) tty->print_cr("%5d unsafe array copies", _unsafe_array_copy_ctr );
1611 if( _generic_array_copy_ctr ) tty->print_cr("%5d generic array copies", _generic_array_copy_ctr );
1612 if( _slow_array_copy_ctr ) tty->print_cr("%5d slow array copies", _slow_array_copy_ctr );
1613 if( _find_handler_ctr ) tty->print_cr("%5d find exception handler", _find_handler_ctr );
1614 if( _rethrow_ctr ) tty->print_cr("%5d rethrow handler", _rethrow_ctr );
1615
1616 if (xtty != NULL) xtty->tail("statistics");
1617 }
1618
1619 inline double percent(int x, int y) {
1620 return 100.0 * x / MAX2(y, 1);
1621 }
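// Note: MAX2(y, 1) only guards the division; with y == 0 and x > 0 the result
// is finite but meaningless (percent(3, 0) == 300.0). In the common case it
// behaves as expected: percent(250, 1000) == 25.0.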
1622
1623 class MethodArityHistogram {
1624 public:
1625 enum { MAX_ARITY = 256 };
1626 private:
1627 static int _arity_histogram[MAX_ARITY]; // histogram of #args
1628 static int _size_histogram[MAX_ARITY]; // histogram of arg size in words
1629 static int _max_arity; // max. arity seen
1630 static int _max_size; // max. arg size seen
1631
1632 static void add_method_to_histogram(nmethod* nm) {
1633 methodOop m = nm->method();
1634 ArgumentCount args(m->signature());
1635 int arity = args.size() + (m->is_static() ? 0 : 1);
1636 int argsize = m->size_of_parameters();
1637 arity = MIN2(arity, MAX_ARITY-1);
1638 argsize = MIN2(argsize, MAX_ARITY-1);
1639 int count = nm->method()->compiled_invocation_count();
1640 _arity_histogram[arity] += count;
1641 _size_histogram[argsize] += count;
1642 _max_arity = MAX2(_max_arity, arity);
1643 _max_size = MAX2(_max_size, argsize);
1644 }
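// Worked example: a virtual method with signature (IJ)V contributes its
// compiled_invocation_count() to _arity_histogram[3] (receiver + int + long)
// and to _size_histogram[4] (receiver 1 word, int 1, long 2).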
1645
1646 void print_histogram_helper(int n, int* histo, const char* name) {
1647 const int N = MIN2(5, n);
1648 tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
1649 double sum = 0;
1650 double weighted_sum = 0;
1651 int i;
1652 for (i = 0; i <= n; i++) { sum += histo[i]; weighted_sum += i*histo[i]; }
1653 double rest = sum;
1654 double percent = sum / 100;
1655 for (i = 0; i <= N; i++) {
1656 rest -= histo[i];
1657 tty->print_cr("%4d: %7d (%5.1f%%)", i, histo[i], histo[i] / percent);
1658 }
1659 tty->print_cr("rest: %7d (%5.1f%%))", (int)rest, rest / percent);
1660 tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);
1661 }
1662
1663 void print_histogram() {
1664 tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
1665 print_histogram_helper(_max_arity, _arity_histogram, "arity");
1666 tty->print_cr("\nSame for parameter size (in words):");
1667 print_histogram_helper(_max_size, _size_histogram, "size");
1668 tty->cr();
1669 }
1670
1671 public:
1672 MethodArityHistogram() {
1673 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1674 _max_arity = _max_size = 0;
1675 for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram[i] = 0;
1676 CodeCache::nmethods_do(add_method_to_histogram);
1677 print_histogram();
1678 }
1679 };
1680
1681 int MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
1682 int MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
1683 int MethodArityHistogram::_max_arity;
1684 int MethodArityHistogram::_max_size;
1685
1686 void SharedRuntime::print_call_statistics(int comp_total) {
1687 tty->print_cr("Calls from compiled code:");
1688 int total = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
1689 int mono_c = _nof_normal_calls - _nof_optimized_calls - _nof_megamorphic_calls;
1690 int mono_i = _nof_interface_calls - _nof_optimized_interface_calls - _nof_megamorphic_interface_calls;
1691 tty->print_cr("\t%9d (%4.1f%%) total non-inlined ", total, percent(total, total));
1692 tty->print_cr("\t%9d (%4.1f%%) virtual calls ", _nof_normal_calls, percent(_nof_normal_calls, total));
1693 tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
1694 tty->print_cr("\t %9d (%3.0f%%) optimized ", _nof_optimized_calls, percent(_nof_optimized_calls, _nof_normal_calls));
1695 tty->print_cr("\t %9d (%3.0f%%) monomorphic ", mono_c, percent(mono_c, _nof_normal_calls));
1696 tty->print_cr("\t %9d (%3.0f%%) megamorphic ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls));
1697 tty->print_cr("\t%9d (%4.1f%%) interface calls ", _nof_interface_calls, percent(_nof_interface_calls, total));
1698 tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls));
1699 tty->print_cr("\t %9d (%3.0f%%) optimized ", _nof_optimized_interface_calls, percent(_nof_optimized_interface_calls, _nof_interface_calls));
1700 tty->print_cr("\t %9d (%3.0f%%) monomorphic ", mono_i, percent(mono_i, _nof_interface_calls));
1701 tty->print_cr("\t %9d (%3.0f%%) megamorphic ", _nof_megamorphic_interface_calls, percent(_nof_megamorphic_interface_calls, _nof_interface_calls));
1702 tty->print_cr("\t%9d (%4.1f%%) static/special calls", _nof_static_calls, percent(_nof_static_calls, total));
1703 tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls));
1704 tty->cr();
1705 tty->print_cr("Note 1: counter updates are not MT-safe.");
1706 tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
1707 tty->print_cr(" %% in nested categories are relative to their category");
1708 tty->print_cr(" (and thus add up to more than 100%% with inlining)");
1709 tty->cr();
1710
1711 MethodArityHistogram h;
1712 }
1713 #endif
1714
1715
1716 // ---------------------------------------------------------------------------
1717 // Implementation of AdapterHandlerLibrary
1718 const char* AdapterHandlerEntry::name = "I2C/C2I adapters";
1719 GrowableArray<uint64_t>* AdapterHandlerLibrary::_fingerprints = NULL;
1720 GrowableArray<AdapterHandlerEntry* >* AdapterHandlerLibrary::_handlers = NULL;
1721 const int AdapterHandlerLibrary_size = 16*K;
1722 u_char AdapterHandlerLibrary::_buffer[AdapterHandlerLibrary_size + 32];
1723
1724 void AdapterHandlerLibrary::initialize() {
1725 if (_fingerprints != NULL) return;
1726 _fingerprints = new(ResourceObj::C_HEAP)GrowableArray<uint64_t>(32, true);
1727 _handlers = new(ResourceObj::C_HEAP)GrowableArray<AdapterHandlerEntry*>(32, true);
1728 // Index 0 reserved for the slow path handler
1729 _fingerprints->append(0/*the never-allowed 0 fingerprint*/);
1730 _handlers->append(NULL);
1731
1732 // Create a special handler for abstract methods. Abstract methods
1733 // are never compiled so an i2c entry is somewhat meaningless, but
1734 // fill it in with something appropriate just in case. Pass the
1735 // handle-wrong-method stub for the c2i transitions.
1736 address wrong_method = SharedRuntime::get_handle_wrong_method_stub();
1737 _fingerprints->append(0/*the never-allowed 0 fingerprint*/);
1738 assert(_handlers->length() == AbstractMethodHandler, "in wrong slot");
1739 _handlers->append(new AdapterHandlerEntry(StubRoutines::throw_AbstractMethodError_entry(),
1740 wrong_method, wrong_method));
1741 }
1742
1743 int AdapterHandlerLibrary::get_create_adapter_index(methodHandle method) {
1744 // Use customized signature handler. Need to lock around updates to the
1745 // _fingerprints array (it is not safe for concurrent readers and a single
1746 // writer: this can be fixed if it becomes a problem).
1747
1748 // Shouldn't be here if running -Xint
1749 if (Arguments::mode() == Arguments::_int) {
1750 ShouldNotReachHere();
1751 }
1752
1753 // Get the address of the ic_miss handlers before we grab the
1754 // AdapterHandlerLibrary_lock. This fixes bug 6236259 which
1755 // was caused by the initialization of the stubs happening
1756 // while we held the lock and then notifying jvmti while
1757 // holding it. This just forces the initialization to be a little
1758 // earlier.
1759 address ic_miss = SharedRuntime::get_ic_miss_stub();
1760 assert(ic_miss != NULL, "must have handler");
1761
1762 int result;
1763 BufferBlob *B = NULL;
1764 uint64_t fingerprint;
1765 {
1766 MutexLocker mu(AdapterHandlerLibrary_lock);
1767 // make sure data structure is initialized
1768 initialize();
1769
1770 if (method->is_abstract()) {
1771 return AbstractMethodHandler;
1772 }
1773
1774 // Lookup method signature's fingerprint
1775 fingerprint = Fingerprinter(method).fingerprint();
1776 assert( fingerprint != CONST64( 0), "no zero fingerprints allowed" );
1777 // Fingerprints are small fixed-size condensed representations of
1778 // signatures. If the signature is too large, it won't fit in a
1779 // fingerprint. Signatures which cannot support a fingerprint get a new i2c
1780 // adapter generated each time, instead of searching the cache for one. This -1
1781 // game could be avoided by comparing signatures instead of using
1782 // fingerprints. However, -1 fingerprints are very rare.
1783 if( fingerprint != UCONST64(-1) ) { // If this is a cache-able fingerprint
1784 // Turns out i2c adapters do not care what the return value is. Mask it
1785 // out so signatures that only differ in return type will share the same
1786 // adapter.
1787 fingerprint &= ~(SignatureIterator::result_feature_mask << SignatureIterator::static_feature_size);
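// For illustration: after masking, (I)V and (I)J collapse to the same
// fingerprint, since the two signatures differ only in the result bits,
// so methods differing only in return type share one adapter.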
1788 // Search for a prior existing i2c/c2i adapter
1789 int index = _fingerprints->find(fingerprint);
1790 if( index >= 0 ) return index; // Found existing handlers?
1791 } else {
1792 // Annoyingly, I end up adding -1 fingerprints to the array of handlers,
1793 // because I need a unique handler index. It cannot be scanned for
1794 // because all -1's look alike. Instead, the matching index is passed out
1795 // and immediately used to collect the 2 return values (the c2i and i2c
1796 // adapters).
1797 }
1798
1799 // Create I2C & C2I handlers
1800 ResourceMark rm;
1801 // Improve alignment slightly
1802 u_char *buf = (u_char*)(((intptr_t)_buffer + CodeEntryAlignment-1) & ~(CodeEntryAlignment-1));
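// Round-up-to-alignment idiom: e.g. with CodeEntryAlignment == 32, a _buffer
// at address 0x1004 yields buf == 0x1020, since (0x1004 + 31) & ~31 == 0x1020.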
1803 CodeBuffer buffer(buf, AdapterHandlerLibrary_size);
1804 short buffer_locs[20];
1805 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
1806 sizeof(buffer_locs)/sizeof(relocInfo));
1807 MacroAssembler _masm(&buffer);
1808
1809 // Fill in the signature array, for the calling-convention call.
1810 int total_args_passed = method->size_of_parameters(); // All args on stack
1811
1812 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType,total_args_passed);
1813 VMRegPair * regs = NEW_RESOURCE_ARRAY(VMRegPair ,total_args_passed);
1814 int i=0;
1815 if( !method->is_static() ) // Pass in receiver first
1816 sig_bt[i++] = T_OBJECT;
1817 for( SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
1818 sig_bt[i++] = ss.type(); // Collect remaining bits of signature
1819 if( ss.type() == T_LONG || ss.type() == T_DOUBLE )
1820 sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
1821 }
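// Worked example: for a non-static method with signature (IJ)V this fills
// sig_bt = { T_OBJECT /*receiver*/, T_INT, T_LONG, T_VOID /*long's high half*/ },
// mirroring the interpreter's two-slot layout for longs and doubles.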
1822 assert( i==total_args_passed, "" );
1823
1824 // Now get the re-packed compiled-Java layout.
1825 int comp_args_on_stack;
1826
1827 // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
1828 comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
1829
1830 AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
1831 total_args_passed,
1832 comp_args_on_stack,
1833 sig_bt,
1834 regs);
1835
1836 B = BufferBlob::create(AdapterHandlerEntry::name, &buffer);
1837 if (B == NULL) return -2; // Out of CodeCache space
1838 entry->relocate(B->instructions_begin());
1839 #ifndef PRODUCT
1840 // debugging support
1841 if (PrintAdapterHandlers) {
1842 tty->cr();
1843 tty->print_cr("i2c argument handler #%d for: %s %s (fingerprint = 0x%llx, %d bytes generated)",
1844 _handlers->length(), (method->is_static() ? "static" : "receiver"),
1845 method->signature()->as_C_string(), fingerprint, buffer.code_size() );
1846 tty->print_cr("c2i argument handler starts at %p",entry->get_c2i_entry());
1847 Disassembler::decode(entry->get_i2c_entry(), entry->get_i2c_entry() + buffer.code_size());
1848 }
1849 #endif
1850
1851 // add handlers to library
1852 _fingerprints->append(fingerprint);
1853 _handlers->append(entry);
1854 // set handler index
1855 assert(_fingerprints->length() == _handlers->length(), "sanity check");
1856 result = _fingerprints->length() - 1;
1857 }
1858 // Outside of the lock
1859 if (B != NULL) {
1860 char blob_id[256];
1861 jio_snprintf(blob_id,
1862 sizeof(blob_id),
1863 "%s(" PTR64_FORMAT ")@" PTR_FORMAT,
1864 AdapterHandlerEntry::name,
1865 fingerprint,
1866 B->instructions_begin());
1867 VTune::register_stub(blob_id, B->instructions_begin(), B->instructions_end());
1868 Forte::register_stub(blob_id, B->instructions_begin(), B->instructions_end());
1869
1870 if (JvmtiExport::should_post_dynamic_code_generated()) {
1871 JvmtiExport::post_dynamic_code_generated(blob_id,
1872 B->instructions_begin(),
1873 B->instructions_end());
1874 }
1875 }
1876 return result;
1877 }
1878
1879 void AdapterHandlerEntry::relocate(address new_base) {
1880 ptrdiff_t delta = new_base - _i2c_entry;
1881 _i2c_entry += delta;
1882 _c2i_entry += delta;
1883 _c2i_unverified_entry += delta;
1884 }
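// Note: delta is computed from _i2c_entry, so after relocation _i2c_entry
// equals new_base exactly and the two c2i entries keep their original offsets
// from it; e.g. a _c2i_entry that was _i2c_entry + 0x40 in the temporary
// buffer becomes new_base + 0x40 in the BufferBlob.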
1885
1886 // Create a native wrapper for this native method. The wrapper converts the
1887 // Java compiled calling convention to the native convention, handlizes
1888 // arguments, and transitions to native. On return from the native code we
1889 // transition back to Java, blocking if a safepoint is in progress.
1890 nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method) {
1891 ResourceMark rm;
1892 nmethod* nm = NULL;
1893
1894 if (PrintCompilation) {
1895 ttyLocker ttyl;
1896 tty->print("--- n%s ", (method->is_synchronized() ? "s" : " "));
1897 method->print_short_name(tty);
1898 if (method->is_static()) {
1899 tty->print(" (static)");
1900 }
1901 tty->cr();
1902 }
1903
1904 assert(method->has_native_function(), "must have something valid to call!");
1905
1906 {
1907 // perform the work while holding the lock, but perform any printing outside the lock
1908 MutexLocker mu(AdapterHandlerLibrary_lock);
1909 // See if somebody beat us to it
1910 nm = method->code();
1911 if (nm) {
1912 return nm;
1913 }
1914
1915 // Improve alignment slightly
1916 u_char* buf = (u_char*)(((intptr_t)_buffer + CodeEntryAlignment-1) & ~(CodeEntryAlignment-1));
1917 CodeBuffer buffer(buf, AdapterHandlerLibrary_size);
1918 // Need a few relocation entries
1919 double locs_buf[20];
1920 buffer.insts()->initialize_shared_locs((relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
1921 MacroAssembler _masm(&buffer);
1922
1923 // Fill in the signature array, for the calling-convention call.
1924 int total_args_passed = method->size_of_parameters();
1925
1926 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType,total_args_passed);
1927 VMRegPair * regs = NEW_RESOURCE_ARRAY(VMRegPair ,total_args_passed);
1928 int i=0;
1929 if( !method->is_static() ) // Pass in receiver first
1930 sig_bt[i++] = T_OBJECT;
1931 SignatureStream ss(method->signature());
1932 for( ; !ss.at_return_type(); ss.next()) {
1933 sig_bt[i++] = ss.type(); // Collect remaining bits of signature
1934 if( ss.type() == T_LONG || ss.type() == T_DOUBLE )
1935 sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
1936 }
1937 assert( i==total_args_passed, "" );
1938 BasicType ret_type = ss.type();
1939
1940 // Now get the compiled-Java layout as input arguments
1941 int comp_args_on_stack;
1942 comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
1943
1944 // Generate the compiled-to-native wrapper code
1945 nm = SharedRuntime::generate_native_wrapper(&_masm,
1946 method,
1947 total_args_passed,
1948 comp_args_on_stack,
1949 sig_bt,regs,
1950 ret_type);
1951 }
1952
1953 // Must unlock before calling set_code
1954 // Install the generated code.
1955 if (nm != NULL) {
1956 method->set_code(method, nm);
1957 nm->post_compiled_method_load_event();
1958 } else {
1959 // CodeCache is full, disable compilation
1960 // Ought to log this, but the compile log is only per compile thread
1961 // and we're some nondescript Java thread.
1962 UseInterpreter = true;
1963 if (UseCompiler || AlwaysCompileLoopMethods ) {
1964 #ifndef PRODUCT
1965 warning("CodeCache is full. Compiler has been disabled");
1966 if (CompileTheWorld || ExitOnFullCodeCache) {
1967 before_exit(JavaThread::current());
1968 exit_globals(); // will delete tty
1969 vm_direct_exit(CompileTheWorld ? 0 : 1);
1970 }
1971 #endif
1972 UseCompiler = false;
1973 AlwaysCompileLoopMethods = false;
1974 }
1975 }
1976 return nm;
1977 }
1978
1979 // -------------------------------------------------------------------------
1980 // Java-Java calling convention
1981 // (what you use when Java calls Java)
1982
1983 //------------------------------name_for_receiver----------------------------------
1984 // For a given signature, return the VMReg for parameter 0.
1985 VMReg SharedRuntime::name_for_receiver() {
1986 VMRegPair regs;
1987 BasicType sig_bt = T_OBJECT;
1988 (void) java_calling_convention(&sig_bt, &regs, 1, true);
1989 // Return argument 0 register. In the LP64 build pointers
1990 // take 2 registers, but the VM wants only the 'main' name.
1991 return regs.first();
1992 }
1993
1994 VMRegPair *SharedRuntime::find_callee_arguments(symbolOop sig, bool is_static, int* arg_size) {
1995 // This method returns a data structure allocated as a
1996 // ResourceObject, so do not put any ResourceMarks in here.
1997 char *s = sig->as_C_string();
1998 int len = (int)strlen(s);
1999 s++; len--; // Skip opening paren
2000 char *t = s+len;
2001 while( *(--t) != ')' ) ; // Find close paren
2002
2003 BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, 256 );
2004 VMRegPair *regs = NEW_RESOURCE_ARRAY( VMRegPair, 256 );
2005 int cnt = 0;
2006 if (!is_static) {
2007 sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
2008 }
2009
2010 while( s < t ) {
2011 switch( *s++ ) { // Switch on signature character
2012 case 'B': sig_bt[cnt++] = T_BYTE; break;
2013 case 'C': sig_bt[cnt++] = T_CHAR; break;
2014 case 'D': sig_bt[cnt++] = T_DOUBLE; sig_bt[cnt++] = T_VOID; break;
2015 case 'F': sig_bt[cnt++] = T_FLOAT; break;
2016 case 'I': sig_bt[cnt++] = T_INT; break;
2017 case 'J': sig_bt[cnt++] = T_LONG; sig_bt[cnt++] = T_VOID; break;
2018 case 'S': sig_bt[cnt++] = T_SHORT; break;
2019 case 'Z': sig_bt[cnt++] = T_BOOLEAN; break;
2020 case 'V': sig_bt[cnt++] = T_VOID; break;
2021 case 'L': // Oop
2022 while( *s++ != ';' ) ; // Skip signature
2023 sig_bt[cnt++] = T_OBJECT;
2024 break;
2025 case '[': { // Array
2026 do { // Skip optional size
2027 while( *s >= '0' && *s <= '9' ) s++;
2028 } while( *s++ == '[' ); // Nested arrays?
2029 // Skip element type
2030 if( s[-1] == 'L' )
2031 while( *s++ != ';' ) ; // Skip signature
2032 sig_bt[cnt++] = T_ARRAY;
2033 break;
2034 }
2035 default : ShouldNotReachHere();
2036 }
2037 }
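// Worked example: for a non-static method with signature "(I[Ljava/lang/String;J)V"
// the loop above yields sig_bt = { T_OBJECT /*receiver*/, T_INT, T_ARRAY, T_LONG,
// T_VOID /*long's high half*/ } and cnt == 5; the return type is never scanned.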
2038 assert( cnt < 256, "grow table size" );
2039
2040 int comp_args_on_stack;
2041 comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt, true);
2042
2043 // the calling convention doesn't count out_preserve_stack_slots so
2044 // we must add that in to get "true" stack offsets.
2045
2046 if (comp_args_on_stack) {
2047 for (int i = 0; i < cnt; i++) {
2048 VMReg reg1 = regs[i].first();
2049 if( reg1->is_stack()) {
2050 // Yuck
2051 reg1 = reg1->bias(out_preserve_stack_slots());
2052 }
2053 VMReg reg2 = regs[i].second();
2054 if( reg2->is_stack()) {
2055 // Yuck
2056 reg2 = reg2->bias(out_preserve_stack_slots());
2057 }
2058 regs[i].set_pair(reg2, reg1);
2059 }
2060 }
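// For illustration: with out_preserve_stack_slots() == 2, an argument placed
// by the calling convention in stack slot 0 is reported to the caller as
// slot 2, i.e. its true offset once the preserved area is included.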
2061
2062 // results
2063 *arg_size = cnt;
2064 return regs;
2065 }
2066
2067 // OSR Migration Code
2068 //
2069 // This code is used to convert interpreter frames into compiled frames. It is
2070 // called from very start of a compiled OSR nmethod. A temp array is
2071 // allocated to hold the interesting bits of the interpreter frame. All
2072 // active locks are inflated to allow them to move. The displaced headers and
2073 // active interpreter locals are copied into the temp buffer. Then we return
2074 // back to the compiled code. The compiled code then pops the current
2075 // interpreter frame off the stack and pushes a new compiled frame. Then it
2076 // copies the interpreter locals and displaced headers where it wants.
2077 // Finally it calls back to free the temp buffer.
2078 //
2079 // All of this is done NOT at any Safepoint, nor is any safepoint or GC allowed.
2080
2081 JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *thread) )
2082
2083 #ifdef IA64
2084 ShouldNotReachHere(); // NYI
2085 #endif /* IA64 */
2086
2087 //
2088 // This code is dependent on the memory layout of the interpreter local
2089 // array and the monitors. On all of our platforms the layout is identical
2090 // so this code is shared. If some platform lays its arrays out
2091 // differently then this code could move to platform specific code or
2092 // the code here could be modified to copy items one at a time using
2093 // frame accessor methods and be platform independent.
2094
2095 frame fr = thread->last_frame();
2096 assert( fr.is_interpreted_frame(), "" );
2097 assert( fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks" );
2098
2099 // Figure out how many monitors are active.
2100 int active_monitor_count = 0;
2101 for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
2102 kptr < fr.interpreter_frame_monitor_begin();
2103 kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
2104 if( kptr->obj() != NULL ) active_monitor_count++;
2105 }
2106
2107 // QQQ we could place the number of active monitors in the array so that compiled code
2108 // could double check it.
2109
2110 methodOop moop = fr.interpreter_frame_method();
2111 int max_locals = moop->max_locals();
2112 // Allocate temp buffer, 1 word per local & 2 per active monitor
2113 int buf_size_words = max_locals + active_monitor_count*2;
2114 intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words);
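// Resulting buffer layout, e.g. with max_locals == 3 and two active monitors
// (buf_size_words == 3 + 2*2 == 7):
//   buf[0..2]  the interpreter locals
//   buf[3..6]  two (displaced header, object) pairs for the active monitors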
2115
2116 // Copy the locals. Order is preserved so that loading of longs works.
2117 // Since there's no GC I can copy the oops blindly.
2118 assert( sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
2119 if (TaggedStackInterpreter) {
2120 for (int i = 0; i < max_locals; i++) {
2121 // copy each local separately to the buffer, skipping the tag
2122 buf[i] = *fr.interpreter_frame_local_at(max_locals-i-1);
2123 }
2124 } else {
2125 Copy::disjoint_words(
2126 (HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
2127 (HeapWord*)&buf[0],
2128 max_locals);
2129 }
2130
2131 // Inflate locks. Copy the displaced headers. Be careful, there can be holes.
2132 int i = max_locals;
2133 for( BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
2134 kptr2 < fr.interpreter_frame_monitor_begin();
2135 kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
2136 if( kptr2->obj() != NULL) { // Avoid 'holes' in the monitor array
2137 BasicLock *lock = kptr2->lock();
2138 // Inflate so the displaced header becomes position-independent
2139 if (lock->displaced_header()->is_unlocked())
2140 ObjectSynchronizer::inflate_helper(kptr2->obj());
2141 // Now the displaced header is free to move
2142 buf[i++] = (intptr_t)lock->displaced_header();
2143 buf[i++] = (intptr_t)kptr2->obj();
2144 }
2145 }
2146 assert( i - max_locals == active_monitor_count*2, "found the expected number of monitors" );
2147
2148 return buf;
2149 JRT_END
2150
2151 JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
2152 FREE_C_HEAP_ARRAY(intptr_t,buf);
2153 JRT_END
2154
2155 #ifndef PRODUCT
2156 bool AdapterHandlerLibrary::contains(CodeBlob* b) {
2157
2158 for (int i = 0 ; i < _handlers->length() ; i++) {
2159 AdapterHandlerEntry* a = get_entry(i);
2160 if ( a != NULL && b == CodeCache::find_blob(a->get_i2c_entry()) ) return true;
2161 }
2162 return false;
2163 }
2164
2165 void AdapterHandlerLibrary::print_handler(CodeBlob* b) {
2166
2167 for (int i = 0 ; i < _handlers->length() ; i++) {
2168 AdapterHandlerEntry* a = get_entry(i);
2169 if ( a != NULL && b == CodeCache::find_blob(a->get_i2c_entry()) ) {
2170 tty->print("Adapter for signature: ");
2171 // Fingerprinter::print(_fingerprints->at(i));
2172 tty->print("0x%" FORMAT64_MODIFIER "x", _fingerprints->at(i));
2173 tty->print_cr(" i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT,
2174 a->get_i2c_entry(), a->get_c2i_entry(), a->get_c2i_unverified_entry());
2175
2176 return;
2177 }
2178 }
2179 assert(false, "Should have found handler");
2180 }
2181 #endif /* PRODUCT */