Mercurial > hg > graal-compiler
comparison src/share/vm/interpreter/bytecodeInterpreter.cpp @ 14489:f460c6926af7
Merge
author | ehelin |
---|---|
date | Tue, 25 Feb 2014 11:02:11 +0100 |
parents | 1e820011ee5f |
children | d3f14809b051 4ca6dc0799b6 ce8f6bb717c9 |
comparison
equal
deleted
inserted
replaced
14488:60fd6d24f49f | 14489:f460c6926af7 |
---|---|
26 #include "classfile/vmSymbols.hpp" | 26 #include "classfile/vmSymbols.hpp" |
27 #include "gc_interface/collectedHeap.hpp" | 27 #include "gc_interface/collectedHeap.hpp" |
28 #include "interpreter/bytecodeHistogram.hpp" | 28 #include "interpreter/bytecodeHistogram.hpp" |
29 #include "interpreter/bytecodeInterpreter.hpp" | 29 #include "interpreter/bytecodeInterpreter.hpp" |
30 #include "interpreter/bytecodeInterpreter.inline.hpp" | 30 #include "interpreter/bytecodeInterpreter.inline.hpp" |
31 #include "interpreter/bytecodeInterpreterProfiling.hpp" | |
31 #include "interpreter/interpreter.hpp" | 32 #include "interpreter/interpreter.hpp" |
32 #include "interpreter/interpreterRuntime.hpp" | 33 #include "interpreter/interpreterRuntime.hpp" |
33 #include "memory/cardTableModRefBS.hpp" | |
34 #include "memory/resourceArea.hpp" | 34 #include "memory/resourceArea.hpp" |
35 #include "oops/methodCounters.hpp" | 35 #include "oops/methodCounters.hpp" |
36 #include "oops/objArrayKlass.hpp" | 36 #include "oops/objArrayKlass.hpp" |
37 #include "oops/oop.inline.hpp" | 37 #include "oops/oop.inline.hpp" |
38 #include "prims/jvmtiExport.hpp" | 38 #include "prims/jvmtiExport.hpp" |
39 #include "prims/jvmtiThreadState.hpp" | |
40 #include "runtime/biasedLocking.hpp" | |
39 #include "runtime/frame.inline.hpp" | 41 #include "runtime/frame.inline.hpp" |
40 #include "runtime/handles.inline.hpp" | 42 #include "runtime/handles.inline.hpp" |
41 #include "runtime/interfaceSupport.hpp" | 43 #include "runtime/interfaceSupport.hpp" |
42 #include "runtime/sharedRuntime.hpp" | 44 #include "runtime/sharedRuntime.hpp" |
43 #include "runtime/threadCritical.hpp" | 45 #include "runtime/threadCritical.hpp" |
64 # include "orderAccess_linux_arm.inline.hpp" | 66 # include "orderAccess_linux_arm.inline.hpp" |
65 #endif | 67 #endif |
66 #ifdef TARGET_OS_ARCH_linux_ppc | 68 #ifdef TARGET_OS_ARCH_linux_ppc |
67 # include "orderAccess_linux_ppc.inline.hpp" | 69 # include "orderAccess_linux_ppc.inline.hpp" |
68 #endif | 70 #endif |
71 #ifdef TARGET_OS_ARCH_aix_ppc | |
72 # include "orderAccess_aix_ppc.inline.hpp" | |
73 #endif | |
69 #ifdef TARGET_OS_ARCH_bsd_x86 | 74 #ifdef TARGET_OS_ARCH_bsd_x86 |
70 # include "orderAccess_bsd_x86.inline.hpp" | 75 # include "orderAccess_bsd_x86.inline.hpp" |
71 #endif | 76 #endif |
72 #ifdef TARGET_OS_ARCH_bsd_zero | 77 #ifdef TARGET_OS_ARCH_bsd_zero |
73 # include "orderAccess_bsd_zero.inline.hpp" | 78 # include "orderAccess_bsd_zero.inline.hpp" |
136 * VM_JAVA_ERROR - Macro for throwing a java exception from | 141 * VM_JAVA_ERROR - Macro for throwing a java exception from |
137 * the interpreter loop. Should really be a CALL_VM but there | 142 * the interpreter loop. Should really be a CALL_VM but there |
138 * is no entry point to do the transition to vm so we just | 143 * is no entry point to do the transition to vm so we just |
139 * do it by hand here. | 144 * do it by hand here. |
140 */ | 145 */ |
141 #define VM_JAVA_ERROR_NO_JUMP(name, msg) \ | 146 #define VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \ |
142 DECACHE_STATE(); \ | 147 DECACHE_STATE(); \ |
143 SET_LAST_JAVA_FRAME(); \ | 148 SET_LAST_JAVA_FRAME(); \ |
144 { \ | 149 { \ |
150 InterpreterRuntime::note_a_trap(THREAD, istate->method(), BCI()); \ | |
145 ThreadInVMfromJava trans(THREAD); \ | 151 ThreadInVMfromJava trans(THREAD); \ |
146 Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg); \ | 152 Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg); \ |
147 } \ | 153 } \ |
148 RESET_LAST_JAVA_FRAME(); \ | 154 RESET_LAST_JAVA_FRAME(); \ |
149 CACHE_STATE(); | 155 CACHE_STATE(); |
150 | 156 |
151 // Normal throw of a java error | 157 // Normal throw of a java error. |
152 #define VM_JAVA_ERROR(name, msg) \ | 158 #define VM_JAVA_ERROR(name, msg, note_a_trap) \ |
153 VM_JAVA_ERROR_NO_JUMP(name, msg) \ | 159 VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \ |
154 goto handle_exception; | 160 goto handle_exception; |
155 | 161 |
156 #ifdef PRODUCT | 162 #ifdef PRODUCT |
157 #define DO_UPDATE_INSTRUCTION_COUNT(opcode) | 163 #define DO_UPDATE_INSTRUCTION_COUNT(opcode) |
158 #else | 164 #else |
194 RESET_LAST_JAVA_FRAME(); \ | 200 RESET_LAST_JAVA_FRAME(); \ |
195 CACHE_STATE(); \ | 201 CACHE_STATE(); \ |
196 if (THREAD->pop_frame_pending() && \ | 202 if (THREAD->pop_frame_pending() && \ |
197 !THREAD->pop_frame_in_process()) { \ | 203 !THREAD->pop_frame_in_process()) { \ |
198 goto handle_Pop_Frame; \ | 204 goto handle_Pop_Frame; \ |
205 } \ | |
206 if (THREAD->jvmti_thread_state() && \ | |
207 THREAD->jvmti_thread_state()->is_earlyret_pending()) { \ | |
208 goto handle_Early_Return; \ | |
199 } \ | 209 } \ |
200 opcode = *pc; \ | 210 opcode = *pc; \ |
201 } \ | 211 } \ |
202 } \ | 212 } \ |
203 } | 213 } |
330 MethodCounters* mcs; \ | 340 MethodCounters* mcs; \ |
331 GET_METHOD_COUNTERS(mcs); \ | 341 GET_METHOD_COUNTERS(mcs); \ |
332 if (UseLoopCounter) { \ | 342 if (UseLoopCounter) { \ |
333 bool do_OSR = UseOnStackReplacement; \ | 343 bool do_OSR = UseOnStackReplacement; \ |
334 mcs->backedge_counter()->increment(); \ | 344 mcs->backedge_counter()->increment(); \ |
335 if (do_OSR) do_OSR = mcs->backedge_counter()->reached_InvocationLimit(); \ | 345 if (ProfileInterpreter) { \ |
346 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); \ | |
347 /* Check for overflow against MDO count. */ \ | |
348 do_OSR = do_OSR \ | |
349 && (mdo_last_branch_taken_count >= (uint)InvocationCounter::InterpreterBackwardBranchLimit)\ | |
350 /* When ProfileInterpreter is on, the backedge_count comes */ \ | |
351 /* from the methodDataOop, which value does not get reset on */ \ | |
352 /* the call to frequency_counter_overflow(). To avoid */ \ | |
353 /* excessive calls to the overflow routine while the method is */ \ | |
354 /* being compiled, add a second test to make sure the overflow */ \ | |
355 /* function is called only once every overflow_frequency. */ \ | |
356 && (!(mdo_last_branch_taken_count & 1023)); \ | |
357 } else { \ | |
358 /* check for overflow of backedge counter */ \ | |
359 do_OSR = do_OSR \ | |
360 && mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter()); \ | |
361 } \ | |
336 if (do_OSR) { \ | 362 if (do_OSR) { \ |
337 nmethod* osr_nmethod; \ | 363 nmethod* osr_nmethod; \ |
338 OSR_REQUEST(osr_nmethod, branch_pc); \ | 364 OSR_REQUEST(osr_nmethod, branch_pc); \ |
339 if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) { \ | 365 if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) { \ |
340 intptr_t* buf = SharedRuntime::OSR_migration_begin(THREAD); \ | 366 intptr_t* buf; \ |
367 /* Call OSR migration with last java frame only, no checks. */ \ | |
368 CALL_VM_NAKED_LJF(buf=SharedRuntime::OSR_migration_begin(THREAD)); \ | |
341 istate->set_msg(do_osr); \ | 369 istate->set_msg(do_osr); \ |
342 istate->set_osr_buf((address)buf); \ | 370 istate->set_osr_buf((address)buf); \ |
343 istate->set_osr_entry(osr_nmethod->osr_entry()); \ | 371 istate->set_osr_entry(osr_nmethod->osr_entry()); \ |
344 return; \ | 372 return; \ |
345 } \ | 373 } \ |
346 } \ | 374 } \ |
347 } /* UseCompiler ... */ \ | 375 } /* UseCompiler ... */ \ |
348 mcs->invocation_counter()->increment(); \ | |
349 SAFEPOINT; \ | 376 SAFEPOINT; \ |
350 } | 377 } |
351 | 378 |
352 /* | 379 /* |
353 * For those opcodes that need to have a GC point on a backwards branch | 380 * For those opcodes that need to have a GC point on a backwards branch |
376 #define CACHE_CP() cp = istate->constants(); | 403 #define CACHE_CP() cp = istate->constants(); |
377 #define CACHE_LOCALS() locals = istate->locals(); | 404 #define CACHE_LOCALS() locals = istate->locals(); |
378 #undef CACHE_FRAME | 405 #undef CACHE_FRAME |
379 #define CACHE_FRAME() | 406 #define CACHE_FRAME() |
380 | 407 |
408 // BCI() returns the current bytecode-index. | |
409 #undef BCI | |
410 #define BCI() ((int)(intptr_t)(pc - (intptr_t)istate->method()->code_base())) | |
411 | |
381 /* | 412 /* |
382 * CHECK_NULL - Macro for throwing a NullPointerException if the object | 413 * CHECK_NULL - Macro for throwing a NullPointerException if the object |
383 * passed is a null ref. | 414 * passed is a null ref. |
384 * On some architectures/platforms it should be possible to do this implicitly | 415 * On some architectures/platforms it should be possible to do this implicitly |
385 */ | 416 */ |
386 #undef CHECK_NULL | 417 #undef CHECK_NULL |
387 #define CHECK_NULL(obj_) \ | 418 #define CHECK_NULL(obj_) \ |
388 if ((obj_) == NULL) { \ | 419 if ((obj_) == NULL) { \ |
389 VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), ""); \ | 420 VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), NULL, note_nullCheck_trap); \ |
390 } \ | 421 } \ |
391 VERIFY_OOP(obj_) | 422 VERIFY_OOP(obj_) |
392 | 423 |
393 #define VMdoubleConstZero() 0.0 | 424 #define VMdoubleConstZero() 0.0 |
394 #define VMdoubleConstOne() 1.0 | 425 #define VMdoubleConstOne() 1.0 |
395 #define VMlongConstZero() (max_jlong-max_jlong) | 426 #define VMlongConstZero() (max_jlong-max_jlong) |
396 #define VMlongConstOne() ((max_jlong-max_jlong)+1) | 427 #define VMlongConstOne() ((max_jlong-max_jlong)+1) |
408 CACHE_TOS(); \ | 439 CACHE_TOS(); \ |
409 CACHE_PC(); \ | 440 CACHE_PC(); \ |
410 CACHE_CP(); \ | 441 CACHE_CP(); \ |
411 CACHE_LOCALS(); | 442 CACHE_LOCALS(); |
412 | 443 |
413 // Call the VM don't check for pending exceptions | 444 // Call the VM with last java frame only. |
414 #define CALL_VM_NOCHECK(func) \ | 445 #define CALL_VM_NAKED_LJF(func) \ |
415 DECACHE_STATE(); \ | 446 DECACHE_STATE(); \ |
416 SET_LAST_JAVA_FRAME(); \ | 447 SET_LAST_JAVA_FRAME(); \ |
417 func; \ | 448 func; \ |
418 RESET_LAST_JAVA_FRAME(); \ | 449 RESET_LAST_JAVA_FRAME(); \ |
419 CACHE_STATE(); \ | 450 CACHE_STATE(); |
420 if (THREAD->pop_frame_pending() && \ | 451 |
421 !THREAD->pop_frame_in_process()) { \ | 452 // Call the VM. Don't check for pending exceptions. |
422 goto handle_Pop_Frame; \ | 453 #define CALL_VM_NOCHECK(func) \ |
423 } | 454 CALL_VM_NAKED_LJF(func) \ |
455 if (THREAD->pop_frame_pending() && \ | |
456 !THREAD->pop_frame_in_process()) { \ | |
457 goto handle_Pop_Frame; \ | |
458 } \ | |
459 if (THREAD->jvmti_thread_state() && \ | |
460 THREAD->jvmti_thread_state()->is_earlyret_pending()) { \ | |
461 goto handle_Early_Return; \ | |
462 } | |
424 | 463 |
425 // Call the VM and check for pending exceptions | 464 // Call the VM and check for pending exceptions |
426 #define CALL_VM(func, label) { \ | 465 #define CALL_VM(func, label) { \ |
427 CALL_VM_NOCHECK(func); \ | 466 CALL_VM_NOCHECK(func); \ |
428 if (THREAD->has_pending_exception()) goto label; \ | 467 if (THREAD->has_pending_exception()) goto label; \ |
429 } | 468 } |
430 | 469 |
431 /* | 470 /* |
432 * BytecodeInterpreter::run(interpreterState istate) | 471 * BytecodeInterpreter::run(interpreterState istate) |
433 * BytecodeInterpreter::runWithChecks(interpreterState istate) | 472 * BytecodeInterpreter::runWithChecks(interpreterState istate) |
500 // Screwups with stack management usually cause us to overwrite istate | 539 // Screwups with stack management usually cause us to overwrite istate |
501 // save a copy so we can verify it. | 540 // save a copy so we can verify it. |
502 interpreterState orig = istate; | 541 interpreterState orig = istate; |
503 #endif | 542 #endif |
504 | 543 |
505 static volatile jbyte* _byte_map_base; // adjusted card table base for oop store barrier | |
506 | |
507 register intptr_t* topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */ | 544 register intptr_t* topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */ |
508 register address pc = istate->bcp(); | 545 register address pc = istate->bcp(); |
509 register jubyte opcode; | 546 register jubyte opcode; |
510 register intptr_t* locals = istate->locals(); | 547 register intptr_t* locals = istate->locals(); |
511 register ConstantPoolCache* cp = istate->constants(); // method()->constants()->cache() | 548 register ConstantPoolCache* cp = istate->constants(); // method()->constants()->cache() |
512 #ifdef LOTS_OF_REGS | 549 #ifdef LOTS_OF_REGS |
513 register JavaThread* THREAD = istate->thread(); | 550 register JavaThread* THREAD = istate->thread(); |
514 register volatile jbyte* BYTE_MAP_BASE = _byte_map_base; | |
515 #else | 551 #else |
516 #undef THREAD | 552 #undef THREAD |
517 #define THREAD istate->thread() | 553 #define THREAD istate->thread() |
518 #undef BYTE_MAP_BASE | |
519 #define BYTE_MAP_BASE _byte_map_base | |
520 #endif | 554 #endif |
521 | 555 |
522 #ifdef USELABELS | 556 #ifdef USELABELS |
523 const static void* const opclabels_data[256] = { | 557 const static void* const opclabels_data[256] = { |
524 /* 0x00 */ &&opc_nop, &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0, | 558 /* 0x00 */ &&opc_nop, &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0, |
620 guarantee(istate->msg() == initialize || | 654 guarantee(istate->msg() == initialize || |
621 topOfStack >= istate->stack_limit() && | 655 topOfStack >= istate->stack_limit() && |
622 topOfStack < istate->stack_base(), | 656 topOfStack < istate->stack_base(), |
623 "Stack top out of range"); | 657 "Stack top out of range"); |
624 | 658 |
659 #ifdef CC_INTERP_PROFILE | |
660 // MethodData's last branch taken count. | |
661 uint mdo_last_branch_taken_count = 0; | |
662 #else | |
663 const uint mdo_last_branch_taken_count = 0; | |
664 #endif | |
665 | |
625 switch (istate->msg()) { | 666 switch (istate->msg()) { |
626 case initialize: { | 667 case initialize: { |
627 if (initialized++) ShouldNotReachHere(); // Only one initialize call | 668 if (initialized++) ShouldNotReachHere(); // Only one initialize call. |
628 _compiling = (UseCompiler || CountCompiledCalls); | 669 _compiling = (UseCompiler || CountCompiledCalls); |
629 #ifdef VM_JVMTI | 670 #ifdef VM_JVMTI |
630 _jvmti_interp_events = JvmtiExport::can_post_interpreter_events(); | 671 _jvmti_interp_events = JvmtiExport::can_post_interpreter_events(); |
631 #endif | 672 #endif |
632 BarrierSet* bs = Universe::heap()->barrier_set(); | |
633 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind"); | |
634 _byte_map_base = (volatile jbyte*)(((CardTableModRefBS*)bs)->byte_map_base); | |
635 return; | 673 return; |
636 } | 674 } |
637 break; | 675 break; |
638 case method_entry: { | 676 case method_entry: { |
639 THREAD->set_do_not_unlock(); | 677 THREAD->set_do_not_unlock(); |
644 GET_METHOD_COUNTERS(mcs); | 682 GET_METHOD_COUNTERS(mcs); |
645 if (ProfileInterpreter) { | 683 if (ProfileInterpreter) { |
646 METHOD->increment_interpreter_invocation_count(THREAD); | 684 METHOD->increment_interpreter_invocation_count(THREAD); |
647 } | 685 } |
648 mcs->invocation_counter()->increment(); | 686 mcs->invocation_counter()->increment(); |
649 if (mcs->invocation_counter()->reached_InvocationLimit()) { | 687 if (mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter())) { |
650 CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception); | 688 CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception); |
651 | 689 // We no longer retry on a counter overflow. |
652 // We no longer retry on a counter overflow | 690 } |
653 | 691 // Get or create profile data. Check for pending (async) exceptions. |
654 // istate->set_msg(retry_method); | 692 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); |
655 // THREAD->clr_do_not_unlock(); | |
656 // return; | |
657 } | |
658 SAFEPOINT; | 693 SAFEPOINT; |
659 } | 694 } |
660 | 695 |
661 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) { | 696 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) { |
662 // initialize | 697 // initialize |
674 interesting = true; | 709 interesting = true; |
675 } | 710 } |
676 } | 711 } |
677 #endif // HACK | 712 #endif // HACK |
678 | 713 |
679 | 714 // Lock method if synchronized. |
680 // lock method if synchronized | |
681 if (METHOD->is_synchronized()) { | 715 if (METHOD->is_synchronized()) { |
682 // oop rcvr = locals[0].j.r; | 716 // oop rcvr = locals[0].j.r; |
683 oop rcvr; | 717 oop rcvr; |
684 if (METHOD->is_static()) { | 718 if (METHOD->is_static()) { |
685 rcvr = METHOD->constants()->pool_holder()->java_mirror(); | 719 rcvr = METHOD->constants()->pool_holder()->java_mirror(); |
686 } else { | 720 } else { |
687 rcvr = LOCALS_OBJECT(0); | 721 rcvr = LOCALS_OBJECT(0); |
688 VERIFY_OOP(rcvr); | 722 VERIFY_OOP(rcvr); |
689 } | 723 } |
690 // The initial monitor is ours for the taking | 724 // The initial monitor is ours for the taking. |
691 BasicObjectLock* mon = &istate->monitor_base()[-1]; | 725 // Monitor not filled in frame manager any longer as this caused race condition with biased locking. |
692 oop monobj = mon->obj(); | 726 BasicObjectLock* mon = &istate->monitor_base()[-1]; |
693 assert(mon->obj() == rcvr, "method monitor mis-initialized"); | 727 mon->set_obj(rcvr); |
694 | 728 bool success = false; |
695 bool success = UseBiasedLocking; | 729 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; |
696 if (UseBiasedLocking) { | 730 markOop mark = rcvr->mark(); |
697 markOop mark = rcvr->mark(); | 731 intptr_t hash = (intptr_t) markOopDesc::no_hash; |
698 if (mark->has_bias_pattern()) { | 732 // Implies UseBiasedLocking. |
699 // The bias pattern is present in the object's header. Need to check | 733 if (mark->has_bias_pattern()) { |
700 // whether the bias owner and the epoch are both still current. | 734 uintptr_t thread_ident; |
701 intptr_t xx = ((intptr_t) THREAD) ^ (intptr_t) mark; | 735 uintptr_t anticipated_bias_locking_value; |
702 xx = (intptr_t) rcvr->klass()->prototype_header() ^ xx; | 736 thread_ident = (uintptr_t)istate->thread(); |
703 intptr_t yy = (xx & ~((int) markOopDesc::age_mask_in_place)); | 737 anticipated_bias_locking_value = |
704 if (yy != 0 ) { | 738 (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & |
705 // At this point we know that the header has the bias pattern and | 739 ~((uintptr_t) markOopDesc::age_mask_in_place); |
706 // that we are not the bias owner in the current epoch. We need to | 740 |
707 // figure out more details about the state of the header in order to | 741 if (anticipated_bias_locking_value == 0) { |
708 // know what operations can be legally performed on the object's | 742 // Already biased towards this thread, nothing to do. |
709 // header. | 743 if (PrintBiasedLockingStatistics) { |
710 | 744 (* BiasedLocking::biased_lock_entry_count_addr())++; |
711 // If the low three bits in the xor result aren't clear, that means | 745 } |
712 // the prototype header is no longer biased and we have to revoke | 746 success = true; |
713 // the bias on this object. | 747 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { |
714 | 748 // Try to revoke bias. |
715 if (yy & markOopDesc::biased_lock_mask_in_place == 0 ) { | 749 markOop header = rcvr->klass()->prototype_header(); |
716 // Biasing is still enabled for this data type. See whether the | 750 if (hash != markOopDesc::no_hash) { |
717 // epoch of the current bias is still valid, meaning that the epoch | 751 header = header->copy_set_hash(hash); |
718 // bits of the mark word are equal to the epoch bits of the | 752 } |
719 // prototype header. (Note that the prototype header's epoch bits | 753 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) { |
720 // only change at a safepoint.) If not, attempt to rebias the object | 754 if (PrintBiasedLockingStatistics) |
721 // toward the current thread. Note that we must be absolutely sure | 755 (*BiasedLocking::revoked_lock_entry_count_addr())++; |
722 // that the current epoch is invalid in order to do this because | 756 } |
723 // otherwise the manipulations it performs on the mark word are | 757 } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) { |
724 // illegal. | 758 // Try to rebias. |
725 if (yy & markOopDesc::epoch_mask_in_place == 0) { | 759 markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident); |
726 // The epoch of the current bias is still valid but we know nothing | 760 if (hash != markOopDesc::no_hash) { |
727 // about the owner; it might be set or it might be clear. Try to | 761 new_header = new_header->copy_set_hash(hash); |
728 // acquire the bias of the object using an atomic operation. If this | 762 } |
729 // fails we will go in to the runtime to revoke the object's bias. | 763 if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) { |
730 // Note that we first construct the presumed unbiased header so we | 764 if (PrintBiasedLockingStatistics) { |
731 // don't accidentally blow away another thread's valid bias. | 765 (* BiasedLocking::rebiased_lock_entry_count_addr())++; |
732 intptr_t unbiased = (intptr_t) mark & (markOopDesc::biased_lock_mask_in_place | | |
733 markOopDesc::age_mask_in_place | | |
734 markOopDesc::epoch_mask_in_place); | |
735 if (Atomic::cmpxchg_ptr((intptr_t)THREAD | unbiased, (intptr_t*) rcvr->mark_addr(), unbiased) != unbiased) { | |
736 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); | |
737 } | |
738 } else { | |
739 try_rebias: | |
740 // At this point we know the epoch has expired, meaning that the | |
741 // current "bias owner", if any, is actually invalid. Under these | |
742 // circumstances _only_, we are allowed to use the current header's | |
743 // value as the comparison value when doing the cas to acquire the | |
744 // bias in the current epoch. In other words, we allow transfer of | |
745 // the bias from one thread to another directly in this situation. | |
746 xx = (intptr_t) rcvr->klass()->prototype_header() | (intptr_t) THREAD; | |
747 if (Atomic::cmpxchg_ptr((intptr_t)THREAD | (intptr_t) rcvr->klass()->prototype_header(), | |
748 (intptr_t*) rcvr->mark_addr(), | |
749 (intptr_t) mark) != (intptr_t) mark) { | |
750 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); | |
751 } | |
752 } | |
753 } else { | |
754 try_revoke_bias: | |
755 // The prototype mark in the klass doesn't have the bias bit set any | |
756 // more, indicating that objects of this data type are not supposed | |
757 // to be biased any more. We are going to try to reset the mark of | |
758 // this object to the prototype value and fall through to the | |
759 // CAS-based locking scheme. Note that if our CAS fails, it means | |
760 // that another thread raced us for the privilege of revoking the | |
761 // bias of this particular object, so it's okay to continue in the | |
762 // normal locking code. | |
763 // | |
764 xx = (intptr_t) rcvr->klass()->prototype_header() | (intptr_t) THREAD; | |
765 if (Atomic::cmpxchg_ptr(rcvr->klass()->prototype_header(), | |
766 (intptr_t*) rcvr->mark_addr(), | |
767 mark) == mark) { | |
768 // (*counters->revoked_lock_entry_count_addr())++; | |
769 success = false; | |
770 } | |
771 } | |
772 } | 766 } |
773 } else { | 767 } else { |
774 cas_label: | 768 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); |
775 success = false; | 769 } |
776 } | 770 success = true; |
777 } | 771 } else { |
778 if (!success) { | 772 // Try to bias towards thread in case object is anonymously biased. |
779 markOop displaced = rcvr->mark()->set_unlocked(); | 773 markOop header = (markOop) ((uintptr_t) mark & |
780 mon->lock()->set_displaced_header(displaced); | 774 ((uintptr_t)markOopDesc::biased_lock_mask_in_place | |
781 if (Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) { | 775 (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place)); |
782 // Is it simple recursive case? | 776 if (hash != markOopDesc::no_hash) { |
783 if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { | 777 header = header->copy_set_hash(hash); |
784 mon->lock()->set_displaced_header(NULL); | 778 } |
785 } else { | 779 markOop new_header = (markOop) ((uintptr_t) header | thread_ident); |
786 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); | 780 // Debugging hint. |
781 DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) | |
782 if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) { | |
783 if (PrintBiasedLockingStatistics) { | |
784 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; | |
787 } | 785 } |
788 } | 786 } else { |
789 } | 787 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); |
788 } | |
789 success = true; | |
790 } | |
791 } | |
792 | |
793 // Traditional lightweight locking. | |
794 if (!success) { | |
795 markOop displaced = rcvr->mark()->set_unlocked(); | |
796 mon->lock()->set_displaced_header(displaced); | |
797 bool call_vm = UseHeavyMonitors; | |
798 if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) { | |
799 // Is it simple recursive case? | |
800 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { | |
801 mon->lock()->set_displaced_header(NULL); | |
802 } else { | |
803 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); | |
804 } | |
805 } | |
806 } | |
790 } | 807 } |
791 THREAD->clr_do_not_unlock(); | 808 THREAD->clr_do_not_unlock(); |
792 | 809 |
793 // Notify jvmti | 810 // Notify jvmti |
794 #ifdef VM_JVMTI | 811 #ifdef VM_JVMTI |
806 } | 823 } |
807 | 824 |
808 case popping_frame: { | 825 case popping_frame: { |
809 // returned from a java call to pop the frame, restart the call | 826 // returned from a java call to pop the frame, restart the call |
810 // clear the message so we don't confuse ourselves later | 827 // clear the message so we don't confuse ourselves later |
811 ShouldNotReachHere(); // we don't return this. | |
812 assert(THREAD->pop_frame_in_process(), "wrong frame pop state"); | 828 assert(THREAD->pop_frame_in_process(), "wrong frame pop state"); |
813 istate->set_msg(no_request); | 829 istate->set_msg(no_request); |
830 if (_compiling) { | |
831 // Set MDX back to the ProfileData of the invoke bytecode that will be | |
832 // restarted. | |
833 SET_MDX(NULL); | |
834 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); | |
835 } | |
814 THREAD->clr_pop_frame_in_process(); | 836 THREAD->clr_pop_frame_in_process(); |
815 goto run; | 837 goto run; |
816 } | 838 } |
817 | 839 |
818 case method_resume: { | 840 case method_resume: { |
834 #endif // HACK | 856 #endif // HACK |
835 // returned from a java call, continue executing. | 857 // returned from a java call, continue executing. |
836 if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) { | 858 if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) { |
837 goto handle_Pop_Frame; | 859 goto handle_Pop_Frame; |
838 } | 860 } |
861 if (THREAD->jvmti_thread_state() && | |
862 THREAD->jvmti_thread_state()->is_earlyret_pending()) { | |
863 goto handle_Early_Return; | |
864 } | |
839 | 865 |
840 if (THREAD->has_pending_exception()) goto handle_exception; | 866 if (THREAD->has_pending_exception()) goto handle_exception; |
841 // Update the pc by the saved amount of the invoke bytecode size | 867 // Update the pc by the saved amount of the invoke bytecode size |
842 UPDATE_PC(istate->bcp_advance()); | 868 UPDATE_PC(istate->bcp_advance()); |
869 | |
870 if (_compiling) { | |
871 // Get or create profile data. Check for pending (async) exceptions. | |
872 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); | |
873 } | |
843 goto run; | 874 goto run; |
844 } | 875 } |
845 | 876 |
846 case deopt_resume2: { | 877 case deopt_resume2: { |
847 // Returned from an opcode that will reexecute. Deopt was | 878 // Returned from an opcode that will reexecute. Deopt was |
848 // a result of a PopFrame request. | 879 // a result of a PopFrame request. |
849 // | 880 // |
881 | |
882 if (_compiling) { | |
883 // Get or create profile data. Check for pending (async) exceptions. | |
884 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); | |
885 } | |
850 goto run; | 886 goto run; |
851 } | 887 } |
852 | 888 |
853 case deopt_resume: { | 889 case deopt_resume: { |
854 // Returned from an opcode that has completed. The stack has | 890 // Returned from an opcode that has completed. The stack has |
867 // this will do the right thing even if an exception is pending. | 903 // this will do the right thing even if an exception is pending. |
868 goto handle_return; | 904 goto handle_return; |
869 } | 905 } |
870 UPDATE_PC(Bytecodes::length_at(METHOD, pc)); | 906 UPDATE_PC(Bytecodes::length_at(METHOD, pc)); |
871 if (THREAD->has_pending_exception()) goto handle_exception; | 907 if (THREAD->has_pending_exception()) goto handle_exception; |
908 | |
909 if (_compiling) { | |
910 // Get or create profile data. Check for pending (async) exceptions. | |
911 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); | |
912 } | |
872 goto run; | 913 goto run; |
873 } | 914 } |
874 case got_monitors: { | 915 case got_monitors: { |
875 // continue locking now that we have a monitor to use | 916 // continue locking now that we have a monitor to use |
876 // we expect to find newly allocated monitor at the "top" of the monitor stack. | 917 // we expect to find newly allocated monitor at the "top" of the monitor stack. |
879 // derefing's lockee ought to provoke implicit null check | 920 // derefing's lockee ought to provoke implicit null check |
880 // find a free monitor | 921 // find a free monitor |
881 BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base(); | 922 BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base(); |
882 assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor"); | 923 assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor"); |
883 entry->set_obj(lockee); | 924 entry->set_obj(lockee); |
884 | 925 bool success = false; |
885 markOop displaced = lockee->mark()->set_unlocked(); | 926 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; |
886 entry->lock()->set_displaced_header(displaced); | 927 |
887 if (Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) { | 928 markOop mark = lockee->mark(); |
888 // Is it simple recursive case? | 929 intptr_t hash = (intptr_t) markOopDesc::no_hash; |
889 if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { | 930 // implies UseBiasedLocking |
890 entry->lock()->set_displaced_header(NULL); | 931 if (mark->has_bias_pattern()) { |
932 uintptr_t thread_ident; | |
933 uintptr_t anticipated_bias_locking_value; | |
934 thread_ident = (uintptr_t)istate->thread(); | |
935 anticipated_bias_locking_value = | |
936 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & | |
937 ~((uintptr_t) markOopDesc::age_mask_in_place); | |
938 | |
939 if (anticipated_bias_locking_value == 0) { | |
940 // already biased towards this thread, nothing to do | |
941 if (PrintBiasedLockingStatistics) { | |
942 (* BiasedLocking::biased_lock_entry_count_addr())++; | |
943 } | |
944 success = true; | |
945 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { | |
946 // try revoke bias | |
947 markOop header = lockee->klass()->prototype_header(); | |
948 if (hash != markOopDesc::no_hash) { | |
949 header = header->copy_set_hash(hash); | |
950 } | |
951 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) { | |
952 if (PrintBiasedLockingStatistics) { | |
953 (*BiasedLocking::revoked_lock_entry_count_addr())++; | |
954 } | |
955 } | |
956 } else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) { | |
957 // try rebias | |
958 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident); | |
959 if (hash != markOopDesc::no_hash) { | |
960 new_header = new_header->copy_set_hash(hash); | |
961 } | |
962 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) { | |
963 if (PrintBiasedLockingStatistics) { | |
964 (* BiasedLocking::rebiased_lock_entry_count_addr())++; | |
965 } | |
966 } else { | |
967 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); | |
968 } | |
969 success = true; | |
891 } else { | 970 } else { |
892 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); | 971 // try to bias towards thread in case object is anonymously biased |
972 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place | | |
973 (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place)); | |
974 if (hash != markOopDesc::no_hash) { | |
975 header = header->copy_set_hash(hash); | |
976 } | |
977 markOop new_header = (markOop) ((uintptr_t) header | thread_ident); | |
978 // debugging hint | |
979 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) | |
980 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) { | |
981 if (PrintBiasedLockingStatistics) { | |
982 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; | |
983 } | |
984 } else { | |
985 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); | |
986 } | |
987 success = true; | |
988 } | |
989 } | |
990 | |
991 // traditional lightweight locking | |
992 if (!success) { | |
993 markOop displaced = lockee->mark()->set_unlocked(); | |
994 entry->lock()->set_displaced_header(displaced); | |
995 bool call_vm = UseHeavyMonitors; | |
996 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) { | |
997 // Is it simple recursive case? | |
998 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { | |
999 entry->lock()->set_displaced_header(NULL); | |
1000 } else { | |
1001 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); | |
1002 } | |
893 } | 1003 } |
894 } | 1004 } |
895 UPDATE_PC_AND_TOS(1, -1); | 1005 UPDATE_PC_AND_TOS(1, -1); |
896 goto run; | 1006 goto run; |
897 } | 1007 } |
1048 | 1158 |
1049 CASE(_wide): { | 1159 CASE(_wide): { |
1050 uint16_t reg = Bytes::get_Java_u2(pc + 2); | 1160 uint16_t reg = Bytes::get_Java_u2(pc + 2); |
1051 | 1161 |
1052 opcode = pc[1]; | 1162 opcode = pc[1]; |
1163 | |
1164 // Wide and its sub-bytecode are counted as separate instructions. If we | 
1165 // don't account for this here, the bytecode trace skips the next bytecode. | |
1166 DO_UPDATE_INSTRUCTION_COUNT(opcode); | |
1167 | |
1053 switch(opcode) { | 1168 switch(opcode) { |
1054 case Bytecodes::_aload: | 1169 case Bytecodes::_aload: |
1055 VERIFY_OOP(LOCALS_OBJECT(reg)); | 1170 VERIFY_OOP(LOCALS_OBJECT(reg)); |
1056 SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0); | 1171 SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0); |
1057 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1); | 1172 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1); |
1091 // Be nice to see what this generates.... QQQ | 1206 // Be nice to see what this generates.... QQQ |
1092 SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg); | 1207 SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg); |
1093 UPDATE_PC_AND_CONTINUE(6); | 1208 UPDATE_PC_AND_CONTINUE(6); |
1094 } | 1209 } |
1095 case Bytecodes::_ret: | 1210 case Bytecodes::_ret: |
1211 // Profile ret. | |
1212 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(reg)))); | |
1213 // Now, update the pc. | |
1096 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg)); | 1214 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg)); |
1097 UPDATE_PC_AND_CONTINUE(0); | 1215 UPDATE_PC_AND_CONTINUE(0); |
1098 default: | 1216 default: |
1099 VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode"); | 1217 VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode", note_no_trap); |
1100 } | 1218 } |
1101 } | 1219 } |
1102 | 1220 |
1103 | 1221 |
1104 #undef OPC_STORE_n | 1222 #undef OPC_STORE_n |
1175 #undef OPC_INT_BINARY | 1293 #undef OPC_INT_BINARY |
1176 #define OPC_INT_BINARY(opcname, opname, test) \ | 1294 #define OPC_INT_BINARY(opcname, opname, test) \ |
1177 CASE(_i##opcname): \ | 1295 CASE(_i##opcname): \ |
1178 if (test && (STACK_INT(-1) == 0)) { \ | 1296 if (test && (STACK_INT(-1) == 0)) { \ |
1179 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \ | 1297 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \ |
1180 "/ by zero"); \ | 1298 "/ by zero", note_div0Check_trap); \ |
1181 } \ | 1299 } \ |
1182 SET_STACK_INT(VMint##opname(STACK_INT(-2), \ | 1300 SET_STACK_INT(VMint##opname(STACK_INT(-2), \ |
1183 STACK_INT(-1)), \ | 1301 STACK_INT(-1)), \ |
1184 -2); \ | 1302 -2); \ |
1185 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ | 1303 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ |
1187 { \ | 1305 { \ |
1188 if (test) { \ | 1306 if (test) { \ |
1189 jlong l1 = STACK_LONG(-1); \ | 1307 jlong l1 = STACK_LONG(-1); \ |
1190 if (VMlongEqz(l1)) { \ | 1308 if (VMlongEqz(l1)) { \ |
1191 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \ | 1309 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \ |
1192 "/ by long zero"); \ | 1310 "/ by long zero", note_div0Check_trap); \ |
1193 } \ | 1311 } \ |
1194 } \ | 1312 } \ |
1195 /* First long at (-1,-2) next long at (-3,-4) */ \ | 1313 /* First long at (-1,-2) next long at (-3,-4) */ \ |
1196 SET_STACK_LONG(VMlong##opname(STACK_LONG(-3), \ | 1314 SET_STACK_LONG(VMlong##opname(STACK_LONG(-3), \ |
1197 STACK_LONG(-1)), \ | 1315 STACK_LONG(-1)), \ |
1400 /* comparison operators */ | 1518 /* comparison operators */ |
1401 | 1519 |
1402 | 1520 |
1403 #define COMPARISON_OP(name, comparison) \ | 1521 #define COMPARISON_OP(name, comparison) \ |
1404 CASE(_if_icmp##name): { \ | 1522 CASE(_if_icmp##name): { \ |
1405 int skip = (STACK_INT(-2) comparison STACK_INT(-1)) \ | 1523 const bool cmp = (STACK_INT(-2) comparison STACK_INT(-1)); \ |
1524 int skip = cmp \ | |
1406 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ | 1525 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ |
1407 address branch_pc = pc; \ | 1526 address branch_pc = pc; \ |
1527 /* Profile branch. */ \ | |
1528 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ | |
1408 UPDATE_PC_AND_TOS(skip, -2); \ | 1529 UPDATE_PC_AND_TOS(skip, -2); \ |
1409 DO_BACKEDGE_CHECKS(skip, branch_pc); \ | 1530 DO_BACKEDGE_CHECKS(skip, branch_pc); \ |
1410 CONTINUE; \ | 1531 CONTINUE; \ |
1411 } \ | 1532 } \ |
1412 CASE(_if##name): { \ | 1533 CASE(_if##name): { \ |
1413 int skip = (STACK_INT(-1) comparison 0) \ | 1534 const bool cmp = (STACK_INT(-1) comparison 0); \ |
1535 int skip = cmp \ | |
1414 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ | 1536 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ |
1415 address branch_pc = pc; \ | 1537 address branch_pc = pc; \ |
1538 /* Profile branch. */ \ | |
1539 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ | |
1416 UPDATE_PC_AND_TOS(skip, -1); \ | 1540 UPDATE_PC_AND_TOS(skip, -1); \ |
1417 DO_BACKEDGE_CHECKS(skip, branch_pc); \ | 1541 DO_BACKEDGE_CHECKS(skip, branch_pc); \ |
1418 CONTINUE; \ | 1542 CONTINUE; \ |
1419 } | 1543 } |
1420 | 1544 |
1421 #define COMPARISON_OP2(name, comparison) \ | 1545 #define COMPARISON_OP2(name, comparison) \ |
1422 COMPARISON_OP(name, comparison) \ | 1546 COMPARISON_OP(name, comparison) \ |
1423 CASE(_if_acmp##name): { \ | 1547 CASE(_if_acmp##name): { \ |
1424 int skip = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1)) \ | 1548 const bool cmp = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1)); \ |
1549 int skip = cmp \ | |
1425 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ | 1550 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ |
1426 address branch_pc = pc; \ | 1551 address branch_pc = pc; \ |
1552 /* Profile branch. */ \ | |
1553 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ | |
1427 UPDATE_PC_AND_TOS(skip, -2); \ | 1554 UPDATE_PC_AND_TOS(skip, -2); \ |
1428 DO_BACKEDGE_CHECKS(skip, branch_pc); \ | 1555 DO_BACKEDGE_CHECKS(skip, branch_pc); \ |
1429 CONTINUE; \ | 1556 CONTINUE; \ |
1430 } | 1557 } |
1431 | 1558 |
1432 #define NULL_COMPARISON_NOT_OP(name) \ | 1559 #define NULL_COMPARISON_NOT_OP(name) \ |
1433 CASE(_if##name): { \ | 1560 CASE(_if##name): { \ |
1434 int skip = (!(STACK_OBJECT(-1) == NULL)) \ | 1561 const bool cmp = (!(STACK_OBJECT(-1) == NULL)); \ |
1562 int skip = cmp \ | |
1435 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ | 1563 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ |
1436 address branch_pc = pc; \ | 1564 address branch_pc = pc; \ |
1565 /* Profile branch. */ \ | |
1566 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ | |
1437 UPDATE_PC_AND_TOS(skip, -1); \ | 1567 UPDATE_PC_AND_TOS(skip, -1); \ |
1438 DO_BACKEDGE_CHECKS(skip, branch_pc); \ | 1568 DO_BACKEDGE_CHECKS(skip, branch_pc); \ |
1439 CONTINUE; \ | 1569 CONTINUE; \ |
1440 } | 1570 } |
1441 | 1571 |
1442 #define NULL_COMPARISON_OP(name) \ | 1572 #define NULL_COMPARISON_OP(name) \ |
1443 CASE(_if##name): { \ | 1573 CASE(_if##name): { \ |
1444 int skip = ((STACK_OBJECT(-1) == NULL)) \ | 1574 const bool cmp = ((STACK_OBJECT(-1) == NULL)); \ |
1575 int skip = cmp \ | |
1445 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ | 1576 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ |
1446 address branch_pc = pc; \ | 1577 address branch_pc = pc; \ |
1578 /* Profile branch. */ \ | |
1579 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ | |
1447 UPDATE_PC_AND_TOS(skip, -1); \ | 1580 UPDATE_PC_AND_TOS(skip, -1); \ |
1448 DO_BACKEDGE_CHECKS(skip, branch_pc); \ | 1581 DO_BACKEDGE_CHECKS(skip, branch_pc); \ |
1449 CONTINUE; \ | 1582 CONTINUE; \ |
1450 } | 1583 } |
1451 COMPARISON_OP(lt, <); | 1584 COMPARISON_OP(lt, <); |
1464 int32_t key = STACK_INT(-1); | 1597 int32_t key = STACK_INT(-1); |
1465 int32_t low = Bytes::get_Java_u4((address)&lpc[1]); | 1598 int32_t low = Bytes::get_Java_u4((address)&lpc[1]); |
1466 int32_t high = Bytes::get_Java_u4((address)&lpc[2]); | 1599 int32_t high = Bytes::get_Java_u4((address)&lpc[2]); |
1467 int32_t skip; | 1600 int32_t skip; |
1468 key -= low; | 1601 key -= low; |
1469 skip = ((uint32_t) key > (uint32_t)(high - low)) | 1602 if (((uint32_t) key > (uint32_t)(high - low))) { |
1470 ? Bytes::get_Java_u4((address)&lpc[0]) | 1603 key = -1; |
1471 : Bytes::get_Java_u4((address)&lpc[key + 3]); | 1604 skip = Bytes::get_Java_u4((address)&lpc[0]); |
1472 // Does this really need a full backedge check (osr?) | 1605 } else { |
1606 skip = Bytes::get_Java_u4((address)&lpc[key + 3]); | |
1607 } | |
1608 // Profile switch. | |
1609 BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/key); | |
1610 // Does this really need a full backedge check (osr)? | |
1473 address branch_pc = pc; | 1611 address branch_pc = pc; |
1474 UPDATE_PC_AND_TOS(skip, -1); | 1612 UPDATE_PC_AND_TOS(skip, -1); |
1475 DO_BACKEDGE_CHECKS(skip, branch_pc); | 1613 DO_BACKEDGE_CHECKS(skip, branch_pc); |
1476 CONTINUE; | 1614 CONTINUE; |
1477 } | 1615 } |
1478 | 1616 |
1479 /* Goto pc whose table entry matches specified key */ | 1617 /* Goto pc whose table entry matches specified key. */ |
1480 | 1618 |
1481 CASE(_lookupswitch): { | 1619 CASE(_lookupswitch): { |
1482 jint* lpc = (jint*)VMalignWordUp(pc+1); | 1620 jint* lpc = (jint*)VMalignWordUp(pc+1); |
1483 int32_t key = STACK_INT(-1); | 1621 int32_t key = STACK_INT(-1); |
1484 int32_t skip = Bytes::get_Java_u4((address) lpc); /* default amount */ | 1622 int32_t skip = Bytes::get_Java_u4((address) lpc); /* default amount */ |
1623 // Remember index. | |
1624 int index = -1; | |
1625 int newindex = 0; | |
1485 int32_t npairs = Bytes::get_Java_u4((address) &lpc[1]); | 1626 int32_t npairs = Bytes::get_Java_u4((address) &lpc[1]); |
1486 while (--npairs >= 0) { | 1627 while (--npairs >= 0) { |
1487 lpc += 2; | 1628 lpc += 2; |
1488 if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) { | 1629 if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) { |
1489 skip = Bytes::get_Java_u4((address)&lpc[1]); | 1630 skip = Bytes::get_Java_u4((address)&lpc[1]); |
1490 break; | 1631 index = newindex; |
1491 } | 1632 break; |
1492 } | 1633 } |
1634 newindex += 1; | |
1635 } | |
1636 // Profile switch. | |
1637 BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/index); | |
1493 address branch_pc = pc; | 1638 address branch_pc = pc; |
1494 UPDATE_PC_AND_TOS(skip, -1); | 1639 UPDATE_PC_AND_TOS(skip, -1); |
1495 DO_BACKEDGE_CHECKS(skip, branch_pc); | 1640 DO_BACKEDGE_CHECKS(skip, branch_pc); |
1496 CONTINUE; | 1641 CONTINUE; |
1497 } | 1642 } |
1572 char message[jintAsStringSize]; \ | 1717 char message[jintAsStringSize]; \ |
1573 CHECK_NULL(arrObj); \ | 1718 CHECK_NULL(arrObj); \ |
1574 if ((uint32_t)index >= (uint32_t)arrObj->length()) { \ | 1719 if ((uint32_t)index >= (uint32_t)arrObj->length()) { \ |
1575 sprintf(message, "%d", index); \ | 1720 sprintf(message, "%d", index); \ |
1576 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \ | 1721 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \ |
1577 message); \ | 1722 message, note_rangeCheck_trap); \ |
1578 } | 1723 } |
1579 | 1724 |
1580 /* 32-bit loads. These handle conversion from < 32-bit types */ | 1725 /* 32-bit loads. These handle conversion from < 32-bit types */ |
1581 #define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \ | 1726 #define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \ |
1582 { \ | 1727 { \ |
1598 | 1743 |
1599 CASE(_iaload): | 1744 CASE(_iaload): |
1600 ARRAY_LOADTO32(T_INT, jint, "%d", STACK_INT, 0); | 1745 ARRAY_LOADTO32(T_INT, jint, "%d", STACK_INT, 0); |
1601 CASE(_faload): | 1746 CASE(_faload): |
1602 ARRAY_LOADTO32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0); | 1747 ARRAY_LOADTO32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0); |
1603 CASE(_aaload): | 1748 CASE(_aaload): { |
1604 ARRAY_LOADTO32(T_OBJECT, oop, INTPTR_FORMAT, STACK_OBJECT, 0); | 1749 ARRAY_INTRO(-2); |
1750 SET_STACK_OBJECT(((objArrayOop) arrObj)->obj_at(index), -2); | |
1751 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); | |
1752 } | |
1605 CASE(_baload): | 1753 CASE(_baload): |
1606 ARRAY_LOADTO32(T_BYTE, jbyte, "%d", STACK_INT, 0); | 1754 ARRAY_LOADTO32(T_BYTE, jbyte, "%d", STACK_INT, 0); |
1607 CASE(_caload): | 1755 CASE(_caload): |
1608 ARRAY_LOADTO32(T_CHAR, jchar, "%d", STACK_INT, 0); | 1756 ARRAY_LOADTO32(T_CHAR, jchar, "%d", STACK_INT, 0); |
1609 CASE(_saload): | 1757 CASE(_saload): |
1643 VERIFY_OOP(rhsObject); | 1791 VERIFY_OOP(rhsObject); |
1644 ARRAY_INTRO( -3); | 1792 ARRAY_INTRO( -3); |
1645 // arrObj, index are set | 1793 // arrObj, index are set |
1646 if (rhsObject != NULL) { | 1794 if (rhsObject != NULL) { |
1647 /* Check assignability of rhsObject into arrObj */ | 1795 /* Check assignability of rhsObject into arrObj */ |
1648 Klass* rhsKlassOop = rhsObject->klass(); // EBX (subclass) | 1796 Klass* rhsKlass = rhsObject->klass(); // EBX (subclass) |
1649 Klass* elemKlassOop = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX | 1797 Klass* elemKlass = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX |
1650 // | 1798 // |
1651 // Check for compatibility. This check must not GC!! | 1799 // Check for compatibility. This check must not GC!! |
1652 // Seems way more expensive now that we must dispatch | 1800 // Seems way more expensive now that we must dispatch |
1653 // | 1801 // |
1654 if (rhsKlassOop != elemKlassOop && !rhsKlassOop->is_subtype_of(elemKlassOop)) { // ebx->is... | 1802 if (rhsKlass != elemKlass && !rhsKlass->is_subtype_of(elemKlass)) { // ebx->is... |
1655 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), ""); | 1803 // Decrement counter if subtype check failed. |
1656 } | 1804 BI_PROFILE_SUBTYPECHECK_FAILED(rhsKlass); |
1657 } | 1805 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "", note_arrayCheck_trap); |
1658 oop* elem_loc = (oop*)(((address) arrObj->base(T_OBJECT)) + index * sizeof(oop)); | 1806 } |
1659 // *(oop*)(((address) arrObj->base(T_OBJECT)) + index * sizeof(oop)) = rhsObject; | 1807 // Profile checkcast with null_seen and receiver. |
1660 *elem_loc = rhsObject; | 1808 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, rhsKlass); |
1661 // Mark the card | 1809 } else { |
1662 OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)elem_loc >> CardTableModRefBS::card_shift], 0); | 1810 // Profile checkcast with null_seen and receiver. |
1811 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL); | |
1812 } | |
1813 ((objArrayOop) arrObj)->obj_at_put(index, rhsObject); | |
1663 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); | 1814 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); |
1664 } | 1815 } |
1665 CASE(_bastore): | 1816 CASE(_bastore): |
1666 ARRAY_STOREFROM32(T_BYTE, jbyte, "%d", STACK_INT, 0); | 1817 ARRAY_STOREFROM32(T_BYTE, jbyte, "%d", STACK_INT, 0); |
1667 CASE(_castore): | 1818 CASE(_castore): |
1698 else if (most_recent->obj() == lockee) break; | 1849 else if (most_recent->obj() == lockee) break; |
1699 most_recent++; | 1850 most_recent++; |
1700 } | 1851 } |
1701 if (entry != NULL) { | 1852 if (entry != NULL) { |
1702 entry->set_obj(lockee); | 1853 entry->set_obj(lockee); |
1703 markOop displaced = lockee->mark()->set_unlocked(); | 1854 int success = false; |
1704 entry->lock()->set_displaced_header(displaced); | 1855 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; |
1705 if (Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) { | 1856 |
1706 // Is it simple recursive case? | 1857 markOop mark = lockee->mark(); |
1707 if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { | 1858 intptr_t hash = (intptr_t) markOopDesc::no_hash; |
1708 entry->lock()->set_displaced_header(NULL); | 1859 // implies UseBiasedLocking |
1709 } else { | 1860 if (mark->has_bias_pattern()) { |
1710 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); | 1861 uintptr_t thread_ident; |
1862 uintptr_t anticipated_bias_locking_value; | |
1863 thread_ident = (uintptr_t)istate->thread(); | |
1864 anticipated_bias_locking_value = | |
1865 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & | |
1866 ~((uintptr_t) markOopDesc::age_mask_in_place); | |
1867 | |
1868 if (anticipated_bias_locking_value == 0) { | |
1869 // already biased towards this thread, nothing to do | |
1870 if (PrintBiasedLockingStatistics) { | |
1871 (* BiasedLocking::biased_lock_entry_count_addr())++; | |
1872 } | |
1873 success = true; | |
1874 } | |
1875 else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { | |
1876 // try revoke bias | |
1877 markOop header = lockee->klass()->prototype_header(); | |
1878 if (hash != markOopDesc::no_hash) { | |
1879 header = header->copy_set_hash(hash); | |
1880 } | |
1881 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) { | |
1882 if (PrintBiasedLockingStatistics) | |
1883 (*BiasedLocking::revoked_lock_entry_count_addr())++; | |
1884 } | |
1885 } | |
1886 else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) { | |
1887 // try rebias | |
1888 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident); | |
1889 if (hash != markOopDesc::no_hash) { | |
1890 new_header = new_header->copy_set_hash(hash); | |
1891 } | |
1892 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) { | |
1893 if (PrintBiasedLockingStatistics) | |
1894 (* BiasedLocking::rebiased_lock_entry_count_addr())++; | |
1895 } | |
1896 else { | |
1897 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); | |
1898 } | |
1899 success = true; | |
1900 } | |
1901 else { | |
1902 // try to bias towards thread in case object is anonymously biased | |
1903 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place | | |
1904 (uintptr_t)markOopDesc::age_mask_in_place | | |
1905 epoch_mask_in_place)); | |
1906 if (hash != markOopDesc::no_hash) { | |
1907 header = header->copy_set_hash(hash); | |
1908 } | |
1909 markOop new_header = (markOop) ((uintptr_t) header | thread_ident); | |
1910 // debugging hint | |
1911 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) | |
1912 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) { | |
1913 if (PrintBiasedLockingStatistics) | |
1914 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; | |
1915 } | |
1916 else { | |
1917 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); | |
1918 } | |
1919 success = true; | |
1920 } | |
1921 } | |
1922 | |
1923 // traditional lightweight locking | |
1924 if (!success) { | |
1925 markOop displaced = lockee->mark()->set_unlocked(); | |
1926 entry->lock()->set_displaced_header(displaced); | |
1927 bool call_vm = UseHeavyMonitors; | |
1928 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) { | |
1929 // Is it simple recursive case? | |
1930 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { | |
1931 entry->lock()->set_displaced_header(NULL); | |
1932 } else { | |
1933 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); | |
1934 } | |
1711 } | 1935 } |
1712 } | 1936 } |
1713 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); | 1937 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); |
1714 } else { | 1938 } else { |
1715 istate->set_msg(more_monitors); | 1939 istate->set_msg(more_monitors); |
1727 while (most_recent != limit ) { | 1951 while (most_recent != limit ) { |
1728 if ((most_recent)->obj() == lockee) { | 1952 if ((most_recent)->obj() == lockee) { |
1729 BasicLock* lock = most_recent->lock(); | 1953 BasicLock* lock = most_recent->lock(); |
1730 markOop header = lock->displaced_header(); | 1954 markOop header = lock->displaced_header(); |
1731 most_recent->set_obj(NULL); | 1955 most_recent->set_obj(NULL); |
1732 // If it isn't recursive we either must swap old header or call the runtime | 1956 if (!lockee->mark()->has_bias_pattern()) { |
1733 if (header != NULL) { | 1957 bool call_vm = UseHeavyMonitors; |
1734 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) { | 1958 // If it isn't recursive we either must swap old header or call the runtime |
1735 // restore object for the slow case | 1959 if (header != NULL || call_vm) { |
1736 most_recent->set_obj(lockee); | 1960 if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) { |
1737 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception); | 1961 // restore object for the slow case |
1962 most_recent->set_obj(lockee); | |
1963 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception); | |
1964 } | |
1738 } | 1965 } |
1739 } | 1966 } |
1740 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); | 1967 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); |
1741 } | 1968 } |
1742 most_recent++; | 1969 most_recent++; |
1805 // Now store the result on the stack | 2032 // Now store the result on the stack |
1806 // | 2033 // |
1807 TosState tos_type = cache->flag_state(); | 2034 TosState tos_type = cache->flag_state(); |
1808 int field_offset = cache->f2_as_index(); | 2035 int field_offset = cache->f2_as_index(); |
1809 if (cache->is_volatile()) { | 2036 if (cache->is_volatile()) { |
2037 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { | |
2038 OrderAccess::fence(); | |
2039 } | |
1810 if (tos_type == atos) { | 2040 if (tos_type == atos) { |
1811 VERIFY_OOP(obj->obj_field_acquire(field_offset)); | 2041 VERIFY_OOP(obj->obj_field_acquire(field_offset)); |
1812 SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1); | 2042 SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1); |
1813 } else if (tos_type == itos) { | 2043 } else if (tos_type == itos) { |
1814 SET_STACK_INT(obj->int_field_acquire(field_offset), -1); | 2044 SET_STACK_INT(obj->int_field_acquire(field_offset), -1); |
1921 if (tos_type == itos) { | 2151 if (tos_type == itos) { |
1922 obj->release_int_field_put(field_offset, STACK_INT(-1)); | 2152 obj->release_int_field_put(field_offset, STACK_INT(-1)); |
1923 } else if (tos_type == atos) { | 2153 } else if (tos_type == atos) { |
1924 VERIFY_OOP(STACK_OBJECT(-1)); | 2154 VERIFY_OOP(STACK_OBJECT(-1)); |
1925 obj->release_obj_field_put(field_offset, STACK_OBJECT(-1)); | 2155 obj->release_obj_field_put(field_offset, STACK_OBJECT(-1)); |
1926 OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)obj >> CardTableModRefBS::card_shift], 0); | |
1927 } else if (tos_type == btos) { | 2156 } else if (tos_type == btos) { |
1928 obj->release_byte_field_put(field_offset, STACK_INT(-1)); | 2157 obj->release_byte_field_put(field_offset, STACK_INT(-1)); |
1929 } else if (tos_type == ltos) { | 2158 } else if (tos_type == ltos) { |
1930 obj->release_long_field_put(field_offset, STACK_LONG(-1)); | 2159 obj->release_long_field_put(field_offset, STACK_LONG(-1)); |
1931 } else if (tos_type == ctos) { | 2160 } else if (tos_type == ctos) { |
1942 if (tos_type == itos) { | 2171 if (tos_type == itos) { |
1943 obj->int_field_put(field_offset, STACK_INT(-1)); | 2172 obj->int_field_put(field_offset, STACK_INT(-1)); |
1944 } else if (tos_type == atos) { | 2173 } else if (tos_type == atos) { |
1945 VERIFY_OOP(STACK_OBJECT(-1)); | 2174 VERIFY_OOP(STACK_OBJECT(-1)); |
1946 obj->obj_field_put(field_offset, STACK_OBJECT(-1)); | 2175 obj->obj_field_put(field_offset, STACK_OBJECT(-1)); |
1947 OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)obj >> CardTableModRefBS::card_shift], 0); | |
1948 } else if (tos_type == btos) { | 2176 } else if (tos_type == btos) { |
1949 obj->byte_field_put(field_offset, STACK_INT(-1)); | 2177 obj->byte_field_put(field_offset, STACK_INT(-1)); |
1950 } else if (tos_type == ltos) { | 2178 } else if (tos_type == ltos) { |
1951 obj->long_field_put(field_offset, STACK_LONG(-1)); | 2179 obj->long_field_put(field_offset, STACK_LONG(-1)); |
1952 } else if (tos_type == ctos) { | 2180 } else if (tos_type == ctos) { |
1979 // If the TLAB isn't pre-zeroed then we'll have to do it | 2207 // If the TLAB isn't pre-zeroed then we'll have to do it |
1980 bool need_zero = !ZeroTLAB; | 2208 bool need_zero = !ZeroTLAB; |
1981 if (UseTLAB) { | 2209 if (UseTLAB) { |
1982 result = (oop) THREAD->tlab().allocate(obj_size); | 2210 result = (oop) THREAD->tlab().allocate(obj_size); |
1983 } | 2211 } |
2212 // Disable non-TLAB-based fast-path, because profiling requires that all | |
2213 // allocations go through InterpreterRuntime::_new() if THREAD->tlab().allocate | |
2214 // returns NULL. | |
2215 #ifndef CC_INTERP_PROFILE | |
1984 if (result == NULL) { | 2216 if (result == NULL) { |
1985 need_zero = true; | 2217 need_zero = true; |
1986 // Try allocate in shared eden | 2218 // Try allocate in shared eden |
1987 retry: | 2219 retry: |
1988 HeapWord* compare_to = *Universe::heap()->top_addr(); | 2220 HeapWord* compare_to = *Universe::heap()->top_addr(); |
1989 HeapWord* new_top = compare_to + obj_size; | 2221 HeapWord* new_top = compare_to + obj_size; |
1990 if (new_top <= *Universe::heap()->end_addr()) { | 2222 if (new_top <= *Universe::heap()->end_addr()) { |
1991 if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) { | 2223 if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) { |
1992 goto retry; | 2224 goto retry; |
1993 } | 2225 } |
1994 result = (oop) compare_to; | 2226 result = (oop) compare_to; |
1995 } | 2227 } |
1996 } | 2228 } |
2229 #endif | |
1997 if (result != NULL) { | 2230 if (result != NULL) { |
1998 // Initialize object (if nonzero size and need) and then the header | 2231 // Initialize object (if nonzero size and need) and then the header |
1999 if (need_zero ) { | 2232 if (need_zero ) { |
2000 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize; | 2233 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize; |
2001 obj_size -= sizeof(oopDesc) / oopSize; | 2234 obj_size -= sizeof(oopDesc) / oopSize; |
2008 } else { | 2241 } else { |
2009 result->set_mark(markOopDesc::prototype()); | 2242 result->set_mark(markOopDesc::prototype()); |
2010 } | 2243 } |
2011 result->set_klass_gap(0); | 2244 result->set_klass_gap(0); |
2012 result->set_klass(k_entry); | 2245 result->set_klass(k_entry); |
2246 // Must prevent reordering of stores for object initialization | |
2247 // with stores that publish the new object. | |
2248 OrderAccess::storestore(); | |
2013 SET_STACK_OBJECT(result, 0); | 2249 SET_STACK_OBJECT(result, 0); |
2014 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); | 2250 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); |
2015 } | 2251 } |
2016 } | 2252 } |
2017 } | 2253 } |
2018 // Slow case allocation | 2254 // Slow case allocation |
2019 CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index), | 2255 CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index), |
2020 handle_exception); | 2256 handle_exception); |
2257 // Must prevent reordering of stores for object initialization | |
2258 // with stores that publish the new object. | |
2259 OrderAccess::storestore(); | |
2021 SET_STACK_OBJECT(THREAD->vm_result(), 0); | 2260 SET_STACK_OBJECT(THREAD->vm_result(), 0); |
2022 THREAD->set_vm_result(NULL); | 2261 THREAD->set_vm_result(NULL); |
2023 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); | 2262 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); |
2024 } | 2263 } |
2025 CASE(_anewarray): { | 2264 CASE(_anewarray): { |
2026 u2 index = Bytes::get_Java_u2(pc+1); | 2265 u2 index = Bytes::get_Java_u2(pc+1); |
2027 jint size = STACK_INT(-1); | 2266 jint size = STACK_INT(-1); |
2028 CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size), | 2267 CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size), |
2029 handle_exception); | 2268 handle_exception); |
2269 // Must prevent reordering of stores for object initialization | |
2270 // with stores that publish the new object. | |
2271 OrderAccess::storestore(); | |
2030 SET_STACK_OBJECT(THREAD->vm_result(), -1); | 2272 SET_STACK_OBJECT(THREAD->vm_result(), -1); |
2031 THREAD->set_vm_result(NULL); | 2273 THREAD->set_vm_result(NULL); |
2032 UPDATE_PC_AND_CONTINUE(3); | 2274 UPDATE_PC_AND_CONTINUE(3); |
2033 } | 2275 } |
2034 CASE(_multianewarray): { | 2276 CASE(_multianewarray): { |
2039 (jint*)&topOfStack[dims * Interpreter::stackElementWords+ | 2281 (jint*)&topOfStack[dims * Interpreter::stackElementWords+ |
2040 Interpreter::stackElementWords-1]; | 2282 Interpreter::stackElementWords-1]; |
2041 //adjust pointer to start of stack element | 2283 //adjust pointer to start of stack element |
2042 CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray), | 2284 CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray), |
2043 handle_exception); | 2285 handle_exception); |
2286 // Must prevent reordering of stores for object initialization | |
2287 // with stores that publish the new object. | |
2288 OrderAccess::storestore(); | |
2044 SET_STACK_OBJECT(THREAD->vm_result(), -dims); | 2289 SET_STACK_OBJECT(THREAD->vm_result(), -dims); |
2045 THREAD->set_vm_result(NULL); | 2290 THREAD->set_vm_result(NULL); |
2046 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1)); | 2291 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1)); |
2047 } | 2292 } |
2048 CASE(_checkcast): | 2293 CASE(_checkcast): |
2049 if (STACK_OBJECT(-1) != NULL) { | 2294 if (STACK_OBJECT(-1) != NULL) { |
2050 VERIFY_OOP(STACK_OBJECT(-1)); | 2295 VERIFY_OOP(STACK_OBJECT(-1)); |
2051 u2 index = Bytes::get_Java_u2(pc+1); | 2296 u2 index = Bytes::get_Java_u2(pc+1); |
2052 if (ProfileInterpreter) { | |
2053 // needs Profile_checkcast QQQ | |
2054 ShouldNotReachHere(); | |
2055 } | |
2056 // Constant pool may have actual klass or unresolved klass. If it is | 2297 // Constant pool may have actual klass or unresolved klass. If it is |
2057 // unresolved we must resolve it | 2298 // unresolved we must resolve it. |
2058 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { | 2299 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { |
2059 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); | 2300 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); |
2060 } | 2301 } |
2061 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass(); | 2302 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass(); |
2062 Klass* objKlassOop = STACK_OBJECT(-1)->klass(); //ebx | 2303 Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx |
2063 // | 2304 // |
2064 // Check for compatibility. This check must not GC!! | 2305 // Check for compatibility. This check must not GC!!
2065 // Seems way more expensive now that we must dispatch | 2306 // Seems way more expensive now that we must dispatch. |
2066 // | 2307 // |
2067 if (objKlassOop != klassOf && | 2308 if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) { |
2068 !objKlassOop->is_subtype_of(klassOf)) { | 2309 // Decrement counter at checkcast. |
2310 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass); | |
2069 ResourceMark rm(THREAD); | 2311 ResourceMark rm(THREAD); |
2070 const char* objName = objKlassOop->external_name(); | 2312 const char* objName = objKlass->external_name(); |
2071 const char* klassName = klassOf->external_name(); | 2313 const char* klassName = klassOf->external_name(); |
2072 char* message = SharedRuntime::generate_class_cast_message( | 2314 char* message = SharedRuntime::generate_class_cast_message( |
2073 objName, klassName); | 2315 objName, klassName); |
2074 VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message); | 2316 VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message, note_classCheck_trap); |
2075 } | 2317 } |
2318 // Profile checkcast with null_seen and receiver. | |
2319 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, objKlass); | |
2076 } else { | 2320 } else { |
2077 if (UncommonNullCast) { | 2321 // Profile checkcast with null_seen and receiver. |
2078 // istate->method()->set_null_cast_seen(); | 2322 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL); |
2079 // [RGV] Not sure what to do here! | |
2080 | |
2081 } | |
2082 } | 2323 } |
2083 UPDATE_PC_AND_CONTINUE(3); | 2324 UPDATE_PC_AND_CONTINUE(3); |
2084 | 2325 |
2085 CASE(_instanceof): | 2326 CASE(_instanceof): |
2086 if (STACK_OBJECT(-1) == NULL) { | 2327 if (STACK_OBJECT(-1) == NULL) { |
2087 SET_STACK_INT(0, -1); | 2328 SET_STACK_INT(0, -1); |
2329 // Profile instanceof with null_seen and receiver. | |
2330 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/true, NULL); | |
2088 } else { | 2331 } else { |
2089 VERIFY_OOP(STACK_OBJECT(-1)); | 2332 VERIFY_OOP(STACK_OBJECT(-1)); |
2090 u2 index = Bytes::get_Java_u2(pc+1); | 2333 u2 index = Bytes::get_Java_u2(pc+1); |
2091 // Constant pool may have actual klass or unresolved klass. If it is | 2334 // Constant pool may have actual klass or unresolved klass. If it is |
2092 // unresolved we must resolve it | 2335 // unresolved we must resolve it. |
2093 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { | 2336 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { |
2094 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); | 2337 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); |
2095 } | 2338 } |
2096 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass(); | 2339 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass(); |
2097 Klass* objKlassOop = STACK_OBJECT(-1)->klass(); | 2340 Klass* objKlass = STACK_OBJECT(-1)->klass(); |
2098 // | 2341 // |
2099 // Check for compatibility. This check must not GC!! | 2342 // Check for compatibility. This check must not GC!!
2100 // Seems way more expensive now that we must dispatch | 2343 // Seems way more expensive now that we must dispatch. |
2101 // | 2344 // |
2102 if ( objKlassOop == klassOf || objKlassOop->is_subtype_of(klassOf)) { | 2345 if ( objKlass == klassOf || objKlass->is_subtype_of(klassOf)) { |
2103 SET_STACK_INT(1, -1); | 2346 SET_STACK_INT(1, -1); |
2104 } else { | 2347 } else { |
2105 SET_STACK_INT(0, -1); | 2348 SET_STACK_INT(0, -1); |
2106 } | 2349 // Decrement counter at checkcast. |
2350 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass); | |
2351 } | |
2352 // Profile instanceof with null_seen and receiver. | |
2353 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/false, objKlass); | |
2107 } | 2354 } |
2108 UPDATE_PC_AND_CONTINUE(3); | 2355 UPDATE_PC_AND_CONTINUE(3); |
2109 | 2356 |
2110 CASE(_ldc_w): | 2357 CASE(_ldc_w): |
2111 CASE(_ldc): | 2358 CASE(_ldc): |
2244 istate->set_msg(call_method); | 2491 istate->set_msg(call_method); |
2245 istate->set_callee(method); | 2492 istate->set_callee(method); |
2246 istate->set_callee_entry_point(method->from_interpreted_entry()); | 2493 istate->set_callee_entry_point(method->from_interpreted_entry()); |
2247 istate->set_bcp_advance(5); | 2494 istate->set_bcp_advance(5); |
2248 | 2495 |
2496 // Invokedynamic has got a call counter, just like an invokestatic -> increment! | |
2497 BI_PROFILE_UPDATE_CALL(); | |
2498 | |
2249 UPDATE_PC_AND_RETURN(0); // I'll be back... | 2499 UPDATE_PC_AND_RETURN(0); // I'll be back... |
2250 } | 2500 } |
2251 | 2501 |
2252 CASE(_invokehandle): { | 2502 CASE(_invokehandle): { |
2253 | 2503 |
2275 | 2525 |
2276 istate->set_msg(call_method); | 2526 istate->set_msg(call_method); |
2277 istate->set_callee(method); | 2527 istate->set_callee(method); |
2278 istate->set_callee_entry_point(method->from_interpreted_entry()); | 2528 istate->set_callee_entry_point(method->from_interpreted_entry()); |
2279 istate->set_bcp_advance(3); | 2529 istate->set_bcp_advance(3); |
2530 | |
2531 // Invokehandle has got a call counter, just like a final call -> increment! | |
2532 BI_PROFILE_UPDATE_FINALCALL(); | |
2280 | 2533 |
2281 UPDATE_PC_AND_RETURN(0); // I'll be back... | 2534 UPDATE_PC_AND_RETURN(0); // I'll be back... |
2282 } | 2535 } |
2283 | 2536 |
2284 CASE(_invokeinterface): { | 2537 CASE(_invokeinterface): { |
2303 if (cache->is_forced_virtual()) { | 2556 if (cache->is_forced_virtual()) { |
2304 Method* callee; | 2557 Method* callee; |
2305 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); | 2558 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); |
2306 if (cache->is_vfinal()) { | 2559 if (cache->is_vfinal()) { |
2307 callee = cache->f2_as_vfinal_method(); | 2560 callee = cache->f2_as_vfinal_method(); |
2561 // Profile 'special case of invokeinterface' final call. | |
2562 BI_PROFILE_UPDATE_FINALCALL(); | |
2308 } else { | 2563 } else { |
2309 // get receiver | 2564 // Get receiver. |
2310 int parms = cache->parameter_size(); | 2565 int parms = cache->parameter_size(); |
2311 // Same comments as invokevirtual apply here | 2566 // Same comments as invokevirtual apply here. |
2312 VERIFY_OOP(STACK_OBJECT(-parms)); | 2567 oop rcvr = STACK_OBJECT(-parms); |
2313 InstanceKlass* rcvrKlass = (InstanceKlass*) | 2568 VERIFY_OOP(rcvr); |
2314 STACK_OBJECT(-parms)->klass(); | 2569 InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass(); |
2315 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; | 2570 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; |
2571 // Profile 'special case of invokeinterface' virtual call. | |
2572 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass()); | |
2316 } | 2573 } |
2317 istate->set_callee(callee); | 2574 istate->set_callee(callee); |
2318 istate->set_callee_entry_point(callee->from_interpreted_entry()); | 2575 istate->set_callee_entry_point(callee->from_interpreted_entry()); |
2319 #ifdef VM_JVMTI | 2576 #ifdef VM_JVMTI |
2320 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { | 2577 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { |
2341 } | 2598 } |
2342 // If the interface isn't found, this class doesn't implement this | 2599 // If the interface isn't found, this class doesn't implement this |
2343 // interface. The link resolver checks this but only for the first | 2600 // interface. The link resolver checks this but only for the first |
2344 // time this interface is called. | 2601 // time this interface is called. |
2345 if (i == int2->itable_length()) { | 2602 if (i == int2->itable_length()) { |
2346 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), ""); | 2603 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "", note_no_trap); |
2347 } | 2604 } |
2348 int mindex = cache->f2_as_index(); | 2605 int mindex = cache->f2_as_index(); |
2349 itableMethodEntry* im = ki->first_method_entry(rcvr->klass()); | 2606 itableMethodEntry* im = ki->first_method_entry(rcvr->klass()); |
2350 callee = im[mindex].method(); | 2607 callee = im[mindex].method(); |
2351 if (callee == NULL) { | 2608 if (callee == NULL) { |
2352 VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), ""); | 2609 VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "", note_no_trap); |
2353 } | 2610 } |
2611 | |
2612 // Profile virtual call. | |
2613 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass()); | |
2354 | 2614 |
2355 istate->set_callee(callee); | 2615 istate->set_callee(callee); |
2356 istate->set_callee_entry_point(callee->from_interpreted_entry()); | 2616 istate->set_callee_entry_point(callee->from_interpreted_entry()); |
2357 #ifdef VM_JVMTI | 2617 #ifdef VM_JVMTI |
2358 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { | 2618 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { |
2381 istate->set_msg(call_method); | 2641 istate->set_msg(call_method); |
2382 { | 2642 { |
2383 Method* callee; | 2643 Method* callee; |
2384 if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) { | 2644 if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) { |
2385 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); | 2645 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); |
2386 if (cache->is_vfinal()) callee = cache->f2_as_vfinal_method(); | 2646 if (cache->is_vfinal()) { |
2387 else { | 2647 callee = cache->f2_as_vfinal_method(); |
2648 // Profile final call. | |
2649 BI_PROFILE_UPDATE_FINALCALL(); | |
2650 } else { | |
2388 // get receiver | 2651 // get receiver |
2389 int parms = cache->parameter_size(); | 2652 int parms = cache->parameter_size(); |
2390 // this works but needs a resourcemark and seems to create a vtable on every call: | 2653 // this works but needs a resourcemark and seems to create a vtable on every call: |
2391 // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index()); | 2654 // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index()); |
2392 // | 2655 // |
2393 // this fails with an assert | 2656 // this fails with an assert |
2394 // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass()); | 2657 // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass()); |
2395 // but this works | 2658 // but this works |
2396 VERIFY_OOP(STACK_OBJECT(-parms)); | 2659 oop rcvr = STACK_OBJECT(-parms); |
2397 InstanceKlass* rcvrKlass = (InstanceKlass*) STACK_OBJECT(-parms)->klass(); | 2660 VERIFY_OOP(rcvr); |
2661 InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass(); | |
2398 /* | 2662 /* |
2399 Executing this code in java.lang.String: | 2663 Executing this code in java.lang.String: |
2400 public String(char value[]) { | 2664 public String(char value[]) { |
2401 this.count = value.length; | 2665 this.count = value.length; |
2402 this.value = (char[])value.clone(); | 2666 this.value = (char[])value.clone(); |
2410 because rcvr->klass()->oop_is_instance() == 0 | 2674 because rcvr->klass()->oop_is_instance() == 0 |
2411 However it seems to have a vtable in the right location. Huh? | 2675 However it seems to have a vtable in the right location. Huh? |
2412 | 2676 |
2413 */ | 2677 */ |
2414 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; | 2678 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; |
2679 // Profile virtual call. | |
2680 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass()); | |
2415 } | 2681 } |
2416 } else { | 2682 } else { |
2417 if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) { | 2683 if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) { |
2418 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); | 2684 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); |
2419 } | 2685 } |
2420 callee = cache->f1_as_method(); | 2686 callee = cache->f1_as_method(); |
2687 | |
2688 // Profile call. | |
2689 BI_PROFILE_UPDATE_CALL(); | |
2421 } | 2690 } |
2422 | 2691 |
2423 istate->set_callee(callee); | 2692 istate->set_callee(callee); |
2424 istate->set_callee_entry_point(callee->from_interpreted_entry()); | 2693 istate->set_callee_entry_point(callee->from_interpreted_entry()); |
2425 #ifdef VM_JVMTI | 2694 #ifdef VM_JVMTI |
2437 CASE(_newarray): { | 2706 CASE(_newarray): { |
2438 BasicType atype = (BasicType) *(pc+1); | 2707 BasicType atype = (BasicType) *(pc+1); |
2439 jint size = STACK_INT(-1); | 2708 jint size = STACK_INT(-1); |
2440 CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size), | 2709 CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size), |
2441 handle_exception); | 2710 handle_exception); |
2711 // Must prevent reordering of stores for object initialization | |
2712 // with stores that publish the new object. | |
2713 OrderAccess::storestore(); | |
2442 SET_STACK_OBJECT(THREAD->vm_result(), -1); | 2714 SET_STACK_OBJECT(THREAD->vm_result(), -1); |
2443 THREAD->set_vm_result(NULL); | 2715 THREAD->set_vm_result(NULL); |
2444 | 2716 |
2445 UPDATE_PC_AND_CONTINUE(2); | 2717 UPDATE_PC_AND_CONTINUE(2); |
2446 } | 2718 } |
2467 } | 2739 } |
2468 | 2740 |
2469 CASE(_goto): | 2741 CASE(_goto): |
2470 { | 2742 { |
2471 int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1); | 2743 int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1); |
2744 // Profile jump. | |
2745 BI_PROFILE_UPDATE_JUMP(); | |
2472 address branch_pc = pc; | 2746 address branch_pc = pc; |
2473 UPDATE_PC(offset); | 2747 UPDATE_PC(offset); |
2474 DO_BACKEDGE_CHECKS(offset, branch_pc); | 2748 DO_BACKEDGE_CHECKS(offset, branch_pc); |
2475 CONTINUE; | 2749 CONTINUE; |
2476 } | 2750 } |
2483 } | 2757 } |
2484 | 2758 |
2485 CASE(_goto_w): | 2759 CASE(_goto_w): |
2486 { | 2760 { |
2487 int32_t offset = Bytes::get_Java_u4(pc + 1); | 2761 int32_t offset = Bytes::get_Java_u4(pc + 1); |
2762 // Profile jump. | |
2763 BI_PROFILE_UPDATE_JUMP(); | |
2488 address branch_pc = pc; | 2764 address branch_pc = pc; |
2489 UPDATE_PC(offset); | 2765 UPDATE_PC(offset); |
2490 DO_BACKEDGE_CHECKS(offset, branch_pc); | 2766 DO_BACKEDGE_CHECKS(offset, branch_pc); |
2491 CONTINUE; | 2767 CONTINUE; |
2492 } | 2768 } |
2493 | 2769 |
2494 /* return from a jsr or jsr_w */ | 2770 /* return from a jsr or jsr_w */ |
2495 | 2771 |
2496 CASE(_ret): { | 2772 CASE(_ret): { |
2773 // Profile ret. | |
2774 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(pc[1])))); | |
2775 // Now, update the pc. | |
2497 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1])); | 2776 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1])); |
2498 UPDATE_PC_AND_CONTINUE(0); | 2777 UPDATE_PC_AND_CONTINUE(0); |
2499 } | 2778 } |
2500 | 2779 |
2501 /* debugger breakpoint */ | 2780 /* debugger breakpoint */ |
2565 MORE_STACK(1); | 2844 MORE_STACK(1); |
2566 pc = METHOD->code_base() + continuation_bci; | 2845 pc = METHOD->code_base() + continuation_bci; |
2567 if (TraceExceptions) { | 2846 if (TraceExceptions) { |
2568 ttyLocker ttyl; | 2847 ttyLocker ttyl; |
2569 ResourceMark rm; | 2848 ResourceMark rm; |
2570 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop()); | 2849 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), (void*)except_oop()); |
2571 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string()); | 2850 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string()); |
2572 tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT, | 2851 tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT, |
2573 pc - (intptr_t)METHOD->code_base(), | 2852 istate->bcp() - (intptr_t)METHOD->code_base(), |
2574 continuation_bci, THREAD); | 2853 continuation_bci, THREAD); |
2575 } | 2854 } |
2576 // for AbortVMOnException flag | 2855 // for AbortVMOnException flag |
2577 NOT_PRODUCT(Exceptions::debug_check_abort(except_oop)); | 2856 NOT_PRODUCT(Exceptions::debug_check_abort(except_oop)); |
2857 | |
2858 // Update profiling data. | |
2859 BI_PROFILE_ALIGN_TO_CURRENT_BCI(); | |
2578 goto run; | 2860 goto run; |
2579 } | 2861 } |
2580 if (TraceExceptions) { | 2862 if (TraceExceptions) { |
2581 ttyLocker ttyl; | 2863 ttyLocker ttyl; |
2582 ResourceMark rm; | 2864 ResourceMark rm; |
2583 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop()); | 2865 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), (void*)except_oop()); |
2584 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string()); | 2866 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string()); |
2585 tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT, | 2867 tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT, |
2586 pc - (intptr_t) METHOD->code_base(), | 2868 istate->bcp() - (intptr_t)METHOD->code_base(), |
2587 THREAD); | 2869 THREAD); |
2588 } | 2870 } |
2589 // for AbortVMOnException flag | 2871 // for AbortVMOnException flag |
2590 NOT_PRODUCT(Exceptions::debug_check_abort(except_oop)); | 2872 NOT_PRODUCT(Exceptions::debug_check_abort(except_oop)); |
2591 // No handler in this activation, unwind and try again | 2873 // No handler in this activation, unwind and try again |
2592 THREAD->set_pending_exception(except_oop(), NULL, 0); | 2874 THREAD->set_pending_exception(except_oop(), NULL, 0); |
2593 goto handle_return; | 2875 goto handle_return; |
2594 } /* handle_exception: */ | 2876 } // handle_exception: |
2595 | |
2596 | |
2597 | 2877 |
2598 // Return from an interpreter invocation with the result of the interpretation | 2878 // Return from an interpreter invocation with the result of the interpretation |
2599 // on the top of the Java Stack (or a pending exception) | 2879 // on the top of the Java Stack (or a pending exception) |
2600 | 2880 |
2601 handle_Pop_Frame: | 2881 handle_Pop_Frame: { |
2602 | 2882 |
2603 // We don't really do anything special here except we must be aware | 2883 // We don't really do anything special here except we must be aware |
2604 // that we can get here without ever locking the method (if sync). | 2884 // that we can get here without ever locking the method (if sync). |
2605 // Also we skip the notification of the exit. | 2885 // Also we skip the notification of the exit. |
2606 | 2886 |
2607 istate->set_msg(popping_frame); | 2887 istate->set_msg(popping_frame); |
2608 // Clear pending so while the pop is in process | 2888 // Clear pending so while the pop is in process |
2609 // we don't start another one if a call_vm is done. | 2889 // we don't start another one if a call_vm is done. |
2610 THREAD->clr_pop_frame_pending(); | 2890 THREAD->clr_pop_frame_pending(); |
2611 // Let interpreter (only) see that we're in the process of popping a frame | 2891 // Let interpreter (only) see that we're in the process of popping a frame
2612 THREAD->set_pop_frame_in_process(); | 2892 THREAD->set_pop_frame_in_process(); |
2613 | 2893 |
2614 handle_return: | 2894 goto handle_return; |
2615 { | 2895 |
2896 } // handle_Pop_Frame | |
2897 | |
2898 // ForceEarlyReturn ends a method, and returns to the caller with a return value | |
2899 // given by the invoker of the early return. | |
2900 handle_Early_Return: { | |
2901 | |
2902 istate->set_msg(early_return); | |
2903 | |
2904 // Clear expression stack. | |
2905 topOfStack = istate->stack_base() - Interpreter::stackElementWords; | |
2906 | |
2907 JvmtiThreadState *ts = THREAD->jvmti_thread_state(); | |
2908 | |
2909 // Push the value to be returned. | |
2910 switch (istate->method()->result_type()) { | |
2911 case T_BOOLEAN: | |
2912 case T_SHORT: | |
2913 case T_BYTE: | |
2914 case T_CHAR: | |
2915 case T_INT: | |
2916 SET_STACK_INT(ts->earlyret_value().i, 0); | |
2917 MORE_STACK(1); | |
2918 break; | |
2919 case T_LONG: | |
2920 SET_STACK_LONG(ts->earlyret_value().j, 1); | |
2921 MORE_STACK(2); | |
2922 break; | |
2923 case T_FLOAT: | |
2924 SET_STACK_FLOAT(ts->earlyret_value().f, 0); | |
2925 MORE_STACK(1); | |
2926 break; | |
2927 case T_DOUBLE: | |
2928 SET_STACK_DOUBLE(ts->earlyret_value().d, 1); | |
2929 MORE_STACK(2); | |
2930 break; | |
2931 case T_ARRAY: | |
2932 case T_OBJECT: | |
2933 SET_STACK_OBJECT(ts->earlyret_oop(), 0); | |
2934 MORE_STACK(1); | |
2935 break; | |
2936 } | |
2937 | |
2938 ts->clr_earlyret_value(); | |
2939 ts->set_earlyret_oop(NULL); | |
2940 ts->clr_earlyret_pending(); | |
2941 | |
2942 // Fall through to handle_return. | |
2943 | |
2944 } // handle_Early_Return | |
2945 | |
2946 handle_return: { | |
2947 // A storestore barrier is required to order initialization of | |
2948 // final fields with publishing the reference to the object that | |
2949 // holds the field. Without the barrier the value of final fields | |
2950 // can be observed to change. | |
2951 OrderAccess::storestore(); | |
2952 | |
2616 DECACHE_STATE(); | 2953 DECACHE_STATE(); |
2617 | 2954 |
2618 bool suppress_error = istate->msg() == popping_frame; | 2955 bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return; |
2619 bool suppress_exit_event = THREAD->has_pending_exception() || suppress_error; | 2956 bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame; |
2620 Handle original_exception(THREAD, THREAD->pending_exception()); | 2957 Handle original_exception(THREAD, THREAD->pending_exception()); |
2621 Handle illegal_state_oop(THREAD, NULL); | 2958 Handle illegal_state_oop(THREAD, NULL); |
2622 | 2959 |
2623 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner | 2960 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner |
2624 // in any following VM entries from freeing our live handles, but illegal_state_oop | 2961 // in any following VM entries from freeing our live handles, but illegal_state_oop |
2675 oop lockee = end->obj(); | 3012 oop lockee = end->obj(); |
2676 if (lockee != NULL) { | 3013 if (lockee != NULL) { |
2677 BasicLock* lock = end->lock(); | 3014 BasicLock* lock = end->lock(); |
2678 markOop header = lock->displaced_header(); | 3015 markOop header = lock->displaced_header(); |
2679 end->set_obj(NULL); | 3016 end->set_obj(NULL); |
2680 // If it isn't recursive we either must swap old header or call the runtime | 3017 |
2681 if (header != NULL) { | 3018 if (!lockee->mark()->has_bias_pattern()) { |
2682 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) { | 3019 // If it isn't recursive we either must swap old header or call the runtime |
2683 // restore object for the slow case | 3020 if (header != NULL) { |
2684 end->set_obj(lockee); | 3021 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) { |
2685 { | 3022 // restore object for the slow case |
2686 // Prevent any HandleMarkCleaner from freeing our live handles | 3023 end->set_obj(lockee); |
2687 HandleMark __hm(THREAD); | 3024 { |
2688 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end)); | 3025 // Prevent any HandleMarkCleaner from freeing our live handles |
3026 HandleMark __hm(THREAD); | |
3027 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end)); | |
3028 } | |
2689 } | 3029 } |
2690 } | 3030 } |
2691 } | 3031 } |
2692 // One error is plenty | 3032 // One error is plenty |
2693 if (illegal_state_oop() == NULL && !suppress_error) { | 3033 if (illegal_state_oop() == NULL && !suppress_error) { |
2728 // and must use first monitor slot. | 3068 // and must use first monitor slot. |
2729 // | 3069 // |
2730 oop rcvr = base->obj(); | 3070 oop rcvr = base->obj(); |
2731 if (rcvr == NULL) { | 3071 if (rcvr == NULL) { |
2732 if (!suppress_error) { | 3072 if (!suppress_error) { |
2733 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), ""); | 3073 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap); |
2734 illegal_state_oop = THREAD->pending_exception(); | 3074 illegal_state_oop = THREAD->pending_exception(); |
3075 THREAD->clear_pending_exception(); | |
3076 } | |
3077 } else if (UseHeavyMonitors) { | |
3078 { | |
3079 // Prevent any HandleMarkCleaner from freeing our live handles. | |
3080 HandleMark __hm(THREAD); | |
3081 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); | |
3082 } | |
3083 if (THREAD->has_pending_exception()) { | |
3084 if (!suppress_error) illegal_state_oop = THREAD->pending_exception(); | |
2735 THREAD->clear_pending_exception(); | 3085 THREAD->clear_pending_exception(); |
2736 } | 3086 } |
2737 } else { | 3087 } else { |
2738 BasicLock* lock = base->lock(); | 3088 BasicLock* lock = base->lock(); |
2739 markOop header = lock->displaced_header(); | 3089 markOop header = lock->displaced_header(); |
2740 base->set_obj(NULL); | 3090 base->set_obj(NULL); |
2741 // If it isn't recursive we either must swap old header or call the runtime | 3091 |
2742 if (header != NULL) { | 3092 if (!rcvr->mark()->has_bias_pattern()) { |
2743 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) { | 3093 base->set_obj(NULL); |
2744 // restore object for the slow case | 3094 // If it isn't recursive we either must swap old header or call the runtime |
2745 base->set_obj(rcvr); | 3095 if (header != NULL) { |
2746 { | 3096 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) { |
2747 // Prevent any HandleMarkCleaner from freeing our live handles | 3097 // restore object for the slow case |
2748 HandleMark __hm(THREAD); | 3098 base->set_obj(rcvr); |
2749 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); | 3099 { |
2750 } | 3100 // Prevent any HandleMarkCleaner from freeing our live handles |
2751 if (THREAD->has_pending_exception()) { | 3101 HandleMark __hm(THREAD); |
2752 if (!suppress_error) illegal_state_oop = THREAD->pending_exception(); | 3102 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); |
2753 THREAD->clear_pending_exception(); | 3103 } |
3104 if (THREAD->has_pending_exception()) { | |
3105 if (!suppress_error) illegal_state_oop = THREAD->pending_exception(); | |
3106 THREAD->clear_pending_exception(); | |
3107 } | |
2754 } | 3108 } |
2755 } | 3109 } |
2756 } | 3110 } |
2757 } | 3111 } |
2758 } | 3112 } |
2759 } | 3113 } |
2760 } | 3114 } |
3115 // Clear the do_not_unlock flag now. | |
3116 THREAD->clr_do_not_unlock(); | |
2761 | 3117 |
2762 // | 3118 // |
2763 // Notify jvmti/jvmdi | 3119 // Notify jvmti/jvmdi |
2764 // | 3120 // |
2765 // NOTE: we do not notify a method_exit if we have a pending exception, | 3121 // NOTE: we do not notify a method_exit if we have a pending exception, |
2800 // | 3156 // |
2801 // See if we are returning any exception | 3157 // See if we are returning any exception |
2802 // A pending exception that was pending prior to a possible popping frame | 3158 // A pending exception that was pending prior to a possible popping frame |
2803 // overrides the popping frame. | 3159 // overrides the popping frame. |
2804 // | 3160 // |
2805 assert(!suppress_error || suppress_error && illegal_state_oop() == NULL, "Error was not suppressed"); | 3161 assert(!suppress_error || (suppress_error && illegal_state_oop() == NULL), "Error was not suppressed"); |
2806 if (illegal_state_oop() != NULL || original_exception() != NULL) { | 3162 if (illegal_state_oop() != NULL || original_exception() != NULL) { |
2807 // inform the frame manager we have no result | 3163 // Inform the frame manager we have no result. |
2808 istate->set_msg(throwing_exception); | 3164 istate->set_msg(throwing_exception); |
2809 if (illegal_state_oop() != NULL) | 3165 if (illegal_state_oop() != NULL) |
2810 THREAD->set_pending_exception(illegal_state_oop(), NULL, 0); | 3166 THREAD->set_pending_exception(illegal_state_oop(), NULL, 0); |
2811 else | 3167 else |
2812 THREAD->set_pending_exception(original_exception(), NULL, 0); | 3168 THREAD->set_pending_exception(original_exception(), NULL, 0); |
2813 istate->set_return_kind((Bytecodes::Code)opcode); | |
2814 UPDATE_PC_AND_RETURN(0); | 3169 UPDATE_PC_AND_RETURN(0); |
2815 } | 3170 } |
2816 | 3171 |
2817 if (istate->msg() == popping_frame) { | 3172 if (istate->msg() == popping_frame) { |
2818 // Make it simpler on the assembly code and set the message for the frame pop. | 3173 // Make it simpler on the assembly code and set the message for the frame pop. |
2827 // | 3182 // |
2828 THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize), | 3183 THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize), |
2829 LOCALS_SLOT(METHOD->size_of_parameters() - 1)); | 3184 LOCALS_SLOT(METHOD->size_of_parameters() - 1)); |
2830 THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit); | 3185 THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit); |
2831 } | 3186 } |
2832 THREAD->clr_pop_frame_in_process(); | 3187 } else { |
3188 istate->set_msg(return_from_method); | |
2833 } | 3189 } |
2834 | 3190 |
2835 // Normal return | 3191 // Normal return |
2836 // Advance the pc and return to frame manager | 3192 // Advance the pc and return to frame manager |
2837 istate->set_msg(return_from_method); | |
2838 istate->set_return_kind((Bytecodes::Code)opcode); | |
2839 UPDATE_PC_AND_RETURN(1); | 3193 UPDATE_PC_AND_RETURN(1); |
2840 } /* handle_return: */ | 3194 } /* handle_return: */ |
2841 | 3195 |
2842 // This is really a fatal error return | 3196 // This is really a fatal error return |
2843 | 3197 |
2881 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) { | 3235 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) { |
2882 return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]); | 3236 return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]); |
2883 } | 3237 } |
2884 | 3238 |
2885 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) { | 3239 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) { |
2886 return (oop)tos [Interpreter::expr_index_at(-offset)]; | 3240 return cast_to_oop(tos [Interpreter::expr_index_at(-offset)]); |
2887 } | 3241 } |
2888 | 3242 |
2889 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) { | 3243 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) { |
2890 return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d; | 3244 return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d; |
2891 } | 3245 } |
2950 } | 3304 } |
2951 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) { | 3305 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) { |
2952 return (jfloat)locals[Interpreter::local_index_at(-offset)]; | 3306 return (jfloat)locals[Interpreter::local_index_at(-offset)]; |
2953 } | 3307 } |
2954 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) { | 3308 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) { |
2955 return (oop)locals[Interpreter::local_index_at(-offset)]; | 3309 return cast_to_oop(locals[Interpreter::local_index_at(-offset)]); |
2956 } | 3310 } |
2957 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) { | 3311 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) { |
2958 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d; | 3312 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d; |
2959 } | 3313 } |
2960 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) { | 3314 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) { |
3108 tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee); | 3462 tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee); |
3109 tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point); | 3463 tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point); |
3110 tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance); | 3464 tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance); |
3111 tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf); | 3465 tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf); |
3112 tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry); | 3466 tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry); |
3113 tty->print_cr("result_return_kind 0x%x ", (int) this->_result._return_kind); | |
3114 tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link); | 3467 tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link); |
3115 tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) this->_oop_temp); | 3468 tty->print_cr("native_mirror: " INTPTR_FORMAT, (void*) this->_oop_temp); |
3116 tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base); | 3469 tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base); |
3117 tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit); | 3470 tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit); |
3118 tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base); | 3471 tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base); |
3119 #ifdef SPARC | 3472 #ifdef SPARC |
3120 tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc); | 3473 tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc); |
3127 #endif // !ZERO | 3480 #endif // !ZERO |
3128 tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link); | 3481 tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link); |
3129 } | 3482 } |
3130 | 3483 |
3131 extern "C" { | 3484 extern "C" { |
3132 void PI(uintptr_t arg) { | 3485 void PI(uintptr_t arg) { |
3133 ((BytecodeInterpreter*)arg)->print(); | 3486 ((BytecodeInterpreter*)arg)->print(); |
3134 } | 3487 } |
3135 } | 3488 } |
3136 #endif // PRODUCT | 3489 #endif // PRODUCT |
3137 | 3490 |
3138 #endif // JVMTI | 3491 #endif // JVMTI |
3139 #endif // CC_INTERP | 3492 #endif // CC_INTERP |