comparison src/share/vm/code/nmethod.cpp @ 6948:e522a00b91aa

Merge with http://hg.openjdk.java.net/hsx/hsx25/hotspot/ after NPG - C++ build works
author Doug Simon <doug.simon@oracle.com>
date Mon, 12 Nov 2012 23:14:12 +0100
parents 957c266d8bc5 18fb7da42534
children 2cb439954abf
comparing base 6711:ae13cc658b80 with 6948:e522a00b91aa
32 #include "compiler/compileBroker.hpp" 32 #include "compiler/compileBroker.hpp"
33 #include "compiler/compileLog.hpp" 33 #include "compiler/compileLog.hpp"
34 #include "compiler/compilerOracle.hpp" 34 #include "compiler/compilerOracle.hpp"
35 #include "compiler/disassembler.hpp" 35 #include "compiler/disassembler.hpp"
36 #include "interpreter/bytecode.hpp" 36 #include "interpreter/bytecode.hpp"
37 #include "oops/methodDataOop.hpp" 37 #include "oops/methodData.hpp"
38 #include "prims/jvmtiRedefineClassesTrace.hpp" 38 #include "prims/jvmtiRedefineClassesTrace.hpp"
39 #include "prims/jvmtiImpl.hpp" 39 #include "prims/jvmtiImpl.hpp"
40 #include "runtime/sharedRuntime.hpp" 40 #include "runtime/sharedRuntime.hpp"
41 #include "runtime/sweeper.hpp" 41 #include "runtime/sweeper.hpp"
42 #include "utilities/dtrace.hpp" 42 #include "utilities/dtrace.hpp"
60 HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload, 60 HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,
61 char*, int, char*, int, char*, int); 61 char*, int, char*, int, char*, int);
62 62
63 #define DTRACE_METHOD_UNLOAD_PROBE(method) \ 63 #define DTRACE_METHOD_UNLOAD_PROBE(method) \
64 { \ 64 { \
65 methodOop m = (method); \ 65 Method* m = (method); \
66 if (m != NULL) { \ 66 if (m != NULL) { \
67 Symbol* klass_name = m->klass_name(); \ 67 Symbol* klass_name = m->klass_name(); \
68 Symbol* name = m->name(); \ 68 Symbol* name = m->name(); \
69 Symbol* signature = m->signature(); \ 69 Symbol* signature = m->signature(); \
70 HS_DTRACE_PROBE6(hotspot, compiled__method__unload, \ 70 HS_DTRACE_PROBE6(hotspot, compiled__method__unload, \
74 } \ 74 } \
75 } 75 }
76 #else /* USDT2 */ 76 #else /* USDT2 */
77 #define DTRACE_METHOD_UNLOAD_PROBE(method) \ 77 #define DTRACE_METHOD_UNLOAD_PROBE(method) \
78 { \ 78 { \
79 methodOop m = (method); \ 79 Method* m = (method); \
80 if (m != NULL) { \ 80 if (m != NULL) { \
81 Symbol* klass_name = m->klass_name(); \ 81 Symbol* klass_name = m->klass_name(); \
82 Symbol* name = m->name(); \ 82 Symbol* name = m->name(); \
83 Symbol* signature = m->signature(); \ 83 Symbol* signature = m->signature(); \
84 HOTSPOT_COMPILED_METHOD_UNLOAD( \ 84 HOTSPOT_COMPILED_METHOD_UNLOAD( \
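Both USDT variants of DTRACE_METHOD_UNLOAD_PROBE now bind the probed method as Method* instead of methodOop: after Permanent Generation removal (NPG), methods are plain C++ metadata in native memory rather than GC-managed oops, so only the declared type in the macro body changes. A minimal sketch of the same probe shape; Method and Symbol here are hypothetical stand-ins, not the real HotSpot classes:

    #include <cstdio>

    struct Symbol { const char* bytes; int len; };
    struct Method {                       // hypothetical stand-in, not the VM class
      Symbol* klass_name() { return &_k; }
      Symbol* name()       { return &_n; }
      Symbol* signature()  { return &_s; }
      Symbol _k, _n, _s;
    };

    // Pre-NPG the probe took a methodOop (a heap oop); post-NPG a Method*
    // (native metadata). The accessors keep their names, so the probe body
    // is otherwise untouched.
    static void unload_probe(Method* m) {
      if (m != nullptr) {
        std::printf("unload %.*s.%.*s%.*s\n",
                    m->klass_name()->len, m->klass_name()->bytes,
                    m->name()->len,       m->name()->bytes,
                    m->signature()->len,  m->signature()->bytes);
      }
    }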
462 _has_flushed_dependencies = 0; 462 _has_flushed_dependencies = 0;
463 _speculatively_disconnected = 0; 463 _speculatively_disconnected = 0;
464 _has_unsafe_access = 0; 464 _has_unsafe_access = 0;
465 _has_method_handle_invokes = 0; 465 _has_method_handle_invokes = 0;
466 _lazy_critical_native = 0; 466 _lazy_critical_native = 0;
467 _has_wide_vectors = 0;
467 _marked_for_deoptimization = 0; 468 _marked_for_deoptimization = 0;
468 _lock_count = 0; 469 _lock_count = 0;
469 _stack_traversal_mark = 0; 470 _stack_traversal_mark = 0;
470 _unload_reported = false; // jvmti state 471 _unload_reported = false; // jvmti state
471 472
478 _osr_link = NULL; 479 _osr_link = NULL;
479 _scavenge_root_link = NULL; 480 _scavenge_root_link = NULL;
480 _scavenge_root_state = 0; 481 _scavenge_root_state = 0;
481 _saved_nmethod_link = NULL; 482 _saved_nmethod_link = NULL;
482 _compiler = NULL; 483 _compiler = NULL;
483 484 #ifdef GRAAL
484 _graal_compiled_method = NULL; 485 _graal_installed_code = (oop) Universe::non_oop_word();
485 486 #endif
486 #ifdef HAVE_DTRACE_H 487 #ifdef HAVE_DTRACE_H
487 _trap_offset = 0; 488 _trap_offset = 0;
488 #endif // def HAVE_DTRACE_H 489 #endif // def HAVE_DTRACE_H
489 } 490 }
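Note the new-side initialization of _graal_installed_code to Universe::non_oop_word() rather than NULL: a sentinel meaning "not yet set", which the do_unloading hunk further down checks for so that a half-initialized nmethod is not scanned. A sketch of the sentinel idiom, with hypothetical names standing in for the VM types:

    #include <cstdint>

    typedef void* oop;                                        // hypothetical stand-in
    inline oop non_oop_word() { return reinterpret_cast<oop>(~uintptr_t(0)); }

    struct InstalledCodeHolder {
      oop _installed_code;
      // "No value assigned yet" stays distinguishable from a legitimate
      // NULL, which here means "explicitly cleared on unload".
      InstalledCodeHolder() : _installed_code(non_oop_word()) {}
      bool is_set() const { return _installed_code != non_oop_word(); }
    };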
490 491
496 int frame_complete, 497 int frame_complete,
497 int frame_size, 498 int frame_size,
498 ByteSize basic_lock_owner_sp_offset, 499 ByteSize basic_lock_owner_sp_offset,
499 ByteSize basic_lock_sp_offset, 500 ByteSize basic_lock_sp_offset,
500 OopMapSet* oop_maps) { 501 OopMapSet* oop_maps) {
502 code_buffer->finalize_oop_references(method);
501 // create nmethod 503 // create nmethod
502 nmethod* nm = NULL; 504 nmethod* nm = NULL;
503 { 505 {
504 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 506 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
505 int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod)); 507 int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
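Each factory now calls code_buffer->finalize_oop_references(method) before taking CodeCache_lock with _no_safepoint_check_flag; the plausible reading is that resolving recorded references, which may interact with the heap, must complete before the no-safepoint critical section begins. A sketch of that ordering pattern, with a plain mutex and a toy recorder standing in for the VM lock and the oop recorder:

    #include <cstddef>
    #include <mutex>
    #include <vector>

    struct Recorder {                         // hypothetical stand-in for the oop recorder
      std::vector<int> handles, resolved;
      void finalize() { resolved = handles; } // resolve everything up front
    };

    static std::mutex code_cache_lock;        // stand-in for CodeCache_lock

    void* install(Recorder& r, std::size_t size) {
      r.finalize();                           // like finalize_oop_references(method)
      std::lock_guard<std::mutex> g(code_cache_lock);
      return ::operator new(size);            // only allocation happens under the lock
    }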
530 CodeBuffer *code_buffer, 532 CodeBuffer *code_buffer,
531 int vep_offset, 533 int vep_offset,
532 int trap_offset, 534 int trap_offset,
533 int frame_complete, 535 int frame_complete,
534 int frame_size) { 536 int frame_size) {
537 code_buffer->finalize_oop_references(method);
535 // create nmethod 538 // create nmethod
536 nmethod* nm = NULL; 539 nmethod* nm = NULL;
537 { 540 {
538 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 541 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
539 int nmethod_size = allocation_size(code_buffer, sizeof(nmethod)); 542 int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
574 AbstractCompiler* compiler, 577 AbstractCompiler* compiler,
575 int comp_level 578 int comp_level
576 ) 579 )
577 { 580 {
578 assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR"); 581 assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
582 code_buffer->finalize_oop_references(method);
579 // create nmethod 583 // create nmethod
580 nmethod* nm = NULL; 584 nmethod* nm = NULL;
581 { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 585 { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
582 int nmethod_size = 586 int nmethod_size =
583 allocation_size(code_buffer, sizeof(nmethod)) 587 allocation_size(code_buffer, sizeof(nmethod))
602 // which are dependent on those classes. The slow way is to 606 // which are dependent on those classes. The slow way is to
603 // check every nmethod for dependencies which makes it linear in 607 // check every nmethod for dependencies which makes it linear in
604 // the number of methods compiled. For applications with a lot 608 // the number of methods compiled. For applications with a lot
605 // of classes the slow way is too slow. 609 // of classes the slow way is too slow.
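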
606 for (Dependencies::DepStream deps(nm); deps.next(); ) { 610 for (Dependencies::DepStream deps(nm); deps.next(); ) {
607 klassOop klass = deps.context_type(); 611 Klass* klass = deps.context_type();
608 if (klass == NULL) continue; // ignore things like evol_method 612 if (klass == NULL) continue; // ignore things like evol_method
609 613
610 // record this nmethod as dependent on this klass 614 // record this nmethod as dependent on this klass
611 instanceKlass::cast(klass)->add_dependent_nmethod(nm); 615 InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
612 } 616 }
613 } 617 }
614 if (nm != NULL) nmethod_stats.note_nmethod(nm); 618 if (nm != NULL) nmethod_stats.note_nmethod(nm);
615 if (PrintAssembly && nm != NULL) 619 if (PrintAssembly && nm != NULL)
616 Disassembler::decode(nm); 620 Disassembler::decode(nm);
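As the comment in this hunk explains, the new nmethod registers itself on the InstanceKlass of every dependency context type, so invalidation after a class change walks only that klass's dependent list instead of every compiled method. A miniature of the reverse-edge registry, with hypothetical types:

    #include <unordered_set>
    #include <vector>

    struct NMethod;
    struct Klass {                                     // hypothetical miniature
      std::unordered_set<NMethod*> dependents;         // like InstanceKlass's dependency list
      void add_dependent_nmethod(NMethod* nm) { dependents.insert(nm); }
    };
    struct NMethod { std::vector<Klass*> context_types; };

    // Mirror of the registration loop above: one reverse edge per context type.
    void register_dependencies(NMethod* nm) {
      for (Klass* k : nm->context_types)
        if (k != nullptr) k->add_dependent_nmethod(nm);
    }

Invalidation then touches only k->dependents, keeping the cost proportional to the dependents of the changed klass rather than to the whole code cache.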
628 } 632 }
629 633
630 634
631 // For native wrappers 635 // For native wrappers
632 nmethod::nmethod( 636 nmethod::nmethod(
633 methodOop method, 637 Method* method,
634 int nmethod_size, 638 int nmethod_size,
635 int compile_id, 639 int compile_id,
636 CodeOffsets* offsets, 640 CodeOffsets* offsets,
637 CodeBuffer* code_buffer, 641 CodeBuffer* code_buffer,
638 int frame_size, 642 int frame_size,
659 _orig_pc_offset = 0; 663 _orig_pc_offset = 0;
660 664
661 _consts_offset = data_offset(); 665 _consts_offset = data_offset();
662 _stub_offset = data_offset(); 666 _stub_offset = data_offset();
663 _oops_offset = data_offset(); 667 _oops_offset = data_offset();
664 _scopes_data_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize); 668 _metadata_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
669 _scopes_data_offset = _metadata_offset + round_to(code_buffer->total_metadata_size(), wordSize);
665 _scopes_pcs_offset = _scopes_data_offset; 670 _scopes_pcs_offset = _scopes_data_offset;
666 _dependencies_offset = _scopes_pcs_offset; 671 _dependencies_offset = _scopes_pcs_offset;
667 _handler_table_offset = _dependencies_offset; 672 _handler_table_offset = _dependencies_offset;
668 _nul_chk_table_offset = _handler_table_offset; 673 _nul_chk_table_offset = _handler_table_offset;
669 _nmethod_end_offset = _nul_chk_table_offset; 674 _nmethod_end_offset = _nul_chk_table_offset;
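The new _metadata_offset inserts a Metadata* section between the oops and the scopes data; in a native wrapper everything after it is empty, which is why the remaining offsets all collapse onto the same value in this hunk. A sketch of the offset arithmetic (round_to and the word sizes are assumptions mirroring the code above):

    #include <cstddef>

    static std::size_t round_to(std::size_t x, std::size_t unit) {
      return (x + unit - 1) & ~(unit - 1);    // unit assumed a power of two
    }

    struct Layout {
      std::size_t oops_offset, metadata_offset, scopes_data_offset, end_offset;
    };

    Layout native_wrapper_layout(std::size_t data_offset,
                                 std::size_t total_oop_bytes,
                                 std::size_t total_metadata_bytes) {
      Layout l;
      l.oops_offset        = data_offset;
      // New with NPG: Metadata* words get their own section after the oops.
      l.metadata_offset    = l.oops_offset + round_to(total_oop_bytes, sizeof(void*));
      l.scopes_data_offset = l.metadata_offset + round_to(total_metadata_bytes, sizeof(void*));
      // A native wrapper has no scopes/pcs/deps/handler/nul-chk data, so
      // every remaining offset equals scopes_data_offset, as in the hunk.
      l.end_offset         = l.scopes_data_offset;
      return l;
    }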
673 _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry); 678 _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry);
674 _osr_entry_point = NULL; 679 _osr_entry_point = NULL;
675 _exception_cache = NULL; 680 _exception_cache = NULL;
676 _pc_desc_cache.reset_to(NULL); 681 _pc_desc_cache.reset_to(NULL);
677 682
678 code_buffer->copy_oops_to(this); 683 code_buffer->copy_values_to(this);
679 if (ScavengeRootsInCode && detect_scavenge_root_oops()) { 684 if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
680 CodeCache::add_scavenge_root_nmethod(this); 685 CodeCache::add_scavenge_root_nmethod(this);
681 } 686 }
682 debug_only(verify_scavenge_root_oops()); 687 debug_only(verify_scavenge_root_oops());
683 CodeCache::commit(this); 688 CodeCache::commit(this);
697 // print the header part first 702 // print the header part first
698 print(); 703 print();
699 // then print the requested information 704 // then print the requested information
700 if (PrintNativeNMethods) { 705 if (PrintNativeNMethods) {
701 print_code(); 706 print_code();
702 oop_maps->print(); 707 if (oop_maps != NULL) {
708 oop_maps->print();
709 }
703 } 710 }
704 if (PrintRelocations) { 711 if (PrintRelocations) {
705 print_relocations(); 712 print_relocations();
706 } 713 }
707 if (xtty != NULL) { 714 if (xtty != NULL) {
711 } 718 }
712 719
713 // For dtrace wrappers 720 // For dtrace wrappers
714 #ifdef HAVE_DTRACE_H 721 #ifdef HAVE_DTRACE_H
715 nmethod::nmethod( 722 nmethod::nmethod(
716 methodOop method, 723 Method* method,
717 int nmethod_size, 724 int nmethod_size,
718 CodeOffsets* offsets, 725 CodeOffsets* offsets,
719 CodeBuffer* code_buffer, 726 CodeBuffer* code_buffer,
720 int frame_size) 727 int frame_size)
721 : CodeBlob("dtrace nmethod", code_buffer, sizeof(nmethod), 728 : CodeBlob("dtrace nmethod", code_buffer, sizeof(nmethod),
739 _trap_offset = offsets->value(CodeOffsets::Dtrace_trap); 746 _trap_offset = offsets->value(CodeOffsets::Dtrace_trap);
740 _orig_pc_offset = 0; 747 _orig_pc_offset = 0;
741 _consts_offset = data_offset(); 748 _consts_offset = data_offset();
742 _stub_offset = data_offset(); 749 _stub_offset = data_offset();
743 _oops_offset = data_offset(); 750 _oops_offset = data_offset();
744 _scopes_data_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize); 751 _metadata_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
752 _scopes_data_offset = _metadata_offset + round_to(code_buffer->total_metadata_size(), wordSize);
745 _scopes_pcs_offset = _scopes_data_offset; 753 _scopes_pcs_offset = _scopes_data_offset;
746 _dependencies_offset = _scopes_pcs_offset; 754 _dependencies_offset = _scopes_pcs_offset;
747 _handler_table_offset = _dependencies_offset; 755 _handler_table_offset = _dependencies_offset;
748 _nul_chk_table_offset = _handler_table_offset; 756 _nul_chk_table_offset = _handler_table_offset;
749 _nmethod_end_offset = _nul_chk_table_offset; 757 _nmethod_end_offset = _nul_chk_table_offset;
753 _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry); 761 _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry);
754 _osr_entry_point = NULL; 762 _osr_entry_point = NULL;
755 _exception_cache = NULL; 763 _exception_cache = NULL;
756 _pc_desc_cache.reset_to(NULL); 764 _pc_desc_cache.reset_to(NULL);
757 765
758 code_buffer->copy_oops_to(this); 766 code_buffer->copy_values_to(this);
759 debug_only(verify_scavenge_root_oops()); 767 debug_only(verify_scavenge_root_oops());
760 CodeCache::commit(this); 768 CodeCache::commit(this);
761 } 769 }
762 770
763 if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) { 771 if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
793 return CodeCache::allocate(nmethod_size); 801 return CodeCache::allocate(nmethod_size);
794 } 802 }
795 803
796 804
797 nmethod::nmethod( 805 nmethod::nmethod(
798 methodOop method, 806 Method* method,
799 int nmethod_size, 807 int nmethod_size,
800 int compile_id, 808 int compile_id,
801 int entry_bci, 809 int entry_bci,
802 CodeOffsets* offsets, 810 CodeOffsets* offsets,
803 int orig_pc_offset, 811 int orig_pc_offset,
868 } else { 876 } else {
869 _unwind_handler_offset = -1; 877 _unwind_handler_offset = -1;
870 } 878 }
871 879
872 _oops_offset = data_offset(); 880 _oops_offset = data_offset();
873 _scopes_data_offset = _oops_offset + round_to(code_buffer->total_oop_size (), oopSize); 881 _metadata_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
882 _scopes_data_offset = _metadata_offset + round_to(code_buffer->total_metadata_size(), wordSize);
883
874 _scopes_pcs_offset = _scopes_data_offset + round_to(debug_info->data_size (), oopSize); 884 _scopes_pcs_offset = _scopes_data_offset + round_to(debug_info->data_size (), oopSize);
875 _dependencies_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size()); 885 _dependencies_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size());
876 _handler_table_offset = _dependencies_offset + round_to(dependencies->size_in_bytes (), oopSize); 886 _handler_table_offset = _dependencies_offset + round_to(dependencies->size_in_bytes (), oopSize);
877 _nul_chk_table_offset = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize); 887 _nul_chk_table_offset = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
878 _nmethod_end_offset = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize); 888 _nmethod_end_offset = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);
882 _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry); 892 _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry);
883 _exception_cache = NULL; 893 _exception_cache = NULL;
884 _pc_desc_cache.reset_to(scopes_pcs_begin()); 894 _pc_desc_cache.reset_to(scopes_pcs_begin());
885 895
886 // Copy contents of ScopeDescRecorder to nmethod 896 // Copy contents of ScopeDescRecorder to nmethod
887 code_buffer->copy_oops_to(this); 897 code_buffer->copy_values_to(this);
888 debug_info->copy_to(this); 898 debug_info->copy_to(this);
889 dependencies->copy_to(this); 899 dependencies->copy_to(this);
890 if (ScavengeRootsInCode && detect_scavenge_root_oops()) { 900 if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
891 CodeCache::add_scavenge_root_nmethod(this); 901 CodeCache::add_scavenge_root_nmethod(this);
892 } 902 }
1024 (*dest) = JNIHandles::resolve_non_null(handle); 1034 (*dest) = JNIHandles::resolve_non_null(handle);
1025 } 1035 }
1026 } 1036 }
1027 1037
1028 1038
1029 void nmethod::copy_oops(GrowableArray<jobject>* array) { 1039 // Have to have the same name because it's called by a template
1030 //assert(oops_size() == 0, "do this handshake just once, please"); 1040 void nmethod::copy_values(GrowableArray<jobject>* array) {
1031 int length = array->length(); 1041 int length = array->length();
1032 assert((address)(oops_begin() + length) <= data_end(), "oops big enough"); 1042 assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
1033 oop* dest = oops_begin(); 1043 oop* dest = oops_begin();
1034 for (int index = 0 ; index < length; index++) { 1044 for (int index = 0 ; index < length; index++) {
1035 initialize_immediate_oop(&dest[index], array->at(index)); 1045 initialize_immediate_oop(&dest[index], array->at(index));
1036 } 1046 }
1037 1047
1041 // CodeBlob constructor, so it is valid even at this early point to 1051 // CodeBlob constructor, so it is valid even at this early point to
1042 // iterate over relocations and patch the code. 1052 // iterate over relocations and patch the code.
1043 fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true); 1053 fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
1044 } 1054 }
1045 1055
1056 void nmethod::copy_values(GrowableArray<Metadata*>* array) {
1057 int length = array->length();
1058 assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
1059 Metadata** dest = metadata_begin();
1060 for (int index = 0 ; index < length; index++) {
1061 dest[index] = array->at(index);
1062 }
1063 }
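The two copy_values overloads (same name because a template in the recorder calls them generically, per the comment above) differ in one essential way: scope oops arrive as jobject handles that must be resolved (initialize_immediate_oop, followed by fix_oop_relocations to patch the code), while Metadata* values are raw pointers copied verbatim. A compact contrast, with hypothetical handle and metadata types:

    #include <vector>

    struct oopDesc {}; typedef oopDesc* oop;            // hypothetical stand-ins
    struct JObjectImpl { oop target; }; typedef JObjectImpl* jobject;
    struct Metadata {};

    inline oop resolve(jobject h) { return h ? h->target : nullptr; }

    struct MiniNM {
      std::vector<oop> oops;
      std::vector<Metadata*> metadata;
      // Oops: one indirection through the handle, like initialize_immediate_oop;
      // the real code then re-patches instructions via fix_oop_relocations.
      void copy_values(const std::vector<jobject>& a) {
        for (jobject h : a) oops.push_back(resolve(h));
      }
      // Metadata: no handles and no GC interaction -- a verbatim pointer copy.
      void copy_values(const std::vector<Metadata*>& a) {
        for (Metadata* m : a) metadata.push_back(m);
      }
    };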
1046 1064
1047 bool nmethod::is_at_poll_return(address pc) { 1065 bool nmethod::is_at_poll_return(address pc) {
1048 RelocIterator iter(this, pc, pc+1); 1066 RelocIterator iter(this, pc, pc+1);
1049 while (iter.next()) { 1067 while (iter.next()) {
1050 if (iter.type() == relocInfo::poll_return_type) 1068 if (iter.type() == relocInfo::poll_return_type)
1075 oop* dest = reloc->oop_addr(); 1093 oop* dest = reloc->oop_addr();
1076 initialize_immediate_oop(dest, (jobject) *dest); 1094 initialize_immediate_oop(dest, (jobject) *dest);
1077 } 1095 }
1078 // Refresh the oop-related bits of this instruction. 1096 // Refresh the oop-related bits of this instruction.
1079 reloc->fix_oop_relocation(); 1097 reloc->fix_oop_relocation();
1098 } else if (iter.type() == relocInfo::metadata_type) {
1099 metadata_Relocation* reloc = iter.metadata_reloc();
1100 reloc->fix_metadata_relocation();
1080 } 1101 }
1081 1102
1082 // There must not be any interfering patches or breakpoints. 1103 // There must not be any interfering patches or breakpoints.
1083 assert(!(iter.type() == relocInfo::breakpoint_type 1104 assert(!(iter.type() == relocInfo::breakpoint_type
1084 && iter.breakpoint_reloc()->active()), 1105 && iter.breakpoint_reloc()->active()),
1195 void nmethod::inc_decompile_count() { 1216 void nmethod::inc_decompile_count() {
1196 #ifndef GRAAL 1217 #ifndef GRAAL
1197 if (!is_compiled_by_c2()) return; 1218 if (!is_compiled_by_c2()) return;
1198 #endif 1219 #endif
1199 // Could be gated by ProfileTraps, but do not bother... 1220 // Could be gated by ProfileTraps, but do not bother...
1200 methodOop m = method(); 1221 Method* m = method();
1201 if (m == NULL) return; 1222 if (m == NULL) return;
1202 methodDataOop mdo = m->method_data(); 1223 MethodData* mdo = m->method_data();
1203 if (mdo == NULL) return; 1224 if (mdo == NULL) return;
1204 // There is a benign race here. See comments in methodDataOop.hpp. 1225 // There is a benign race here. See comments in methodData.hpp.
1205 mdo->inc_decompile_count(); 1226 mdo->inc_decompile_count();
1206 } 1227 }
1207 1228
1208 void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) { 1229 void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
1209 1230
1218 flush_dependencies(is_alive); 1239 flush_dependencies(is_alive);
1219 1240
1220 // Break cycle between nmethod & method 1241 // Break cycle between nmethod & method
1221 if (TraceClassUnloading && WizardMode) { 1242 if (TraceClassUnloading && WizardMode) {
1222 tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT 1243 tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT
1223 " unloadable], methodOop(" INTPTR_FORMAT 1244 " unloadable], Method*(" INTPTR_FORMAT
1224 "), cause(" INTPTR_FORMAT ")", 1245 "), cause(" INTPTR_FORMAT ")",
1225 this, (address)_method, (address)cause); 1246 this, (address)_method, (address)cause);
1226 if (!Universe::heap()->is_gc_active()) 1247 if (!Universe::heap()->is_gc_active())
1227 cause->klass()->print(); 1248 cause->klass()->print();
1228 } 1249 }
1229 // Unlink the osr method, so we do not look this up again 1250 // Unlink the osr method, so we do not look this up again
1230 if (is_osr_method()) { 1251 if (is_osr_method()) {
1231 invalidate_osr_method(); 1252 invalidate_osr_method();
1232 } 1253 }
1233 // If _method is already NULL the methodOop is about to be unloaded, 1254 // If _method is already NULL the Method* is about to be unloaded,
1234 // so we don't have to break the cycle. Note that it is possible to 1255 // so we don't have to break the cycle. Note that it is possible to
1235 // have the methodOop live here, in case we unload the nmethod because 1256 // have the Method* live here, in case we unload the nmethod because
1236 // it is pointing to some oop (other than the methodOop) being unloaded. 1257 // it is pointing to some oop (other than the Method*) being unloaded.
1237 if (_method != NULL) { 1258 if (_method != NULL) {
1238 // OSR methods point to the methodOop, but the methodOop does not 1259 // OSR methods point to the Method*, but the Method* does not
1239 // point back! 1260 // point back!
1240 if (_method->code() == this) { 1261 if (_method->code() == this) {
1241 _method->clear_code(); // Break a cycle 1262 _method->clear_code(); // Break a cycle
1242 } 1263 }
1243 _method = NULL; // Clear the method of this dead nmethod 1264 _method = NULL; // Clear the method of this dead nmethod
1244 } 1265 }
1245 1266
1246 #ifdef GRAAL 1267 #ifdef GRAAL
1247 if (_graal_compiled_method != NULL) { 1268 if (graal_installed_code() != NULL) {
1248 HotSpotCompiledMethod::set_nmethod(_graal_compiled_method, 0); 1269 HotSpotInstalledCode::set_nmethod(_graal_installed_code, 0);
1249 _graal_compiled_method = NULL; 1270 _graal_installed_code = NULL;
1250 } 1271 }
1251 #endif 1272 #endif
1252 1273
1253 // Make the class unloaded - i.e., change state and notify sweeper 1274 // Make the class unloaded - i.e., change state and notify sweeper
1254 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 1275 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1261 _state = unloaded; 1282 _state = unloaded;
1262 1283
1263 // Log the unloading. 1284 // Log the unloading.
1264 log_state_change(); 1285 log_state_change();
1265 1286
1266 // The methodOop is gone at this point 1287 // The Method* is gone at this point
1267 assert(_method == NULL, "Tautology"); 1288 assert(_method == NULL, "Tautology");
1268 1289
1269 set_osr_link(NULL); 1290 set_osr_link(NULL);
1270 //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods 1291 //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
1271 NMethodSweeper::notify(this); 1292 NMethodSweeper::notify(this);
1273 1294
1274 void nmethod::invalidate_osr_method() { 1295 void nmethod::invalidate_osr_method() {
1275 assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); 1296 assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
1276 // Remove from list of active nmethods 1297 // Remove from list of active nmethods
1277 if (method() != NULL) 1298 if (method() != NULL)
1278 instanceKlass::cast(method()->method_holder())->remove_osr_nmethod(this); 1299 method()->method_holder()->remove_osr_nmethod(this);
1279 // Set entry as invalid 1300 // Set entry as invalid
1280 _entry_bci = InvalidOSREntryBci; 1301 _entry_bci = InvalidOSREntryBci;
1281 } 1302 }
1282 1303
1283 void nmethod::log_state_change() const { 1304 void nmethod::log_state_change() const {
1330 // to do, but return false to indicate this. 1351 // to do, but return false to indicate this.
1331 return false; 1352 return false;
1332 } 1353 }
1333 1354
1334 #ifdef GRAAL 1355 #ifdef GRAAL
1335 if (_graal_compiled_method != NULL) { 1356 if (graal_installed_code() != NULL) {
1336 HotSpotCompiledMethod::set_nmethod(_graal_compiled_method, 0); 1357 HotSpotInstalledCode::set_nmethod(_graal_installed_code, 0);
1337 _graal_compiled_method = NULL; 1358 _graal_installed_code = NULL;
1338 } 1359 }
1339 #endif 1360 #endif
1340 1361
1341 // The caller can be calling the method statically or through an inline 1362 // The caller can be calling the method statically or through an inline
1342 // cache call. 1363 // cache call.
1358 log_state_change(); 1379 log_state_change();
1359 1380
1360 // Remove nmethod from method. 1381 // Remove nmethod from method.
1361 // We need to check if both the _code and _from_compiled_code_entry_point 1382 // We need to check if both the _code and _from_compiled_code_entry_point
1362 // refer to this nmethod because there is a race in setting these two fields 1383 // refer to this nmethod because there is a race in setting these two fields
1363 // in methodOop as seen in bugid 4947125. 1384 // in Method* as seen in bugid 4947125.
1364 // If the vep() points to the zombie nmethod, the memory for the nmethod 1385 // If the vep() points to the zombie nmethod, the memory for the nmethod
1365 // could be flushed and the compiler and vtable stubs could still call 1386 // could be flushed and the compiler and vtable stubs could still call
1366 // through it. 1387 // through it.
1367 if (method() != NULL && (method()->code() == this || 1388 if (method() != NULL && (method()->code() == this ||
1368 method()->from_compiled_entry() == verified_entry_point())) { 1389 method()->from_compiled_entry() == verified_entry_point())) {
1478 assert(Universe::heap()->is_gc_active() == (is_alive != NULL), 1499 assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
1479 "is_alive is non-NULL if and only if we are called during GC"); 1500 "is_alive is non-NULL if and only if we are called during GC");
1480 if (!has_flushed_dependencies()) { 1501 if (!has_flushed_dependencies()) {
1481 set_has_flushed_dependencies(); 1502 set_has_flushed_dependencies();
1482 for (Dependencies::DepStream deps(this); deps.next(); ) { 1503 for (Dependencies::DepStream deps(this); deps.next(); ) {
1483 klassOop klass = deps.context_type(); 1504 Klass* klass = deps.context_type();
1484 if (klass == NULL) continue; // ignore things like evol_method 1505 if (klass == NULL) continue; // ignore things like evol_method
1485 1506
1486 // During GC the is_alive closure is non-NULL, and is used to 1507 // During GC the is_alive closure is non-NULL, and is used to
1487 // determine liveness of dependees that need to be updated. 1508 // determine liveness of dependees that need to be updated.
1488 if (is_alive == NULL || is_alive->do_object_b(klass)) { 1509 if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
1489 instanceKlass::cast(klass)->remove_dependent_nmethod(this); 1510 InstanceKlass::cast(klass)->remove_dependent_nmethod(this);
1490 } 1511 }
1491 } 1512 }
1492 } 1513 }
1493 } 1514 }
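Here is_alive->do_object_b(klass) becomes klass->is_loader_alive(is_alive): a Klass* is no longer a heap object the GC can judge directly, so its liveness is derived from its defining class loader's oop. A hypothetical miniature of that indirection:

    struct oopDesc { bool marked; }; typedef oopDesc* oop;

    struct BoolObjectClosure {                  // GC liveness oracle for heap oops
      virtual bool do_object_b(oop o) = 0;
      virtual ~BoolObjectClosure() {}
    };

    struct Klass {                              // hypothetical miniature
      oop class_loader;                         // the heap object that decides our fate
      bool is_loader_alive(BoolObjectClosure* is_alive) {
        // Native metadata dies exactly when its loader's oop dies; a NULL
        // loader models the always-live bootstrap loader in this sketch.
        return class_loader == nullptr || is_alive->do_object_b(class_loader);
      }
    };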
1494 1515
1495 1516
1496 // If this oop is not live, the nmethod can be unloaded. 1517 // If this oop is not live, the nmethod can be unloaded.
1497 bool nmethod::can_unload(BoolObjectClosure* is_alive, 1518 bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred) {
1498 OopClosure* keep_alive,
1499 oop* root, bool unloading_occurred) {
1500 assert(root != NULL, "just checking"); 1519 assert(root != NULL, "just checking");
1501 oop obj = *root; 1520 oop obj = *root;
1502 if (obj == NULL || is_alive->do_object_b(obj)) { 1521 if (obj == NULL || is_alive->do_object_b(obj)) {
1503 return false; 1522 return false;
1504 } 1523 }
1505 if (obj->is_compiledICHolder()) { 1524
1506 compiledICHolderOop cichk_oop = compiledICHolderOop(obj);
1507 if (is_alive->do_object_b(
1508 cichk_oop->holder_method()->method_holder()) &&
1509 is_alive->do_object_b(cichk_oop->holder_klass())) {
1510 // The oop should be kept alive
1511 keep_alive->do_oop(root);
1512 return false;
1513 }
1514 }
1515 // If ScavengeRootsInCode is true, an nmethod might be unloaded 1525 // If ScavengeRootsInCode is true, an nmethod might be unloaded
1516 // simply because one of its constant oops has gone dead. 1526 // simply because one of its constant oops has gone dead.
1517 // No actual classes need to be unloaded in order for this to occur. 1527 // No actual classes need to be unloaded in order for this to occur.
1518 assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading"); 1528 assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
1519 make_unloaded(is_alive, obj); 1529 make_unloaded(is_alive, obj);
1524 // post_compiled_method_load_event 1534 // post_compiled_method_load_event
1525 // new method for install_code() path 1535 // new method for install_code() path
1526 // Transfer information from compilation to jvmti 1536 // Transfer information from compilation to jvmti
1527 void nmethod::post_compiled_method_load_event() { 1537 void nmethod::post_compiled_method_load_event() {
1528 1538
1529 methodOop moop = method(); 1539 Method* moop = method();
1530 #ifndef USDT2 1540 #ifndef USDT2
1531 HS_DTRACE_PROBE8(hotspot, compiled__method__load, 1541 HS_DTRACE_PROBE8(hotspot, compiled__method__load,
1532 moop->klass_name()->bytes(), 1542 moop->klass_name()->bytes(),
1533 moop->klass_name()->utf8_length(), 1543 moop->klass_name()->utf8_length(),
1534 moop->name()->bytes(), 1544 moop->name()->bytes(),
1579 assert(_method != NULL && !is_unloaded(), "just checking"); 1589 assert(_method != NULL && !is_unloaded(), "just checking");
1580 DTRACE_METHOD_UNLOAD_PROBE(method()); 1590 DTRACE_METHOD_UNLOAD_PROBE(method());
1581 1591
1582 // If a JVMTI agent has enabled the CompiledMethodUnload event then 1592 // If a JVMTI agent has enabled the CompiledMethodUnload event then
1583 // post the event. Sometime later this nmethod will be made a zombie 1593 // post the event. Sometime later this nmethod will be made a zombie
1584 // by the sweeper but the methodOop will not be valid at that point. 1594 // by the sweeper but the Method* will not be valid at that point.
1585 // If the _jmethod_id is null then no load event was ever requested 1595 // If the _jmethod_id is null then no load event was ever requested
1586 // so don't bother posting the unload. The main reason for this is 1596 // so don't bother posting the unload. The main reason for this is
1587 // that the jmethodID is a weak reference to the methodOop so if 1597 // that the jmethodID is a weak reference to the Method* so if
1588 // it's being unloaded there's no way to look it up since the weak 1598 // it's being unloaded there's no way to look it up since the weak
1589 // ref will have been cleared. 1599 // ref will have been cleared.
1590 if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) { 1600 if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
1591 assert(!unload_reported(), "already unloaded"); 1601 assert(!unload_reported(), "already unloaded");
1592 JvmtiDeferredEvent event = 1602 JvmtiDeferredEvent event =
1612 1622
1613 // This is called at the end of the strong tracing/marking phase of a 1623 // This is called at the end of the strong tracing/marking phase of a
1614 // GC to unload an nmethod if it contains otherwise unreachable 1624 // GC to unload an nmethod if it contains otherwise unreachable
1615 // oops. 1625 // oops.
1616 1626
1617 void nmethod::do_unloading(BoolObjectClosure* is_alive, 1627 void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
1618 OopClosure* keep_alive, bool unloading_occurred) {
1619 // Make sure the oop's ready to receive visitors 1628 // Make sure the oop's ready to receive visitors
1620 assert(!is_zombie() && !is_unloaded(), 1629 assert(!is_zombie() && !is_unloaded(),
1621 "should not call follow on zombie or unloaded nmethod"); 1630 "should not call follow on zombie or unloaded nmethod");
1622 1631
1623 // If the method is not entrant then a JMP is plastered over the 1632 // If the method is not entrant then a JMP is plastered over the
1640 // call to post_compiled_method_unload() so that the unloading 1649 // call to post_compiled_method_unload() so that the unloading
1641 // of this nmethod is reported. 1650 // of this nmethod is reported.
1642 unloading_occurred = true; 1651 unloading_occurred = true;
1643 } 1652 }
1644 1653
1645 // Follow methodOop 1654 #ifdef GRAAL
1646 if (can_unload(is_alive, keep_alive, (oop*)&_method, unloading_occurred)) { 1655 // Follow Graal method
1656 if (_graal_installed_code == Universe::non_oop_word()) {
1657 // May not yet have finished initializing a non-default nmethod
1647 return; 1658 return;
1648 } 1659 }
1649 1660 if (_graal_installed_code != NULL && can_unload(is_alive, (oop*)&_graal_installed_code, unloading_occurred)) {
1650 if (_graal_compiled_method != NULL && can_unload(is_alive, keep_alive, (oop*)&_graal_compiled_method, unloading_occurred)) {
1651 return; 1661 return;
1652 } 1662 }
1663 #endif
1653 1664
1654 // Exception cache 1665 // Exception cache
1655 ExceptionCache* ec = exception_cache(); 1666 ExceptionCache* ec = exception_cache();
1656 while (ec != NULL) { 1667 while (ec != NULL) {
1657 oop* ex_addr = (oop*)ec->exception_type_addr(); 1668 Klass* ex_klass = ec->exception_type();
1658 oop ex = *ex_addr;
1659 ExceptionCache* next_ec = ec->next(); 1669 ExceptionCache* next_ec = ec->next();
1660 if (ex != NULL && !is_alive->do_object_b(ex)) { 1670 if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
1661 assert(!ex->is_compiledICHolder(), "Possible error here");
1662 remove_from_exception_cache(ec); 1671 remove_from_exception_cache(ec);
1663 } 1672 }
1664 ec = next_ec; 1673 ec = next_ec;
1665 } 1674 }
1666 1675
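The exception cache walk above now reads a Klass* via ec->exception_type() instead of an oop, pruning entries whose loader died; note that next_ec is captured before removal so unlinking cannot break the traversal. A self-contained sketch of that pruning pattern, with hypothetical stand-in types:

    struct Klass { bool loader_alive; };

    struct ExceptionCache {
      Klass* exception_type;
      ExceptionCache* next;
    };

    // Unlink entries whose exception Klass's loader is dead. Walking via a
    // pointer-to-link makes removal safe mid-iteration, matching the hunk's
    // save-next-before-remove discipline.
    void prune(ExceptionCache** link) {
      while (*link != nullptr) {
        ExceptionCache* ec = *link;
        if (ec->exception_type != nullptr && !ec->exception_type->loader_alive) {
          *link = ec->next;          // real code: remove_from_exception_cache(ec)
        } else {
          link = &ec->next;
        }
      }
    }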
1671 if (unloading_occurred) { 1680 if (unloading_occurred) {
1672 RelocIterator iter(this, low_boundary); 1681 RelocIterator iter(this, low_boundary);
1673 while(iter.next()) { 1682 while(iter.next()) {
1674 if (iter.type() == relocInfo::virtual_call_type) { 1683 if (iter.type() == relocInfo::virtual_call_type) {
1675 CompiledIC *ic = CompiledIC_at(iter.reloc()); 1684 CompiledIC *ic = CompiledIC_at(iter.reloc());
1676 oop ic_oop = ic->cached_oop(); 1685 if (ic->is_icholder_call()) {
1677 if (ic_oop != NULL && !is_alive->do_object_b(ic_oop)) {
1678 // The only exception is compiledICHolder oops which may 1686 // The only exception is compiledICHolder oops which may
1679 // yet be marked below. (We check this further below). 1687 // yet be marked below. (We check this further below).
1680 if (ic_oop->is_compiledICHolder()) { 1688 CompiledICHolder* cichk_oop = ic->cached_icholder();
1681 compiledICHolderOop cichk_oop = compiledICHolderOop(ic_oop); 1689 if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
1682 if (is_alive->do_object_b( 1690 cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
1683 cichk_oop->holder_method()->method_holder()) &&
1684 is_alive->do_object_b(cichk_oop->holder_klass())) {
1685 continue; 1691 continue;
1686 } 1692 }
1693 } else {
1694 Metadata* ic_oop = ic->cached_metadata();
1695 if (ic_oop != NULL) {
1696 if (ic_oop->is_klass()) {
1697 if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
1698 continue;
1699 }
1700 } else if (ic_oop->is_method()) {
1701 if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
1702 continue;
1703 }
1704 } else {
1705 ShouldNotReachHere();
1706 }
1707 }
1687 } 1708 }
1688 ic->set_to_clean(); 1709 ic->set_to_clean();
1689 assert(ic->cached_oop() == NULL,
1690 "cached oop in IC should be cleared");
1691 }
1692 } 1710 }
1693 } 1711 }
1694 } 1712 }
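The rewritten inline-cache cleaning dispatches on what the IC holds: a CompiledICHolder is kept only if both its holder method's class and its holder klass are loader-alive; otherwise the cached value is a Klass* or Method* whose own loader decides; anything else hits ShouldNotReachHere. A hypothetical miniature of that decision:

    struct Klass  { bool loader_alive; };       // hypothetical stand-ins
    struct Method { Klass* holder; };
    struct ICHolder { Method* method; Klass* klass; };

    enum CachedKind { kNone, kICHolder, kKlass, kMethod };

    struct CompiledIC {
      CachedKind kind;
      union { ICHolder* ich; Klass* k; Method* m; };
      void set_to_clean() { kind = kNone; }
    };

    // True if the cached entry may be kept; otherwise the IC must be cleaned.
    bool ic_entry_alive(const CompiledIC& ic) {
      switch (ic.kind) {
        case kICHolder: return ic.ich->method->holder->loader_alive
                            && ic.ich->klass->loader_alive;
        case kKlass:    return ic.k->loader_alive;
        case kMethod:   return ic.m->holder->loader_alive;
        default:        return false;           // nothing cached, nothing to keep
      }
    }

    void clean_if_dead(CompiledIC& ic) {
      if (ic.kind != kNone && !ic_entry_alive(ic)) ic.set_to_clean();
    }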
1695 1713
1696 // Compiled code 1714 // Compiled code
1715 {
1697 RelocIterator iter(this, low_boundary); 1716 RelocIterator iter(this, low_boundary);
1698 while (iter.next()) { 1717 while (iter.next()) {
1699 if (iter.type() == relocInfo::oop_type) { 1718 if (iter.type() == relocInfo::oop_type) {
1700 oop_Relocation* r = iter.oop_reloc(); 1719 oop_Relocation* r = iter.oop_reloc();
1701 // In this loop, we must only traverse those oops directly embedded in 1720 // In this loop, we must only traverse those oops directly embedded in
1702 // the code. Other oops (oop_index>0) are seen as part of scopes_oops. 1721 // the code. Other oops (oop_index>0) are seen as part of scopes_oops.
1703 assert(1 == (r->oop_is_immediate()) + 1722 assert(1 == (r->oop_is_immediate()) +
1704 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()), 1723 (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1705 "oop must be found in exactly one place"); 1724 "oop must be found in exactly one place");
1706 if (r->oop_is_immediate() && r->oop_value() != NULL) { 1725 if (r->oop_is_immediate() && r->oop_value() != NULL) {
1707 if (can_unload(is_alive, keep_alive, r->oop_addr(), unloading_occurred)) { 1726 if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
1708 return; 1727 return;
1709 } 1728 }
1710 } 1729 }
1711 } 1730 }
1712 } 1731 }
1732 }
1713 1733
1714 1734
1715 // Scopes 1735 // Scopes
1716 for (oop* p = oops_begin(); p < oops_end(); p++) { 1736 for (oop* p = oops_begin(); p < oops_end(); p++) {
1717 if (*p == Universe::non_oop_word()) continue; // skip non-oops 1737 if (*p == Universe::non_oop_word()) continue; // skip non-oops
1718 if (can_unload(is_alive, keep_alive, p, unloading_occurred)) { 1738 if (can_unload(is_alive, p, unloading_occurred)) {
1719 return; 1739 return;
1720 } 1740 }
1721 } 1741 }
1722 1742
1723 #ifndef PRODUCT 1743 // Ensure that all metadata is still alive
1724 // This nmethod was not unloaded; check below that all CompiledICs 1744 verify_metadata_loaders(low_boundary, is_alive);
1725 // refer to marked oops. 1745 }
1726 { 1746
1747 #ifdef ASSERT
1748
1749 class CheckClass : AllStatic {
1750 static BoolObjectClosure* _is_alive;
1751
1752 // Check class_loader is alive for this bit of metadata.
1753 static void check_class(Metadata* md) {
1754 Klass* klass = NULL;
1755 if (md->is_klass()) {
1756 klass = ((Klass*)md);
1757 } else if (md->is_method()) {
1758 klass = ((Method*)md)->method_holder();
1759 } else if (md->is_methodData()) {
1760 klass = ((MethodData*)md)->method()->method_holder();
1761 } else {
1762 md->print();
1763 ShouldNotReachHere();
1764 }
1765 assert(klass->is_loader_alive(_is_alive), "must be alive");
1766 }
1767 public:
1768 static void do_check_class(BoolObjectClosure* is_alive, nmethod* nm) {
1769 assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
1770 _is_alive = is_alive;
1771 nm->metadata_do(check_class);
1772 }
1773 };
1774
1775 // This is called during a safepoint so can use static data
1776 BoolObjectClosure* CheckClass::_is_alive = NULL;
1777 #endif // ASSERT
1778
1779
1780 // Processing of oop references should have been sufficient to keep
1781 // all strong references alive. Any weak references should have been
1782 // cleared as well. Visit all the metadata and ensure that it's
1783 // really alive.
1784 void nmethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
1785 #ifdef ASSERT
1727 RelocIterator iter(this, low_boundary); 1786 RelocIterator iter(this, low_boundary);
1728 while (iter.next()) { 1787 while (iter.next()) {
1729 if (iter.type() == relocInfo::virtual_call_type) { 1788 // static_stub_Relocations may have dangling references to
1730 CompiledIC *ic = CompiledIC_at(iter.reloc()); 1789 // Method*s so trim them out here. Otherwise it looks like
1731 oop ic_oop = ic->cached_oop(); 1790 // compiled code is maintaining a link to dead metadata.
1732 assert(ic_oop == NULL || is_alive->do_object_b(ic_oop), 1791 address static_call_addr = NULL;
1733 "Found unmarked ic_oop in reachable nmethod"); 1792 if (iter.type() == relocInfo::opt_virtual_call_type) {
1734 } 1793 CompiledIC* cic = CompiledIC_at(iter.reloc());
1735 } 1794 if (!cic->is_call_to_interpreted()) {
1736 } 1795 static_call_addr = iter.addr();
1737 #endif // !PRODUCT 1796 }
1738 } 1797 } else if (iter.type() == relocInfo::static_call_type) {
1798 CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
1799 if (!csc->is_call_to_interpreted()) {
1800 static_call_addr = iter.addr();
1801 }
1802 }
1803 if (static_call_addr != NULL) {
1804 RelocIterator sciter(this, low_boundary);
1805 while (sciter.next()) {
1806 if (sciter.type() == relocInfo::static_stub_type &&
1807 sciter.static_stub_reloc()->static_call() == static_call_addr) {
1808 sciter.static_stub_reloc()->clear_inline_cache();
1809 }
1810 }
1811 }
1812 }
1813 // Check that the metadata embedded in the nmethod is alive
1814 CheckClass::do_check_class(is_alive, this);
1815 #endif
1816 }
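verify_metadata_loaders is debug-only (ASSERT): before asserting that every embedded Metadata is alive, it clears static stubs whose call sites no longer dispatch through the interpreter, since such stubs can retain a stale Method*. The two-pass shape, sketched with hypothetical call-site and stub records:

    #include <vector>

    struct CallSite   { void* addr; bool to_interpreted; };
    struct StaticStub { void* call_site; void* cached_method;
      void clear() { cached_method = nullptr; } };

    // Pass 1: find resolved (non-interpreted) static/opt-virtual call sites.
    // Pass 2: clear any static stub still pointing at one of them, so no
    // dangling Method* survives into the metadata liveness check.
    void trim_static_stubs(const std::vector<CallSite>& calls,
                           std::vector<StaticStub>& stubs) {
      for (const CallSite& c : calls) {
        if (c.to_interpreted) continue;           // stub still in use
        for (StaticStub& s : stubs) {
          if (s.call_site == c.addr) s.clear();   // like static_stub_reloc()->clear_inline_cache()
        }
      }
    }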
1817
1818
1819 // Iterate over metadata calling this function. Used by RedefineClasses
1820 void nmethod::metadata_do(void f(Metadata*)) {
1821 address low_boundary = verified_entry_point();
1822 if (is_not_entrant()) {
1823 low_boundary += NativeJump::instruction_size;
1824 // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
1825 // (See comment above.)
1826 }
1827 {
1828 // Visit all immediate references that are embedded in the instruction stream.
1829 RelocIterator iter(this, low_boundary);
1830 while (iter.next()) {
1831 if (iter.type() == relocInfo::metadata_type ) {
1832 metadata_Relocation* r = iter.metadata_reloc();
1833 // In this loop, we must only follow those metadatas directly embedded in
1834 // the code. Other metadatas (oop_index>0) are seen as part of
1835 // the metadata section below.
1836 assert(1 == (r->metadata_is_immediate()) +
1837 (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
1838 "metadata must be found in exactly one place");
1839 if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
1840 Metadata* md = r->metadata_value();
1841 f(md);
1842 }
1843 }
1844 }
1845 }
1846
1847 // Visit the metadata section
1848 for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
1849 if (*p == Universe::non_oop_word() || *p == NULL) continue; // skip sentinel and NULL words
1850 Metadata* md = *p;
1851 f(md);
1852 }
1853 // Call f on the Method* itself, which is not embedded in these other places.
1854 if (_method != NULL) f(_method);
1855 }
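metadata_do visits metadata in three places: immediates embedded in the instruction stream (found via metadata relocations), the metadata section, and finally the owning _method, which lives in neither. RedefineClasses uses this walk to reach every stale Method* reference. A miniature of the visitor, with hypothetical types:

    #include <vector>

    struct Metadata {};
    struct MiniNMethod {
      std::vector<Metadata*> immediates;   // metadata embedded in instructions
      std::vector<Metadata*> section;      // the metadata data section
      Metadata* method;                    // the owning Method*, kept separately

      void metadata_do(void f(Metadata*)) {
        for (Metadata* m : immediates) if (m != nullptr) f(m);
        for (Metadata* m : section)    if (m != nullptr) f(m);
        if (method != nullptr) f(method);  // stored in neither place above
      }
    };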
1856
1739 1857
1740 // This method is called twice during GC -- once while 1858 // This method is called twice during GC -- once while
1741 // tracing the "active" nmethods on thread stacks during 1859 // tracing the "active" nmethods on thread stacks during
1742 // the (strong) marking phase, and then again when walking 1860 // the (strong) marking phase, and then again when walking
1743 // the code cache contents during the weak roots processing 1861 // the code cache contents during the weak roots processing
1761 low_boundary += NativeJump::instruction_size; 1879 low_boundary += NativeJump::instruction_size;
1762 // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump. 1880 // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
1763 // (See comment above.) 1881 // (See comment above.)
1764 } 1882 }
1765 1883
1766 // Compiled code 1884 #ifdef GRAAL
1767 f->do_oop((oop*) &_method); 1885 if (graal_installed_code() != NULL) {
1768 f->do_oop((oop*) &_graal_compiled_method); 1886 f->do_oop((oop*) &_graal_installed_code);
1769 if (!do_strong_roots_only) { 1887 }
1770 // weak roots processing phase -- update ExceptionCache oops 1888 #endif
1771 ExceptionCache* ec = exception_cache();
1772 while(ec != NULL) {
1773 f->do_oop((oop*)ec->exception_type_addr());
1774 ec = ec->next();
1775 }
1776 } // Else strong roots phase -- skip oops in ExceptionCache
1777 1889
1778 RelocIterator iter(this, low_boundary); 1890 RelocIterator iter(this, low_boundary);
1779 1891
1780 while (iter.next()) { 1892 while (iter.next()) {
1781 if (iter.type() == relocInfo::oop_type ) { 1893 if (iter.type() == relocInfo::oop_type ) {
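The old oops_do visited _method, _graal_compiled_method, and the ExceptionCache entries; after NPG only the Graal installed-code oop remains a header-resident heap reference, since _method is metadata and the exception cache now stores Klass*. Sketched with hypothetical types:

    struct oopDesc {}; typedef oopDesc* oop;    // hypothetical stand-ins

    struct OopClosure {
      virtual void do_oop(oop* p) = 0;
      virtual ~OopClosure() {}
    };

    struct MiniNM {
      void* method;                 // Method*: metadata now, NOT reported as an oop
      oop graal_installed_code;     // still a real heap reference
      void oops_do(OopClosure* f) {
        if (graal_installed_code != nullptr) f->do_oop(&graal_installed_code);
        // _method and the Klass*-based exception cache are deliberately skipped.
      }
    };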
2106 } 2218 }
2107 } 2219 }
2108 return found_check; 2220 return found_check;
2109 } 2221 }
2110 2222
2111 bool nmethod::is_evol_dependent_on(klassOop dependee) { 2223 bool nmethod::is_evol_dependent_on(Klass* dependee) {
2112 instanceKlass *dependee_ik = instanceKlass::cast(dependee); 2224 InstanceKlass *dependee_ik = InstanceKlass::cast(dependee);
2113 objArrayOop dependee_methods = dependee_ik->methods(); 2225 Array<Method*>* dependee_methods = dependee_ik->methods();
2114 for (Dependencies::DepStream deps(this); deps.next(); ) { 2226 for (Dependencies::DepStream deps(this); deps.next(); ) {
2115 if (deps.type() == Dependencies::evol_method) { 2227 if (deps.type() == Dependencies::evol_method) {
2116 methodOop method = deps.method_argument(0); 2228 Method* method = deps.method_argument(0);
2117 for (int j = 0; j < dependee_methods->length(); j++) { 2229 for (int j = 0; j < dependee_methods->length(); j++) {
2118 if ((methodOop) dependee_methods->obj_at(j) == method) { 2230 if (dependee_methods->at(j) == method) {
2119 // RC_TRACE macro has an embedded ResourceMark 2231 // RC_TRACE macro has an embedded ResourceMark
2120 RC_TRACE(0x01000000, 2232 RC_TRACE(0x01000000,
2121 ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)", 2233 ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)",
2122 _method->method_holder()->klass_part()->external_name(), 2234 _method->method_holder()->external_name(),
2123 _method->name()->as_C_string(), 2235 _method->name()->as_C_string(),
2124 _method->signature()->as_C_string(), compile_id(), 2236 _method->signature()->as_C_string(), compile_id(),
2125 method->method_holder()->klass_part()->external_name(), 2237 method->method_holder()->external_name(),
2126 method->name()->as_C_string(), 2238 method->name()->as_C_string(),
2127 method->signature()->as_C_string())); 2239 method->signature()->as_C_string()));
2128 if (TraceDependencies || LogCompilation) 2240 if (TraceDependencies || LogCompilation)
2129 deps.log_dependency(dependee); 2241 deps.log_dependency(dependee);
2130 return true; 2242 return true;
2134 } 2246 }
2135 return false; 2247 return false;
2136 } 2248 }
2137 2249
2138 // Called from mark_for_deoptimization, when dependee is invalidated. 2250 // Called from mark_for_deoptimization, when dependee is invalidated.
2139 bool nmethod::is_dependent_on_method(methodOop dependee) { 2251 bool nmethod::is_dependent_on_method(Method* dependee) {
2140 for (Dependencies::DepStream deps(this); deps.next(); ) { 2252 for (Dependencies::DepStream deps(this); deps.next(); ) {
2141 if (deps.type() != Dependencies::evol_method) 2253 if (deps.type() != Dependencies::evol_method)
2142 continue; 2254 continue;
2143 methodOop method = deps.method_argument(0); 2255 Method* method = deps.method_argument(0);
2144 if (method == dependee) return true; 2256 if (method == dependee) return true;
2145 } 2257 }
2146 return false; 2258 return false;
2147 } 2259 }
2148 2260
2277 return; 2389 return;
2278 2390
2279 // Make sure all the entry points are correctly aligned for patching. 2391 // Make sure all the entry points are correctly aligned for patching.
2280 NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point()); 2392 NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
2281 2393
2282 assert(method()->is_oop(), "must be valid"); 2394 // assert(method()->is_oop(), "must be valid");
2283 2395
2284 ResourceMark rm; 2396 ResourceMark rm;
2285 2397
2286 if (!CodeCache::contains(this)) { 2398 if (!CodeCache::contains(this)) {
2287 fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this)); 2399 fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this));
2317 CompiledIC* ic = NULL; 2429 CompiledIC* ic = NULL;
2318 Thread *cur = Thread::current(); 2430 Thread *cur = Thread::current();
2319 if (CompiledIC_lock->owner() == cur || 2431 if (CompiledIC_lock->owner() == cur ||
2320 ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) && 2432 ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
2321 SafepointSynchronize::is_at_safepoint())) { 2433 SafepointSynchronize::is_at_safepoint())) {
2322 ic = CompiledIC_at(call_site); 2434 ic = CompiledIC_at(this, call_site);
2323 CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops()); 2435 CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
2324 } else { 2436 } else {
2325 MutexLocker ml_verify (CompiledIC_lock); 2437 MutexLocker ml_verify (CompiledIC_lock);
2326 ic = CompiledIC_at(call_site); 2438 ic = CompiledIC_at(this, call_site);
2327 } 2439 }
2328 PcDesc* pd = pc_desc_at(ic->end_of_call()); 2440 PcDesc* pd = pc_desc_at(ic->end_of_call());
2329 assert(pd != NULL, "PcDesc must exist"); 2441 assert(pd != NULL, "PcDesc must exist");
2330 for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(), 2442 for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
2331 pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(), 2443 pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
2458 stub_size()); 2570 stub_size());
2459 if (oops_size () > 0) tty->print_cr(" oops [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 2571 if (oops_size () > 0) tty->print_cr(" oops [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2460 oops_begin(), 2572 oops_begin(),
2461 oops_end(), 2573 oops_end(),
2462 oops_size()); 2574 oops_size());
2575 if (metadata_size () > 0) tty->print_cr(" metadata [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2576 metadata_begin(),
2577 metadata_end(),
2578 metadata_size());
2463 if (scopes_data_size () > 0) tty->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 2579 if (scopes_data_size () > 0) tty->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2464 scopes_data_begin(), 2580 scopes_data_begin(),
2465 scopes_data_end(), 2581 scopes_data_end(),
2466 scopes_data_size()); 2582 scopes_data_size());
2467 if (scopes_pcs_size () > 0) tty->print_cr(" scopes pcs [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 2583 if (scopes_pcs_size () > 0) tty->print_cr(" scopes pcs [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2507 ResourceMark rm; 2623 ResourceMark rm;
2508 ttyLocker ttyl; // keep the following output all in one block 2624 ttyLocker ttyl; // keep the following output all in one block
2509 tty->print_cr("Dependencies:"); 2625 tty->print_cr("Dependencies:");
2510 for (Dependencies::DepStream deps(this); deps.next(); ) { 2626 for (Dependencies::DepStream deps(this); deps.next(); ) {
2511 deps.print_dependency(); 2627 deps.print_dependency();
2512 klassOop ctxk = deps.context_type(); 2628 Klass* ctxk = deps.context_type();
2513 if (ctxk != NULL) { 2629 if (ctxk != NULL) {
2514 Klass* k = Klass::cast(ctxk); 2630 Klass* k = Klass::cast(ctxk);
2515 if (k->oop_is_instance() && ((instanceKlass*)k)->is_dependent_nmethod(this)) { 2631 if (k->oop_is_instance() && ((InstanceKlass*)k)->is_dependent_nmethod(this)) {
2516 tty->print_cr(" [nmethod<=klass]%s", k->external_name()); 2632 tty->print_cr(" [nmethod<=klass]%s", k->external_name());
2517 } 2633 }
2518 } 2634 }
2519 deps.log_dependency(); // put it into the xml log also 2635 deps.log_dependency(); // put it into the xml log also
2520 } 2636 }
2573 if (obj == NULL) st.print("NULL"); 2689 if (obj == NULL) st.print("NULL");
2574 else obj->print_value_on(&st); 2690 else obj->print_value_on(&st);
2575 st.print(")"); 2691 st.print(")");
2576 return st.as_string(); 2692 return st.as_string();
2577 } 2693 }
2694 case relocInfo::metadata_type: {
2695 stringStream st;
2696 metadata_Relocation* r = iter.metadata_reloc();
2697 Metadata* obj = r->metadata_value();
2698 st.print("metadata(");
2699 if (obj == NULL) st.print("NULL");
2700 else obj->print_value_on(&st);
2701 st.print(")");
2702 return st.as_string();
2703 }
2578 case relocInfo::virtual_call_type: return "virtual_call"; 2704 case relocInfo::virtual_call_type: return "virtual_call";
2579 case relocInfo::opt_virtual_call_type: return "optimized virtual_call"; 2705 case relocInfo::opt_virtual_call_type: return "optimized virtual_call";
2580 case relocInfo::static_call_type: return "static_call"; 2706 case relocInfo::static_call_type: return "static_call";
2581 case relocInfo::static_stub_type: return "static_stub"; 2707 case relocInfo::static_stub_type: return "static_stub";
2582 case relocInfo::runtime_call_type: return "runtime_call"; 2708 case relocInfo::runtime_call_type: return "runtime_call";
2600 p->return_oop()); 2726 p->return_oop());
2601 } 2727 }
2602 return NULL; 2728 return NULL;
2603 } 2729 }
2604 2730
2605 void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) { 2731 void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) const {
2606 if (block_begin == entry_point()) stream->print_cr("[Entry Point]"); 2732 if (block_begin == entry_point()) stream->print_cr("[Entry Point]");
2607 if (block_begin == verified_entry_point()) stream->print_cr("[Verified Entry Point]"); 2733 if (block_begin == verified_entry_point()) stream->print_cr("[Verified Entry Point]");
2608 if (GRAAL_ONLY(_exception_offset >= 0 &&) block_begin == exception_begin()) stream->print_cr("[Exception Handler]"); 2734 if (GRAAL_ONLY(_exception_offset >= 0 &&) block_begin == exception_begin()) stream->print_cr("[Exception Handler]");
2609 if (block_begin == stub_begin()) stream->print_cr("[Stub Code]"); 2735 if (block_begin == stub_begin()) stream->print_cr("[Stub Code]");
2610 if (GRAAL_ONLY(_deoptimize_offset >= 0 &&) block_begin == deopt_handler_begin()) stream->print_cr("[Deopt Handler Code]"); 2736 if (GRAAL_ONLY(_deoptimize_offset >= 0 &&) block_begin == deopt_handler_begin()) stream->print_cr("[Deopt Handler Code]");
2735 if (sd != NULL) { 2861 if (sd != NULL) {
2736 st->move_to(column); 2862 st->move_to(column);
2737 if (sd->bci() == SynchronizationEntryBCI) { 2863 if (sd->bci() == SynchronizationEntryBCI) {
2738 st->print(";*synchronization entry"); 2864 st->print(";*synchronization entry");
2739 } else { 2865 } else {
2740 if (sd->method().is_null()) { 2866 if (sd->method() == NULL) {
2741 st->print("method is NULL"); 2867 st->print("method is NULL");
2742 } else if (sd->method()->is_native()) { 2868 } else if (sd->method()->is_native()) {
2743 st->print("method is native"); 2869 st->print("method is native");
2744 } else { 2870 } else {
2745 Bytecodes::Code bc = sd->method()->java_code_at(sd->bci()); 2871 Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
2776 2902
2777 // Print all scopes 2903 // Print all scopes
2778 for (;sd != NULL; sd = sd->sender()) { 2904 for (;sd != NULL; sd = sd->sender()) {
2779 st->move_to(column); 2905 st->move_to(column);
2780 st->print("; -"); 2906 st->print("; -");
2781 if (sd->method().is_null()) { 2907 if (sd->method() == NULL) {
2782 st->print("method is NULL"); 2908 st->print("method is NULL");
2783 } else { 2909 } else {
2784 sd->method()->print_short_name(st); 2910 sd->method()->print_short_name(st);
2785 } 2911 }
2786 int lineno = sd->method()->line_number_from_bci(sd->bci()); 2912 int lineno = sd->method()->line_number_from_bci(sd->bci());