comparison src/cpu/sparc/vm/interp_masm_sparc.cpp @ 6725:da91efe96a93

6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
author coleenp
date Sat, 01 Sep 2012 13:25:18 -0400
parents 1d7922586cf6
children 69fb89ec6fa7
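
Editor's note: nearly every hunk below applies one mechanical pattern from this changeset. The old perm-gen "oop" metadata types (methodOopDesc, constMethodOopDesc, methodDataOopDesc, constantPoolOopDesc, constantPoolCacheOopDesc) become plain C++ classes allocated in metaspace (Method, ConstMethod, MethodData, ConstantPool, ConstantPoolCache), and the verify_oop(Lmethod) calls in this file are deleted, consistent with Lmethod now holding a Method* rather than a heap oop. A minimal before/after sketch of the renaming, taken from the get_const() change further down:

// Before: method metadata was a perm-gen oop, addressed via methodOopDesc offsets.
ld_ptr(Lmethod, in_bytes(methodOopDesc::const_offset()), Rdst);

// After: Method is an ordinary C++ metadata class in metaspace; the load itself
// is unchanged, only the type supplying the offset is renamed.
ld_ptr(Lmethod, in_bytes(Method::const_offset()), Rdst);
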
--- src/cpu/sparc/vm/interp_masm_sparc.cpp	(6724:36d1d483d5d6)
+++ src/cpu/sparc/vm/interp_masm_sparc.cpp	(6725:da91efe96a93)
@@ -26,12 +26,12 @@
 #include "interp_masm_sparc.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/markOop.hpp"
-#include "oops/methodDataOop.hpp"
-#include "oops/methodOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "prims/jvmtiThreadState.hpp"
 #include "runtime/basicLock.hpp"
 #include "runtime/biasedLocking.hpp"
@@ -512,21 +512,20 @@
 // Reset Lesp.
 sub( Lmonitors, wordSize, Lesp );
 
 // Reset SP by subtracting more space from Lesp.
 Label done;
-verify_oop(Lmethod);
 assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!");
 
 // A native does not need to do this, since its callee does not change SP.
-ld(Lmethod, methodOopDesc::access_flags_offset(), Gframe_size); // Load access flags.
+ld(Lmethod, Method::access_flags_offset(), Gframe_size); // Load access flags.
 btst(JVM_ACC_NATIVE, Gframe_size);
 br(Assembler::notZero, false, Assembler::pt, done);
 delayed()->nop();
 
 // Compute max expression stack+register save area
-lduh(Lmethod, in_bytes(methodOopDesc::max_stack_offset()), Gframe_size); // Load max stack.
+lduh(Lmethod, in_bytes(Method::max_stack_offset()), Gframe_size); // Load max stack.
 add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size );
 
 //
 // now set up a stack frame with the size computed above
 //
@@ -608,11 +607,11 @@
 // If jvmti single stepping is on for a thread we must not call compiled code.
 void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) {
 
 // Assume we want to go compiled if available
 
-ld_ptr(G5_method, in_bytes(methodOopDesc::from_interpreted_offset()), target);
+ld_ptr(G5_method, in_bytes(Method::from_interpreted_offset()), target);
 
 if (JvmtiExport::can_post_interpreter_events()) {
 // JVMTI events, such as single-stepping, are implemented partly by avoiding running
 // compiled code in threads for which the event is enabled. Check here for
 // interp_only_mode if these events CAN be enabled.
@@ -620,15 +619,15 @@
 Label skip_compiled_code;
 
 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
 ld(interp_only, scratch);
 cmp_zero_and_br(Assembler::notZero, scratch, skip_compiled_code, true, Assembler::pn);
-delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
+delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target);
 bind(skip_compiled_code);
 }
 
-// the i2c_adapters need methodOop in G5_method (right? %%%)
+// the i2c_adapters need Method* in G5_method (right? %%%)
 // do the call
 #ifdef ASSERT
 {
 Label ok;
 br_notnull_short(target, Assembler::pt, ok);
@@ -723,24 +722,22 @@
 
 bind(aligned);
 if (should_set_CC == set_CC) tst(Rdst);
 }
 
-
-void InterpreterMacroAssembler::get_cache_index_at_bcp(Register cache, Register tmp,
+void InterpreterMacroAssembler::get_cache_index_at_bcp(Register temp, Register index,
 int bcp_offset, size_t index_size) {
 assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
 if (index_size == sizeof(u2)) {
-get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
+get_2_byte_integer_at_bcp(bcp_offset, temp, index, Unsigned);
 } else if (index_size == sizeof(u4)) {
 assert(EnableInvokeDynamic, "giant index used only for JSR 292");
-get_4_byte_integer_at_bcp(bcp_offset, cache, tmp);
-assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
-xor3(tmp, -1, tmp); // convert to plain index
+get_4_byte_integer_at_bcp(bcp_offset, temp, index);
+assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
+xor3(index, -1, index); // convert to plain index
 } else if (index_size == sizeof(u1)) {
-assert(EnableInvokeDynamic, "tiny index used only for JSR 292");
-ldub(Lbcp, bcp_offset, tmp);
+ldub(Lbcp, bcp_offset, index);
 } else {
 ShouldNotReachHere();
 }
 }
 
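
Note on the hunk above: in the u4 (invokedynamic) case the cache index is stored bitwise-complemented in the bytecode stream, and xor3(index, -1, index) restores it, since x ^ -1 == ~x for any integer x. For example ~123 is -124, and -124 ^ -1 is 123 again, which is what the assert on ConstantPool::decode_invokedynamic_index(~123) checks. An illustrative compile-time check (not part of the change):

static_assert((~123 ^ -1) == 123, "xor with -1 undoes the bitwise-NOT encoding");
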
@@ -763,11 +760,11 @@
 Register bytecode,
 int byte_no,
 int bcp_offset,
 size_t index_size) {
 get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size);
-ld_ptr(cache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode);
+ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode);
 const int shift_count = (1 + byte_no) * BitsPerByte;
 assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
 (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
 "correct shift count");
 srl(bytecode, shift_count, bytecode);
@@ -788,13 +785,33 @@
 }
 // convert from field index to ConstantPoolCacheEntry index
 // and from word index to byte offset
 sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
 // skip past the header
-add(tmp, in_bytes(constantPoolCacheOopDesc::base_offset()), tmp);
+add(tmp, in_bytes(ConstantPoolCache::base_offset()), tmp);
 // construct pointer to cache entry
 add(LcpoolCache, tmp, cache);
+}
+
+
+// Load object from cpool->resolved_references(index)
+void InterpreterMacroAssembler::load_resolved_reference_at_index(
+Register result, Register index) {
+assert_different_registers(result, index);
+assert_not_delayed();
+// convert from field index to resolved_references() index and from
+// word index to byte offset. Since this is a java object, it can be compressed
+Register tmp = index; // reuse
+sll(index, LogBytesPerHeapOop, tmp);
+get_constant_pool(result);
+// load pointer for resolved_references[] objArray
+ld_ptr(result, ConstantPool::resolved_references_offset_in_bytes(), result);
+// JNIHandles::resolve(result)
+ld_ptr(result, 0, result);
+// Add in the index
+add(result, tmp, result);
+load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result);
 }
 
 
 // Generate a subtype check: branch to ok_is_subtype if sub_klass is
 // a subtype of super_klass. Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
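
The new load_resolved_reference_at_index above exists because a ConstantPool is no longer a Java heap object, so resolved constants that are Java objects (String constants, JSR 292 MethodHandle/MethodType constants, and so on) now live in a separate resolved_references array that the pool reaches through a handle. A rough C++ analogue of what the generated SPARC code does, offered only as an illustration (the helper name resolved_reference_at is hypothetical, not HotSpot API):

// 1. Load the handle stored at ConstantPool::resolved_references_offset_in_bytes().
// 2. Resolve it (the comment in the diff notes this is JNIHandles::resolve, one dereference).
// 3. Index the Object[] as a normal, possibly compressed, heap-oop array.
oop resolved_reference_at(ConstantPool* cpool, int index) {
  jobject handle = *(jobject*)((char*)cpool + ConstantPool::resolved_references_offset_in_bytes());
  objArrayOop refs = (objArrayOop) JNIHandles::resolve(handle);
  return refs->obj_at(index);  // matches load_heap_oop at arrayOopDesc::base_offset_in_bytes(T_OBJECT)
}
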
@@ -937,29 +954,29 @@
 index_check_without_pop(array, index, index_shift, tmp, res);
 }
 
 
 void InterpreterMacroAssembler::get_const(Register Rdst) {
-ld_ptr(Lmethod, in_bytes(methodOopDesc::const_offset()), Rdst);
+ld_ptr(Lmethod, in_bytes(Method::const_offset()), Rdst);
 }
 
 
 void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
 get_const(Rdst);
-ld_ptr(Rdst, in_bytes(constMethodOopDesc::constants_offset()), Rdst);
+ld_ptr(Rdst, in_bytes(ConstMethod::constants_offset()), Rdst);
 }
 
 
 void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
 get_constant_pool(Rdst);
-ld_ptr(Rdst, constantPoolOopDesc::cache_offset_in_bytes(), Rdst);
+ld_ptr(Rdst, ConstantPool::cache_offset_in_bytes(), Rdst);
 }
 
 
 void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
 get_constant_pool(Rcpool);
-ld_ptr(Rcpool, constantPoolOopDesc::tags_offset_in_bytes(), Rtags);
+ld_ptr(Rcpool, ConstantPool::tags_offset_in_bytes(), Rtags);
 }
 
 
 // unlock if synchronized method
 //
@@ -983,11 +1000,11 @@
 JavaThread::do_not_unlock_if_synchronized_offset());
 ldbool(do_not_unlock_if_synchronized, G1_scratch);
 stbool(G0, do_not_unlock_if_synchronized); // reset the flag
 
 // check if synchronized method
-const Address access_flags(Lmethod, methodOopDesc::access_flags_offset());
+const Address access_flags(Lmethod, Method::access_flags_offset());
 interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
 push(state); // save tos
 ld(access_flags, G3_scratch); // Load access flags.
 btst(JVM_ACC_SYNCHRONIZED, G3_scratch);
 br(zero, false, pt, unlocked);
@@ -1119,11 +1136,10 @@
 
 // save result (push state before jvmti call and pop it afterwards) and notify jvmti
 notify_method_exit(false, state, NotifyJVMTI);
 
 interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
-verify_oop(Lmethod);
 verify_thread();
 
 // return tos
 assert(Otos_l1 == Otos_i, "adjust code below");
 switch (state) {
@@ -1293,34 +1309,34 @@
 }
 }
 
 #ifndef CC_INTERP
 
-// Get the method data pointer from the methodOop and set the
+// Get the method data pointer from the Method* and set the
 // specified register to its value.
 
 void InterpreterMacroAssembler::set_method_data_pointer() {
 assert(ProfileInterpreter, "must be profiling interpreter");
 Label get_continue;
 
-ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr);
+ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
 test_method_data_pointer(get_continue);
-add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr);
+add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
 bind(get_continue);
 }
 
 // Set the method data pointer for the current bcp.
 
 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
 assert(ProfileInterpreter, "must be profiling interpreter");
 Label zero_continue;
 
 // Test MDO to avoid the call if it is NULL.
-ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr);
+ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
 test_method_data_pointer(zero_continue);
 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
-add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr);
+add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
 add(ImethodDataPtr, O0, ImethodDataPtr);
 bind(zero_continue);
 }
 
 // Test ImethodDataPtr. If it is null, continue at the specified label
@@ -1337,12 +1353,12 @@
 test_method_data_pointer(verify_continue);
 
 // If the mdp is valid, it will point to a DataLayout header which is
 // consistent with the bcp. The converse is highly probable also.
 lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);
-ld_ptr(Lmethod, methodOopDesc::const_offset(), O5);
-add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), G3_scratch);
+ld_ptr(Lmethod, Method::const_offset(), O5);
+add(G3_scratch, in_bytes(ConstMethod::codes_offset()), G3_scratch);
 add(G3_scratch, O5, G3_scratch);
 cmp(Lbcp, G3_scratch);
 brx(Assembler::equal, false, Assembler::pt, verify_continue);
 
 Register temp_reg = O5;
@@ -2070,18 +2086,18 @@
 #endif /* CC_INTERP */
 
 void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Register Rtmp2 ) {
 assert(UseCompiler, "incrementing must be useful");
 #ifdef CC_INTERP
-Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() +
+Address inv_counter(G5_method, Method::invocation_counter_offset() +
 InvocationCounter::counter_offset());
-Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() +
+Address be_counter (G5_method, Method::backedge_counter_offset() +
 InvocationCounter::counter_offset());
 #else
-Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() +
+Address inv_counter(Lmethod, Method::invocation_counter_offset() +
 InvocationCounter::counter_offset());
-Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() +
+Address be_counter (Lmethod, Method::backedge_counter_offset() +
 InvocationCounter::counter_offset());
 #endif /* CC_INTERP */
 int delta = InvocationCounter::count_increment;
 
 // Load each counter in a register
@@ -2106,18 +2122,18 @@
 }
 
 void InterpreterMacroAssembler::increment_backedge_counter( Register Rtmp, Register Rtmp2 ) {
 assert(UseCompiler, "incrementing must be useful");
 #ifdef CC_INTERP
-Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() +
+Address be_counter (G5_method, Method::backedge_counter_offset() +
 InvocationCounter::counter_offset());
-Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() +
+Address inv_counter(G5_method, Method::invocation_counter_offset() +
 InvocationCounter::counter_offset());
 #else
-Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() +
+Address be_counter (Lmethod, Method::backedge_counter_offset() +
 InvocationCounter::counter_offset());
-Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() +
+Address inv_counter(Lmethod, Method::invocation_counter_offset() +
 InvocationCounter::counter_offset());
 #endif /* CC_INTERP */
 int delta = InvocationCounter::count_increment;
 // Load each counter in a register
 ld( be_counter, Rtmp );
@@ -2150,11 +2166,11 @@
 AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
 load_contents(limit, Rtmp);
 cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);
 
 // When ProfileInterpreter is on, the backedge_count comes from the
-// methodDataOop, which value does not get reset on the call to
+// MethodData*, which value does not get reset on the call to
 // frequency_counter_overflow(). To avoid excessive calls to the overflow
 // routine while the method is being compiled, add a second test to make sure
 // the overflow function is called only once every overflow_frequency.
 if (ProfileInterpreter) {
 const int overflow_frequency = 1024;
@@ -2210,14 +2226,14 @@
 if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop ", file, line); }
 }
 
 
 // local helper function for the verify_oop_or_return_address macro
-static bool verify_return_address(methodOopDesc* m, int bci) {
+static bool verify_return_address(Method* m, int bci) {
 #ifndef PRODUCT
 address pc = (address)(m->constMethod())
-+ in_bytes(constMethodOopDesc::codes_offset()) + bci;
++ in_bytes(ConstMethod::codes_offset()) + bci;
 // assume it is a valid return address if it is inside m and is preceded by a jsr
 if (!m->contains(pc)) return false;
 address jsr_pc;
 jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
 if (*jsr_pc == Bytecodes::_jsr && jsr_pc >= m->code_base()) return true;