comparison src/cpu/sparc/vm/sharedRuntime_sparc.cpp @ 6725:da91efe96a93

6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
author coleenp
date Sat, 01 Sep 2012 13:25:18 -0400
parents 1d7922586cf6
children 8a02ca5e5576
comparing 6724:36d1d483d5d6 with 6725:da91efe96a93
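The change below is one mechanical substitution applied throughout the file: method and inline-cache metadata stop being Java-heap oops (methodOop/methodOopDesc, compiledICHolderOop) and become plain C++ classes in native metaspace (Method*, CompiledICHolder), so the verify_oop checks on them disappear and oop constants become metadata constants. A minimal, self-contained sketch of why the adapters can keep addressing fields through in_bytes(Method::code_offset()) follows; the field names and offsetof-based accessors are illustrative, not HotSpot's exact code:

// Illustrative sketch, not HotSpot source: once Method is an ordinary
// C++ class in native memory, each field sits at a fixed byte offset
// from the Method*, which the macro assembler can fold straight into
// an ld_ptr displacement such as in_bytes(Method::code_offset()).
#include <cstddef>
#include <cstdio>

struct CodeBlob;                 // stand-in for the compiled-code type

struct Method {                  // hypothetical, heavily trimmed layout
  void*     _constMethod;
  CodeBlob* _code;               // set once the method is compiled
  void*     _i2i_entry;          // interpreter entry point

  static size_t code_offset()              { return offsetof(Method, _code); }
  static size_t interpreter_entry_offset() { return offsetof(Method, _i2i_entry); }
};

int main() {
  // These are the kinds of displacements the SPARC adapters bake in:
  std::printf("code at +%zu, interpreter entry at +%zu\n",
              Method::code_offset(), Method::interpreter_entry_offset());
  return 0;
}

In the real VM these accessors return a ByteSize that in_bytes() converts, but the principle is the same: a fixed displacement from a stable native pointer.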
@@ -27,11 +27,11 @@
 #include "assembler_sparc.inline.hpp"
 #include "code/debugInfoRec.hpp"
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
 #include "interpreter/interpreter.hpp"
-#include "oops/compiledICHolderOop.hpp"
+#include "oops/compiledICHolder.hpp"
 #include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vframeArray.hpp"
 #include "vmreg_sparc.inline.hpp"
 #ifdef COMPILER1
@@ -597,23 +597,23 @@
 
 
 // Patch the callers callsite with entry to compiled code if it exists.
 void AdapterGenerator::patch_callers_callsite() {
   Label L;
-  __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
+  __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
   __ br_null(G3_scratch, false, Assembler::pt, L);
   // Schedule the branch target address early.
-  __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
+  __ delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);
   // Call into the VM to patch the caller, then jump to compiled callee
   __ save_frame(4);     // Args in compiled layout; do not blow them
 
   // Must save all the live Gregs the list is:
   // G1: 1st Long arg (32bit build)
   // G2: global allocated to TLS
   // G3: used in inline cache check (scratch)
   // G4: 2nd Long arg (32bit build);
-  // G5: used in inline cache check (methodOop)
+  // G5: used in inline cache check (Method*)
 
   // The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops.
 
 #ifdef _LP64
   // mov(s,d)
@@ -643,11 +643,11 @@
   __ delayed()->mov(G2_thread, L7_thread_cache);
   __ mov(L7_thread_cache, G2_thread);
   __ ldx(FP, -8 + STACK_BIAS, G1);
   __ ldx(FP, -16 + STACK_BIAS, G4);
   __ mov(L5, G5_method);
-  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
+  __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);
 #endif /* _LP64 */
 
   __ restore();      // Restore args
   __ bind(L);
 }
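For orientation, here is a hedged C++ restatement of the control flow the two hunks above generate; the helper name fixup_callers_callsite and the two-field Method layout are stand-ins for illustration, not HotSpot's API:

// Hedged sketch of patch_callers_callsite: if the callee has been
// compiled, call into the VM (under save_frame/restore, with the live
// G registers preserved) to repatch the caller's callsite; then fall
// through to the interpreter entry preloaded in the delay slot.
#include <cstdio>

struct Method {
  void* _code;        // compiled entry, nullptr while interpreted only
  void* _i2i_entry;   // interpreter entry point
};

// Stand-in for the SharedRuntime VM call the stub makes (assumed name).
static void fixup_callers_callsite(Method* m, void* caller_pc) {
  std::printf("patching callsite %p -> compiled code %p\n", caller_pc, m->_code);
}

void* resolve_entry(Method* m, void* caller_pc) {
  if (m->_code != nullptr) {               // ld_ptr code_offset + br_null L
    fixup_callers_callsite(m, caller_pc);  // the save_frame(4)/call/restore path
  }
  return m->_i2i_entry;                    // branch target scheduled early
}

int main() {
  Method m{nullptr, (void*)0x1000};
  resolve_entry(&m, (void*)0x2000);        // not yet compiled: no patching
  m._code = (void*)0x3000;
  resolve_entry(&m, (void*)0x2000);        // compiled: callsite gets patched
}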
@@ -851,11 +851,11 @@
   }
 }
 
 #ifdef _LP64
   // Need to reload G3_scratch, used for temporary displacements.
-  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
+  __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);
 
   // Pass O5_savedSP as an argument to the interpreter.
   // The interpreter will restore SP to this value before returning.
   __ set(extraspace, G1);
   __ add(SP, G1, O5_savedSP);
@@ -1044,11 +1044,11 @@
     __ sub(SP, (comp_words_on_stack)*wordSize, SP);
   }
 
   // Will jump to the compiled code just as if compiled code was doing it.
   // Pre-load the register-jump target early, to schedule it better.
-  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3);
+  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3);
 
   // Now generate the shuffle code.  Pick up all register args and move the
   // rest through G1_scratch.
   for (int i=0; i<total_args_passed; i++) {
     if (sig_bt[i] == T_VOID) {
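The loop this hunk opens walks the flattened signature and moves each argument from the compiled convention into the interpreter's layout, passing overflow through G1_scratch. A simplified, self-contained model of that walk (the enum values kept and the move helper are invented for illustration):

// Simplified model of the i2c argument shuffle (illustrative only):
// walk the flattened signature; T_VOID entries are the second halves
// of longs and doubles and carry no payload, so they are skipped,
// exactly as the generated loop does.
#include <cstdio>

enum BasicType { T_INT, T_LONG, T_VOID, T_OBJECT };

static void move_arg(int i, BasicType bt) {
  // stand-in for the register/stack move emitted per argument
  std::printf("arg %d: moved (type %d)\n", i, bt);
}

void shuffle_args(const BasicType* sig_bt, int total_args_passed) {
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) continue;  // longs/doubles occupy two slots
    move_arg(i, sig_bt[i]);
  }
}

int main() {
  BasicType sig[] = { T_OBJECT, T_LONG, T_VOID, T_INT };  // (this, long, int)
  shuffle_args(sig, 4);
}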
@@ -1161,11 +1161,11 @@
   // Jump to the compiled code just as if compiled code was doing it.
   //
 #ifndef _LP64
   if (g3_crushed) {
     // Rats load was wasted, at least it is in cache...
-    __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3);
+    __ ld_ptr(G5_method, Method::from_compiled_offset(), G3);
   }
 #endif /* _LP64 */
 
   // 6243940 We might end up in handle_wrong_method if
   // the callee is deoptimized as we race thru here. If that
@@ -1210,11 +1210,11 @@
 
   agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);
 
 
   // -------------------------------------------------------------------------
-  // Generate a C2I adapter.  On entry we know G5 holds the methodOop.  The
+  // Generate a C2I adapter.  On entry we know G5 holds the Method*.  The
   // args start out packed in the compiled layout.  They need to be unpacked
   // into the interpreter layout.  This will almost always require some stack
   // space.  We grow the current (compiled) stack, then repack the args.  We
   // finally end in a jump to the generic interpreter entry point.  On exit
   // from the interpreter, the interpreter will restore our SP (lest the
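A toy model of the contract this comment block describes, with all sizes and names invented: the adapter grows the compiled frame to hold the interpreter's memory-based argument layout, and the interpreter restores the caller's SP on return, which is why O5_savedSP is passed in (see the hunk at lines 858-861 above).

// Toy model of the C2I frame contract (illustrative, not HotSpot code).
#include <cstdio>

const int wordSize = 8;

struct Frame { long sp; };

void c2i_enter(Frame& f, int total_args_passed) {
  long saved_sp = f.sp;                         // what O5_savedSP carries
  f.sp -= (long)total_args_passed * wordSize;   // grow for the interp layout
  std::printf("repacking %d args below SP=%ld (savedSP=%ld)\n",
              total_args_passed, f.sp, saved_sp);
  // ... interpreter runs here ...
  f.sp = saved_sp;                              // interpreter restores SP
}

int main() {
  Frame f{0x7fff0000};
  c2i_enter(f, 5);
}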
@@ -1230,40 +1230,36 @@
 #endif
 
   AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
 
   __ verify_oop(O0);
-  __ verify_oop(G5_method);
   __ load_klass(O0, G3_scratch);
-  __ verify_oop(G3_scratch);
 
 #if !defined(_LP64) && defined(COMPILER2)
   __ save(SP, -frame::register_save_words*wordSize, SP);
-  __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
-  __ verify_oop(R_temp);
+  __ ld_ptr(G5_method, CompiledICHolder::holder_klass_offset(), R_temp);
   __ cmp(G3_scratch, R_temp);
   __ restore();
 #else
-  __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
-  __ verify_oop(R_temp);
+  __ ld_ptr(G5_method, CompiledICHolder::holder_klass_offset(), R_temp);
   __ cmp(G3_scratch, R_temp);
 #endif
 
   Label ok, ok2;
   __ brx(Assembler::equal, false, Assembler::pt, ok);
-  __ delayed()->ld_ptr(G5_method, compiledICHolderOopDesc::holder_method_offset(), G5_method);
+  __ delayed()->ld_ptr(G5_method, CompiledICHolder::holder_method_offset(), G5_method);
   __ jump_to(ic_miss, G3_scratch);
   __ delayed()->nop();
 
   __ bind(ok);
   // Method might have been compiled since the call site was patched to
   // interpreted if that is the case treat it as a miss so we can get
   // the call site corrected.
-  __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
+  __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
   __ bind(ok2);
   __ br_null(G3_scratch, false, Assembler::pt, skip_fixup);
-  __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
+  __ delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);
   __ jump_to(ic_miss, G3_scratch);
   __ delayed()->nop();
 
 }
 
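The hunk above is the compiled-call inline-cache check: G5 now carries a CompiledICHolder (plain native metadata, hence the dropped verify_oop calls), and the receiver's klass must match the holder's cached klass or the call falls into the ic_miss stub. A hedged, self-contained model of that check, with an invented struct layout:

// Hedged model of the IC check above (layouts illustrative): compare
// the receiver's klass with the klass cached in the holder; on a miss,
// or if the method has since been compiled (so the interpreted call
// site is stale), go to the IC-miss stub for re-resolution.
#include <cstdio>

struct Klass;
struct Method { void* _code; };

struct CompiledICHolder {       // native metadata, not a Java-heap oop
  Method* _holder_method;
  Klass*  _holder_klass;
};

enum Outcome { HIT_INTERPRETED, IC_MISS };

Outcome ic_check(CompiledICHolder* holder, Klass* receiver_klass) {
  if (receiver_klass != holder->_holder_klass)
    return IC_MISS;                          // brx(not equal) -> ic_miss
  Method* m = holder->_holder_method;        // loaded in the delay slot
  // "Method might have been compiled since the call site was patched
  // to interpreted": treat that as a miss so the site gets corrected.
  return m->_code != nullptr ? IC_MISS : HIT_INTERPRETED;
}

int main() {
  Method m{nullptr};
  Klass* k = (Klass*)0x10;
  CompiledICHolder h{&m, k};
  std::printf("same klass -> %d, wrong klass -> %d\n",
              ic_check(&h, k), ic_check(&h, (Klass*)0x20));
}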
@@ -2569,11 +2565,11 @@
     SkipIfEqual skip_if(
       masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
     // create inner frame
     __ save_frame(0);
     __ mov(G2_thread, L7_thread_cache);
-    __ set_oop_constant(JNIHandles::make_local(method()), O1);
+    __ set_metadata_constant(method(), O1);
     __ call_VM_leaf(L7_thread_cache,
       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
       G2_thread, O1);
     __ restore();
   }
@@ -2581,11 +2577,11 @@
   // RedefineClasses() tracing support for obsolete method entry
   if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
     // create inner frame
     __ save_frame(0);
     __ mov(G2_thread, L7_thread_cache);
-    __ set_oop_constant(JNIHandles::make_local(method()), O1);
+    __ set_metadata_constant(method(), O1);
     __ call_VM_leaf(L7_thread_cache,
       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
       G2_thread, O1);
     __ restore();
   }
@@ -2867,11 +2863,11 @@
   // Tell dtrace about this method exit
   {
     SkipIfEqual skip_if(
       masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
     save_native_result(masm, ret_type, stack_slots);
-    __ set_oop_constant(JNIHandles::make_local(method()), O1);
+    __ set_metadata_constant(method(), O1);
     __ call_VM_leaf(L7_thread_cache,
       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
       G2_thread, O1);
     restore_native_result(masm, ret_type, stack_slots);
   }
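All three probe sites above make the same substitution: a Method* no longer lives in the Java heap, so it no longer needs a JNI local handle (whose purpose is to keep a movable oop reachable and relocatable across a VM call); the raw pointer can be materialized directly as a metadata constant. A hedged sketch of that distinction, with deliberately simplified stand-in types:

// Hedged sketch of why JNIHandles::make_local() drops out above. A JNI
// handle adds an indirection the GC can update when it moves the
// underlying Java object; metaspace metadata never moves, so the raw
// Method* can be loaded into O1 as an ordinary constant. All types here
// are simplified stand-ins, not HotSpot's.
#include <cstdio>

struct oopDesc {};               // a GC-managed Java object (may move)
typedef oopDesc* oop;
struct Method {};                // native metadata (never moves)

struct Handle { oop* slot; };    // the GC knows about and updates *slot

Handle make_local(oop* root) { return Handle{root}; }

int main() {
  oopDesc obj;
  oop root = &obj;
  Handle h = make_local(&root);  // old style: indirect handle for an oop
  Method m;
  Method* constant = &m;         // new style: embed the pointer directly
  std::printf("handle slot=%p, raw metadata=%p\n",
              (void*)h.slot, (void*)constant);
}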
@@ -4079,13 +4075,13 @@
   Label pending;
 
   __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
   __ br_notnull_short(O1, Assembler::pn, pending);
 
-  // get the returned methodOop
+  // get the returned Method*
 
-  __ get_vm_result(G5_method);
+  __ get_vm_result_2(G5_method);
   __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);
 
   // O0 is where we want to jump, overwrite G3 which is saved and scratch
 
   __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);
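The last hunk swaps get_vm_result for get_vm_result_2. The thread carries two VM-call result slots, one for Java-heap oops and one for metadata; since the resolved Method* is now metadata, it comes back through the second slot. A hedged, simplified model (the field names follow HotSpot's convention, everything else is invented):

// Hedged model of the get_vm_result / get_vm_result_2 split above.
#include <cstdio>

struct oopDesc {};               // Java-heap object
struct Metadata {};              // native metadata base
struct Method : Metadata {};

struct JavaThread {              // simplified: just the two result slots
  oopDesc*  _vm_result;          // oop results     (fetched by get_vm_result)
  Metadata* _vm_result_2;        // metadata results (get_vm_result_2)
};

Metadata* get_vm_result_2(JavaThread* t) {
  Metadata* r = t->_vm_result_2;
  t->_vm_result_2 = nullptr;     // the generated helper also clears the slot
  return r;
}

int main() {
  Method m;
  JavaThread t{nullptr, &m};     // a VM call left a Method* in slot 2
  std::printf("returned Method* = %p\n", (void*)get_vm_result_2(&t));
}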