comparison src/cpu/sparc/vm/sharedRuntime_sparc.cpp @ 727:6b2273dd6fa9

6822110: Add AddressLiteral class on SPARC
Summary: The Address class on SPARC currently handles both addresses and address literals, which makes the Address class more complicated than it has to be.
Reviewed-by: never, kvn
author twisti
date Tue, 21 Apr 2009 11:16:30 -0700
parents e5b0439ef4ae
children dcf03e02b020
comparing 725:928912ce8438 with 727:6b2273dd6fa9
623 __ mov(G5_method, L5); 623 __ mov(G5_method, L5);
624 __ mov(G5_method, O0); // VM needs target method 624 __ mov(G5_method, O0); // VM needs target method
625 __ mov(I7, O1); // VM needs caller's callsite 625 __ mov(I7, O1); // VM needs caller's callsite
626 // Must be a leaf call... 626 // Must be a leaf call...
627 // can be very far once the blob has been relocated 627 // can be very far once the blob has been relocated
628 Address dest(O7, CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)); 628 AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
629 __ relocate(relocInfo::runtime_call_type); 629 __ relocate(relocInfo::runtime_call_type);
630 __ jumpl_to(dest, O7); 630 __ jumpl_to(dest, O7, O7);
631 __ delayed()->mov(G2_thread, L7_thread_cache); 631 __ delayed()->mov(G2_thread, L7_thread_cache);
632 __ mov(L7_thread_cache, G2_thread); 632 __ mov(L7_thread_cache, G2_thread);
633 __ mov(L1, G1); 633 __ mov(L1, G1);
634 __ mov(L4, G4); 634 __ mov(L4, G4);
635 __ mov(L5, G5_method); 635 __ mov(L5, G5_method);
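
The pattern in this first hunk recurs throughout the change: an absolute runtime address that used to be smuggled through Address's (register, address) constructor becomes an AddressLiteral, and the jump macros take an explicit scratch register to materialize it. A minimal sketch of the split; the shapes only approximate the real assembler_sparc.hpp declarations:

    typedef unsigned char* address;
    struct Register { int encoding; };   // stand-in for the real register handle

    // After the change, Address is strictly base register + displacement.
    class Address {
      Register _base;
      int      _disp;
     public:
      Address(Register base, int disp) : _base(base), _disp(disp) {}
    };

    // AddressLiteral wraps an absolute address (plus, in the real class,
    // relocation information). It carries no register of its own, which is
    // why converted call sites such as jumpl_to(dest, O7, O7) must now name
    // the scratch register that the old Address form held implicitly.
    class AddressLiteral {
      address _address;
     public:
      explicit AddressLiteral(address addr) : _address(addr) {}
      address value() const { return _address; }
    };
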
1150 // Jump to the compiled code just as if compiled code was doing it. 1150 // Jump to the compiled code just as if compiled code was doing it.
1151 // 1151 //
1152 #ifndef _LP64 1152 #ifndef _LP64
1153 if (g3_crushed) { 1153 if (g3_crushed) {
1154 // Rats, the load was wasted; at least it is in cache... 1154 // Rats, the load was wasted; at least it is in cache...
1155 __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3); 1155 __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3);
1156 } 1156 }
1157 #endif /* _LP64 */ 1157 #endif /* _LP64 */
1158 1158
1159 // 6243940 We might end up in handle_wrong_method if 1159 // 6243940 We might end up in handle_wrong_method if
1160 // the callee is deoptimized as we race thru here. If that 1160 // the callee is deoptimized as we race thru here. If that
1163 // "compiled" so it is much better to make this transition 1163 // "compiled" so it is much better to make this transition
1164 // invisible to the stack walking code. Unfortunately if 1164 // invisible to the stack walking code. Unfortunately if
1165 // we try and find the callee by normal means a safepoint 1165 // we try and find the callee by normal means a safepoint
1166 // is possible. So we stash the desired callee in the thread 1166 // is possible. So we stash the desired callee in the thread
1167 // and the VM will find it there should this case occur. 1167 // and the VM will find it there should this case occur.
1168 Address callee_target_addr(G2_thread, 0, in_bytes(JavaThread::callee_target_offset())); 1168 Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
1169 __ st_ptr(G5_method, callee_target_addr); 1169 __ st_ptr(G5_method, callee_target_addr);
1170 1170
1171 if (StressNonEntrant) { 1171 if (StressNonEntrant) {
1172 // Open a big window for deopt failure 1172 // Open a big window for deopt failure
1173 __ save_frame(0); 1173 __ save_frame(0);
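
The 6243940 comment above explains why the callee is parked in the thread instead of being recovered by stack walking. A rough C++ restatement of the single store the stub emits, with hypothetical field and accessor names standing in for the offset-based access:

    class methodOopDesc;
    class JavaThread {
      methodOopDesc* _callee_target;   // the slot JavaThread::callee_target_offset() names
     public:
      void set_callee_target(methodOopDesc* m) { _callee_target = m; }
    };

    // Equivalent of: __ st_ptr(G5_method, callee_target_addr);
    // If the callee is deoptimized while we race through and we end up in
    // handle_wrong_method, the VM reads this slot instead of walking a
    // half-built frame across a safepoint.
    inline void stash_callee(JavaThread* thread, methodOopDesc* callee) {
      thread->set_callee_target(callee);
    }
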
1216 Register R_temp = L0; // another scratch register 1216 Register R_temp = L0; // another scratch register
1217 #else 1217 #else
1218 Register R_temp = G1; // another scratch register 1218 Register R_temp = G1; // another scratch register
1219 #endif 1219 #endif
1220 1220
1221 Address ic_miss(G3_scratch, SharedRuntime::get_ic_miss_stub()); 1221 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
1222 1222
1223 __ verify_oop(O0); 1223 __ verify_oop(O0);
1224 __ verify_oop(G5_method); 1224 __ verify_oop(G5_method);
1225 __ load_klass(O0, G3_scratch); 1225 __ load_klass(O0, G3_scratch);
1226 __ verify_oop(G3_scratch); 1226 __ verify_oop(G3_scratch);
1238 #endif 1238 #endif
1239 1239
1240 Label ok, ok2; 1240 Label ok, ok2;
1241 __ brx(Assembler::equal, false, Assembler::pt, ok); 1241 __ brx(Assembler::equal, false, Assembler::pt, ok);
1242 __ delayed()->ld_ptr(G5_method, compiledICHolderOopDesc::holder_method_offset(), G5_method); 1242 __ delayed()->ld_ptr(G5_method, compiledICHolderOopDesc::holder_method_offset(), G5_method);
1243 __ jump_to(ic_miss); 1243 __ jump_to(ic_miss, G3_scratch);
1244 __ delayed()->nop(); 1244 __ delayed()->nop();
1245 1245
1246 __ bind(ok); 1246 __ bind(ok);
1247 // Method might have been compiled since the call site was patched to 1247 // Method might have been compiled since the call site was patched to
1248 // interpreted; if that is the case, treat it as a miss so we can get 1248 // interpreted; if that is the case, treat it as a miss so we can get
1249 // the call site corrected. 1249 // the call site corrected.
1250 __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch); 1250 __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
1251 __ bind(ok2); 1251 __ bind(ok2);
1252 __ br_null(G3_scratch, false, __ pt, skip_fixup); 1252 __ br_null(G3_scratch, false, __ pt, skip_fixup);
1253 __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch); 1253 __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
1254 __ jump_to(ic_miss); 1254 __ jump_to(ic_miss, G3_scratch);
1255 __ delayed()->nop(); 1255 __ delayed()->nop();
1256 1256
1257 } 1257 }
1258 1258
1259 address c2i_entry = __ pc(); 1259 address c2i_entry = __ pc();
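
For readers following the control flow rather than the refactor: the block above is the inline-cache check on the compiled-to-interpreted adapter path. A hedged, illustrative C++ model of its decisions (names are not HotSpot's):

    struct Klass;
    struct oopDesc { Klass* _klass; };
    typedef void (*Stub)();

    // Mirrors the brx / br_null / jump_to sequence: bail to the IC-miss stub
    // on a receiver-klass mismatch, or when the method has been compiled in
    // the meantime (methodOopDesc::code_offset() now non-null), so the call
    // site gets re-patched instead of entering stale interpreted code.
    void ic_check(oopDesc* receiver, Klass* cached_klass,
                  void* compiled_code, Stub ic_miss_stub) {
      if (receiver->_klass != cached_klass) { ic_miss_stub(); return; }
      if (compiled_code != 0)               { ic_miss_stub(); return; }
      // fall through: genuine interpreted call, proceed past skip_fixup
    }
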
1442 __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception 1442 __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
1443 // Since this is a native call, we *know* the proper exception handler 1443 // Since this is a native call, we *know* the proper exception handler
1444 // without calling into the VM: it's the empty function. Just pop this 1444 // without calling into the VM: it's the empty function. Just pop this
1445 // frame and then jump to forward_exception_entry; O7 will contain the 1445 // frame and then jump to forward_exception_entry; O7 will contain the
1446 // native caller's return PC. 1446 // native caller's return PC.
1447 Address exception_entry(G3_scratch, StubRoutines::forward_exception_entry()); 1447 AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
1448 __ jump_to(exception_entry); 1448 __ jump_to(exception_entry, G3_scratch);
1449 __ delayed()->restore(); // Pop this frame off. 1449 __ delayed()->restore(); // Pop this frame off.
1450 __ bind(L); 1450 __ bind(L);
1451 } 1451 }
1452 1452
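
The forward_exception jump shows the other half of the API change: jump_to now takes the literal plus a temp register. A simplified sketch of what the new macros plausibly expand to; the signatures here are assumed, and the real MacroAssembler additionally emits relocation info and the longer 64-bit sethi/or sequence:

    // jump_to: build the literal in temp, jump through it.
    void jump_to_sketch(MacroAssembler* masm,
                        const AddressLiteral& dest, Register temp) {
      masm->sethi(dest, temp);               // %hi(dest) -> temp
      masm->jmp(temp, dest.low10());         // jmp temp + %lo(dest)
    }

    // jumpl_to: same, but jump-and-link so the return PC lands in `link`.
    // With link == O7 (SPARC's return-address register) this behaves like a
    // call that can reach any address, which the relocated blob at line
    // 628/629 above requires.
    void jumpl_to_sketch(MacroAssembler* masm, const AddressLiteral& dest,
                         Register temp, Register link) {
      masm->sethi(dest, temp);
      masm->jmpl(temp, dest.low10(), link);
    }
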
1453 // A simple move of an integer-like type 1453 // A simple move of an integer-like type
1820 1820
1821 // First thing make an ic check to see if we should even be here 1821 // First thing make an ic check to see if we should even be here
1822 { 1822 {
1823 Label L; 1823 Label L;
1824 const Register temp_reg = G3_scratch; 1824 const Register temp_reg = G3_scratch;
1825 Address ic_miss(temp_reg, SharedRuntime::get_ic_miss_stub()); 1825 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
1826 __ verify_oop(O0); 1826 __ verify_oop(O0);
1827 __ load_klass(O0, temp_reg); 1827 __ load_klass(O0, temp_reg);
1828 __ cmp(temp_reg, G5_inline_cache_reg); 1828 __ cmp(temp_reg, G5_inline_cache_reg);
1829 __ brx(Assembler::equal, true, Assembler::pt, L); 1829 __ brx(Assembler::equal, true, Assembler::pt, L);
1830 __ delayed()->nop(); 1830 __ delayed()->nop();
1831 1831
1832 __ jump_to(ic_miss, 0); 1832 __ jump_to(ic_miss, temp_reg);
1833 __ delayed()->nop(); 1833 __ delayed()->nop();
1834 __ align(CodeEntryAlignment); 1834 __ align(CodeEntryAlignment);
1835 __ bind(L); 1835 __ bind(L);
1836 } 1836 }
1837 1837
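
Since every branch in these stubs drags a delayed() instruction behind it, a short aside on the idiom, keyed to the two lines above (background, not part of the diff):

    // __ brx(Assembler::equal, true, Assembler::pt, L);
    // __ delayed()->nop();
    //
    // SPARC branches execute one following instruction (the delay slot)
    // before control transfers. brx's second argument is the annul bit:
    // with annul = true on a conditional branch, the slot executes only
    // when the branch is taken. delayed()->nop() leaves the slot empty;
    // productive variants such as delayed()->ld_ptr(...) at line 1242
    // overlap a useful load with the branch instead.
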
2259 2259
2260 __ set_last_Java_frame(SP, O7); 2260 __ set_last_Java_frame(SP, O7);
2261 2261
2262 // Transition from _thread_in_Java to _thread_in_native. 2262 // Transition from _thread_in_Java to _thread_in_native.
2263 __ set(_thread_in_native, G3_scratch); 2263 __ set(_thread_in_native, G3_scratch);
2264 __ st(G3_scratch, G2_thread, in_bytes(JavaThread::thread_state_offset())); 2264 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2265 2265
2266 // We flushed the windows ages ago; now mark them as flushed 2266 // We flushed the windows ages ago; now mark them as flushed
2267 2267
2268 // mark windows as flushed 2268 // mark windows as flushed
2269 __ set(JavaFrameAnchor::flushed, G3_scratch); 2269 __ set(JavaFrameAnchor::flushed, G3_scratch);
2270 2270
2271-2273 Address flags(G2_thread, 0, in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset())); 2271 Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
2274 2272
2275 #ifdef _LP64 2273 #ifdef _LP64
2276 Address dest(O7, method->native_function()); 2274 AddressLiteral dest(method->native_function());
2277 __ relocate(relocInfo::runtime_call_type); 2275 __ relocate(relocInfo::runtime_call_type);
2278 __ jumpl_to(dest, O7); 2276 __ jumpl_to(dest, O7, O7);
2279 #else 2277 #else
2280 __ call(method->native_function(), relocInfo::runtime_call_type); 2278 __ call(method->native_function(), relocInfo::runtime_call_type);
2281 #endif 2279 #endif
2282 __ delayed()->st(G3_scratch, flags); 2280 __ delayed()->st(G3_scratch, flags);
2283 2281
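
Why the #ifdef: SPARC's call instruction encodes a 30-bit word displacement, roughly +/-2 GB of reach, which always suffices in a 32-bit address space but may not in a 64-bit one. The LP64 path therefore builds the full literal in O7 and uses jumpl_to, which deposits the return PC in O7 exactly as call would, while the delay slot's st(G3_scratch, flags) marks the windows flushed on the way out. A hedged model of the reach constraint (the stub itself chooses the long form statically for _LP64 rather than testing at runtime):

    #include <cstdint>

    inline bool reachable_by_sparc_call(uintptr_t from_pc, uintptr_t to_pc) {
      intptr_t disp = (intptr_t)(to_pc - from_pc);
      // 30-bit word displacement == 32-bit byte displacement: +/-2 GB
      return disp >= -(intptr_t(1) << 31) && disp < (intptr_t(1) << 31);
    }
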
2314 // must we block? 2312 // must we block?
2315 2313
2316 // Block, if necessary, before resuming in _thread_in_Java state. 2314 // Block, if necessary, before resuming in _thread_in_Java state.
2317 // In order for GC to work, don't clear the last_Java_sp until after blocking. 2315 // In order for GC to work, don't clear the last_Java_sp until after blocking.
2318 { Label no_block; 2316 { Label no_block;
2319 Address sync_state(G3_scratch, SafepointSynchronize::address_of_state()); 2317 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
2320 2318
2321 // Switch thread to "native transition" state before reading the synchronization state. 2319 // Switch thread to "native transition" state before reading the synchronization state.
2322 // This additional state is necessary because reading and testing the synchronization 2320 // This additional state is necessary because reading and testing the synchronization
2323 // state is not atomic w.r.t. GC, as this scenario demonstrates: 2321 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2324 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted. 2322 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2325 // VM thread changes sync state to synchronizing and suspends threads for GC. 2323 // VM thread changes sync state to synchronizing and suspends threads for GC.
2326 // Thread A is resumed to finish this native method, but doesn't block here since it 2324 // Thread A is resumed to finish this native method, but doesn't block here since it
2327 // didn't see any synchronization in progress, and escapes. 2325 // didn't see any synchronization in progress, and escapes.
2328 __ set(_thread_in_native_trans, G3_scratch); 2326 __ set(_thread_in_native_trans, G3_scratch);
2329 __ st(G3_scratch, G2_thread, in_bytes(JavaThread::thread_state_offset())); 2327 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2330 if(os::is_MP()) { 2328 if(os::is_MP()) {
2331 if (UseMembar) { 2329 if (UseMembar) {
2332 // Force this write out before the read below 2330 // Force this write out before the read below
2333 __ membar(Assembler::StoreLoad); 2331 __ membar(Assembler::StoreLoad);
2334 } else { 2332 } else {
2341 } 2339 }
2342 __ load_contents(sync_state, G3_scratch); 2340 __ load_contents(sync_state, G3_scratch);
2343 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized); 2341 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
2344 2342
2345 Label L; 2343 Label L;
2346 Address suspend_state(G2_thread, 0, in_bytes(JavaThread::suspend_flags_offset())); 2344 Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
2347 __ br(Assembler::notEqual, false, Assembler::pn, L); 2345 __ br(Assembler::notEqual, false, Assembler::pn, L);
2348-2349 __ delayed()->ld(suspend_state, G3_scratch); 2346 __ delayed()->ld(suspend_state, G3_scratch);
2350 __ cmp(G3_scratch, 0); 2347 __ cmp(G3_scratch, 0);
2351 __ br(Assembler::equal, false, Assembler::pt, no_block); 2348 __ br(Assembler::equal, false, Assembler::pt, no_block);
2352 __ delayed()->nop(); 2349 __ delayed()->nop();
2353 __ bind(L); 2350 __ bind(L);
2354 2351
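
The comment block above is the heart of the native-return protocol. A compact std::atomic restatement of the same logic, with illustrative names rather than HotSpot's runtime types:

    #include <atomic>

    enum ThreadState { _thread_in_native, _thread_in_native_trans, _thread_in_Java };
    enum SyncState   { _not_synchronized, _synchronizing, _synchronized };

    std::atomic<int> thread_state;
    std::atomic<int> safepoint_state;
    std::atomic<int> suspend_flags;

    // True when the thread must block before re-entering Java. Writing
    // _thread_in_native_trans first and fencing closes the window in which
    // the VM thread could start a safepoint between our write and our read.
    bool must_block() {
      thread_state.store(_thread_in_native_trans);           // __ set / __ st
      std::atomic_thread_fence(std::memory_order_seq_cst);   // membar(StoreLoad)
      return safepoint_state.load() != _not_synchronized     // load_contents(sync_state)
          || suspend_flags.load() != 0;                      // ld(suspend_state)
    }
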
2370 // thread state is thread_in_native_trans. Any safepoint blocking has already 2367 // thread state is thread_in_native_trans. Any safepoint blocking has already
2371 // happened so we can now change state to _thread_in_Java. 2368 // happened so we can now change state to _thread_in_Java.
2372 2369
2373 2370
2374 __ set(_thread_in_Java, G3_scratch); 2371 __ set(_thread_in_Java, G3_scratch);
2375 __ st(G3_scratch, G2_thread, in_bytes(JavaThread::thread_state_offset())); 2372 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2376 2373
2377 2374
2378 Label no_reguard; 2375 Label no_reguard;
2379 __ ld(G2_thread, in_bytes(JavaThread::stack_guard_state_offset()), G3_scratch); 2376 __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
2380 __ cmp(G3_scratch, JavaThread::stack_guard_yellow_disabled); 2377 __ cmp(G3_scratch, JavaThread::stack_guard_yellow_disabled);
2381 __ br(Assembler::notEqual, false, Assembler::pt, no_reguard); 2378 __ br(Assembler::notEqual, false, Assembler::pt, no_reguard);
2382 __ delayed()->nop(); 2379 __ delayed()->nop();
2383 2380
2384 save_native_result(masm, ret_type, stack_slots); 2381 save_native_result(masm, ret_type, stack_slots);
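
The stack_guard_yellow_disabled test restores overflow protection that a stack overflow during the native call may have consumed. A sketch of the decision, with assumed enum and helper names:

    enum StackGuardState { stack_guard_enabled, stack_guard_yellow_disabled };

    void maybe_reguard(StackGuardState guard_state) {
      if (guard_state != stack_guard_yellow_disabled)
        return;   // br(notEqual, false, pt, no_reguard)
      // Otherwise: save_native_result(...), call into the VM to re-protect
      // the yellow pages, restore_native_result(...) -- the code this hunk
      // leads into.
    }
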
2682 // First thing make an ic check to see if we should even be here 2679 // First thing make an ic check to see if we should even be here
2683 2680
2684 { 2681 {
2685 Label L; 2682 Label L;
2686 const Register temp_reg = G3_scratch; 2683 const Register temp_reg = G3_scratch;
2687 Address ic_miss(temp_reg, SharedRuntime::get_ic_miss_stub()); 2684 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
2688 __ verify_oop(O0); 2685 __ verify_oop(O0);
2689 __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg); 2686 __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
2690 __ cmp(temp_reg, G5_inline_cache_reg); 2687 __ cmp(temp_reg, G5_inline_cache_reg);
2691 __ brx(Assembler::equal, true, Assembler::pt, L); 2688 __ brx(Assembler::equal, true, Assembler::pt, L);
2692 __ delayed()->nop(); 2689 __ delayed()->nop();
2693 2690
2694 __ jump_to(ic_miss, 0); 2691 __ jump_to(ic_miss, temp_reg);
2695 __ delayed()->nop(); 2692 __ delayed()->nop();
2696 __ align(CodeEntryAlignment); 2693 __ align(CodeEntryAlignment);
2697 __ bind(L); 2694 __ bind(L);
2698 } 2695 }
2699 2696
3153 3150
3154 // Before we make new frames, check to see if stack is available. 3151 // Before we make new frames, check to see if stack is available.
3155 // Do this after the caller's return address is on top of stack 3152 // Do this after the caller's return address is on top of stack
3156 if (UseStackBanging) { 3153 if (UseStackBanging) {
3157 // Get total frame size for interpreted frames 3154 // Get total frame size for interpreted frames
3158-3159 __ ld(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()), O4); 3155 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
3160 __ bang_stack_size(O4, O3, G3_scratch); 3156 __ bang_stack_size(O4, O3, G3_scratch);
3161 } 3157 }
3162 3158
3163 __ ld(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()), O4array_size); 3159 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
3164 __ ld_ptr(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()), G3pcs); 3160 __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
3166 __ ld_ptr(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()), O3array); 3161 __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);
3167 3162
3168 // Adjust old interpreter frame to make space for new frame's extra java locals 3163 // Adjust old interpreter frame to make space for new frame's extra java locals
3169 // 3164 //
3170 // We capture the original sp for the transition frame only because it is needed in 3165 // We capture the original sp for the transition frame only because it is needed in
3171 // order to properly calculate interpreter_sp_adjustment. Even though in real life 3166 // order to properly calculate interpreter_sp_adjustment. Even though in real life
3174 // be told the sp_adjustment for each frame we create. If the frame size array 3169 // be told the sp_adjustment for each frame we create. If the frame size array
3175 // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size] 3170 // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
3176 // for each frame we create and keep up the illusion everywhere. 3171 // for each frame we create and keep up the illusion everywhere.
3177 // 3172 //
3178 3173
3179 __ ld(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()), O7); 3174 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
3180 __ mov(SP, O5_savedSP); // remember initial sender's original sp before adjustment 3175 __ mov(SP, O5_savedSP); // remember initial sender's original sp before adjustment
3181 __ sub(SP, O7, SP); 3176 __ sub(SP, O7, SP);
3182 3177
3183 #ifdef ASSERT 3178 #ifdef ASSERT
3184 // make sure that there is at least one entry in the array 3179 // make sure that there is at least one entry in the array
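
All four loads in this hunk read fields of the Deoptimization::UnrollBlock produced by the earlier fetch_unroll_info call. An illustrative layout, with fields named after the offset accessors used above; the real struct has more members and this ordering is assumed:

    #include <cstdint>
    typedef unsigned char* address;

    struct UnrollBlock_sketch {
      int        _caller_adjustment;   // caller_adjustment_offset_in_bytes()
      int        _number_of_frames;    // number_of_frames_offset_in_bytes()
      int        _total_frame_sizes;   // total_frame_sizes_offset_in_bytes()
      intptr_t*  _frame_sizes;         // frame_sizes_offset_in_bytes()
      address*   _frame_pcs;           // frame_pcs_offset_in_bytes()
    };
    // The stub uses _total_frame_sizes for the stack bang, then walks
    // _number_of_frames entries of _frame_sizes/_frame_pcs, pushing one
    // interpreter frame per virtual frame being rematerialized.
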
3223 Register O2UnrollBlock = O2; 3218 Register O2UnrollBlock = O2;
3224 Register O3tmp = O3; 3219 Register O3tmp = O3;
3225 Register I5exception_tmp = I5; 3220 Register I5exception_tmp = I5;
3226 Register G4exception_tmp = G4_scratch; 3221 Register G4exception_tmp = G4_scratch;
3227 int frame_size_words; 3222 int frame_size_words;
3228 Address saved_Freturn0_addr(FP, 0, -sizeof(double) + STACK_BIAS); 3223 Address saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
3229 #if !defined(_LP64) && defined(COMPILER2) 3224 #if !defined(_LP64) && defined(COMPILER2)
3230 Address saved_Greturn1_addr(FP, 0, -sizeof(double) -sizeof(jlong) + STACK_BIAS); 3225 Address saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
3231 #endif 3226 #endif
3232 Label cont; 3227 Label cont;
3233 3228
3234 OopMapSet *oop_maps = new OopMapSet(); 3229 OopMapSet *oop_maps = new OopMapSet();
3235 3230
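
The + STACK_BIAS term is the usual SPARC v9 wrinkle: in 64-bit mode, %sp and %fp hold the true addresses minus a fixed bias (2047 under the v9 ABI; 0 in 32-bit mode), so displacements through FP must add it back. A hedged arithmetic sketch:

    // Hypothetical local mirror of the platform's STACK_BIAS constant.
    const long STACK_BIAS_SKETCH = 2047;   // 64-bit SPARC; 0 on 32-bit

    // saved_Freturn0_addr names the spill slot for the floating-point
    // return value just below the (true) frame pointer:
    //   effective address = %fp + STACK_BIAS - sizeof(double)
    inline double* saved_Freturn0(char* biased_fp) {
      return (double*)(biased_fp + STACK_BIAS_SKETCH - sizeof(double));
    }
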
3287 // state will be extracted normally. 3282 // state will be extracted normally.
3288 3283
3289 // save exception oop in JavaThread and fall through into the 3284 // save exception oop in JavaThread and fall through into the
3290 // exception_in_tls case since they are handled in same way except 3285 // exception_in_tls case since they are handled in same way except
3291 // for where the pending exception is kept. 3286 // for where the pending exception is kept.
3292 __ st_ptr(Oexception, G2_thread, in_bytes(JavaThread::exception_oop_offset())); 3287 __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());
3293 3288
3294 // 3289 //
3295 // Vanilla deoptimization with an exception pending in exception_oop 3290 // Vanilla deoptimization with an exception pending in exception_oop
3296 // 3291 //
3297 int exception_in_tls_offset = __ offset() - start; 3292 int exception_in_tls_offset = __ offset() - start;
3304 3299
3305 #ifdef ASSERT 3300 #ifdef ASSERT
3306 { 3301 {
3307 // verify that there is really an exception oop in exception_oop 3302 // verify that there is really an exception oop in exception_oop
3308 Label has_exception; 3303 Label has_exception;
3309 __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception); 3304 __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
3310 __ br_notnull(Oexception, false, Assembler::pt, has_exception); 3305 __ br_notnull(Oexception, false, Assembler::pt, has_exception);
3311 __ delayed()-> nop(); 3306 __ delayed()-> nop();
3312 __ stop("no exception in thread"); 3307 __ stop("no exception in thread");
3313 __ bind(has_exception); 3308 __ bind(has_exception);
3314 3309
3315 // verify that there is no pending exception 3310 // verify that there is no pending exception
3316 Label no_pending_exception; 3311 Label no_pending_exception;
3317 Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset())); 3312 Address exception_addr(G2_thread, Thread::pending_exception_offset());
3318 __ ld_ptr(exception_addr, Oexception); 3313 __ ld_ptr(exception_addr, Oexception);
3319 __ br_null(Oexception, false, Assembler::pt, no_pending_exception); 3314 __ br_null(Oexception, false, Assembler::pt, no_pending_exception);
3320 __ delayed()->nop(); 3315 __ delayed()->nop();
3321 __ stop("must not have pending exception here"); 3316 __ stop("must not have pending exception here");
3322 __ bind(no_pending_exception); 3317 __ bind(no_pending_exception);
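
The #ifdef ASSERT block re-states, in generated code, the invariant of the deopt-with-exception path: the exception has already been moved into exception_oop, and pending_exception must be clear. An equivalent C++ check, with an assumed thread layout mirroring the offsets used above:

    #include <cassert>
    #include <cstddef>

    struct oopDesc;
    struct JavaThread_sketch {
      oopDesc* exception_oop;      // exception_oop_offset()
      oopDesc* pending_exception;  // Thread::pending_exception_offset()
    };

    void verify_deopt_exception_state(const JavaThread_sketch* t) {
      assert(t->exception_oop != NULL && "no exception in thread");
      assert(t->pending_exception == NULL && "must not have pending exception here");
    }
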