comparison src/cpu/sparc/vm/templateInterpreter_sparc.cpp @ 727:6b2273dd6fa9

6822110: Add AddressLiteral class on SPARC
Summary: The Address class on SPARC currently handles both addresses and address literals, which makes the Address class more complicated than it has to be.
Reviewed-by: never, kvn
author twisti
date Tue, 21 Apr 2009 11:16:30 -0700
parents e5b0439ef4ae
children 85656c8fa13f
comparing 725:928912ce8438 with 727:6b2273dd6fa9
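
The summary above gives the rationale; the hunks that follow show the resulting call-site pattern. A minimal before/after sketch, assembled from lines that appear in this changeset (register names and offsets as used in the hunks; an illustration of the API change, not standalone compilable code):

    // Before: Address doubles as an address literal, so the scratch register
    // is baked into the operand, and plain memory operands carry an explicit
    // zero displacement plus in_bytes() wrapping.
    Address thrower(G3_scratch, Interpreter::throw_exception_entry());
    __ jump_to(thrower);
    Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
    __ ld_ptr(exception_addr, Gtemp);

    // After: AddressLiteral carries only the literal address; the scratch
    // register moves to the instruction that materializes it, and in-memory
    // operands take the ByteSize offset directly.
    AddressLiteral thrower(Interpreter::throw_exception_entry());
    __ jump_to(thrower, G3_scratch);
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);

    // Loads from a literal address now spell out the sethi/low10 pair:
    AddressLiteral invocation_limit(&InvocationCounter::InterpreterInvocationLimit);
    __ sethi(invocation_limit, G3_scratch);
    __ ld(G3_scratch, invocation_limit.low10(), G3_scratch);
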
85 __ set((intptr_t)message, G4_scratch); 85 __ set((intptr_t)message, G4_scratch);
86 __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch); 86 __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
87 } 87 }
88 // throw exception 88 // throw exception
89 assert(Interpreter::throw_exception_entry() != NULL, "generate it first"); 89 assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
90 Address thrower(G3_scratch, Interpreter::throw_exception_entry()); 90 AddressLiteral thrower(Interpreter::throw_exception_entry());
91 __ jump_to (thrower); 91 __ jump_to(thrower, G3_scratch);
92 __ delayed()->nop(); 92 __ delayed()->nop();
93 return entry; 93 return entry;
94 } 94 }
95 95
96 address TemplateInterpreterGenerator::generate_ClassCastException_handler() { 96 address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
184 184
185 185
186 const Register cache = G3_scratch; 186 const Register cache = G3_scratch;
187 const Register size = G1_scratch; 187 const Register size = G1_scratch;
188 __ get_cache_and_index_at_bcp(cache, G1_scratch, 1); 188 __ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
189 __ ld_ptr(Address(cache, 0, in_bytes(constantPoolCacheOopDesc::base_offset()) + 189 __ ld_ptr(cache, constantPoolCacheOopDesc::base_offset() +
190 in_bytes(ConstantPoolCacheEntry::flags_offset())), size); 190 ConstantPoolCacheEntry::flags_offset(), size);
191 __ and3(size, 0xFF, size); // argument size in words 191 __ and3(size, 0xFF, size); // argument size in words
192 __ sll(size, Interpreter::logStackElementSize(), size); // each argument size in bytes 192 __ sll(size, Interpreter::logStackElementSize(), size); // each argument size in bytes
193 __ add(Lesp, size, Lesp); // pop arguments 193 __ add(Lesp, size, Lesp); // pop arguments
194 __ dispatch_next(state, step); 194 __ dispatch_next(state, step);
195 195
199 199
200 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) { 200 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
201 address entry = __ pc(); 201 address entry = __ pc();
202 __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache 202 __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
203 { Label L; 203 { Label L;
204 Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset())); 204 Address exception_addr(G2_thread, Thread::pending_exception_offset());
205 205 __ ld_ptr(exception_addr, Gtemp); // Load pending exception.
206 __ ld_ptr(exception_addr, Gtemp);
207 __ tst(Gtemp); 206 __ tst(Gtemp);
208 __ brx(Assembler::equal, false, Assembler::pt, L); 207 __ brx(Assembler::equal, false, Assembler::pt, L);
209 __ delayed()->nop(); 208 __ delayed()->nop();
210 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception)); 209 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
211 __ should_not_reach_here(); 210 __ should_not_reach_here();
280 // 279 //
281 void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) { 280 void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
282 // Update standard invocation counters 281 // Update standard invocation counters
283 __ increment_invocation_counter(O0, G3_scratch); 282 __ increment_invocation_counter(O0, G3_scratch);
284 if (ProfileInterpreter) { // %%% Merge this into methodDataOop 283 if (ProfileInterpreter) { // %%% Merge this into methodDataOop
285 Address interpreter_invocation_counter(Lmethod, 0, in_bytes(methodOopDesc::interpreter_invocation_counter_offset())); 284 Address interpreter_invocation_counter(Lmethod, methodOopDesc::interpreter_invocation_counter_offset());
286 __ ld(interpreter_invocation_counter, G3_scratch); 285 __ ld(interpreter_invocation_counter, G3_scratch);
287 __ inc(G3_scratch); 286 __ inc(G3_scratch);
288 __ st(G3_scratch, interpreter_invocation_counter); 287 __ st(G3_scratch, interpreter_invocation_counter);
289 } 288 }
290 289
291 if (ProfileInterpreter && profile_method != NULL) { 290 if (ProfileInterpreter && profile_method != NULL) {
292 // Test to see if we should create a method data oop 291 // Test to see if we should create a method data oop
293 Address profile_limit(G3_scratch, (address)&InvocationCounter::InterpreterProfileLimit); 292 AddressLiteral profile_limit(&InvocationCounter::InterpreterProfileLimit);
294 __ sethi(profile_limit); 293 __ sethi(profile_limit, G3_scratch);
295 __ ld(profile_limit, G3_scratch); 294 __ ld(G3_scratch, profile_limit.low10(), G3_scratch);
296 __ cmp(O0, G3_scratch); 295 __ cmp(O0, G3_scratch);
297 __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue); 296 __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue);
298 __ delayed()->nop(); 297 __ delayed()->nop();
299 298
300 // if no method data exists, go to profile_method 299 // if no method data exists, go to profile_method
301 __ test_method_data_pointer(*profile_method); 300 __ test_method_data_pointer(*profile_method);
302 } 301 }
303 302
304 Address invocation_limit(G3_scratch, (address)&InvocationCounter::InterpreterInvocationLimit); 303 AddressLiteral invocation_limit(&InvocationCounter::InterpreterInvocationLimit);
305 __ sethi(invocation_limit); 304 __ sethi(invocation_limit, G3_scratch);
306 __ ld(invocation_limit, G3_scratch); 305 __ ld(G3_scratch, invocation_limit.low10(), G3_scratch);
307 __ cmp(O0, G3_scratch); 306 __ cmp(O0, G3_scratch);
308 __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); 307 __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
309 __ delayed()->nop(); 308 __ delayed()->nop();
310 309
311 } 310 }
312 311
313 // Allocate monitor and lock method (asm interpreter) 312 // Allocate monitor and lock method (asm interpreter)
314 // ebx - methodOop 313 // ebx - methodOop
315 // 314 //
316 void InterpreterGenerator::lock_method(void) { 315 void InterpreterGenerator::lock_method(void) {
317 const Address access_flags (Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset())); 316 __ ld(Lmethod, in_bytes(methodOopDesc::access_flags_offset()), O0); // Load access flags.
318 __ ld(access_flags, O0);
319 317
320 #ifdef ASSERT 318 #ifdef ASSERT
321 { Label ok; 319 { Label ok;
322 __ btst(JVM_ACC_SYNCHRONIZED, O0); 320 __ btst(JVM_ACC_SYNCHRONIZED, O0);
323 __ br( Assembler::notZero, false, Assembler::pt, ok); 321 __ br( Assembler::notZero, false, Assembler::pt, ok);
357 355
358 void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size, 356 void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
359 Register Rscratch, 357 Register Rscratch,
360 Register Rscratch2) { 358 Register Rscratch2) {
361 const int page_size = os::vm_page_size(); 359 const int page_size = os::vm_page_size();
362 Address saved_exception_pc(G2_thread, 0, 360 Address saved_exception_pc(G2_thread, JavaThread::saved_exception_pc_offset());
363 in_bytes(JavaThread::saved_exception_pc_offset()));
364 Label after_frame_check; 361 Label after_frame_check;
365 362
366 assert_different_registers(Rframe_size, Rscratch, Rscratch2); 363 assert_different_registers(Rframe_size, Rscratch, Rscratch2);
367 364
368 __ set( page_size, Rscratch ); 365 __ set( page_size, Rscratch );
370 367
371 __ br( Assembler::lessEqual, false, Assembler::pt, after_frame_check ); 368 __ br( Assembler::lessEqual, false, Assembler::pt, after_frame_check );
372 __ delayed()->nop(); 369 __ delayed()->nop();
373 370
374 // get the stack base, and in debug, verify it is non-zero 371 // get the stack base, and in debug, verify it is non-zero
375 __ ld_ptr( G2_thread, in_bytes(Thread::stack_base_offset()), Rscratch ); 372 __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
376 #ifdef ASSERT 373 #ifdef ASSERT
377 Label base_not_zero; 374 Label base_not_zero;
378 __ cmp( Rscratch, G0 ); 375 __ cmp( Rscratch, G0 );
379 __ brx( Assembler::notEqual, false, Assembler::pn, base_not_zero ); 376 __ brx( Assembler::notEqual, false, Assembler::pn, base_not_zero );
380 __ delayed()->nop(); 377 __ delayed()->nop();
382 __ bind(base_not_zero); 379 __ bind(base_not_zero);
383 #endif 380 #endif
384 381
385 // get the stack size, and in debug, verify it is non-zero 382 // get the stack size, and in debug, verify it is non-zero
386 assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" ); 383 assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
387 __ ld_ptr( G2_thread, in_bytes(Thread::stack_size_offset()), Rscratch2 ); 384 __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
388 #ifdef ASSERT 385 #ifdef ASSERT
389 Label size_not_zero; 386 Label size_not_zero;
390 __ cmp( Rscratch2, G0 ); 387 __ cmp( Rscratch2, G0 );
391 __ brx( Assembler::notEqual, false, Assembler::pn, size_not_zero ); 388 __ brx( Assembler::notEqual, false, Assembler::pn, size_not_zero );
392 __ delayed()->nop(); 389 __ delayed()->nop();
457 // is necessary. 454 // is necessary.
458 // 455 //
459 // (gri - 2/25/2000) 456 // (gri - 2/25/2000)
460 457
461 458
462 const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())); 459 const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset());
463 const Address size_of_locals (G5_method, 0, in_bytes(methodOopDesc::size_of_locals_offset())); 460 const Address size_of_locals (G5_method, methodOopDesc::size_of_locals_offset());
464 const Address max_stack (G5_method, 0, in_bytes(methodOopDesc::max_stack_offset())); 461 const Address max_stack (G5_method, methodOopDesc::max_stack_offset());
465 int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong ); 462 int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );
466 463
467 const int extra_space = 464 const int extra_space =
468 rounded_vm_local_words + // frame local scratch space 465 rounded_vm_local_words + // frame local scratch space
469 //6815692//methodOopDesc::extra_stack_words() + // extra push slots for MH adapters 466 //6815692//methodOopDesc::extra_stack_words() + // extra push slots for MH adapters
536 // that all present references to Lbyte_code initialize the register 533 // that all present references to Lbyte_code initialize the register
537 // immediately before use 534 // immediately before use
538 if (native_call) { 535 if (native_call) {
539 __ mov(G0, Lbcp); 536 __ mov(G0, Lbcp);
540 } else { 537 } else {
541 __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::const_offset())), Lbcp ); 538 __ ld_ptr(G5_method, methodOopDesc::const_offset(), Lbcp);
542 __ add(Address(Lbcp, 0, in_bytes(constMethodOopDesc::codes_offset())), Lbcp ); 539 __ add(Lbcp, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
543 } 540 }
544 __ mov( G5_method, Lmethod); // set Lmethod 541 __ mov( G5_method, Lmethod); // set Lmethod
545 __ get_constant_pool_cache( LcpoolCache ); // set LcpoolCache 542 __ get_constant_pool_cache( LcpoolCache ); // set LcpoolCache
546 __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors 543 __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
547 #ifdef _LP64 544 #ifdef _LP64
575 __ verify_oop(G5_method); 572 __ verify_oop(G5_method);
576 573
577 // do nothing for empty methods (do not even increment invocation counter) 574 // do nothing for empty methods (do not even increment invocation counter)
578 if ( UseFastEmptyMethods) { 575 if ( UseFastEmptyMethods) {
579 // If we need a safepoint check, generate full interpreter entry. 576 // If we need a safepoint check, generate full interpreter entry.
580 Address sync_state(G3_scratch, SafepointSynchronize::address_of_state()); 577 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
581 __ load_contents(sync_state, G3_scratch); 578 __ set(sync_state, G3_scratch);
582 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized); 579 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
583 __ br(Assembler::notEqual, false, Assembler::pn, slow_path); 580 __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
584 __ delayed()->nop(); 581 __ delayed()->nop();
585 582
586 // Code: _return 583 // Code: _return
614 // XXX: for compressed oops pointer loading and decoding doesn't fit in 611 // XXX: for compressed oops pointer loading and decoding doesn't fit in
615 // delay slot and damages G1 612 // delay slot and damages G1
616 if ( UseFastAccessorMethods && !UseCompressedOops ) { 613 if ( UseFastAccessorMethods && !UseCompressedOops ) {
617 // Check if we need to reach a safepoint and generate full interpreter 614 // Check if we need to reach a safepoint and generate full interpreter
618 // frame if so. 615 // frame if so.
619 Address sync_state(G3_scratch, SafepointSynchronize::address_of_state()); 616 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
620 __ load_contents(sync_state, G3_scratch); 617 __ load_contents(sync_state, G3_scratch);
621 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized); 618 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
622 __ br(Assembler::notEqual, false, Assembler::pn, slow_path); 619 __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
623 __ delayed()->nop(); 620 __ delayed()->nop();
624 621
629 __ delayed()->nop(); 626 __ delayed()->nop();
630 627
631 628
632 // read first instruction word and extract bytecode @ 1 and index @ 2 629 // read first instruction word and extract bytecode @ 1 and index @ 2
633 // get first 4 bytes of the bytecodes (big endian!) 630 // get first 4 bytes of the bytecodes (big endian!)
634 __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::const_offset())), G1_scratch); 631 __ ld_ptr(G5_method, methodOopDesc::const_offset(), G1_scratch);
635 __ ld(Address(G1_scratch, 0, in_bytes(constMethodOopDesc::codes_offset())), G1_scratch); 632 __ ld(G1_scratch, constMethodOopDesc::codes_offset(), G1_scratch);
636 633
637 // move index @ 2 far left then to the right most two bytes. 634 // move index @ 2 far left then to the right most two bytes.
638 __ sll(G1_scratch, 2*BitsPerByte, G1_scratch); 635 __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
639 __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words( 636 __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
640 ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch); 637 ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
641 638
642 // get constant pool cache 639 // get constant pool cache
643 __ ld_ptr(G5_method, in_bytes(methodOopDesc::constants_offset()), G3_scratch); 640 __ ld_ptr(G5_method, methodOopDesc::constants_offset(), G3_scratch);
644 __ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch); 641 __ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch);
645 642
646 // get specific constant pool cache entry 643 // get specific constant pool cache entry
647 __ add(G3_scratch, G1_scratch, G3_scratch); 644 __ add(G3_scratch, G1_scratch, G3_scratch);
648 645
649 // Check the constant Pool cache entry to see if it has been resolved. 646 // Check the constant Pool cache entry to see if it has been resolved.
650 // If not, need the slow path. 647 // If not, need the slow path.
651 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); 648 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
652 __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::indices_offset()), G1_scratch); 649 __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
653 __ srl(G1_scratch, 2*BitsPerByte, G1_scratch); 650 __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
654 __ and3(G1_scratch, 0xFF, G1_scratch); 651 __ and3(G1_scratch, 0xFF, G1_scratch);
655 __ cmp(G1_scratch, Bytecodes::_getfield); 652 __ cmp(G1_scratch, Bytecodes::_getfield);
656 __ br(Assembler::notEqual, false, Assembler::pn, slow_path); 653 __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
657 __ delayed()->nop(); 654 __ delayed()->nop();
658 655
659 // Get the type and return field offset from the constant pool cache 656 // Get the type and return field offset from the constant pool cache
660 __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()), G1_scratch); 657 __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
661 __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()), G3_scratch); 658 __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);
662 659
663 Label xreturn_path; 660 Label xreturn_path;
664 // Need to differentiate between igetfield, agetfield, bgetfield etc. 661 // Need to differentiate between igetfield, agetfield, bgetfield etc.
665 // because they are different sizes. 662 // because they are different sizes.
666 // Get the type from the constant pool cache 663 // Get the type from the constant pool cache
715 bool inc_counter = UseCompiler || CountCompiledCalls; 712 bool inc_counter = UseCompiler || CountCompiledCalls;
716 713
717 // make sure registers are different! 714 // make sure registers are different!
718 assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2); 715 assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
719 716
720 const Address Laccess_flags (Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset())); 717 const Address Laccess_flags(Lmethod, methodOopDesc::access_flags_offset());
721 718
722 __ verify_oop(G5_method); 719 __ verify_oop(G5_method);
723 720
724 const Register Glocals_size = G3; 721 const Register Glocals_size = G3;
725 assert_different_registers(Glocals_size, G4_scratch, Gframe_size); 722 assert_different_registers(Glocals_size, G4_scratch, Gframe_size);
726 723
727 // make sure method is native & not abstract 724 // make sure method is native & not abstract
728 // rethink these assertions - they can be simplified and shared (gri 2/25/2000) 725 // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
729 #ifdef ASSERT 726 #ifdef ASSERT
730 __ ld(G5_method, in_bytes(methodOopDesc::access_flags_offset()), Gtmp1); 727 __ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1);
731 { 728 {
732 Label L; 729 Label L;
733 __ btst(JVM_ACC_NATIVE, Gtmp1); 730 __ btst(JVM_ACC_NATIVE, Gtmp1);
734 __ br(Assembler::notZero, false, Assembler::pt, L); 731 __ br(Assembler::notZero, false, Assembler::pt, L);
735 __ delayed()->nop(); 732 __ delayed()->nop();
752 // No locals to initialize for native method 749 // No locals to initialize for native method
753 // 750 //
754 751
755 // this slot will be set later, we initialize it to null here just in 752 // this slot will be set later, we initialize it to null here just in
756 // case we get a GC before the actual value is stored later 753 // case we get a GC before the actual value is stored later
757 __ st_ptr(G0, Address(FP, 0, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS)); 754 __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);
758 755
759 const Address do_not_unlock_if_synchronized(G2_thread, 0, 756 const Address do_not_unlock_if_synchronized(G2_thread,
760 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); 757 JavaThread::do_not_unlock_if_synchronized_offset());
761 // Since at this point in the method invocation the exception handler 758 // Since at this point in the method invocation the exception handler
762 // would try to exit the monitor of synchronized methods which hasn't 759 // would try to exit the monitor of synchronized methods which hasn't
763 // been entered yet, we set the thread local variable 760 // been entered yet, we set the thread local variable
764 // _do_not_unlock_if_synchronized to true. If any exception was thrown by 761 // _do_not_unlock_if_synchronized to true. If any exception was thrown by
765 // runtime, exception handling i.e. unlock_if_synchronized_method will 762 // runtime, exception handling i.e. unlock_if_synchronized_method will
822 819
823 // (note that the space for outgoing params is preallocated) 820 // (note that the space for outgoing params is preallocated)
824 821
825 // get signature handler 822 // get signature handler
826 { Label L; 823 { Label L;
827 __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::signature_handler_offset())), G3_scratch); 824 Address signature_handler(Lmethod, methodOopDesc::signature_handler_offset());
825 __ ld_ptr(signature_handler, G3_scratch);
828 __ tst(G3_scratch); 826 __ tst(G3_scratch);
829 __ brx(Assembler::notZero, false, Assembler::pt, L); 827 __ brx(Assembler::notZero, false, Assembler::pt, L);
830 __ delayed()->nop(); 828 __ delayed()->nop();
831 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod); 829 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
832 __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::signature_handler_offset())), G3_scratch); 830 __ ld_ptr(signature_handler, G3_scratch);
833 __ bind(L); 831 __ bind(L);
834 } 832 }
835 833
836 // Push a new frame so that the args will really be stored in 834 // Push a new frame so that the args will really be stored in
837 // Copy a few locals across so the new frame has the variables 835 // Copy a few locals across so the new frame has the variables
840 // frame (Lmethod in particular) 838 // frame (Lmethod in particular)
841 839
842 // Flush the method pointer to the register save area 840 // Flush the method pointer to the register save area
843 __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS); 841 __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
844 __ mov(Llocals, O1); 842 __ mov(Llocals, O1);
843
845 // calculate where the mirror handle body is allocated in the interpreter frame: 844 // calculate where the mirror handle body is allocated in the interpreter frame:
846 845 __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);
847 Address mirror(FP, 0, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);
848 __ add(mirror, O2);
849 846
850 // Calculate current frame size 847 // Calculate current frame size
851 __ sub(SP, FP, O3); // Calculate negative of current frame size 848 __ sub(SP, FP, O3); // Calculate negative of current frame size
852 __ save(SP, O3, SP); // Allocate an identical sized frame 849 __ save(SP, O3, SP); // Allocate an identical sized frame
853 850
880 { Label not_static; 877 { Label not_static;
881 878
882 __ ld(Laccess_flags, O0); 879 __ ld(Laccess_flags, O0);
883 __ btst(JVM_ACC_STATIC, O0); 880 __ btst(JVM_ACC_STATIC, O0);
884 __ br( Assembler::zero, false, Assembler::pt, not_static); 881 __ br( Assembler::zero, false, Assembler::pt, not_static);
885 __ delayed()-> 882 // get native function entry point(O0 is a good temp until the very end)
886 // get native function entry point(O0 is a good temp until the very end) 883 __ delayed()->ld_ptr(Lmethod, in_bytes(methodOopDesc::native_function_offset()), O0);
887 ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::native_function_offset())), O0);
888 // for static methods insert the mirror argument 884 // for static methods insert the mirror argument
889 const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); 885 const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
890 886
891 __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc:: constants_offset())), O1); 887 __ ld_ptr(Lmethod, methodOopDesc:: constants_offset(), O1);
892 __ ld_ptr(Address(O1, 0, constantPoolOopDesc::pool_holder_offset_in_bytes()), O1); 888 __ ld_ptr(O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1);
893 __ ld_ptr(O1, mirror_offset, O1); 889 __ ld_ptr(O1, mirror_offset, O1);
894 #ifdef ASSERT 890 #ifdef ASSERT
895 if (!PrintSignatureHandlers) // do not dirty the output with this 891 if (!PrintSignatureHandlers) // do not dirty the output with this
896 { Label L; 892 { Label L;
897 __ tst(O1); 893 __ tst(O1);
942 // only the outer frames 938 // only the outer frames
943 939
944 __ flush_windows(); 940 __ flush_windows();
945 941
946 // mark windows as flushed 942 // mark windows as flushed
947 Address flags(G2_thread, 943 Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
948 0,
949 in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));
950 __ set(JavaFrameAnchor::flushed, G3_scratch); 944 __ set(JavaFrameAnchor::flushed, G3_scratch);
951 __ st(G3_scratch, flags); 945 __ st(G3_scratch, flags);
952 946
953 // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready. 947 // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.
954 948
955 Address thread_state(G2_thread, 0, in_bytes(JavaThread::thread_state_offset())); 949 Address thread_state(G2_thread, JavaThread::thread_state_offset());
956 #ifdef ASSERT 950 #ifdef ASSERT
957 { Label L; 951 { Label L;
958 __ ld(thread_state, G3_scratch); 952 __ ld(thread_state, G3_scratch);
959 __ cmp(G3_scratch, _thread_in_Java); 953 __ cmp(G3_scratch, _thread_in_Java);
960 __ br(Assembler::equal, false, Assembler::pt, L); 954 __ br(Assembler::equal, false, Assembler::pt, L);
980 // must we block? 974 // must we block?
981 975
982 // Block, if necessary, before resuming in _thread_in_Java state. 976 // Block, if necessary, before resuming in _thread_in_Java state.
983 // In order for GC to work, don't clear the last_Java_sp until after blocking. 977 // In order for GC to work, don't clear the last_Java_sp until after blocking.
984 { Label no_block; 978 { Label no_block;
985 Address sync_state(G3_scratch, SafepointSynchronize::address_of_state()); 979 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
986 980
987 // Switch thread to "native transition" state before reading the synchronization state. 981 // Switch thread to "native transition" state before reading the synchronization state.
988 // This additional state is necessary because reading and testing the synchronization 982 // This additional state is necessary because reading and testing the synchronization
989 // state is not atomic w.r.t. GC, as this scenario demonstrates: 983 // state is not atomic w.r.t. GC, as this scenario demonstrates:
990 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted. 984 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1007 } 1001 }
1008 __ load_contents(sync_state, G3_scratch); 1002 __ load_contents(sync_state, G3_scratch);
1009 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized); 1003 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
1010 1004
1011 Label L; 1005 Label L;
1012 Address suspend_state(G2_thread, 0, in_bytes(JavaThread::suspend_flags_offset()));
1013 __ br(Assembler::notEqual, false, Assembler::pn, L); 1006 __ br(Assembler::notEqual, false, Assembler::pn, L);
1014 __ delayed()-> 1007 __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
1015 ld(suspend_state, G3_scratch);
1016 __ cmp(G3_scratch, 0); 1008 __ cmp(G3_scratch, 0);
1017 __ br(Assembler::equal, false, Assembler::pt, no_block); 1009 __ br(Assembler::equal, false, Assembler::pt, no_block);
1018 __ delayed()->nop(); 1010 __ delayed()->nop();
1019 __ bind(L); 1011 __ bind(L);
1020 1012
1052 1044
1053 __ set(_thread_in_Java, G3_scratch); 1045 __ set(_thread_in_Java, G3_scratch);
1054 __ st(G3_scratch, thread_state); 1046 __ st(G3_scratch, thread_state);
1055 1047
1056 // reset handle block 1048 // reset handle block
1057 __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), G3_scratch); 1049 __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
1058 __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes()); 1050 __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());
1059 1051
1060 // If we have an oop result store it where it will be safe for any further gc 1052 // If we have an oop result store it where it will be safe for any further gc
1061 // until we return now that we've released the handle it might be protected by 1053 // until we return now that we've released the handle it might be protected by
1062 1054
1081 } 1073 }
1082 1074
1083 1075
1084 // handle exceptions (exception handling will handle unlocking!) 1076 // handle exceptions (exception handling will handle unlocking!)
1085 { Label L; 1077 { Label L;
1086 Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset())); 1078 Address exception_addr(G2_thread, Thread::pending_exception_offset());
1087
1088 __ ld_ptr(exception_addr, Gtemp); 1079 __ ld_ptr(exception_addr, Gtemp);
1089 __ tst(Gtemp); 1080 __ tst(Gtemp);
1090 __ brx(Assembler::equal, false, Assembler::pt, L); 1081 __ brx(Assembler::equal, false, Assembler::pt, L);
1091 __ delayed()->nop(); 1082 __ delayed()->nop();
1092 // Note: This could be handled more efficiently since we know that the native 1083 // Note: This could be handled more efficiently since we know that the native
1168 const Register Gtmp2 = G1_scratch; 1159 const Register Gtmp2 = G1_scratch;
1169 1160
1170 // make sure registers are different! 1161 // make sure registers are different!
1171 assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2); 1162 assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
1172 1163
1173 const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())); 1164 const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset());
1174 const Address size_of_locals (G5_method, 0, in_bytes(methodOopDesc::size_of_locals_offset())); 1165 const Address size_of_locals (G5_method, methodOopDesc::size_of_locals_offset());
1175 // Seems like G5_method is live at the point this is used. So we could make this look consistent 1166 // Seems like G5_method is live at the point this is used. So we could make this look consistent
1176 // and use in the asserts. 1167 // and use in the asserts.
1177 const Address access_flags (Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset())); 1168 const Address access_flags (Lmethod, methodOopDesc::access_flags_offset());
1178 1169
1179 __ verify_oop(G5_method); 1170 __ verify_oop(G5_method);
1180 1171
1181 const Register Glocals_size = G3; 1172 const Register Glocals_size = G3;
1182 assert_different_registers(Glocals_size, G4_scratch, Gframe_size); 1173 assert_different_registers(Glocals_size, G4_scratch, Gframe_size);
1183 1174
1184 // make sure method is not native & not abstract 1175 // make sure method is not native & not abstract
1185 // rethink these assertions - they can be simplified and shared (gri 2/25/2000) 1176 // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
1186 #ifdef ASSERT 1177 #ifdef ASSERT
1187 __ ld(G5_method, in_bytes(methodOopDesc::access_flags_offset()), Gtmp1); 1178 __ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1);
1188 { 1179 {
1189 Label L; 1180 Label L;
1190 __ btst(JVM_ACC_NATIVE, Gtmp1); 1181 __ btst(JVM_ACC_NATIVE, Gtmp1);
1191 __ br(Assembler::zero, false, Assembler::pt, L); 1182 __ br(Assembler::zero, false, Assembler::pt, L);
1192 __ delayed()->nop(); 1183 __ delayed()->nop();
1237 1228
1238 __ cmp( O2, O1 ); 1229 __ cmp( O2, O1 );
1239 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop ); 1230 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
1240 __ delayed()->st_ptr( init_value, O2, 0 ); 1231 __ delayed()->st_ptr( init_value, O2, 0 );
1241 1232
1242 const Address do_not_unlock_if_synchronized(G2_thread, 0, 1233 const Address do_not_unlock_if_synchronized(G2_thread,
1243 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); 1234 JavaThread::do_not_unlock_if_synchronized_offset());
1244 // Since at this point in the method invocation the exception handler 1235 // Since at this point in the method invocation the exception handler
1245 // would try to exit the monitor of synchronized methods which hasn't 1236 // would try to exit the monitor of synchronized methods which hasn't
1246 // been entered yet, we set the thread local variable 1237 // been entered yet, we set the thread local variable
1247 // _do_not_unlock_if_synchronized to true. If any exception was thrown by 1238 // _do_not_unlock_if_synchronized to true. If any exception was thrown by
1248 // runtime, exception handling i.e. unlock_if_synchronized_method will 1239 // runtime, exception handling i.e. unlock_if_synchronized_method will
1714 // 1705 //
1715 // JVMTI PopFrame support 1706 // JVMTI PopFrame support
1716 // 1707 //
1717 1708
1718 Interpreter::_remove_activation_preserving_args_entry = __ pc(); 1709 Interpreter::_remove_activation_preserving_args_entry = __ pc();
1719 Address popframe_condition_addr (G2_thread, 0, in_bytes(JavaThread::popframe_condition_offset())); 1710 Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
1720 // Set the popframe_processing bit in popframe_condition indicating that we are 1711 // Set the popframe_processing bit in popframe_condition indicating that we are
1721 // currently handling popframe, so that call_VMs that may happen later do not trigger new 1712 // currently handling popframe, so that call_VMs that may happen later do not trigger new
1722 // popframe handling cycles. 1713 // popframe handling cycles.
1723 1714
1724 __ ld(popframe_condition_addr, G3_scratch); 1715 __ ld(popframe_condition_addr, G3_scratch);
1756 __ add(Gtmp2, wordSize, Gtmp2); 1747 __ add(Gtmp2, wordSize, Gtmp2);
1757 // Save these arguments 1748 // Save these arguments
1758 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2); 1749 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
1759 // Inform deoptimization that it is responsible for restoring these arguments 1750 // Inform deoptimization that it is responsible for restoring these arguments
1760 __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1); 1751 __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
1761 Address popframe_condition_addr(G2_thread, 0, in_bytes(JavaThread::popframe_condition_offset())); 1752 Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
1762 __ st(Gtmp1, popframe_condition_addr); 1753 __ st(Gtmp1, popframe_condition_addr);
1763 1754
1764 // Return from the current method 1755 // Return from the current method
1765 // The caller's SP was adjusted upon method entry to accomodate 1756 // The caller's SP was adjusted upon method entry to accomodate
1766 // the callee's non-argument locals. Undo that adjustment. 1757 // the callee's non-argument locals. Undo that adjustment.
1805 1796
1806 __ get_vm_result(Oexception); 1797 __ get_vm_result(Oexception);
1807 __ verify_oop(Oexception); 1798 __ verify_oop(Oexception);
1808 1799
1809 const int return_reg_adjustment = frame::pc_return_offset; 1800 const int return_reg_adjustment = frame::pc_return_offset;
1810 Address issuing_pc_addr(I7, 0, return_reg_adjustment); 1801 Address issuing_pc_addr(I7, return_reg_adjustment);
1811 1802
1812 // We are done with this activation frame; find out where to go next. 1803 // We are done with this activation frame; find out where to go next.
1813 // The continuation point will be an exception handler, which expects 1804 // The continuation point will be an exception handler, which expects
1814 // the following registers set up: 1805 // the following registers set up:
1815 // 1806 //
1851 address entry = __ pc(); 1842 address entry = __ pc();
1852 1843
1853 __ empty_expression_stack(); 1844 __ empty_expression_stack();
1854 __ load_earlyret_value(state); 1845 __ load_earlyret_value(state);
1855 1846
1856 __ ld_ptr(Address(G2_thread, 0, in_bytes(JavaThread::jvmti_thread_state_offset())), G3_scratch); 1847 __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
1857 Address cond_addr(G3_scratch, 0, in_bytes(JvmtiThreadState::earlyret_state_offset())); 1848 Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());
1858 1849
1859 // Clear the earlyret state 1850 // Clear the earlyret state
1860 __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr); 1851 __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);
1861 1852
1862 __ remove_activation(state, 1853 __ remove_activation(state,
1919 1910
1920 1911
1921 // helpers for generate_and_dispatch 1912 // helpers for generate_and_dispatch
1922 1913
1923 void TemplateInterpreterGenerator::count_bytecode() { 1914 void TemplateInterpreterGenerator::count_bytecode() {
1924 Address c(G3_scratch, (address)&BytecodeCounter::_counter_value); 1915 __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
1925 __ load_contents(c, G4_scratch);
1926 __ inc(G4_scratch);
1927 __ st(G4_scratch, c);
1928 } 1916 }
1929 1917
1930 1918
1931 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { 1919 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1932 Address bucket( G3_scratch, (address) &BytecodeHistogram::_counters[t->bytecode()] ); 1920 __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
1933 __ load_contents(bucket, G4_scratch);
1934 __ inc(G4_scratch);
1935 __ st(G4_scratch, bucket);
1936 } 1921 }
1937 1922
1938 1923
1939 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { 1924 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1940 address index_addr = (address)&BytecodePairHistogram::_index; 1925 AddressLiteral index (&BytecodePairHistogram::_index);
1941 Address index(G3_scratch, index_addr); 1926 AddressLiteral counters((address) &BytecodePairHistogram::_counters);
1942
1943 address counters_addr = (address)&BytecodePairHistogram::_counters;
1944 Address counters(G3_scratch, counters_addr);
1945 1927
1946 // get index, shift out old bytecode, bring in new bytecode, and store it 1928 // get index, shift out old bytecode, bring in new bytecode, and store it
1947 // _index = (_index >> log2_number_of_codes) | 1929 // _index = (_index >> log2_number_of_codes) |
1948 // (bytecode << log2_number_of_codes); 1930 // (bytecode << log2_number_of_codes);
1949 1931
1950 1932 __ load_contents(index, G4_scratch);
1951 __ load_contents( index, G4_scratch );
1952 __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch ); 1933 __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
1953 __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes, G3_scratch ); 1934 __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes, G3_scratch );
1954 __ or3( G3_scratch, G4_scratch, G4_scratch ); 1935 __ or3( G3_scratch, G4_scratch, G4_scratch );
1955 __ store_contents( G4_scratch, index ); 1936 __ store_contents(G4_scratch, index, G3_scratch);
1956 1937
1957 // bump bucket contents 1938 // bump bucket contents
1958 // _counters[_index] ++; 1939 // _counters[_index] ++;
1959 1940
1960 __ load_address( counters ); // loads into G3_scratch 1941 __ set(counters, G3_scratch); // loads into G3_scratch
1961 __ sll( G4_scratch, LogBytesPerWord, G4_scratch ); // Index is word address 1942 __ sll( G4_scratch, LogBytesPerWord, G4_scratch ); // Index is word address
1962 __ add (G3_scratch, G4_scratch, G3_scratch); // Add in index 1943 __ add (G3_scratch, G4_scratch, G3_scratch); // Add in index
1963 __ ld (G3_scratch, 0, G4_scratch); 1944 __ ld (G3_scratch, 0, G4_scratch);
1964 __ inc (G4_scratch); 1945 __ inc (G4_scratch);
1965 __ st (G4_scratch, 0, G3_scratch); 1946 __ st (G4_scratch, 0, G3_scratch);
1976 __ delayed()->nop(); 1957 __ delayed()->nop();
1977 } 1958 }
1978 1959
1979 1960
1980 void TemplateInterpreterGenerator::stop_interpreter_at() { 1961 void TemplateInterpreterGenerator::stop_interpreter_at() {
1981 Address counter(G3_scratch , (address)&BytecodeCounter::_counter_value); 1962 AddressLiteral counter(&BytecodeCounter::_counter_value);
1982 __ load_contents (counter, G3_scratch ); 1963 __ load_contents(counter, G3_scratch);
1983 Address stop_at(G4_scratch, (address)&StopInterpreterAt); 1964 AddressLiteral stop_at(&StopInterpreterAt);
1984 __ load_ptr_contents(stop_at, G4_scratch); 1965 __ load_ptr_contents(stop_at, G4_scratch);
1985 __ cmp(G3_scratch, G4_scratch); 1966 __ cmp(G3_scratch, G4_scratch);
1986 __ breakpoint_trap(Assembler::equal); 1967 __ breakpoint_trap(Assembler::equal);
1987 } 1968 }
1988 #endif // not PRODUCT 1969 #endif // not PRODUCT
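
The non-product hunks above also fold the open-coded bytecode-counter bumps into a single MacroAssembler helper. A sketch of that shape, taken from the count_bytecode() lines of this changeset (illustrative excerpt only):

    // Before: materialize the counter's literal address, load, bump, store back.
    Address c(G3_scratch, (address)&BytecodeCounter::_counter_value);
    __ load_contents(c, G4_scratch);
    __ inc(G4_scratch);
    __ st(G4_scratch, c);

    // After: one call takes the counter address plus two scratch registers.
    __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
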