comparison src/cpu/x86/vm/c1_Runtime1_x86.cpp @ 2321:1b4e6a5d98e0

7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc Reviewed-by: never, bdelsart
author twisti
date Mon, 28 Feb 2011 06:07:12 -0800
parents e4fee0bdaa85
children 09f96c3ff1ad
comparison
equal deleted inserted replaced
2320:41d4973cf100 2321:1b4e6a5d98e0
246 // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that 246 // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that
247 // happen and will assert if the stack size we create is misaligned 247 // happen and will assert if the stack size we create is misaligned
248 #ifdef _LP64 248 #ifdef _LP64
249 align_dummy_0, align_dummy_1, 249 align_dummy_0, align_dummy_1,
250 #endif // _LP64 250 #endif // _LP64
251 dummy1, SLOT2(dummy1H) // 0, 4 251 #ifdef _WIN64
252 dummy2, SLOT2(dummy2H) // 8, 12 252 // Windows always allocates space for its argument registers (see
253 // Two temps to be used as needed by users of save/restore callee registers 253 // frame::arg_reg_save_area_bytes).
254 temp_2_off, SLOT2(temp_2H_off) // 16, 20 254 arg_reg_save_1, arg_reg_save_1H, // 0, 4
255 temp_1_off, SLOT2(temp_1H_off) // 24, 28 255 arg_reg_save_2, arg_reg_save_2H, // 8, 12
256 arg_reg_save_3, arg_reg_save_3H, // 16, 20
257 arg_reg_save_4, arg_reg_save_4H, // 24, 28
258 #endif // _WIN64
256 xmm_regs_as_doubles_off, // 32 259 xmm_regs_as_doubles_off, // 32
257 float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160 260 float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160
258 fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots, // 224 261 fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots, // 224
259 // fpu_state_end_off is exclusive 262 // fpu_state_end_off is exclusive
260 fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD), // 352 263 fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD), // 352
280 rdx_off, SLOT2(rdxH_off) // 464, 468 283 rdx_off, SLOT2(rdxH_off) // 464, 468
281 rcx_off, SLOT2(rcxH_off) // 472, 476 284 rcx_off, SLOT2(rcxH_off) // 472, 476
282 rax_off, SLOT2(raxH_off) // 480, 484 285 rax_off, SLOT2(raxH_off) // 480, 484
283 saved_rbp_off, SLOT2(saved_rbpH_off) // 488, 492 286 saved_rbp_off, SLOT2(saved_rbpH_off) // 488, 492
284 return_off, SLOT2(returnH_off) // 496, 500 287 return_off, SLOT2(returnH_off) // 496, 500
285 reg_save_frame_size, // As noted: neglects any parameters to runtime // 504 288 reg_save_frame_size // As noted: neglects any parameters to runtime // 504
286
287 #ifdef _WIN64
288 c_rarg0_off = rcx_off,
289 #else
290 c_rarg0_off = rdi_off,
291 #endif // WIN64
292
293 // equates
294
295 // illegal instruction handler
296 continue_dest_off = temp_1_off,
297
298 // deoptimization equates
299 fp0_off = float_regs_as_doubles_off, // slot for java float/double return value
300 xmm0_off = xmm_regs_as_doubles_off, // slot for java float/double return value
301 deopt_type = temp_2_off, // slot for type of deopt in progress
302 ret_type = temp_1_off // slot for return type
303 }; 289 };
304 290
305 291
306 292
307 // Save off registers which might be killed by calls into the runtime. 293 // Save off registers which might be killed by calls into the runtime.
402 } 388 }
403 389
404 static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args, 390 static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
405 bool save_fpu_registers = true) { 391 bool save_fpu_registers = true) {
406 __ block_comment("save_live_registers"); 392 __ block_comment("save_live_registers");
407
408 // 64bit passes the args in regs to the c++ runtime
409 int frame_size_in_slots = reg_save_frame_size NOT_LP64(+ num_rt_args); // args + thread
410 // frame_size = round_to(frame_size, 4);
411 sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word );
412 393
413 __ pusha(); // integer registers 394 __ pusha(); // integer registers
414 395
415 // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset"); 396 // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
416 // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset"); 397 // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");
640 621
641 return oop_maps; 622 return oop_maps;
642 } 623 }
643 624
644 625
645 void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_maps, OopMap* oop_map, bool save_fpu_registers) { 626 OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
627 __ block_comment("generate_handle_exception");
628
646 // incoming parameters 629 // incoming parameters
647 const Register exception_oop = rax; 630 const Register exception_oop = rax;
648 const Register exception_pc = rdx; 631 const Register exception_pc = rdx;
649 // other registers used in this stub 632 // other registers used in this stub
650 const Register real_return_addr = rbx;
651 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); 633 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
652 634
653 __ block_comment("generate_handle_exception"); 635 // Save registers, if required.
636 OopMapSet* oop_maps = new OopMapSet();
637 OopMap* oop_map = NULL;
638 switch (id) {
639 case forward_exception_id:
640 // We're handling an exception in the context of a compiled frame.
641 // The registers have been saved in the standard places. Perform
642 // an exception lookup in the caller and dispatch to the handler
643 // if found. Otherwise unwind and dispatch to the caller's
644 // exception handler.
645 oop_map = generate_oop_map(sasm, 1 /*thread*/);
646
647 // load and clear pending exception oop into RAX
648 __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
649 __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
650
651 // load issuing PC (the return address for this stub) into rdx
652 __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));
653
654 // make sure that the vm_results are cleared (may be unnecessary)
655 __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
656 __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
657 break;
658 case handle_exception_nofpu_id:
659 case handle_exception_id:
660 // At this point all registers MAY be live.
661 oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id);
662 break;
663 case handle_exception_from_callee_id: {
664 // At this point all registers except exception oop (RAX) and
665 // exception pc (RDX) are dead.
666 const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
667 oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
668 sasm->set_frame_size(frame_size);
669 WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
670 break;
671 }
672 default: ShouldNotReachHere();
673 }
654 674
655 #ifdef TIERED 675 #ifdef TIERED
656 // C2 can leave the fpu stack dirty 676 // C2 can leave the fpu stack dirty
657 if (UseSSE < 2 ) { 677 if (UseSSE < 2) {
658 __ empty_FPU_stack(); 678 __ empty_FPU_stack();
659 } 679 }
660 #endif // TIERED 680 #endif // TIERED
661 681
662 // verify that only rax, and rdx is valid at this time 682 // verify that only rax, and rdx is valid at this time
684 #endif 704 #endif
685 705
686 // save exception oop and issuing pc into JavaThread 706 // save exception oop and issuing pc into JavaThread
687 // (exception handler will load it from here) 707 // (exception handler will load it from here)
688 __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop); 708 __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
689 __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc); 709 __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc);
690
691 // save real return address (pc that called this stub)
692 __ movptr(real_return_addr, Address(rbp, 1*BytesPerWord));
693 __ movptr(Address(rsp, temp_1_off * VMRegImpl::stack_slot_size), real_return_addr);
694 710
695 // patch throwing pc into return address (has bci & oop map) 711 // patch throwing pc into return address (has bci & oop map)
696 __ movptr(Address(rbp, 1*BytesPerWord), exception_pc); 712 __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);
697 713
698 // compute the exception handler. 714 // compute the exception handler.
699 // the exception oop and the throwing pc are read from the fields in JavaThread 715 // the exception oop and the throwing pc are read from the fields in JavaThread
700 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc)); 716 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
701 oop_maps->add_gc_map(call_offset, oop_map); 717 oop_maps->add_gc_map(call_offset, oop_map);
702 718
703 // rax,: handler address 719 // rax: handler address
704 // will be the deopt blob if nmethod was deoptimized while we looked up 720 // will be the deopt blob if nmethod was deoptimized while we looked up
705 // handler regardless of whether handler existed in the nmethod. 721 // handler regardless of whether handler existed in the nmethod.
706 722
707 // only rax, is valid at this time, all other registers have been destroyed by the runtime call 723 // only rax, is valid at this time, all other registers have been destroyed by the runtime call
708 __ invalidate_registers(false, true, true, true, true, true); 724 __ invalidate_registers(false, true, true, true, true, true);
709 725
710 #ifdef ASSERT 726 // patch the return address, this stub will directly return to the exception handler
711 // Do we have an exception handler in the nmethod?
712 Label done;
713 __ testptr(rax, rax);
714 __ jcc(Assembler::notZero, done);
715 __ stop("no handler found");
716 __ bind(done);
717 #endif
718
719 // exception handler found
720 // patch the return address -> the stub will directly return to the exception handler
721 __ movptr(Address(rbp, 1*BytesPerWord), rax); 727 __ movptr(Address(rbp, 1*BytesPerWord), rax);
722 728
723 // restore registers 729 switch (id) {
724 restore_live_registers(sasm, save_fpu_registers); 730 case forward_exception_id:
725 731 case handle_exception_nofpu_id:
726 // return to exception handler 732 case handle_exception_id:
727 __ leave(); 733 // Restore the registers that were saved at the beginning.
728 __ ret(0); 734 restore_live_registers(sasm, id == handle_exception_nofpu_id);
729 735 break;
736 case handle_exception_from_callee_id:
737 // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
738 // since we do a leave anyway.
739
740 // Pop the return address since we are possibly changing SP (restoring from BP).
741 __ leave();
742 __ pop(rcx);
743
744 // Restore SP from BP if the exception PC is a method handle call site.
745 NOT_LP64(__ get_thread(thread);)
746 __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
747 __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
748 __ jmp(rcx); // jump to exception handler
749 break;
750 default: ShouldNotReachHere();
751 }
752
753 return oop_maps;
730 } 754 }
731 755
732 756
733 void Runtime1::generate_unwind_exception(StubAssembler *sasm) { 757 void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
734 // incoming parameters 758 // incoming parameters
789 // get throwing pc (= return address). 813 // get throwing pc (= return address).
790 // rdx has been destroyed by the call, so it must be set again 814 // rdx has been destroyed by the call, so it must be set again
791 // the pop is also necessary to simulate the effect of a ret(0) 815 // the pop is also necessary to simulate the effect of a ret(0)
792 __ pop(exception_pc); 816 __ pop(exception_pc);
793 817
794 // Restore SP from BP if the exception PC is a MethodHandle call site. 818 // Restore SP from BP if the exception PC is a method handle call site.
795 NOT_LP64(__ get_thread(thread);) 819 NOT_LP64(__ get_thread(thread);)
796 __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0); 820 __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
797 __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save); 821 __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
798 822
799 // continue at exception handler (return address removed) 823 // continue at exception handler (return address removed)
932 restore_live_registers(sasm); 956 restore_live_registers(sasm);
933 __ leave(); 957 __ leave();
934 __ ret(0); 958 __ ret(0);
935 959
936 return oop_maps; 960 return oop_maps;
937
938 } 961 }
939 962
940 963
941 OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { 964 OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
942 965
950 // stub code & info for the different stubs 973 // stub code & info for the different stubs
951 OopMapSet* oop_maps = NULL; 974 OopMapSet* oop_maps = NULL;
952 switch (id) { 975 switch (id) {
953 case forward_exception_id: 976 case forward_exception_id:
954 { 977 {
955 // we're handling an exception in the context of a compiled 978 oop_maps = generate_handle_exception(id, sasm);
956 // frame. The registers have been saved in the standard 979 __ leave();
957 // places. Perform an exception lookup in the caller and 980 __ ret(0);
958 // dispatch to the handler if found. Otherwise unwind and
959 // dispatch to the callers exception handler.
960
961 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
962 const Register exception_oop = rax;
963 const Register exception_pc = rdx;
964
965 // load pending exception oop into rax,
966 __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
967 // clear pending exception
968 __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
969
970 // load issuing PC (the return address for this stub) into rdx
971 __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));
972
973 // make sure that the vm_results are cleared (may be unnecessary)
974 __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
975 __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
976
977 // verify that that there is really a valid exception in rax,
978 __ verify_not_null_oop(exception_oop);
979
980 oop_maps = new OopMapSet();
981 OopMap* oop_map = generate_oop_map(sasm, 1);
982 generate_handle_exception(sasm, oop_maps, oop_map);
983 __ stop("should not reach here");
984 } 981 }
985 break; 982 break;
986 983
987 case new_instance_id: 984 case new_instance_id:
988 case fast_new_instance_id: 985 case fast_new_instance_id:
1313 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false); 1310 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
1314 } 1311 }
1315 break; 1312 break;
1316 1313
1317 case handle_exception_nofpu_id: 1314 case handle_exception_nofpu_id:
1318 save_fpu_registers = false;
1319 // fall through
1320 case handle_exception_id: 1315 case handle_exception_id:
1321 { StubFrame f(sasm, "handle_exception", dont_gc_arguments); 1316 { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
1322 oop_maps = new OopMapSet(); 1317 oop_maps = generate_handle_exception(id, sasm);
1323 OopMap* oop_map = save_live_registers(sasm, 1, save_fpu_registers); 1318 }
1324 generate_handle_exception(sasm, oop_maps, oop_map, save_fpu_registers); 1319 break;
1320
1321 case handle_exception_from_callee_id:
1322 { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
1323 oop_maps = generate_handle_exception(id, sasm);
1325 } 1324 }
1326 break; 1325 break;
1327 1326
1328 case unwind_exception_id: 1327 case unwind_exception_id:
1329 { __ set_info("unwind_exception", dont_gc_arguments); 1328 { __ set_info("unwind_exception", dont_gc_arguments);