comparison src/cpu/x86/vm/c1_Runtime1_x86.cpp @ 2491:0654ee04b214

Merge with OpenJDK.
author Thomas Wuerthinger <thomas.wuerthinger@oracle.com>
date Fri, 22 Apr 2011 15:30:53 +0200
parents d25d4ca69222 09f96c3ff1ad
children 008adfd6d850
comparing 2490:29246b1d2d3c (before) with 2491:0654ee04b214 (after)
@@ -21,10 +21,11 @@
  * questions.
  *
  */

 #include "precompiled.hpp"
+#include "asm/assembler.hpp"
 #include "c1/c1_Defs.hpp"
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_Runtime1.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_x86.hpp"
@@ -246,15 +247,18 @@
   // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that
   // happen and will assert if the stack size we create is misaligned
 #ifdef _LP64
   align_dummy_0, align_dummy_1,
 #endif // _LP64
-  dummy1, SLOT2(dummy1H)                  // 0, 4
-  dummy2, SLOT2(dummy2H)                  // 8, 12
-  // Two temps to be used as needed by users of save/restore callee registers
-  temp_2_off, SLOT2(temp_2H_off)          // 16, 20
-  temp_1_off, SLOT2(temp_1H_off)          // 24, 28
+#ifdef _WIN64
+  // Windows always allocates space for its argument registers (see
+  // frame::arg_reg_save_area_bytes).
+  arg_reg_save_1, arg_reg_save_1H,        // 0, 4
+  arg_reg_save_2, arg_reg_save_2H,        // 8, 12
+  arg_reg_save_3, arg_reg_save_3H,        // 16, 20
+  arg_reg_save_4, arg_reg_save_4H,        // 24, 28
+#endif // _WIN64
   xmm_regs_as_doubles_off,                                                                  // 32
   float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots,  // 160
   fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots,          // 224
   // fpu_state_end_off is exclusive
   fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD),                // 352
@@ -280,28 +284,11 @@
   rdx_off, SLOT2(rdxH_off)                // 464, 468
   rcx_off, SLOT2(rcxH_off)                // 472, 476
   rax_off, SLOT2(raxH_off)                // 480, 484
   saved_rbp_off, SLOT2(saved_rbpH_off)    // 488, 492
   return_off, SLOT2(returnH_off)          // 496, 500
-  reg_save_frame_size,  // As noted: neglects any parameters to runtime // 504
-
-#ifdef _WIN64
-  c_rarg0_off = rcx_off,
-#else
-  c_rarg0_off = rdi_off,
-#endif // WIN64
-
-  // equates
-
-  // illegal instruction handler
-  continue_dest_off = temp_1_off,
-
-  // deoptimization equates
-  fp0_off = float_regs_as_doubles_off, // slot for java float/double return value
-  xmm0_off = xmm_regs_as_doubles_off,  // slot for java float/double return value
-  deopt_type = temp_2_off,             // slot for type of deopt in progress
-  ret_type = temp_1_off                // slot for return type
+  reg_save_frame_size   // As noted: neglects any parameters to runtime // 504
 };



 // Save off registers which might be killed by calls into the runtime.
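The new _WIN64 block reserves the home ("shadow") space that the Windows x64 calling convention makes callers provide for the four register arguments. A minimal sketch of the arithmetic behind those eight extra enum slots follows; the 4-byte VM stack slot and 8-byte argument homes are assumptions about the usual VMRegImpl and Win64 ABI values, the diff itself only names frame::arg_reg_save_area_bytes.

```cpp
// Sketch only, not HotSpot code: why four Win64 argument homes become
// eight enum entries (arg_reg_save_N / arg_reg_save_NH pairs) above.
constexpr int arg_regs           = 4;  // rcx, rdx, r8, r9 on Windows x64
constexpr int bytes_per_arg_home = 8;  // one 64-bit word per argument
constexpr int stack_slot_size    = 4;  // assumed VMRegImpl::stack_slot_size

constexpr int save_area_bytes = arg_regs * bytes_per_arg_home;      // 32 bytes
constexpr int save_area_slots = save_area_bytes / stack_slot_size;  // 8 slots

static_assert(save_area_bytes == 32, "expected Win64 home space of 32 bytes");
static_assert(save_area_slots == 8,  "four 64-bit homes need eight VM slots");
```

Each 64-bit home therefore consumes a low/high slot pair, which is why the entries come as arg_reg_save_N plus arg_reg_save_NH rather than single slots.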
@@ -402,15 +389,10 @@
 }

 static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
                                    bool save_fpu_registers = true) {
   __ block_comment("save_live_registers");
-
-  // 64bit passes the args in regs to the c++ runtime
-  int frame_size_in_slots = reg_save_frame_size NOT_LP64(+ num_rt_args); // args + thread
-  // frame_size = round_to(frame_size, 4);
-  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word );

   __ pusha();         // integer registers

   // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
   // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");
@@ -660,23 +642,62 @@

   return oop_maps;
 }


-void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_maps, OopMap* oop_map, bool save_fpu_registers) {
+OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
+  __ block_comment("generate_handle_exception");
+
   // incoming parameters
   const Register exception_oop = rax;
   const Register exception_pc = rdx;
   // other registers used in this stub
-  const Register real_return_addr = rbx;
   const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

-  __ block_comment("generate_handle_exception");
+  // Save registers, if required.
+  OopMapSet* oop_maps = new OopMapSet();
+  OopMap* oop_map = NULL;
+  switch (id) {
+  case forward_exception_id:
+    // We're handling an exception in the context of a compiled frame.
+    // The registers have been saved in the standard places. Perform
+    // an exception lookup in the caller and dispatch to the handler
+    // if found. Otherwise unwind and dispatch to the callers
+    // exception handler.
+    oop_map = generate_oop_map(sasm, 1 /*thread*/);
+
+    // load and clear pending exception oop into RAX
+    __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
+    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
+
+    // load issuing PC (the return address for this stub) into rdx
+    __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));
+
+    // make sure that the vm_results are cleared (may be unnecessary)
+    __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
+    __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
+    break;
+  case handle_exception_nofpu_id:
+  case handle_exception_id:
+    // At this point all registers MAY be live.
+    oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id);
+    break;
+  case handle_exception_from_callee_id: {
+    // At this point all registers except exception oop (RAX) and
+    // exception pc (RDX) are dead.
+    const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
+    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
+    sasm->set_frame_size(frame_size);
+    WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
+    break;
+  }
+  default: ShouldNotReachHere();
+  }

 #ifdef TIERED
   // C2 can leave the fpu stack dirty
-  if (UseSSE < 2 ) {
+  if (UseSSE < 2) {
     __ empty_FPU_stack();
   }
 #endif // TIERED

   // verify that only rax, and rdx is valid at this time
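The handle_exception_from_callee_id case above sizes its frame with a macro expression rather than the full register-save layout. As a rough cross-check, here is that expression evaluated for the three configurations the NOT_LP64/WIN64_ONLY macros distinguish; the concrete values used for frame::arg_reg_save_area_bytes and BytesPerWord are assumptions about the usual HotSpot definitions, not something this diff states.

```cpp
// Sketch only, not HotSpot code: evaluate
//   2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/)
//     WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord)
// for the three platform combinations.
#include <cstdio>

static int frame_size_in_words(bool lp64, bool win64) {
  const int bytes_per_word     = lp64 ? 8 : 4;     // assumed BytesPerWord
  const int arg_reg_save_bytes = win64 ? 32 : 0;   // assumed Win64 home space
  int words = 2;                                   // saved RBP + return address
  if (!lp64) words += 1;                           // NOT_LP64: thread argument slot
  if (win64) words += arg_reg_save_bytes / bytes_per_word;  // WIN64_ONLY
  return words;
}

int main() {
  printf("linux-x64: %d words\n", frame_size_in_words(true,  false));  // 2
  printf("win-x64:   %d words\n", frame_size_in_words(true,  true));   // 6
  printf("x86-32:    %d words\n", frame_size_in_words(false, false));  // 3
  return 0;
}
```

On Windows the extra four words correspond to the same 32-byte home space that the WIN64_ONLY subq reserves before the runtime call.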
@@ -704,51 +725,55 @@
 #endif

   // save exception oop and issuing pc into JavaThread
   // (exception handler will load it from here)
   __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
   __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc);
-
-  // save real return address (pc that called this stub)
-  __ movptr(real_return_addr, Address(rbp, 1*BytesPerWord));
-  __ movptr(Address(rsp, temp_1_off * VMRegImpl::stack_slot_size), real_return_addr);

   // patch throwing pc into return address (has bci & oop map)
   __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);

   // compute the exception handler.
   // the exception oop and the throwing pc are read from the fields in JavaThread
   int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
   oop_maps->add_gc_map(call_offset, oop_map);

-  // rax,: handler address
+  // rax: handler address
   // will be the deopt blob if nmethod was deoptimized while we looked up
   // handler regardless of whether handler existed in the nmethod.

   // only rax, is valid at this time, all other registers have been destroyed by the runtime call
   __ invalidate_registers(false, true, true, true, true, true);

-#ifdef ASSERT
-  // Do we have an exception handler in the nmethod?
-  Label done;
-  __ testptr(rax, rax);
-  __ jcc(Assembler::notZero, done);
-  __ stop("no handler found");
-  __ bind(done);
-#endif
-
-  // exception handler found
-  // patch the return address -> the stub will directly return to the exception handler
+  // patch the return address, this stub will directly return to the exception handler
   __ movptr(Address(rbp, 1*BytesPerWord), rax);

-  // restore registers
-  restore_live_registers(sasm, save_fpu_registers);
-
-  // return to exception handler
-  __ leave();
-  __ ret(0);
-
+  switch (id) {
+  case forward_exception_id:
+  case handle_exception_nofpu_id:
+  case handle_exception_id:
+    // Restore the registers that were saved at the beginning.
+    restore_live_registers(sasm, id == handle_exception_nofpu_id);
+    break;
+  case handle_exception_from_callee_id:
+    // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
+    // since we do a leave anyway.
+
+    // Pop the return address since we are possibly changing SP (restoring from BP).
+    __ leave();
+    __ pop(rcx);
+
+    // Restore SP from BP if the exception PC is a method handle call site.
+    NOT_LP64(__ get_thread(thread);)
+    __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
+    __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
+    __ jmp(rcx);  // jump to exception handler
+    break;
+  default: ShouldNotReachHere();
+  }
+
+  return oop_maps;
 }

 void Runtime1::c1x_generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_maps, OopMap* oop_map) {
   NOT_LP64(fatal("64 bit only"));
   // incoming parameters
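The new handle_exception_from_callee_id exit path does not return through a saved register set; it tears its small frame down and jumps straight to the handler, restoring SP from the value kept in rbp_mh_SP_save when the throwing PC was a method handle call site. A hedged, C-level sketch of that decision follows; the struct and parameter names are illustrative stand-ins, not HotSpot's.

```cpp
#include <cstdint>

// Stand-in for the JavaThread::is_method_handle_return flag the stub tests.
struct MockThread {
  int32_t is_method_handle_return;
};

// Returns the stack pointer the handler should continue with and reports the
// handler PC through *continue_at, mirroring leave / pop(rcx) / cmovptr / jmp(rcx).
uintptr_t exit_to_handler(const MockThread* thread,
                          uintptr_t sp_after_leave,   // RSP once the stub frame is gone
                          uintptr_t saved_caller_sp,  // value held in rbp_mh_SP_save
                          uintptr_t handler_pc,       // patched return address, popped into rcx
                          uintptr_t* continue_at) {
  uintptr_t sp = sp_after_leave;
  if (thread->is_method_handle_return != 0) {
    // Method handle call site: drop the SP extension by restoring the saved SP.
    sp = saved_caller_sp;
  }
  *continue_at = handler_pc;  // __ jmp(rcx)
  return sp;
}
```

The cmovptr in the stub performs the same select without a branch.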
@@ -877,11 +902,11 @@
   // get throwing pc (= return address).
   // rdx has been destroyed by the call, so it must be set again
   // the pop is also necessary to simulate the effect of a ret(0)
   __ pop(exception_pc);

-  // Restore SP from BP if the exception PC is a MethodHandle call site.
+  // Restore SP from BP if the exception PC is a method handle call site.
   NOT_LP64(__ get_thread(thread);)
   __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
   __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);

   // continue at exception handler (return address removed)
@@ -1020,11 +1045,10 @@
   restore_live_registers(sasm);
   __ leave();
   __ ret(0);

   return oop_maps;
-
 }

 JRT_ENTRY(void, c1x_create_null_exception(JavaThread* thread))
   thread->set_vm_result(Exceptions::new_exception(thread, vmSymbols::java_lang_NullPointerException(), NULL)());
 JRT_END
@@ -1044,39 +1068,13 @@
   // stub code & info for the different stubs
   OopMapSet* oop_maps = NULL;
   switch (id) {
     case forward_exception_id:
       {
-        // we're handling an exception in the context of a compiled
-        // frame. The registers have been saved in the standard
-        // places. Perform an exception lookup in the caller and
-        // dispatch to the handler if found. Otherwise unwind and
-        // dispatch to the callers exception handler.
-
-        const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
-        const Register exception_oop = rax;
-        const Register exception_pc = rdx;
-
-        // load pending exception oop into rax,
-        __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
-        // clear pending exception
-        __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
-
-        // load issuing PC (the return address for this stub) into rdx
-        __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));
-
-        // make sure that the vm_results are cleared (may be unnecessary)
-        __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
-        __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
-
-        // verify that that there is really a valid exception in rax,
-        __ verify_not_null_oop(exception_oop);
-
-        oop_maps = new OopMapSet();
-        OopMap* oop_map = generate_oop_map(sasm, 1);
-        generate_handle_exception(sasm, oop_maps, oop_map);
-        __ stop("should not reach here");
+        oop_maps = generate_handle_exception(id, sasm);
+        __ leave();
+        __ ret(0);
       }
       break;

     case new_instance_id:
     case fast_new_instance_id:
@@ -1407,17 +1405,19 @@
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
       }
       break;

     case handle_exception_nofpu_id:
-      save_fpu_registers = false;
-      // fall through
     case handle_exception_id:
       { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
-        oop_maps = new OopMapSet();
-        OopMap* oop_map = save_live_registers(sasm, 1, save_fpu_registers);
-        generate_handle_exception(sasm, oop_maps, oop_map, save_fpu_registers);
+        oop_maps = generate_handle_exception(id, sasm);
+      }
+      break;
+
+    case handle_exception_from_callee_id:
+      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
+        oop_maps = generate_handle_exception(id, sasm);
       }
       break;

     case unwind_exception_id:
       {
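After this change the caller-side cases are symmetric: each exception-related StubID wraps generate_handle_exception(id, sasm) in its own StubFrame, and the old save_fpu_registers flag with its fall-through comment is gone because the callee now derives that decision from the id. A hedged sketch of that shape, with simplified names that are not the real Runtime1 signatures:

```cpp
// Simplified stand-ins; the real types are Runtime1::StubID, StubAssembler
// and OopMapSet, and the real work is emitted assembly, not C++.
enum StubId {
  handle_exception_nofpu_id,
  handle_exception_id,
  handle_exception_from_callee_id
};

struct OopMaps {};

static OopMaps* generate_handle_exception_sketch(StubId id) {
  // The FPU decision now lives in the callee, keyed off the id...
  bool save_fpu = (id != handle_exception_nofpu_id);
  (void)save_fpu;  // ...instead of a caller-side flag set before a fall-through.
  return new OopMaps();
}

OopMaps* generate_code_for_sketch(StubId id) {
  switch (id) {
    case handle_exception_nofpu_id:
    case handle_exception_id:
    case handle_exception_from_callee_id:
      return generate_handle_exception_sketch(id);
  }
  return nullptr;
}
```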