comparison src/cpu/x86/vm/sharedRuntime_x86_64.cpp @ 18041:52b4284cb496

Merge with jdk8u20-b26
author Gilles Duboscq <duboscq@ssw.jku.at>
date Wed, 15 Oct 2014 16:02:50 +0200
parents 89152779163c 0bf37f737702
children 33a783b15758
comparing 17606:45d7b2c7029d with 18041:52b4284cb496
@@ -938,11 +938,13 @@
   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 }
 
 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                         VMRegPair *regs,
+                                        VMRegPair *regs2,
                                         int total_args_passed) {
+  assert(regs2 == NULL, "not needed on x86");
   // We return the amount of VMRegImpl stack slots we need to reserve for all
   // the arguments NOT counting out_preserve_stack_slots.
 
   // NOTE: These arrays will have to change when c1 is ported
 #ifdef _WIN64
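
The merge widens c_calling_convention with a second VMRegPair array, regs2. On x86_64 it is never used and is asserted to be NULL; presumably it exists for ports whose native ABI can assign a single argument to two locations. A minimal sketch of how an x86_64 call site adapts, assuming the wrapper's usual out_sig_bt/out_regs/total_c_args locals (the next two hunks show the real call sites):

    // All x86_64 callers simply thread NULL through the new parameter;
    // the extra array only matters on platforms that can fill a second
    // register pair per argument.
    int out_arg_slots = SharedRuntime::c_calling_convention(out_sig_bt,
                                                            out_regs,
                                                            NULL,  // regs2: unused on x86
                                                            total_c_args);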
@@ -1906,11 +1908,11 @@
   }
 
   // Now figure out where the args must be stored and how much stack space
   // they require.
   int out_arg_slots;
-  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
+  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
 
   // Compute framesize for the wrapper. We need to handlize all oops in
   // incoming registers
 
   // Calculate the total number of stack slots we will need.
@@ -2059,10 +2061,17 @@
   __ subptr(rsp, stack_size - 2*wordSize);
 
   // Frame is now completed as far as size and linkage.
   int frame_complete = ((intptr_t)__ pc()) - start;
 
+  if (UseRTMLocking) {
+    // Abort RTM transaction before calling JNI
+    // because critical section will be large and will be
+    // aborted anyway. Also nmethod could be deoptimized.
+    __ xabort(0);
+  }
+
 #ifdef ASSERT
   {
     Label L;
     __ mov(rax, rsp);
     __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
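
xabort(0) emits the RTM XABORT instruction. Inside a hardware transaction it rolls all transactional state back and resumes at the fallback path of the matching XBEGIN; outside a transaction it is a no-op, so the guard costs nothing on the normal path. The hunk's rationale: a JNI call makes the critical section so long that the transaction would almost certainly abort anyway, and the nmethod may be deoptimized while the transaction is live. A standalone sketch of those semantics using the GCC/Clang intrinsics (my illustration, not HotSpot code; compile with -mrtm on RTM-capable hardware):

    #include <immintrin.h>
    #include <cstdio>

    int main() {
      unsigned status = _xbegin();
      if (status == _XBEGIN_STARTED) {
        _xabort(0);   // discards the transaction; control reappears at _xbegin()
        _xend();      // never reached
      } else if (status & _XABORT_EXPLICIT) {
        std::printf("explicit abort, code %u\n", _XABORT_CODE(status));
      } else {
        std::printf("abort or RTM unavailable, status 0x%x\n", status);
      }
      return 0;
    }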
@@ -2549,11 +2558,11 @@
   }
 
   if (!is_critical_native) {
     // reset handle block
     __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
-    __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
+    __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
   }
 
   // pop our frame
 
   __ leave();
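
The handle-block reset switches from movptr (an 8-byte store on x86_64) to movl (4 bytes). Presumably the field behind JNIHandleBlock::top_offset_in_bytes() is a 32-bit int, so the wide store was spilling zeros past the field into whatever the struct lays out next. A toy demonstration of that hazard (illustrative layout only, not the real JNIHandleBlock):

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    struct Block {
      int32_t top;       // field being reset
      int32_t neighbor;  // corrupted by a 64-bit store aimed at &top
    };

    int main() {
      Block b = { 123, 456 };
      uint64_t zero64 = 0;
      std::memcpy(&b.top, &zero64, sizeof zero64);  // emulates the old 8-byte movptr
      std::printf("top=%d neighbor=%d\n", b.top, b.neighbor);  // neighbor is now 0 too
      return 0;
    }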
@@ -2810,11 +2819,11 @@
   // Now figure out where the args must be stored and how much stack space
   // they require (neglecting out_preserve_stack_slots but space for storing
   // the 1st six register arguments). It's weird see int_stk_helper.
 
   int out_arg_slots;
-  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
+  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
 
   // Calculate the total number of stack slots we will need.
 
   // First count the abi requirement plus all of the outgoing args
   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
@@ -3556,15 +3565,19 @@
 
   // Pick up the initial fp we should save
   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
 
-  // Stack bang to make sure there's enough room for these interpreter frames.
+#ifdef ASSERT
+  // Compilers generate code that bang the stack by as much as the
+  // interpreter would need. So this stack banging should never
+  // trigger a fault. Verify that it does not on non product builds.
   if (UseStackBanging) {
     __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
     __ bang_stack_size(rbx, rcx);
   }
+#endif
 
   // Load address of array of frame pcs into rcx
   __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
 
   // Trash the old pc
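
A stack bang touches one word per page below the stack pointer so that, if guard space is missing, the fault happens at a known point instead of somewhere inside frame construction. The merge compiles the deopt-time bang only under ASSERT: per the new comment, compiled code has already banged by the interpreter's worst case, so in product builds the re-bang is redundant and debug builds keep it purely as verification. A conceptual sketch of the banging loop (hypothetical helper, not the MacroAssembler::bang_stack_size code):

    #include <cstddef>

    // Touch one byte per page, walking down from 'sp' across 'size' bytes,
    // so any unmapped page faults at a predictable program point.
    void bang_stack(volatile char* sp, std::size_t size,
                    std::size_t page_size = 4096) {
      for (std::size_t offset = page_size; offset <= size; offset += page_size) {
        sp[-static_cast<std::ptrdiff_t>(offset)] = 0;
      }
    }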
@@ -3694,10 +3707,15 @@
 
   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
 
   address start = __ pc();
 
+  if (UseRTMLocking) {
+    // Abort RTM transaction before possible nmethod deoptimization.
+    __ xabort(0);
+  }
+
   // Push self-frame. We get here with a return address on the
   // stack, so rsp is 8-byte aligned until we allocate our frame.
   __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog!
 
   // No callee saved registers. rbp is assumed implicitly saved
@@ -3752,15 +3770,19 @@
 
   // Pick up the initial fp we should save
   // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
   __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
 
-  // Stack bang to make sure there's enough room for these interpreter frames.
+#ifdef ASSERT
+  // Compilers generate code that bang the stack by as much as the
+  // interpreter would need. So this stack banging should never
+  // trigger a fault. Verify that it does not on non product builds.
   if (UseStackBanging) {
     __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
     __ bang_stack_size(rbx, rcx);
   }
+#endif
 
   // Load address of array of frame pcs into rcx (address*)
   __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
 
   // Trash the return pc
@@ -3873,10 +3895,17 @@
   address start = __ pc();
   address call_pc = NULL;
   int frame_size_in_words;
   bool cause_return = (poll_type == POLL_AT_RETURN);
   bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
+
+  if (UseRTMLocking) {
+    // Abort RTM transaction before calling runtime
+    // because critical section will be large and will be
+    // aborted anyway. Also nmethod could be deoptimized.
+    __ xabort(0);
+  }
 
   // Make room for return address (or push it again)
   if (!cause_return) {
     __ push(rbx);
   }