comparison src/cpu/x86/vm/sharedRuntime_x86_32.cpp @ 0:a61af66fc99e jdk7-b24

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children 018d5b58dd4f
1 /*
2 * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
24
25 #include "incls/_precompiled.incl"
26 #include "incls/_sharedRuntime_x86_32.cpp.incl"
27
28 #define __ masm->
29 #ifdef COMPILER2
30 UncommonTrapBlob *SharedRuntime::_uncommon_trap_blob;
31 #endif // COMPILER2
32
33 DeoptimizationBlob *SharedRuntime::_deopt_blob;
34 SafepointBlob *SharedRuntime::_polling_page_safepoint_handler_blob;
35 SafepointBlob *SharedRuntime::_polling_page_return_handler_blob;
36 RuntimeStub* SharedRuntime::_wrong_method_blob;
37 RuntimeStub* SharedRuntime::_ic_miss_blob;
38 RuntimeStub* SharedRuntime::_resolve_opt_virtual_call_blob;
39 RuntimeStub* SharedRuntime::_resolve_virtual_call_blob;
40 RuntimeStub* SharedRuntime::_resolve_static_call_blob;
41
42 class RegisterSaver {
43 enum { FPU_regs_live = 8 /*for the FPU stack*/+8/*eight more for XMM registers*/ };
44 // Capture info about frame layout
45 enum layout {
46 fpu_state_off = 0,
47 fpu_state_end = fpu_state_off+FPUStateSizeInWords-1,
48 st0_off, st0H_off,
49 st1_off, st1H_off,
50 st2_off, st2H_off,
51 st3_off, st3H_off,
52 st4_off, st4H_off,
53 st5_off, st5H_off,
54 st6_off, st6H_off,
55 st7_off, st7H_off,
56
57 xmm0_off, xmm0H_off,
58 xmm1_off, xmm1H_off,
59 xmm2_off, xmm2H_off,
60 xmm3_off, xmm3H_off,
61 xmm4_off, xmm4H_off,
62 xmm5_off, xmm5H_off,
63 xmm6_off, xmm6H_off,
64 xmm7_off, xmm7H_off,
65 flags_off,
66 rdi_off,
67 rsi_off,
68 ignore_off, // extra copy of rbp,
69 rsp_off,
70 rbx_off,
71 rdx_off,
72 rcx_off,
73 rax_off,
74 // The frame sender code expects that rbp will be in the "natural" place and
75 // will override any oopMap setting for it. We must therefore force the layout
76 // so that it agrees with the frame sender code.
77 rbp_off,
78 return_off, // slot for return address
79 reg_save_size };
80
81
82 public:
83
84 static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words,
85 int* total_frame_words, bool verify_fpu = true);
86 static void restore_live_registers(MacroAssembler* masm);
87
88 static int rax_offset() { return rax_off; }
89 static int rbx_offset() { return rbx_off; }
90
91 // Offsets into the register save area
92 // Used by deoptimization when it is managing result register
93 // values on its own
94
95 static int raxOffset(void) { return rax_off; }
96 static int rdxOffset(void) { return rdx_off; }
97 static int rbxOffset(void) { return rbx_off; }
98 static int xmm0Offset(void) { return xmm0_off; }
99 // This really returns a slot in the fp save area, which one is not important
100 static int fpResultOffset(void) { return st0_off; }
101
102 // During deoptimization only the result registers need to be restored;
103 // all the other values have already been extracted.
104
105 static void restore_result_registers(MacroAssembler* masm);
106
107 };
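// For orientation, a rough size check of the layout above (assuming
// FPUStateSizeInWords == 27, as asserted in save_live_registers below):
// 27 fpu-state words + 16 st slots + 16 xmm slots + 1 flags word + 8 words
// from pushad (rdi..rax, including the ignored extra rbp copy) + rbp +
// return address gives reg_save_size == 70 words, i.e. a 280-byte save area
// before any additional_frame_words.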
108
109 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words,
110 int* total_frame_words, bool verify_fpu) {
111
112 int frame_size_in_bytes = (reg_save_size + additional_frame_words) * wordSize;
113 int frame_words = frame_size_in_bytes / wordSize;
114 *total_frame_words = frame_words;
115
116 assert(FPUStateSizeInWords == 27, "update stack layout");
117
118 // save registers, fpu state, and flags
119 // We assume the caller has already pushed the return address onto the stack.
120 // We push rbp twice in this sequence because we want the real rbp
121 // to be under the return address like a normal enter, and we also want to use pushad,
122 // which pushes its own extra copy of rbp.
123 __ enter();
124 __ pushad();
125 __ pushfd();
126 __ subl(rsp,FPU_regs_live*sizeof(jdouble)); // Push FPU registers space
127 __ push_FPU_state(); // Save FPU state & init
128
129 if (verify_fpu) {
130 // Some stubs may have non standard FPU control word settings so
131 // only check and reset the value when it is required to be the
132 // standard value. The safepoint blob in particular can be used
133 // in methods which are using the 24 bit control word for
134 // optimized float math.
135
136 #ifdef ASSERT
137 // Make sure the control word has the expected value
138 Label ok;
139 __ cmpw(Address(rsp, 0), StubRoutines::fpu_cntrl_wrd_std());
140 __ jccb(Assembler::equal, ok);
141 __ stop("corrupted control word detected");
142 __ bind(ok);
143 #endif
144
145 // Reset the control word to guard against exceptions being unmasked
146 // since fstp_d can cause FPU stack underflow exceptions. Write it
147 // into the on stack copy and then reload that to make sure that the
148 // current and future values are correct.
149 __ movw(Address(rsp, 0), StubRoutines::fpu_cntrl_wrd_std());
150 }
151
152 __ frstor(Address(rsp, 0));
153 if (!verify_fpu) {
154 // Set the control word so that exceptions are masked for the
155 // following code.
156 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
157 }
158
159 // Save the FPU registers in de-opt-able form
160
161 __ fstp_d(Address(rsp, st0_off*wordSize)); // st(0)
162 __ fstp_d(Address(rsp, st1_off*wordSize)); // st(1)
163 __ fstp_d(Address(rsp, st2_off*wordSize)); // st(2)
164 __ fstp_d(Address(rsp, st3_off*wordSize)); // st(3)
165 __ fstp_d(Address(rsp, st4_off*wordSize)); // st(4)
166 __ fstp_d(Address(rsp, st5_off*wordSize)); // st(5)
167 __ fstp_d(Address(rsp, st6_off*wordSize)); // st(6)
168 __ fstp_d(Address(rsp, st7_off*wordSize)); // st(7)
169
170 if( UseSSE == 1 ) { // Save the XMM state
171 __ movflt(Address(rsp,xmm0_off*wordSize),xmm0);
172 __ movflt(Address(rsp,xmm1_off*wordSize),xmm1);
173 __ movflt(Address(rsp,xmm2_off*wordSize),xmm2);
174 __ movflt(Address(rsp,xmm3_off*wordSize),xmm3);
175 __ movflt(Address(rsp,xmm4_off*wordSize),xmm4);
176 __ movflt(Address(rsp,xmm5_off*wordSize),xmm5);
177 __ movflt(Address(rsp,xmm6_off*wordSize),xmm6);
178 __ movflt(Address(rsp,xmm7_off*wordSize),xmm7);
179 } else if( UseSSE >= 2 ) {
180 __ movdbl(Address(rsp,xmm0_off*wordSize),xmm0);
181 __ movdbl(Address(rsp,xmm1_off*wordSize),xmm1);
182 __ movdbl(Address(rsp,xmm2_off*wordSize),xmm2);
183 __ movdbl(Address(rsp,xmm3_off*wordSize),xmm3);
184 __ movdbl(Address(rsp,xmm4_off*wordSize),xmm4);
185 __ movdbl(Address(rsp,xmm5_off*wordSize),xmm5);
186 __ movdbl(Address(rsp,xmm6_off*wordSize),xmm6);
187 __ movdbl(Address(rsp,xmm7_off*wordSize),xmm7);
188 }
189
190 // Set an oopmap for the call site. This oopmap will map all
191 // oop-registers and debug-info registers as callee-saved. This
192 // will allow deoptimization at this safepoint to find all possible
193 // debug-info recordings, as well as let GC find all oops.
194
195 OopMapSet *oop_maps = new OopMapSet();
196 OopMap* map = new OopMap( frame_words, 0 );
197
198 #define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words)
199
200 map->set_callee_saved(STACK_OFFSET( rax_off), rax->as_VMReg());
201 map->set_callee_saved(STACK_OFFSET( rcx_off), rcx->as_VMReg());
202 map->set_callee_saved(STACK_OFFSET( rdx_off), rdx->as_VMReg());
203 map->set_callee_saved(STACK_OFFSET( rbx_off), rbx->as_VMReg());
204 // rbp, location is known implicitly, no oopMap
205 map->set_callee_saved(STACK_OFFSET( rsi_off), rsi->as_VMReg());
206 map->set_callee_saved(STACK_OFFSET( rdi_off), rdi->as_VMReg());
207 map->set_callee_saved(STACK_OFFSET(st0_off), as_FloatRegister(0)->as_VMReg());
208 map->set_callee_saved(STACK_OFFSET(st1_off), as_FloatRegister(1)->as_VMReg());
209 map->set_callee_saved(STACK_OFFSET(st2_off), as_FloatRegister(2)->as_VMReg());
210 map->set_callee_saved(STACK_OFFSET(st3_off), as_FloatRegister(3)->as_VMReg());
211 map->set_callee_saved(STACK_OFFSET(st4_off), as_FloatRegister(4)->as_VMReg());
212 map->set_callee_saved(STACK_OFFSET(st5_off), as_FloatRegister(5)->as_VMReg());
213 map->set_callee_saved(STACK_OFFSET(st6_off), as_FloatRegister(6)->as_VMReg());
214 map->set_callee_saved(STACK_OFFSET(st7_off), as_FloatRegister(7)->as_VMReg());
215 map->set_callee_saved(STACK_OFFSET(xmm0_off), xmm0->as_VMReg());
216 map->set_callee_saved(STACK_OFFSET(xmm1_off), xmm1->as_VMReg());
217 map->set_callee_saved(STACK_OFFSET(xmm2_off), xmm2->as_VMReg());
218 map->set_callee_saved(STACK_OFFSET(xmm3_off), xmm3->as_VMReg());
219 map->set_callee_saved(STACK_OFFSET(xmm4_off), xmm4->as_VMReg());
220 map->set_callee_saved(STACK_OFFSET(xmm5_off), xmm5->as_VMReg());
221 map->set_callee_saved(STACK_OFFSET(xmm6_off), xmm6->as_VMReg());
222 map->set_callee_saved(STACK_OFFSET(xmm7_off), xmm7->as_VMReg());
223 // %%% This is really a waste but we'll keep things as they were for now
224 if (true) {
225 #define NEXTREG(x) (x)->as_VMReg()->next()
226 map->set_callee_saved(STACK_OFFSET(st0H_off), NEXTREG(as_FloatRegister(0)));
227 map->set_callee_saved(STACK_OFFSET(st1H_off), NEXTREG(as_FloatRegister(1)));
228 map->set_callee_saved(STACK_OFFSET(st2H_off), NEXTREG(as_FloatRegister(2)));
229 map->set_callee_saved(STACK_OFFSET(st3H_off), NEXTREG(as_FloatRegister(3)));
230 map->set_callee_saved(STACK_OFFSET(st4H_off), NEXTREG(as_FloatRegister(4)));
231 map->set_callee_saved(STACK_OFFSET(st5H_off), NEXTREG(as_FloatRegister(5)));
232 map->set_callee_saved(STACK_OFFSET(st6H_off), NEXTREG(as_FloatRegister(6)));
233 map->set_callee_saved(STACK_OFFSET(st7H_off), NEXTREG(as_FloatRegister(7)));
234 map->set_callee_saved(STACK_OFFSET(xmm0H_off), NEXTREG(xmm0));
235 map->set_callee_saved(STACK_OFFSET(xmm1H_off), NEXTREG(xmm1));
236 map->set_callee_saved(STACK_OFFSET(xmm2H_off), NEXTREG(xmm2));
237 map->set_callee_saved(STACK_OFFSET(xmm3H_off), NEXTREG(xmm3));
238 map->set_callee_saved(STACK_OFFSET(xmm4H_off), NEXTREG(xmm4));
239 map->set_callee_saved(STACK_OFFSET(xmm5H_off), NEXTREG(xmm5));
240 map->set_callee_saved(STACK_OFFSET(xmm6H_off), NEXTREG(xmm6));
241 map->set_callee_saved(STACK_OFFSET(xmm7H_off), NEXTREG(xmm7));
242 #undef NEXTREG
243 #undef STACK_OFFSET
244 }
245
246 return map;
247
248 }
249
250 void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
251
252 // Recover XMM & FPU state
253 if( UseSSE == 1 ) {
254 __ movflt(xmm0,Address(rsp,xmm0_off*wordSize));
255 __ movflt(xmm1,Address(rsp,xmm1_off*wordSize));
256 __ movflt(xmm2,Address(rsp,xmm2_off*wordSize));
257 __ movflt(xmm3,Address(rsp,xmm3_off*wordSize));
258 __ movflt(xmm4,Address(rsp,xmm4_off*wordSize));
259 __ movflt(xmm5,Address(rsp,xmm5_off*wordSize));
260 __ movflt(xmm6,Address(rsp,xmm6_off*wordSize));
261 __ movflt(xmm7,Address(rsp,xmm7_off*wordSize));
262 } else if( UseSSE >= 2 ) {
263 __ movdbl(xmm0,Address(rsp,xmm0_off*wordSize));
264 __ movdbl(xmm1,Address(rsp,xmm1_off*wordSize));
265 __ movdbl(xmm2,Address(rsp,xmm2_off*wordSize));
266 __ movdbl(xmm3,Address(rsp,xmm3_off*wordSize));
267 __ movdbl(xmm4,Address(rsp,xmm4_off*wordSize));
268 __ movdbl(xmm5,Address(rsp,xmm5_off*wordSize));
269 __ movdbl(xmm6,Address(rsp,xmm6_off*wordSize));
270 __ movdbl(xmm7,Address(rsp,xmm7_off*wordSize));
271 }
272 __ pop_FPU_state();
273 __ addl(rsp,FPU_regs_live*sizeof(jdouble)); // Pop FPU registers
274
275 __ popfd();
276 __ popad();
277 // Get the rbp, described implicitly by the frame sender code (no oopMap)
278 __ popl(rbp);
279
280 }
281
282 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
283
284 // Just restore result register. Only used by deoptimization. By
285 // now any callee save register that needs to be restored to a c2
286 // caller of the deoptee has been extracted into the vframeArray
287 // and will be stuffed into the c2i adapter we create for later
288 // restoration so only result registers need to be restored here.
289 //
290
291 __ frstor(Address(rsp, 0)); // Restore fpu state
292
293 // Recover XMM & FPU state
294 if( UseSSE == 1 ) {
295 __ movflt(xmm0, Address(rsp, xmm0_off*wordSize));
296 } else if( UseSSE >= 2 ) {
297 __ movdbl(xmm0, Address(rsp, xmm0_off*wordSize));
298 }
299 __ movl(rax, Address(rsp, rax_off*wordSize));
300 __ movl(rdx, Address(rsp, rdx_off*wordSize));
301 // Pop all of the register save area off the stack except the return address
302 __ addl(rsp, return_off * wordSize);
303 }
304
305 // The java_calling_convention describes stack locations as ideal slots on
306 // a frame with no abi restrictions. Since we must observe abi restrictions
307 // (like the placement of the register window) the slots must be biased by
308 // the following value.
309 static int reg2offset_in(VMReg r) {
310 // Account for saved rbp, and return address
311 // This should really be in_preserve_stack_slots
312 return (r->reg2stack() + 2) * VMRegImpl::stack_slot_size;
313 }
314
315 static int reg2offset_out(VMReg r) {
316 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
317 }
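// A small arithmetic sketch of the two helpers above (illustrative numbers):
// with 4-byte stack slots, an incoming value in VMReg stack slot 3 lands at
// reg2offset_in == (3 + 2) * 4 == 20 bytes off rbp (the +2 skips the saved
// rbp and the return address), while the same slot in the outgoing area is
// simply reg2offset_out == (3 + out_preserve_stack_slots()) * 4 off rsp.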
318
319 // ---------------------------------------------------------------------------
320 // Read the array of BasicTypes from a signature, and compute where the
321 // arguments should go. Values in the VMRegPair regs array refer to 4-byte
322 // quantities. Values less than SharedInfo::stack0 are registers, those above
323 // refer to 4-byte stack slots. All stack slots are based off of the stack pointer
324 // as framesizes are fixed.
325 // VMRegImpl::stack0 refers to the first slot at 0(sp),
326 // and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Registers
327 // up to RegisterImpl::number_of_registers are the 32-bit
328 // integer registers.
329
330 // Pass first two oop/int args in registers ECX and EDX.
331 // Pass first two float/double args in registers XMM0 and XMM1.
332 // Doubles have precedence, so if you pass a mix of floats and doubles
333 // the doubles will grab the registers before the floats will.
334
335 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
336 // either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
337 // units regardless of build. Of course for i486 there is no 64 bit build
338
339
340 // ---------------------------------------------------------------------------
341 // The compiled Java calling convention.
342 // Pass first two oop/int args in registers ECX and EDX.
343 // Pass first two float/double args in registers XMM0 and XMM1.
344 // Doubles have precedence, so if you pass a mix of floats and doubles
345 // the doubles will grab the registers before the floats will.
346 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
347 VMRegPair *regs,
348 int total_args_passed,
349 int is_outgoing) {
350 uint stack = 0; // Starting stack position for args on stack
351
352
353 // Pass first two oop/int args in registers ECX and EDX.
354 uint reg_arg0 = 9999;
355 uint reg_arg1 = 9999;
356
357 // Pass first two float/double args in registers XMM0 and XMM1.
358 // Doubles have precedence, so if you pass a mix of floats and doubles
359 // the doubles will grab the registers before the floats will.
360 // CNC - TURNED OFF FOR non-SSE.
361 // On Intel we have to round all doubles (and most floats) at
362 // call sites by storing to the stack in any case.
363 // UseSSE=0 ==> Don't Use ==> 9999+0
364 // UseSSE=1 ==> Floats only ==> 9999+1
365 // UseSSE>=2 ==> Floats or doubles ==> 9999+2
366 enum { fltarg_dontuse = 9999+0, fltarg_float_only = 9999+1, fltarg_flt_dbl = 9999+2 };
367 uint fargs = (UseSSE>=2) ? 2 : UseSSE;
368 uint freg_arg0 = 9999+fargs;
369 uint freg_arg1 = 9999+fargs;
370
371 // Pass doubles & longs aligned on the stack. First count stack slots for doubles
372 int i;
373 for( i = 0; i < total_args_passed; i++) {
374 if( sig_bt[i] == T_DOUBLE ) {
375 // first 2 doubles go in registers
376 if( freg_arg0 == fltarg_flt_dbl ) freg_arg0 = i;
377 else if( freg_arg1 == fltarg_flt_dbl ) freg_arg1 = i;
378 else // Else double is passed low on the stack to be aligned.
379 stack += 2;
380 } else if( sig_bt[i] == T_LONG ) {
381 stack += 2;
382 }
383 }
384 int dstack = 0; // Separate counter for placing doubles
385
386 // Now pick where all else goes.
387 for( i = 0; i < total_args_passed; i++) {
388 // From the type and the argument number (count) compute the location
389 switch( sig_bt[i] ) {
390 case T_SHORT:
391 case T_CHAR:
392 case T_BYTE:
393 case T_BOOLEAN:
394 case T_INT:
395 case T_ARRAY:
396 case T_OBJECT:
397 case T_ADDRESS:
398 if( reg_arg0 == 9999 ) {
399 reg_arg0 = i;
400 regs[i].set1(rcx->as_VMReg());
401 } else if( reg_arg1 == 9999 ) {
402 reg_arg1 = i;
403 regs[i].set1(rdx->as_VMReg());
404 } else {
405 regs[i].set1(VMRegImpl::stack2reg(stack++));
406 }
407 break;
408 case T_FLOAT:
409 if( freg_arg0 == fltarg_flt_dbl || freg_arg0 == fltarg_float_only ) {
410 freg_arg0 = i;
411 regs[i].set1(xmm0->as_VMReg());
412 } else if( freg_arg1 == fltarg_flt_dbl || freg_arg1 == fltarg_float_only ) {
413 freg_arg1 = i;
414 regs[i].set1(xmm1->as_VMReg());
415 } else {
416 regs[i].set1(VMRegImpl::stack2reg(stack++));
417 }
418 break;
419 case T_LONG:
420 assert(sig_bt[i+1] == T_VOID, "missing Half" );
421 regs[i].set2(VMRegImpl::stack2reg(dstack));
422 dstack += 2;
423 break;
424 case T_DOUBLE:
425 assert(sig_bt[i+1] == T_VOID, "missing Half" );
426 if( freg_arg0 == (uint)i ) {
427 regs[i].set2(xmm0->as_VMReg());
428 } else if( freg_arg1 == (uint)i ) {
429 regs[i].set2(xmm1->as_VMReg());
430 } else {
431 regs[i].set2(VMRegImpl::stack2reg(dstack));
432 dstack += 2;
433 }
434 break;
435 case T_VOID: regs[i].set_bad(); break;
436 break;
437 default:
438 ShouldNotReachHere();
439 break;
440 }
441 }
442
443 // The return value can be an odd number of VMRegImpl stack slots; make it a multiple of 2
444 return round_to(stack, 2);
445 }
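// A worked example of this convention (illustrative only, assuming UseSSE >= 2):
// for a signature (int, long, double, float, Object) the int goes in ECX, the
// Object in EDX, the double in XMM0, the float in XMM1, and the long occupies
// stack slots 0-1; the routine then returns round_to(2, 2) == 2 slots.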
446
447 // Patch the caller's callsite with entry to compiled code if it exists.
448 static void patch_callers_callsite(MacroAssembler *masm) {
449 Label L;
450 __ verify_oop(rbx);
451 __ cmpl(Address(rbx, in_bytes(methodOopDesc::code_offset())), NULL_WORD);
452 __ jcc(Assembler::equal, L);
453 // Schedule the branch target address early.
454 // Call into the VM to patch the caller, then jump to compiled callee
455 // rax isn't live so capture the return address while we easily can
456 __ movl(rax, Address(rsp, 0));
457 __ pushad();
458 __ pushfd();
459
460 if (UseSSE == 1) {
461 __ subl(rsp, 2*wordSize);
462 __ movflt(Address(rsp, 0), xmm0);
463 __ movflt(Address(rsp, wordSize), xmm1);
464 }
465 if (UseSSE >= 2) {
466 __ subl(rsp, 4*wordSize);
467 __ movdbl(Address(rsp, 0), xmm0);
468 __ movdbl(Address(rsp, 2*wordSize), xmm1);
469 }
470 #ifdef COMPILER2
471 // C2 may leave the stack dirty if not in SSE2+ mode
472 if (UseSSE >= 2) {
473 __ verify_FPU(0, "c2i transition should have clean FPU stack");
474 } else {
475 __ empty_FPU_stack();
476 }
477 #endif /* COMPILER2 */
478
479 // VM needs caller's callsite
480 __ pushl(rax);
481 // VM needs target method
482 __ pushl(rbx);
483 __ verify_oop(rbx);
484 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
485 __ addl(rsp, 2*wordSize);
486
487 if (UseSSE == 1) {
488 __ movflt(xmm0, Address(rsp, 0));
489 __ movflt(xmm1, Address(rsp, wordSize));
490 __ addl(rsp, 2*wordSize);
491 }
492 if (UseSSE >= 2) {
493 __ movdbl(xmm0, Address(rsp, 0));
494 __ movdbl(xmm1, Address(rsp, 2*wordSize));
495 __ addl(rsp, 4*wordSize);
496 }
497
498 __ popfd();
499 __ popad();
500 __ bind(L);
501 }
502
503
504 // Helper function to put tags in interpreter stack.
505 static void tag_stack(MacroAssembler *masm, const BasicType sig, int st_off) {
506 if (TaggedStackInterpreter) {
507 int tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(0);
508 if (sig == T_OBJECT || sig == T_ARRAY) {
509 __ movl(Address(rsp, tag_offset), frame::TagReference);
510 } else if (sig == T_LONG || sig == T_DOUBLE) {
511 int next_tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(1);
512 __ movl(Address(rsp, next_tag_offset), frame::TagValue);
513 __ movl(Address(rsp, tag_offset), frame::TagValue);
514 } else {
515 __ movl(Address(rsp, tag_offset), frame::TagValue);
516 }
517 }
518 }
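// Rough picture of what the tags buy us (a sketch of the assumed tagged
// layout): with TaggedStackInterpreter each interpreter stack element is a
// (tag, value) pair, so Interpreter::stackElementSize() covers both words and
// long/double values end up split around a tag word -- which is why
// move_c2i_double below and move_i2c_double later have to shuffle the halves
// to make them contiguous again.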
519
520 // Double and long values with Tagged stacks are not contiguous.
521 static void move_c2i_double(MacroAssembler *masm, XMMRegister r, int st_off) {
522 int next_off = st_off - Interpreter::stackElementSize();
523 if (TaggedStackInterpreter) {
524 __ movdbl(Address(rsp, next_off), r);
525 // Move top half up and put tag in the middle.
526 __ movl(rdi, Address(rsp, next_off+wordSize));
527 __ movl(Address(rsp, st_off), rdi);
528 tag_stack(masm, T_DOUBLE, next_off);
529 } else {
530 __ movdbl(Address(rsp, next_off), r);
531 }
532 }
533
534 static void gen_c2i_adapter(MacroAssembler *masm,
535 int total_args_passed,
536 int comp_args_on_stack,
537 const BasicType *sig_bt,
538 const VMRegPair *regs,
539 Label& skip_fixup) {
540 // Before we get into the guts of the C2I adapter, see if we should be here
541 // at all. We've come from compiled code and are attempting to jump to the
542 // interpreter, which means the caller made a static call to get here
543 // (vcalls always get a compiled target if there is one). Check for a
544 // compiled target. If there is one, we need to patch the caller's call.
545 patch_callers_callsite(masm);
546
547 __ bind(skip_fixup);
548
549 #ifdef COMPILER2
550 // C2 may leave the stack dirty if not in SSE2+ mode
551 if (UseSSE >= 2) {
552 __ verify_FPU(0, "c2i transition should have clean FPU stack");
553 } else {
554 __ empty_FPU_stack();
555 }
556 #endif /* COMPILER2 */
557
558 // Since all args are passed on the stack,
559 // total_args_passed * Interpreter::stackElementSize() is the
560 // space we need.
561 int extraspace = total_args_passed * Interpreter::stackElementSize();
562
563 // Get return address
564 __ popl(rax);
565
566 // set senderSP value
567 __ movl(rsi, rsp);
568
569 __ subl(rsp, extraspace);
570
571 // Now write the args into the outgoing interpreter space
572 for (int i = 0; i < total_args_passed; i++) {
573 if (sig_bt[i] == T_VOID) {
574 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
575 continue;
576 }
577
578 // st_off points to lowest address on stack.
579 int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize();
580 // Say 4 args:
581 // i st_off
582 // 0 12 T_LONG
583 // 1 8 T_VOID
584 // 2 4 T_OBJECT
585 // 3 0 T_BOOL
586 VMReg r_1 = regs[i].first();
587 VMReg r_2 = regs[i].second();
588 if (!r_1->is_valid()) {
589 assert(!r_2->is_valid(), "");
590 continue;
591 }
592
593 if (r_1->is_stack()) {
594 // memory to memory use fpu stack top
595 int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
596
597 if (!r_2->is_valid()) {
598 __ movl(rdi, Address(rsp, ld_off));
599 __ movl(Address(rsp, st_off), rdi);
600 tag_stack(masm, sig_bt[i], st_off);
601 } else {
602
603 // ld_off == LSW, ld_off+VMRegImpl::stack_slot_size == MSW
604 // st_off == MSW, st_off-wordSize == LSW
605
606 int next_off = st_off - Interpreter::stackElementSize();
607 __ movl(rdi, Address(rsp, ld_off));
608 __ movl(Address(rsp, next_off), rdi);
609 __ movl(rdi, Address(rsp, ld_off + wordSize));
610 __ movl(Address(rsp, st_off), rdi);
611 tag_stack(masm, sig_bt[i], next_off);
612 }
613 } else if (r_1->is_Register()) {
614 Register r = r_1->as_Register();
615 if (!r_2->is_valid()) {
616 __ movl(Address(rsp, st_off), r);
617 tag_stack(masm, sig_bt[i], st_off);
618 } else {
619 // long/double in gpr
620 ShouldNotReachHere();
621 }
622 } else {
623 assert(r_1->is_XMMRegister(), "");
624 if (!r_2->is_valid()) {
625 __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
626 tag_stack(masm, sig_bt[i], st_off);
627 } else {
628 assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
629 move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
630 }
631 }
632 }
633
634 // Schedule the branch target address early.
635 __ movl(rcx, Address(rbx, in_bytes(methodOopDesc::interpreter_entry_offset())));
636 // And repush original return address
637 __ pushl(rax);
638 __ jmp(rcx);
639 }
640
641
642 // For tagged stacks, double or long values aren't contiguous on the stack
643 // so get them contiguous for the xmm load
644 static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_sp, int ld_off) {
645 int next_val_off = ld_off - Interpreter::stackElementSize();
646 if (TaggedStackInterpreter) {
647 // use tag slot temporarily for MSW
648 __ movl(rsi, Address(saved_sp, ld_off));
649 __ movl(Address(saved_sp, next_val_off+wordSize), rsi);
650 __ movdbl(r, Address(saved_sp, next_val_off));
651 // restore tag
652 __ movl(Address(saved_sp, next_val_off+wordSize), frame::TagValue);
653 } else {
654 __ movdbl(r, Address(saved_sp, next_val_off));
655 }
656 }
657
658 static void gen_i2c_adapter(MacroAssembler *masm,
659 int total_args_passed,
660 int comp_args_on_stack,
661 const BasicType *sig_bt,
662 const VMRegPair *regs) {
663 // we're being called from the interpreter but need to find the
664 // compiled return entry point. The return address on the stack
665 // should point at it and we just need to pull the old value out.
666 // load up the pointer to the compiled return entry point and
667 // rewrite our return pc. The code is arranged like so:
668 //
669 // .word Interpreter::return_sentinel
670 // .word address_of_compiled_return_point
671 // return_entry_point: blah_blah_blah
672 //
673 // So we can find the appropriate return point by loading up the word
674 // just prior to the current return address we have on the stack.
675 //
676 // We will only enter here from an interpreted frame and never from after
677 // passing thru a c2i. Azul allowed this but we do not. If we lose the
678 // race and use a c2i we will remain interpreted for the race loser(s).
679 // This removes all sorts of headaches on the x86 side and also eliminates
680 // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.
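// (That layout is what the -8/-4 loads further down rely on: the word at
// return_pc - 8 should be the sentinel and the word at return_pc - 4 the
// compiled return entry point.)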
681
682
683 // Note: rsi contains the senderSP on entry. We must preserve it since
684 // we may do an i2c -> c2i transition if we lose a race where compiled
685 // code goes non-entrant while we get args ready.
686
687 // Pick up the return address
688 __ movl(rax, Address(rsp, 0));
689
690 // If UseSSE >= 2 then no cleanup is needed on the return to the
691 // interpreter so skip fixing up the return entry point unless
692 // VerifyFPU is enabled.
693 if (UseSSE < 2 || VerifyFPU) {
694 Label skip, chk_int;
695 // If we were called from the call stub we need to do a little bit different
696 // cleanup than if the interpreter returned to the call stub.
697
698 ExternalAddress stub_return_address(StubRoutines::_call_stub_return_address);
699 __ cmp32(rax, stub_return_address.addr());
700 __ jcc(Assembler::notEqual, chk_int);
701 assert(StubRoutines::i486::get_call_stub_compiled_return() != NULL, "must be set");
702 __ lea(rax, ExternalAddress(StubRoutines::i486::get_call_stub_compiled_return()));
703 __ jmp(skip);
704
705 // It must be the interpreter since we never get here via a c2i (unlike Azul)
706
707 __ bind(chk_int);
708 #ifdef ASSERT
709 {
710 Label ok;
711 __ cmpl(Address(rax, -8), Interpreter::return_sentinel);
712 __ jcc(Assembler::equal, ok);
713 __ int3();
714 __ bind(ok);
715 }
716 #endif // ASSERT
717 __ movl(rax, Address(rax, -4));
718 __ bind(skip);
719 }
720
721 // rax now contains the compiled return entry point which will do any
722 // cleanup needed for the return from compiled to interpreted.
723
724 // Must preserve original SP for loading incoming arguments because
725 // we need to align the outgoing SP for compiled code.
726 __ movl(rdi, rsp);
727
728 // Cut-out for having no stack args. Since up to 2 int/oop args are passed
729 // in registers, we will occasionally have no stack args.
730 int comp_words_on_stack = 0;
731 if (comp_args_on_stack) {
732 // Sig words on the stack are greater-than VMRegImpl::stack0. Those in
733 // registers are below. By subtracting stack0, we either get a negative
734 // number (all values in registers) or the maximum stack slot accessed.
735 // int comp_args_on_stack = VMRegImpl::reg2stack(max_arg);
736 // Convert 4-byte stack slots to words.
737 comp_words_on_stack = round_to(comp_args_on_stack*4, wordSize)>>LogBytesPerWord;
738 // Round up to minimum stack alignment, in wordSize
739 comp_words_on_stack = round_to(comp_words_on_stack, 2);
740 __ subl(rsp, comp_words_on_stack * wordSize);
741 }
742
743 // Align the outgoing SP
744 __ andl(rsp, -(StackAlignmentInBytes));
745
746 // push the return address on the stack (note that pushing, rather
747 // than storing it, yields the correct frame alignment for the callee)
748 __ pushl(rax);
749
750 // Put saved SP in another register
751 const Register saved_sp = rax;
752 __ movl(saved_sp, rdi);
753
754
755 // Will jump to the compiled code just as if compiled code was doing it.
756 // Pre-load the register-jump target early, to schedule it better.
757 __ movl(rdi, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset())));
758
759 // Now generate the shuffle code. Pick up all register args and move the
760 // rest through the floating point stack top.
761 for (int i = 0; i < total_args_passed; i++) {
762 if (sig_bt[i] == T_VOID) {
763 // Longs and doubles are passed in native word order, but misaligned
764 // in the 32-bit build.
765 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
766 continue;
767 }
768
769 // Pick up 0, 1 or 2 words from SP+offset.
770
771 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
772 "scrambled load targets?");
773 // Load in argument order going down.
774 int ld_off = (total_args_passed - i)*Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
775 // Point to interpreter value (vs. tag)
776 int next_off = ld_off - Interpreter::stackElementSize();
777 //
778 //
779 //
780 VMReg r_1 = regs[i].first();
781 VMReg r_2 = regs[i].second();
782 if (!r_1->is_valid()) {
783 assert(!r_2->is_valid(), "");
784 continue;
785 }
786 if (r_1->is_stack()) {
787 // Convert stack slot to an SP offset (+ wordSize to account for return address )
788 int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
789
790 // We can use rsi as a temp here because compiled code doesn't need rsi as an input
791 // and if we end up going thru a c2i because of a miss a reasonable value of rsi
792 // will be generated.
793 if (!r_2->is_valid()) {
794 // __ fld_s(Address(saved_sp, ld_off));
795 // __ fstp_s(Address(rsp, st_off));
796 __ movl(rsi, Address(saved_sp, ld_off));
797 __ movl(Address(rsp, st_off), rsi);
798 } else {
799 // Interpreter local[n] == MSW, local[n+1] == LSW however locals
800 // are accessed at negative offsets so LSW is at the LOW address
801
802 // ld_off is MSW so get LSW
803 // st_off is LSW (i.e. reg.first())
804 // __ fld_d(Address(saved_sp, next_off));
805 // __ fstp_d(Address(rsp, st_off));
806 __ movl(rsi, Address(saved_sp, next_off));
807 __ movl(Address(rsp, st_off), rsi);
808 __ movl(rsi, Address(saved_sp, ld_off));
809 __ movl(Address(rsp, st_off + wordSize), rsi);
810 }
811 } else if (r_1->is_Register()) { // Register argument
812 Register r = r_1->as_Register();
813 assert(r != rax, "must be different");
814 if (r_2->is_valid()) {
815 assert(r_2->as_Register() != rax, "need another temporary register");
816 // Remember r_1 is low address (and LSB on x86)
817 // So r_2 gets loaded from high address regardless of the platform
818 __ movl(r_2->as_Register(), Address(saved_sp, ld_off));
819 __ movl(r, Address(saved_sp, next_off));
820 } else {
821 __ movl(r, Address(saved_sp, ld_off));
822 }
823 } else {
824 assert(r_1->is_XMMRegister(), "");
825 if (!r_2->is_valid()) {
826 __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
827 } else {
828 move_i2c_double(masm, r_1->as_XMMRegister(), saved_sp, ld_off);
829 }
830 }
831 }
832
833 // 6243940 We might end up in handle_wrong_method if
834 // the callee is deoptimized as we race thru here. If that
835 // happens we don't want to take a safepoint because the
836 // caller frame will look interpreted and arguments are now
837 // "compiled" so it is much better to make this transition
838 // invisible to the stack walking code. Unfortunately if
839 // we try and find the callee by normal means a safepoint
840 // is possible. So we stash the desired callee in the thread
841 // and the vm will find it there should this case occur.
842
843 __ get_thread(rax);
844 __ movl(Address(rax, JavaThread::callee_target_offset()), rbx);
845
846 // move methodOop to rax in case we end up in a c2i adapter.
847 // the c2i adapters expect methodOop in rax (c2) because c2's
848 // resolve stubs return the result (the method) in rax.
849 // I'd love to fix this.
850 __ movl(rax, rbx);
851
852 __ jmp(rdi);
853 }
854
855 // ---------------------------------------------------------------
856 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
857 int total_args_passed,
858 int comp_args_on_stack,
859 const BasicType *sig_bt,
860 const VMRegPair *regs) {
861 address i2c_entry = __ pc();
862
863 gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
864
865 // -------------------------------------------------------------------------
866 // Generate a C2I adapter. On entry we know rbx holds the methodOop during calls
867 // to the interpreter. The args start out packed in the compiled layout. They
868 // need to be unpacked into the interpreter layout. This will almost always
869 // require some stack space. We grow the current (compiled) stack, then repack
870 // the args. We finally end in a jump to the generic interpreter entry point.
871 // On exit from the interpreter, the interpreter will restore our SP (lest the
872 // compiled code, which relies solely on SP and not EBP, get sick).
873
874 address c2i_unverified_entry = __ pc();
875 Label skip_fixup;
876
877 Register holder = rax;
878 Register receiver = rcx;
879 Register temp = rbx;
880
881 {
882
883 Label missed;
884
885 __ verify_oop(holder);
886 __ movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
887 __ verify_oop(temp);
888
889 __ cmpl(temp, Address(holder, compiledICHolderOopDesc::holder_klass_offset()));
890 __ movl(rbx, Address(holder, compiledICHolderOopDesc::holder_method_offset()));
891 __ jcc(Assembler::notEqual, missed);
892 // Method might have been compiled since the call site was patched to
893 // interpreted; if that is the case treat it as a miss so we can get
894 // the call site corrected.
895 __ cmpl(Address(rbx, in_bytes(methodOopDesc::code_offset())), NULL_WORD);
896 __ jcc(Assembler::equal, skip_fixup);
897
898 __ bind(missed);
899 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
900 }
901
902 address c2i_entry = __ pc();
903
904 gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
905
906 __ flush();
907 return new AdapterHandlerEntry(i2c_entry, c2i_entry, c2i_unverified_entry);
908 }
909
910 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
911 VMRegPair *regs,
912 int total_args_passed) {
913 // We return the amount of VMRegImpl stack slots we need to reserve for all
914 // the arguments NOT counting out_preserve_stack_slots.
915
916 uint stack = 0; // All arguments on stack
917
918 for( int i = 0; i < total_args_passed; i++) {
919 // From the type and the argument number (count) compute the location
920 switch( sig_bt[i] ) {
921 case T_BOOLEAN:
922 case T_CHAR:
923 case T_FLOAT:
924 case T_BYTE:
925 case T_SHORT:
926 case T_INT:
927 case T_OBJECT:
928 case T_ARRAY:
929 case T_ADDRESS:
930 regs[i].set1(VMRegImpl::stack2reg(stack++));
931 break;
932 case T_LONG:
933 case T_DOUBLE: // The stack numbering is reversed from Java
934 // Since C arguments do not get reversed, the ordering for
935 // doubles on the stack must be opposite the Java convention
936 assert(sig_bt[i+1] == T_VOID, "missing Half" );
937 regs[i].set2(VMRegImpl::stack2reg(stack));
938 stack += 2;
939 break;
940 case T_VOID: regs[i].set_bad(); break;
941 default:
942 ShouldNotReachHere();
943 break;
944 }
945 }
946 return stack;
947 }
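// A worked example of the C convention above (illustrative only): a native
// signature expressed as (T_ADDRESS, T_OBJECT, T_INT, T_LONG, T_VOID,
// T_DOUBLE, T_VOID) -- JNIEnv*, receiver handle, an int, a long and a double --
// is laid out entirely on the stack in slots 0, 1, 2, 3-4 and 5-6, and the
// routine returns 7.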
948
949 // A simple move of an integer-like type
950 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
951 if (src.first()->is_stack()) {
952 if (dst.first()->is_stack()) {
953 // stack to stack
954 // __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
955 // __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
956 __ movl(rax, Address(rbp, reg2offset_in(src.first())));
957 __ movl(Address(rsp, reg2offset_out(dst.first())), rax);
958 } else {
959 // stack to reg
960 __ movl(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
961 }
962 } else if (dst.first()->is_stack()) {
963 // reg to stack
964 __ movl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
965 } else {
966 __ movl(dst.first()->as_Register(), src.first()->as_Register());
967 }
968 }
969
970 // An oop arg. Must pass a handle not the oop itself
971 static void object_move(MacroAssembler* masm,
972 OopMap* map,
973 int oop_handle_offset,
974 int framesize_in_slots,
975 VMRegPair src,
976 VMRegPair dst,
977 bool is_receiver,
978 int* receiver_offset) {
979
980 // Because of the calling conventions we know that src can be a
981 // register or a stack location. dst can only be a stack location.
982
983 assert(dst.first()->is_stack(), "must be stack");
984 // must pass a handle. First figure out the location we use as a handle
985
986 if (src.first()->is_stack()) {
987 // Oop is already on the stack as an argument
988 Register rHandle = rax;
989 Label nil;
990 __ xorl(rHandle, rHandle);
991 __ cmpl(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
992 __ jcc(Assembler::equal, nil);
993 __ leal(rHandle, Address(rbp, reg2offset_in(src.first())));
994 __ bind(nil);
995 __ movl(Address(rsp, reg2offset_out(dst.first())), rHandle);
996
997 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
998 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
999 if (is_receiver) {
1000 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1001 }
1002 } else {
1003 // Oop is in a register; we must store it to the space we reserve
1004 // on the stack for oop_handles
1005 const Register rOop = src.first()->as_Register();
1006 const Register rHandle = rax;
1007 int oop_slot = (rOop == rcx ? 0 : 1) * VMRegImpl::slots_per_word + oop_handle_offset;
1008 int offset = oop_slot*VMRegImpl::stack_slot_size;
1009 Label skip;
1010 __ movl(Address(rsp, offset), rOop);
1011 map->set_oop(VMRegImpl::stack2reg(oop_slot));
1012 __ xorl(rHandle, rHandle);
1013 __ cmpl(rOop, NULL_WORD);
1014 __ jcc(Assembler::equal, skip);
1015 __ leal(rHandle, Address(rsp, offset));
1016 __ bind(skip);
1017 // Store the handle parameter
1018 __ movl(Address(rsp, reg2offset_out(dst.first())), rHandle);
1019 if (is_receiver) {
1020 *receiver_offset = offset;
1021 }
1022 }
1023 }
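// In both cases above the callee receives a JNI-style handle: NULL for a null
// oop, otherwise the address of a stack location that holds the oop (and that
// the oopMap knows about), never the raw oop itself.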
1024
1025 // A float arg may have to do a float reg to int reg conversion
1026 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1027 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1028
1029 // Because of the calling convention we know that src is either a stack location
1030 // or an xmm register. dst can only be a stack location.
1031
1032 assert(dst.first()->is_stack() && ( src.first()->is_stack() || src.first()->is_XMMRegister()), "bad parameters");
1033
1034 if (src.first()->is_stack()) {
1035 __ movl(rax, Address(rbp, reg2offset_in(src.first())));
1036 __ movl(Address(rsp, reg2offset_out(dst.first())), rax);
1037 } else {
1038 // reg to stack
1039 __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1040 }
1041 }
1042
1043 // A long move
1044 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1045
1046 // The only legal possibility for a long_move VMRegPair is:
1047 // 1: two stack slots (possibly unaligned)
1048 // as neither the java nor the C calling convention will use registers
1049 // for longs.
1050
1051 if (src.first()->is_stack() && dst.first()->is_stack()) {
1052 assert(src.second()->is_stack() && dst.second()->is_stack(), "must be all stack");
1053 __ movl(rax, Address(rbp, reg2offset_in(src.first())));
1054 __ movl(rbx, Address(rbp, reg2offset_in(src.second())));
1055 __ movl(Address(rsp, reg2offset_out(dst.first())), rax);
1056 __ movl(Address(rsp, reg2offset_out(dst.second())), rbx);
1057 } else {
1058 ShouldNotReachHere();
1059 }
1060 }
1061
1062 // A double move
1063 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1064
1065 // The only legal possibilities for a double_move VMRegPair are:
1066 // The painful thing here is that, like long_move, a VMRegPair might be split across stack slots.
1067
1068 // Because of the calling convention we know that src is either
1069 // 1: a single physical register (xmm registers only)
1070 // 2: two stack slots (possibly unaligned)
1071 // dst can only be a pair of stack slots.
1072
1073 assert(dst.first()->is_stack() && (src.first()->is_XMMRegister() || src.first()->is_stack()), "bad args");
1074
1075 if (src.first()->is_stack()) {
1076 // source is all stack
1077 __ movl(rax, Address(rbp, reg2offset_in(src.first())));
1078 __ movl(rbx, Address(rbp, reg2offset_in(src.second())));
1079 __ movl(Address(rsp, reg2offset_out(dst.first())), rax);
1080 __ movl(Address(rsp, reg2offset_out(dst.second())), rbx);
1081 } else {
1082 // reg to stack
1083 // No worries about stack alignment
1084 __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1085 }
1086 }
1087
1088
1089 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1090 // We always ignore the frame_slots arg and just use the space just below the frame pointer
1091 // which by this time is free to use
1092 switch (ret_type) {
1093 case T_FLOAT:
1094 __ fstp_s(Address(rbp, -wordSize));
1095 break;
1096 case T_DOUBLE:
1097 __ fstp_d(Address(rbp, -2*wordSize));
1098 break;
1099 case T_VOID: break;
1100 case T_LONG:
1101 __ movl(Address(rbp, -wordSize), rax);
1102 __ movl(Address(rbp, -2*wordSize), rdx);
1103 break;
1104 default: {
1105 __ movl(Address(rbp, -wordSize), rax);
1106 }
1107 }
1108 }
1109
1110 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1111 // We always ignore the frame_slots arg and just use the space just below the frame pointer
1112 // which by this time is free to use
1113 switch (ret_type) {
1114 case T_FLOAT:
1115 __ fld_s(Address(rbp, -wordSize));
1116 break;
1117 case T_DOUBLE:
1118 __ fld_d(Address(rbp, -2*wordSize));
1119 break;
1120 case T_LONG:
1121 __ movl(rax, Address(rbp, -wordSize));
1122 __ movl(rdx, Address(rbp, -2*wordSize));
1123 break;
1124 case T_VOID: break;
1125 default: {
1126 __ movl(rax, Address(rbp, -wordSize));
1127 }
1128 }
1129 }
1130
1131 // ---------------------------------------------------------------------------
1132 // Generate a native wrapper for a given method. The method takes arguments
1133 // in the Java compiled code convention, marshals them to the native
1134 // convention (handlizes oops, etc), transitions to native, makes the call,
1135 // returns to java state (possibly blocking), unhandlizes any result and
1136 // returns.
1137 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
1138 methodHandle method,
1139 int total_in_args,
1140 int comp_args_on_stack,
1141 BasicType *in_sig_bt,
1142 VMRegPair *in_regs,
1143 BasicType ret_type) {
1144
1145 // An OopMap for lock (and class if static)
1146 OopMapSet *oop_maps = new OopMapSet();
1147
1148 // We have received a description of where all the java args are located
1149 // on entry to the wrapper. We need to convert these args to where
1150 // the jni function will expect them. To figure out where they go
1151 // we convert the java signature to a C signature by inserting
1152 // the hidden arguments as arg[0] and possibly arg[1] (static method)
1153
1154 int total_c_args = total_in_args + 1;
1155 if (method->is_static()) {
1156 total_c_args++;
1157 }
1158
1159 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1160 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1161
1162 int argc = 0;
1163 out_sig_bt[argc++] = T_ADDRESS;
1164 if (method->is_static()) {
1165 out_sig_bt[argc++] = T_OBJECT;
1166 }
1167
1168 int i;
1169 for (i = 0; i < total_in_args ; i++ ) {
1170 out_sig_bt[argc++] = in_sig_bt[i];
1171 }
1172
1173
1174 // Now figure out where the args must be stored and how much stack space
1175 // they require (neglecting out_preserve_stack_slots but space for storing
1176 // the 1st six register arguments). It's weird; see int_stk_helper.
1177 //
1178 int out_arg_slots;
1179 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
1180
1181 // Compute framesize for the wrapper. We need to handlize all oops in
1182 // registers a max of 2 on x86.
1183
1184 // Calculate the total number of stack slots we will need.
1185
1186 // First count the abi requirement plus all of the outgoing args
1187 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1188
1189 // Now the space for the inbound oop handle area
1190
1191 int oop_handle_offset = stack_slots;
1192 stack_slots += 2*VMRegImpl::slots_per_word;
1193
1194 // Now any space we need for handlizing a klass if static method
1195
1196 int klass_slot_offset = 0;
1197 int klass_offset = -1;
1198 int lock_slot_offset = 0;
1199 bool is_static = false;
1200 int oop_temp_slot_offset = 0;
1201
1202 if (method->is_static()) {
1203 klass_slot_offset = stack_slots;
1204 stack_slots += VMRegImpl::slots_per_word;
1205 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1206 is_static = true;
1207 }
1208
1209 // Plus a lock if needed
1210
1211 if (method->is_synchronized()) {
1212 lock_slot_offset = stack_slots;
1213 stack_slots += VMRegImpl::slots_per_word;
1214 }
1215
1216 // Now a place (+2) to save return values or temp during shuffling
1217 // + 2 for return address (which we own) and saved rbp,
1218 stack_slots += 4;
1219
1220 // Ok The space we have allocated will look like:
1221 //
1222 //
1223 // FP-> | |
1224 // |---------------------|
1225 // | 2 slots for moves |
1226 // |---------------------|
1227 // | lock box (if sync) |
1228 // |---------------------| <- lock_slot_offset (-lock_slot_rbp_offset)
1229 // | klass (if static) |
1230 // |---------------------| <- klass_slot_offset
1231 // | oopHandle area |
1232 // |---------------------| <- oop_handle_offset (a max of 2 registers)
1233 // | outbound memory |
1234 // | based arguments |
1235 // | |
1236 // |---------------------|
1237 // | |
1238 // SP-> | out_preserved_slots |
1239 //
1240 //
1241 // ****************************************************************************
1242 // WARNING - on Windows Java Natives use pascal calling convention and pop the
1243 // arguments off of the stack after the jni call. Before the call we can use
1244 // instructions that are SP relative. After the jni call we switch to FP
1245 // relative instructions instead of re-adjusting the stack on windows.
1246 // ****************************************************************************
1247
1248
1249 // Now compute actual number of stack words we need rounding to make
1250 // stack properly aligned.
1251 stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
1252
1253 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
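// For a feel of the numbers (illustrative, assuming out_preserve_stack_slots()
// is 0 and slots_per_word is 1 on this 32-bit port): a non-static,
// non-synchronized native with out_arg_slots == 5 needs 0 + 5 + 2 (oop handle
// area) + 4 (moves/return address/saved rbp) == 11 slots, rounded up to
// 12 slots == 48 bytes of stack_size.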
1254
1255 intptr_t start = (intptr_t)__ pc();
1256
1257 // First thing make an ic check to see if we should even be here
1258
1259 // We are free to use all registers as temps without saving them and
1260 // restoring them except rbp. rbp is the only callee save register
1261 // as far as the interpreter and the compiler(s) are concerned.
1262
1263
1264 const Register ic_reg = rax;
1265 const Register receiver = rcx;
1266 Label hit;
1267 Label exception_pending;
1268
1269
1270 __ verify_oop(receiver);
1271 __ cmpl(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
1272 __ jcc(Assembler::equal, hit);
1273
1274 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1275
1276 // verified entry must be aligned for code patching.
1277 // and the first 5 bytes must be in the same cache line
1278 // if we align at 8 then we will be sure 5 bytes are in the same line
1279 __ align(8);
1280
1281 __ bind(hit);
1282
1283 int vep_offset = ((intptr_t)__ pc()) - start;
1284
1285 #ifdef COMPILER1
1286 if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
1287 // Object.hashCode can pull the hashCode from the header word
1288 // instead of doing a full VM transition once it's been computed.
1289 // Since hashCode is usually polymorphic at call sites we can't do
1290 // this optimization at the call site without a lot of work.
1291 Label slowCase;
1292 Register receiver = rcx;
1293 Register result = rax;
1294 __ movl(result, Address(receiver, oopDesc::mark_offset_in_bytes()));
1295
1296 // check if locked
1297 __ testl (result, markOopDesc::unlocked_value);
1298 __ jcc (Assembler::zero, slowCase);
1299
1300 if (UseBiasedLocking) {
1301 // Check if biased and fall through to runtime if so
1302 __ testl (result, markOopDesc::biased_lock_bit_in_place);
1303 __ jcc (Assembler::notZero, slowCase);
1304 }
1305
1306 // get hash
1307 __ andl (result, markOopDesc::hash_mask_in_place);
1308 // test if hashCode exists
1309 __ jcc (Assembler::zero, slowCase);
1310 __ shrl (result, markOopDesc::hash_shift);
1311 __ ret(0);
1312 __ bind (slowCase);
1313 }
1314 #endif // COMPILER1
1315
1316 // The instruction at the verified entry point must be 5 bytes or longer
1317 // because it can be patched on the fly by make_non_entrant. The stack bang
1318 // instruction fits that requirement.
1319
1320 // Generate stack overflow check
1321
1322 if (UseStackBanging) {
1323 __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
1324 } else {
1325 // need a 5 byte instruction to allow MT safe patching to non-entrant
1326 __ fat_nop();
1327 }
1328
1329 // Generate a new frame for the wrapper.
1330 __ enter();
1331 // -2 because return address is already present and so is saved rbp,
1332 __ subl(rsp, stack_size - 2*wordSize);
1333
1334 // Frame is now completed as far as size and linkage.
1335
1336 int frame_complete = ((intptr_t)__ pc()) - start;
1337
1338 // Calculate the difference between rsp and rbp. We need to know it
1339 // after the native call because on windows Java Natives will pop
1340 // the arguments and it is painful to do rsp relative addressing
1341 // in a platform independent way. So after the call we switch to
1342 // rbp relative addressing.
1343
1344 int fp_adjustment = stack_size - 2*wordSize;
1345
1346 #ifdef COMPILER2
1347 // C2 may leave the stack dirty if not in SSE2+ mode
1348 if (UseSSE >= 2) {
1349 __ verify_FPU(0, "c2i transition should have clean FPU stack");
1350 } else {
1351 __ empty_FPU_stack();
1352 }
1353 #endif /* COMPILER2 */
1354
1355 // Compute the rbp offset for any slots used after the jni call
1356
1357 int lock_slot_rbp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
1358 int oop_temp_slot_rbp_offset = (oop_temp_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
1359
1360 // We use rdi as a thread pointer because it is callee save and
1361 // if we load it once it is usable thru the entire wrapper
1362 const Register thread = rdi;
1363
1364 // We use rsi as the oop handle for the receiver/klass
1365 // It is callee save so it survives the call to native
1366
1367 const Register oop_handle_reg = rsi;
1368
1369 __ get_thread(thread);
1370
1371
1372 //
1373 // We immediately shuffle the arguments so that for any vm call we have to
1374 // make from here on out (sync slow path, jvmti, etc.) we will have
1375 // captured the oops from our caller and have a valid oopMap for
1376 // them.
1377
1378 // -----------------
1379 // The Grand Shuffle
1380 //
1381 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
1382 // and, if static, the class mirror instead of a receiver. This pretty much
1383 // guarantees that register layout will not match (and x86 doesn't use reg
1384 // parms though amd does). Since the native abi doesn't use register args
1385 // and the java convention does we don't have to worry about collisions.
1386 // All of our moves are reg->stack or stack->stack.
1387 // We ignore the extra arguments during the shuffle and handle them at the
1388 // last moment. The shuffle is described by the two calling convention
1389 // vectors we have in our possession. We simply walk the java vector to
1390 // get the source locations and the c vector to get the destinations.
1391
1392 int c_arg = method->is_static() ? 2 : 1 ;
1393
1394 // Record rsp-based slot for receiver on stack for non-static methods
1395 int receiver_offset = -1;
1396
1397 // This is a trick. We double the stack slots so we can claim
1398 // the oops in the caller's frame. Since we are sure to have
1399 // more args than the caller doubling is enough to make
1400 // sure we can capture all the incoming oop args from the
1401 // caller.
1402 //
1403 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1404
1405 // Mark location of rbp,
1406 // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg());
1407
1408 // We know that we only have args in at most two integer registers (rcx, rdx). So rax and rbx
1409 // are free to use as temporaries if we have to do stack to stack moves.
1410 // All inbound args are referenced off of rbp and all outbound args via rsp.
1411
1412 for (i = 0; i < total_in_args ; i++, c_arg++ ) {
1413 switch (in_sig_bt[i]) {
1414 case T_ARRAY:
1415 case T_OBJECT:
1416 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1417 ((i == 0) && (!is_static)),
1418 &receiver_offset);
1419 break;
1420 case T_VOID:
1421 break;
1422
1423 case T_FLOAT:
1424 float_move(masm, in_regs[i], out_regs[c_arg]);
1425 break;
1426
1427 case T_DOUBLE:
1428 assert( i + 1 < total_in_args &&
1429 in_sig_bt[i + 1] == T_VOID &&
1430 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1431 double_move(masm, in_regs[i], out_regs[c_arg]);
1432 break;
1433
1434 case T_LONG :
1435 long_move(masm, in_regs[i], out_regs[c_arg]);
1436 break;
1437
1438 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1439
1440 default:
1441 simple_move32(masm, in_regs[i], out_regs[c_arg]);
1442 }
1443 }
1444
1445 // Pre-load a static method's oop into rsi. Used both by locking code and
1446 // the normal JNI call code.
1447 if (method->is_static()) {
1448
1449 // load oop into a register
1450 __ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()));
1451
1452 // Now handlize the static class mirror; it's known not-null.
1453 __ movl(Address(rsp, klass_offset), oop_handle_reg);
1454 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1455
1456 // Now get the handle
1457 __ leal(oop_handle_reg, Address(rsp, klass_offset));
1458 // store the klass handle as second argument
1459 __ movl(Address(rsp, wordSize), oop_handle_reg);
1460 }
1461
1462 // Change state to native (we save the return address in the thread, since it might not
1463 // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1464 // points into the right code segment. It does not have to be the correct return pc.
1465 // We use the same pc/oopMap repeatedly when we call out
1466
1467 intptr_t the_pc = (intptr_t) __ pc();
1468 oop_maps->add_gc_map(the_pc - start, map);
1469
1470 __ set_last_Java_frame(thread, rsp, noreg, (address)the_pc);
1471
1472
1473 // We have all of the arguments set up at this point. We must not touch any of the
1474 // argument registers now (what if we had to save/restore them? there are no oop maps covering them).
1475
1476 {
1477 SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
1478 __ movoop(rax, JNIHandles::make_local(method()));
1479 __ call_VM_leaf(
1480 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1481 thread, rax);
1482 }
1483
1484
1485 // These are register definitions we need for locking/unlocking
1486 const Register swap_reg = rax; // Must use rax, for cmpxchg instruction
1487 const Register obj_reg = rcx; // Will contain the oop
1488 const Register lock_reg = rdx; // Address of compiler lock object (BasicLock)
1489
1490 Label slow_path_lock;
1491 Label lock_done;
1492
1493 // Lock a synchronized method
1494 if (method->is_synchronized()) {
1495
1496
1497 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1498
1499 // Get the handle (the 2nd argument)
1500 __ movl(oop_handle_reg, Address(rsp, wordSize));
1501
1502 // Get address of the box
1503
1504 __ leal(lock_reg, Address(rbp, lock_slot_rbp_offset));
1505
1506 // Load the oop from the handle
1507 __ movl(obj_reg, Address(oop_handle_reg, 0));
1508
1509 if (UseBiasedLocking) {
1510 // Note that oop_handle_reg is trashed during this call
1511 __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, false, lock_done, &slow_path_lock);
1512 }
1513
1514 // Load immediate 1 into swap_reg %rax,
1515 __ movl(swap_reg, 1);
1516
1517 // Load (object->mark() | 1) into swap_reg %rax,
1518 __ orl(swap_reg, Address(obj_reg, 0));
1519
1520 // Save (object->mark() | 1) into BasicLock's displaced header
1521 __ movl(Address(lock_reg, mark_word_offset), swap_reg);
1522
1523 if (os::is_MP()) {
1524 __ lock();
1525 }
1526
1527 // src -> dest iff dest == rax, else rax, <- dest
1528 // *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg)
1529 __ cmpxchg(lock_reg, Address(obj_reg, 0));
1530 __ jcc(Assembler::equal, lock_done);
1531
1532 // Test if the oopMark is an obvious stack pointer, i.e.,
1533 // 1) (mark & 3) == 0, and
1534 // 2) rsp <= mark < rsp + os::pagesize()
1535 // These 3 tests can be done by evaluating the following
1536 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
1537 // assuming both stack pointer and pagesize have their
1538 // least significant 2 bits clear.
1539 // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
1540
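  // Worked example of the expression above, assuming a 4096-byte page:
  //   3 - 4096 == -4093 == 0xFFFFF003 (as a 32-bit mask).
  // If the displaced mark is a stack lock, say 0x40 bytes above rsp, then
  //   (mark - rsp) & 0xFFFFF003 == 0x40 & 0xFFFFF003 == 0
  // and we treat the lock as recursive (a zero displaced header is stored).
  // Any mark that is unaligned or more than a page away yields a non-zero
  // result and falls into the slow path.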
1541 __ subl(swap_reg, rsp);
1542 __ andl(swap_reg, 3 - os::vm_page_size());
1543
1544 // Save the test result; for the recursive case, the result is zero
1545 __ movl(Address(lock_reg, mark_word_offset), swap_reg);
1546 __ jcc(Assembler::notEqual, slow_path_lock);
1547 // Slow path will re-enter here
1548 __ bind(lock_done);
1549
1550 if (UseBiasedLocking) {
1551 // Re-fetch oop_handle_reg as we trashed it above
1552 __ movl(oop_handle_reg, Address(rsp, wordSize));
1553 }
1554 }
1555
1556
1557 // Finally just about ready to make the JNI call
1558
1559
1560 // get JNIEnv* which is first argument to native
1561
1562 __ leal(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
1563 __ movl(Address(rsp, 0), rdx);
1564
1565 // Now set thread in native
1566 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
1567
1568 __ call(RuntimeAddress(method->native_function()));
1569
1570 // WARNING - on Windows Java Natives use pascal calling convention and pop the
1571 // arguments off of the stack. We could just re-adjust the stack pointer here
1572 // and continue to do SP relative addressing but we instead switch to FP
1573 // relative addressing.
1574
1575 // Unpack native results.
1576 switch (ret_type) {
1577 case T_BOOLEAN: __ c2bool(rax); break;
1578 case T_CHAR : __ andl(rax, 0xFFFF); break;
1579 case T_BYTE : __ sign_extend_byte (rax); break;
1580 case T_SHORT : __ sign_extend_short(rax); break;
1581 case T_INT : /* nothing to do */ break;
1582 case T_DOUBLE :
1583 case T_FLOAT :
1584 // Result is in st0; we'll save it as needed
1585 break;
1586 case T_ARRAY: // Really a handle
1587 case T_OBJECT: // Really a handle
1588 break; // can't de-handlize until after safepoint check
1589 case T_VOID: break;
1590 case T_LONG: break;
1591 default : ShouldNotReachHere();
1592 }
1593
1594 // Switch thread to "native transition" state before reading the synchronization state.
1595 // This additional state is necessary because reading and testing the synchronization
1596 // state is not atomic w.r.t. GC, as this scenario demonstrates:
1597 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1598 // VM thread changes sync state to synchronizing and suspends threads for GC.
1599 // Thread A is resumed to finish this native method, but doesn't block here since it
1600 // didn't see any synchronization in progress, and escapes.
1601 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
1602
1603 if(os::is_MP()) {
1604 if (UseMembar) {
1605 __ membar(); // Force this write out before the read below
1606 } else {
1607 // Write serialization page so VM thread can do a pseudo remote membar.
1608 // We use the current thread pointer to calculate a thread specific
1609 // offset to write to within the page. This minimizes bus traffic
1610 // due to cache line collision.
1611 __ serialize_memory(thread, rcx);
1612 }
1613 }
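  // The serialization page trick, as a rough sketch (the exact hashing inside
  // serialize_memory differs): every thread stores to its own offset within
  // one shared page, e.g.
  //
  //   offset = (uintptr_t(thread) >> shift) & in_page_mask;   // hypothetical names
  //   *(volatile int*)(serialize_page + offset) = 1;
  //
  // When the VM thread needs a fence it protects that page, so any thread
  // mid-store traps, which serializes its prior writes without requiring
  // every native call to execute a real membar.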
1614
1615 if (AlwaysRestoreFPU) {
1616 // Make sure the control word is correct.
1617 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
1618 }
1619
1620 // check for safepoint operation in progress and/or pending suspend requests
1621 { Label Continue;
1622
1623 __ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()),
1624 SafepointSynchronize::_not_synchronized);
1625
1626 Label L;
1627 __ jcc(Assembler::notEqual, L);
1628 __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
1629 __ jcc(Assembler::equal, Continue);
1630 __ bind(L);
1631
1632 // Don't use call_VM as it will see a possible pending exception and forward it
1633 // and never return here preventing us from clearing _last_native_pc down below.
1634 // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
1635 // preserved and correspond to the bcp/locals pointers. So we do a runtime call
1636 // by hand.
1637 //
1638 save_native_result(masm, ret_type, stack_slots);
1639 __ pushl(thread);
1640 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
1641 JavaThread::check_special_condition_for_native_trans)));
1642 __ increment(rsp, wordSize);
1643 // Restore any method result value
1644 restore_native_result(masm, ret_type, stack_slots);
1645
1646 __ bind(Continue);
1647 }
1648
1649 // change thread state
1650 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
1651
1652 Label reguard;
1653 Label reguard_done;
1654 __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
1655 __ jcc(Assembler::equal, reguard);
1656
1657 // slow path reguard re-enters here
1658 __ bind(reguard_done);
1659
1660 // Handle possible exception (will unlock if necessary)
1661
1662 // native result if any is live
1663
1664 // Unlock
1665 Label slow_path_unlock;
1666 Label unlock_done;
1667 if (method->is_synchronized()) {
1668
1669 Label done;
1670
1671 // Get locked oop from the handle we passed to jni
1672 __ movl(obj_reg, Address(oop_handle_reg, 0));
1673
1674 if (UseBiasedLocking) {
1675 __ biased_locking_exit(obj_reg, rbx, done);
1676 }
1677
1678 // Simple recursive lock?
1679
1680 __ cmpl(Address(rbp, lock_slot_rbp_offset), NULL_WORD);
1681 __ jcc(Assembler::equal, done);
1682
1683 // Must save rax, if it is live now because cmpxchg must use it
1684 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1685 save_native_result(masm, ret_type, stack_slots);
1686 }
1687
1688 // get old displaced header
1689 __ movl(rbx, Address(rbp, lock_slot_rbp_offset));
1690
1691 // get address of the stack lock
1692 __ leal(rax, Address(rbp, lock_slot_rbp_offset));
1693
1694 // Atomic swap old header if oop still contains the stack lock
1695 if (os::is_MP()) {
1696 __ lock();
1697 }
1698
1699 // src -> dest iff dest == rax, else rax, <- dest
1700 // *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg)
1701 __ cmpxchg(rbx, Address(obj_reg, 0));
1702 __ jcc(Assembler::notEqual, slow_path_unlock);
1703
1704 // slow path re-enters here
1705 __ bind(unlock_done);
1706 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1707 restore_native_result(masm, ret_type, stack_slots);
1708 }
1709
1710 __ bind(done);
1711
1712 }
1713
1714 {
1715 SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
1716 // Tell dtrace about this method exit
1717 save_native_result(masm, ret_type, stack_slots);
1718 __ movoop(rax, JNIHandles::make_local(method()));
1719 __ call_VM_leaf(
1720 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
1721 thread, rax);
1722 restore_native_result(masm, ret_type, stack_slots);
1723 }
1724
1725 // We can finally stop using that last_Java_frame we set up ages ago
1726
1727 __ reset_last_Java_frame(thread, false, true);
1728
1729 // Unpack oop result
1730 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
1731 Label L;
1732 __ cmpl(rax, NULL_WORD);
1733 __ jcc(Assembler::equal, L);
1734 __ movl(rax, Address(rax, 0));
1735 __ bind(L);
1736 __ verify_oop(rax);
1737 }
1738
1739 // reset handle block
1740 __ movl(rcx, Address(thread, JavaThread::active_handles_offset()));
1741
1742 __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), 0);
1743
1744 // Any exception pending?
1745 __ cmpl(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1746 __ jcc(Assembler::notEqual, exception_pending);
1747
1748
1749 // no exception, we're almost done
1750
1751 // check that only result value is on FPU stack
1752 __ verify_FPU(ret_type == T_FLOAT || ret_type == T_DOUBLE ? 1 : 0, "native_wrapper normal exit");
1753
1754 // Fixup floating point results so that the result looks like a return from a compiled method
1755 if (ret_type == T_FLOAT) {
1756 if (UseSSE >= 1) {
1757 // Pop st0 and store as float and reload into xmm register
1758 __ fstp_s(Address(rbp, -4));
1759 __ movflt(xmm0, Address(rbp, -4));
1760 }
1761 } else if (ret_type == T_DOUBLE) {
1762 if (UseSSE >= 2) {
1763 // Pop st0 and store as double and reload into xmm register
1764 __ fstp_d(Address(rbp, -8));
1765 __ movdbl(xmm0, Address(rbp, -8));
1766 }
1767 }
1768
1769 // Return
1770
1771 __ leave();
1772 __ ret(0);
1773
1774 // Unexpected paths are out of line and go here
1775
1776 // Slow path locking & unlocking
1777 if (method->is_synchronized()) {
1778
1779 // BEGIN Slow path lock
1780
1781 __ bind(slow_path_lock);
1782
1783 // has last_Java_frame set up. No exceptions, so do a vanilla call, not call_VM
1784 // args are (oop obj, BasicLock* lock, JavaThread* thread)
1785 __ pushl(thread);
1786 __ pushl(lock_reg);
1787 __ pushl(obj_reg);
1788 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)));
1789 __ addl(rsp, 3*wordSize);
1790
1791 #ifdef ASSERT
1792 { Label L;
1793 __ cmpl(Address(thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
1794 __ jcc(Assembler::equal, L);
1795 __ stop("no pending exception allowed on exit from monitorenter");
1796 __ bind(L);
1797 }
1798 #endif
1799 __ jmp(lock_done);
1800
1801 // END Slow path lock
1802
1803 // BEGIN Slow path unlock
1804 __ bind(slow_path_unlock);
1805
1806 // Slow path unlock
1807
1808 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
1809 save_native_result(masm, ret_type, stack_slots);
1810 }
1811 // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
1812
1813 __ pushl(Address(thread, in_bytes(Thread::pending_exception_offset())));
1814 __ movl(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1815
1816
1817 // should be a peal
1818 // +wordSize because of the push above
1819 __ leal(rax, Address(rbp, lock_slot_rbp_offset));
1820 __ pushl(rax);
1821
1822 __ pushl(obj_reg);
1823 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
1824 __ addl(rsp, 2*wordSize);
1825 #ifdef ASSERT
1826 {
1827 Label L;
1828 __ cmpl(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1829 __ jcc(Assembler::equal, L);
1830 __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
1831 __ bind(L);
1832 }
1833 #endif /* ASSERT */
1834
1835 __ popl(Address(thread, in_bytes(Thread::pending_exception_offset())));
1836
1837 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
1838 restore_native_result(masm, ret_type, stack_slots);
1839 }
1840 __ jmp(unlock_done);
1841 // END Slow path unlock
1842
1843 }
1844
1845 // SLOW PATH Reguard the stack if needed
1846
1847 __ bind(reguard);
1848 save_native_result(masm, ret_type, stack_slots);
1849 {
1850 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
1851 }
1852 restore_native_result(masm, ret_type, stack_slots);
1853 __ jmp(reguard_done);
1854
1855
1856 // BEGIN EXCEPTION PROCESSING
1857
1858 // Forward the exception
1859 __ bind(exception_pending);
1860
1861 // remove possible return value from FPU register stack
1862 __ empty_FPU_stack();
1863
1864 // pop our frame
1865 __ leave();
1866 // and forward the exception
1867 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1868
1869 __ flush();
1870
1871 nmethod *nm = nmethod::new_native_nmethod(method,
1872 masm->code(),
1873 vep_offset,
1874 frame_complete,
1875 stack_slots / VMRegImpl::slots_per_word,
1876 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
1877 in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
1878 oop_maps);
1879 return nm;
1880
1881 }
1882
1883 // this function returns the adjustment size (in number of words) to a c2i adapter
1884 // activation for use during deoptimization
1885 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
1886 return (callee_locals - callee_parameters) * Interpreter::stackElementWords();
1887 }
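// A small worked example of the adjustment above (hypothetical values,
// assuming Interpreter::stackElementWords() == 1): a callee with
// callee_parameters == 2 and callee_locals == 5 needs (5 - 2) * 1 == 3
// extra words in the caller's frame for its non-parameter locals.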
1888
1889
1890 uint SharedRuntime::out_preserve_stack_slots() {
1891 return 0;
1892 }
1893
1894
1895 //------------------------------generate_deopt_blob----------------------------
1896 void SharedRuntime::generate_deopt_blob() {
1897 // allocate space for the code
1898 ResourceMark rm;
1899 // setup code generation tools
1900 CodeBuffer buffer("deopt_blob", 1024, 1024);
1901 MacroAssembler* masm = new MacroAssembler(&buffer);
1902 int frame_size_in_words;
1903 OopMap* map = NULL;
1904 // Account for the extra args we place on the stack
1905 // by the time we call fetch_unroll_info
1906 const int additional_words = 2; // deopt kind, thread
1907
1908 OopMapSet *oop_maps = new OopMapSet();
1909
1910 // -------------
1911 // This code enters when returning to a de-optimized nmethod. A return
1912 // address has been pushed on the stack, and return values are in
1913 // registers.
1914 // If we are doing a normal deopt then we were called from the patched
1915 // nmethod from the point we returned to the nmethod. So the return
1916 // address on the stack is wrong by NativeCall::instruction_size.
1917 // We will adjust the value so it looks like we have the original return
1918 // address on the stack (like when we eagerly deoptimized).
1919 // In the case of an exception pending when deoptimized, we enter
1920 // with a return address on the stack that points after the call we patched
1921 // into the exception handler. We have the following register state:
1922 // rax,: exception
1923 // rbx,: exception handler
1924 // rdx: throwing pc
1925 // So in this case we simply jam rdx into the useless return address and
1926 // the stack looks just like we want.
1927 //
1928 // At this point we need to de-opt. We save the argument return
1929 // registers. We call the first C routine, fetch_unroll_info(). This
1930 // routine captures the return values and returns a structure which
1931 // describes the current frame size and the sizes of all replacement frames.
1932 // The current frame is compiled code and may contain many inlined
1933 // functions, each with their own JVM state. We pop the current frame, then
1934 // push all the new frames. Then we call the C routine unpack_frames() to
1935 // populate these frames. Finally unpack_frames() returns us the new target
1936 // address. Notice that callee-save registers are BLOWN here; they have
1937 // already been captured in the vframeArray at the time the return PC was
1938 // patched.
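  // As a hedged C-like sketch (not the exact control flow below), the whole
  // sequence amounts to:
  //
  //   save_live_registers();
  //   info = Deoptimization::fetch_unroll_info(thread);    // frame sizes + pcs
  //   pop the deoptimized compiled frame;
  //   for each replacement frame described by info:
  //     push a skeletal interpreter frame of the given size and return pc;
  //   Deoptimization::unpack_frames(thread, unpack_kind);  // fill the frames in
  //   return, landing in the interpreter at the right place.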
1939 address start = __ pc();
1940 Label cont;
1941
1942 // Prolog for the non-exception case!
1943
1944 // Save everything in sight.
1945
1946 map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
1947 // Normal deoptimization
1948 __ pushl(Deoptimization::Unpack_deopt);
1949 __ jmp(cont);
1950
1951 int reexecute_offset = __ pc() - start;
1952
1953 // Reexecute case
1954 // return address is the pc that describes what bci to re-execute at
1955
1956 // No need to update map as each call to save_live_registers will produce identical oopmap
1957 (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
1958
1959 __ pushl(Deoptimization::Unpack_reexecute);
1960 __ jmp(cont);
1961
1962 int exception_offset = __ pc() - start;
1963
1964 // Prolog for exception case
1965
1966 // all registers are dead at this entry point, except for rax, and
1967 // rdx which contain the exception oop and exception pc
1968 // respectively. Set them in TLS and fall thru to the
1969 // unpack_with_exception_in_tls entry point.
1970
1971 __ get_thread(rdi);
1972 __ movl(Address(rdi, JavaThread::exception_pc_offset()), rdx);
1973 __ movl(Address(rdi, JavaThread::exception_oop_offset()), rax);
1974
1975 int exception_in_tls_offset = __ pc() - start;
1976
1977 // new implementation because exception oop is now passed in JavaThread
1978
1979 // Prolog for exception case
1980 // All registers must be preserved because they might be used by LinearScan
1981 // Exception oop and throwing PC are passed in JavaThread
1982 // tos: stack at point of call to method that threw the exception (i.e. only
1983 // args are on the stack, no return address)
1984
1985 // make room on stack for the return address
1986 // It will be patched later with the throwing pc. The correct value is not
1987 // available now because loading it from memory would destroy registers.
1988 __ pushl(0);
1989
1990 // Save everything in sight.
1991
1992 // No need to update map as each call to save_live_registers will produce identical oopmap
1993 (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
1994
1995 // Now it is safe to overwrite any register
1996
1997 // store the correct deoptimization type
1998 __ pushl(Deoptimization::Unpack_exception);
1999
2000 // load throwing pc from JavaThread and patch it as the return address
2001 // of the current frame. Then clear the field in JavaThread
2002 __ get_thread(rdi);
2003 __ movl(rdx, Address(rdi, JavaThread::exception_pc_offset()));
2004 __ movl(Address(rbp, wordSize), rdx);
2005 __ movl(Address(rdi, JavaThread::exception_pc_offset()), NULL_WORD);
2006
2007 #ifdef ASSERT
2008 // verify that there is really an exception oop in JavaThread
2009 __ movl(rax, Address(rdi, JavaThread::exception_oop_offset()));
2010 __ verify_oop(rax);
2011
2012 // verify that there is no pending exception
2013 Label no_pending_exception;
2014 __ movl(rax, Address(rdi, Thread::pending_exception_offset()));
2015 __ testl(rax, rax);
2016 __ jcc(Assembler::zero, no_pending_exception);
2017 __ stop("must not have pending exception here");
2018 __ bind(no_pending_exception);
2019 #endif
2020
2021 __ bind(cont);
2022
2023 // Compiled code leaves the floating point stack dirty, empty it.
2024 __ empty_FPU_stack();
2025
2026
2027 // Call C code. Need thread and this frame, but NOT official VM entry
2028 // crud. We cannot block on this call, no GC can happen.
2029 __ get_thread(rcx);
2030 __ pushl(rcx);
2031 // fetch_unroll_info needs to call last_java_frame()
2032 __ set_last_Java_frame(rcx, noreg, noreg, NULL);
2033
2034 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2035
2036 // Need to have an oopmap that tells fetch_unroll_info where to
2037 // find any register it might need.
2038
2039 oop_maps->add_gc_map( __ pc()-start, map);
2040
2041 // Discard arg to fetch_unroll_info
2042 __ popl(rcx);
2043
2044 __ get_thread(rcx);
2045 __ reset_last_Java_frame(rcx, false, false);
2046
2047 // Load UnrollBlock into EDI
2048 __ movl(rdi, rax);
2049
2050 // Move the unpack kind to a safe place in the UnrollBlock because
2051 // we are very short of registers
2052
2053 Address unpack_kind(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes());
2054 // retrieve the deopt kind from where we left it.
2055 __ popl(rax);
2056 __ movl(unpack_kind, rax); // save the unpack_kind value
2057
2058 Label noException;
2059 __ cmpl(rax, Deoptimization::Unpack_exception); // Was exception pending?
2060 __ jcc(Assembler::notEqual, noException);
2061 __ movl(rax, Address(rcx, JavaThread::exception_oop_offset()));
2062 __ movl(rdx, Address(rcx, JavaThread::exception_pc_offset()));
2063 __ movl(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);
2064 __ movl(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);
2065
2066 __ verify_oop(rax);
2067
2068 // Overwrite the result registers with the exception results.
2069 __ movl(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2070 __ movl(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2071
2072 __ bind(noException);
2073
2074 // Stack is back to only having register save data on the stack.
2075 // Now restore the result registers. Everything else is either dead or captured
2076 // in the vframeArray.
2077
2078 RegisterSaver::restore_result_registers(masm);
2079
2080 // All of the register save area has been popped off the stack. Only the
2081 // return address remains.
2082
2083 // Pop all the frames we must move/replace.
2084 //
2085 // Frame picture (youngest to oldest)
2086 // 1: self-frame (no frame link)
2087 // 2: deopting frame (no frame link)
2088 // 3: caller of deopting frame (could be compiled/interpreted).
2089 //
2090 // Note: by leaving the return address of self-frame on the stack
2091 // and using the size of frame 2 to adjust the stack
2092 // when we are done the return to frame 3 will still be on the stack.
2093
2094 // Pop deoptimized frame
2095 __ addl(rsp,Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2096
2097 // sp should be pointing at the return address to the caller (3)
2098
2099 // Stack bang to make sure there's enough room for these interpreter frames.
2100 if (UseStackBanging) {
2101 __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2102 __ bang_stack_size(rbx, rcx);
2103 }
2104
2105 // Load array of frame pcs into ECX
2106 __ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2107
2108 __ popl(rsi); // trash the old pc
2109
2110 // Load array of frame sizes into ESI
2111 __ movl(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2112
2113 Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
2114
2115 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2116 __ movl(counter, rbx);
2117
2118 // Pick up the initial fp we should save
2119 __ movl(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes()));
2120
2121 // Now adjust the caller's stack to make up for the extra locals
2122 // but record the original sp so that we can save it in the skeletal interpreter
2123 // frame and the stack walking of interpreter_sender will get the unextended sp
2124 // value and not the "real" sp value.
2125
2126 Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
2127 __ movl(sp_temp, rsp);
2128 __ subl(rsp, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
2129
2130 // Push interpreter frames in a loop
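  // Conceptually (a sketch; the real code below keeps everything in registers
  // because none are to spare, and trims each size for the pc/rbp it pushes
  // by hand):
  //
  //   for (i = 0; i < number_of_frames; i++) {
  //     push(frame_pcs[i]);        // return address seen by the frame above
  //     enter();                   // push old rbp, set new rbp
  //     rsp -= frame_sizes[i];     // room for the skeletal interpreter frame
  //   }
  //   push(frame_pcs[number_of_frames]);   // final return address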
2131 Label loop;
2132 __ bind(loop);
2133 __ movl(rbx, Address(rsi, 0)); // Load frame size
2134 #ifdef CC_INTERP
2135 __ subl(rbx, 4*wordSize); // we'll push pc and ebp by hand and
2136 #ifdef ASSERT
2137 __ pushl(0xDEADDEAD); // Make a recognizable pattern
2138 __ pushl(0xDEADDEAD);
2139 #else /* ASSERT */
2140 __ subl(rsp, 2*wordSize); // skip the "static long no_param"
2141 #endif /* ASSERT */
2142 #else /* CC_INTERP */
2143 __ subl(rbx, 2*wordSize); // we'll push pc and rbp, by hand
2144 #endif /* CC_INTERP */
2145 __ pushl(Address(rcx, 0)); // save return address
2146 __ enter(); // save old & set new rbp,
2147 __ subl(rsp, rbx); // Prolog!
2148 __ movl(rbx, sp_temp); // sender's sp
2149 #ifdef CC_INTERP
2150 __ movl(Address(rbp,
2151 -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
2152 rbx); // Make it walkable
2153 #else /* CC_INTERP */
2154 // This value is corrected by layout_activation_impl
2155 __ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD );
2156 __ movl(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
2157 #endif /* CC_INTERP */
2158 __ movl(sp_temp, rsp); // pass to next frame
2159 __ addl(rsi, 4); // Bump array pointer (sizes)
2160 __ addl(rcx, 4); // Bump array pointer (pcs)
2161 __ decrement(counter); // decrement counter
2162 __ jcc(Assembler::notZero, loop);
2163 __ pushl(Address(rcx, 0)); // save final return address
2164
2165 // Re-push self-frame
2166 __ enter(); // save old & set new rbp,
2167
2168 // Return address and rbp, are in place
2169 // We'll push additional args later. Just allocate a full sized
2170 // register save area
2171 __ subl(rsp, (frame_size_in_words-additional_words - 2) * wordSize);
2172
2173 // Restore frame locals after moving the frame
2174 __ movl(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2175 __ movl(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2176 __ fstp_d(Address(rsp, RegisterSaver::fpResultOffset()*wordSize)); // Pop float stack and store in local
2177 if( UseSSE>=2 ) __ movdbl(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
2178 if( UseSSE==1 ) __ movflt(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
2179
2180 // Set up the args to unpack_frame
2181
2182 __ pushl(unpack_kind); // get the unpack_kind value
2183 __ get_thread(rcx);
2184 __ pushl(rcx);
2185
2186 // set last_Java_sp, last_Java_fp
2187 __ set_last_Java_frame(rcx, noreg, rbp, NULL);
2188
2189 // Call C code. Need thread but NOT official VM entry
2190 // crud. We cannot block on this call, no GC can happen. Call should
2191 // restore return values to their stack-slots with the new SP.
2192 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2193 // Set an oopmap for the call site
2194 oop_maps->add_gc_map( __ pc()-start, new OopMap( frame_size_in_words, 0 ));
2195
2196 // rax, contains the return result type
2197 __ pushl(rax);
2198
2199 __ get_thread(rcx);
2200 __ reset_last_Java_frame(rcx, false, false);
2201
2202 // Collect return values
2203 __ movl(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize));
2204 __ movl(rdx,Address(rsp, (RegisterSaver::rdxOffset() + additional_words + 1)*wordSize));
2205
2206 // Clear floating point stack before returning to interpreter
2207 __ empty_FPU_stack();
2208
2209 // Check if we should push the float or double return value.
2210 Label results_done, yes_double_value;
2211 __ cmpl(Address(rsp, 0), T_DOUBLE);
2212 __ jcc (Assembler::zero, yes_double_value);
2213 __ cmpl(Address(rsp, 0), T_FLOAT);
2214 __ jcc (Assembler::notZero, results_done);
2215
2216 // return float value as expected by interpreter
2217 if( UseSSE>=1 ) __ movflt(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
2218 else __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
2219 __ jmp(results_done);
2220
2221 // return double value as expected by interpreter
2222 __ bind(yes_double_value);
2223 if( UseSSE>=2 ) __ movdbl(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
2224 else __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
2225
2226 __ bind(results_done);
2227
2228 // Pop self-frame.
2229 __ leave(); // Epilog!
2230
2231 // Jump to interpreter
2232 __ ret(0);
2233
2234 // -------------
2235 // make sure all code is generated
2236 masm->flush();
2237
2238 _deopt_blob = DeoptimizationBlob::create( &buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2239 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2240 }
2241
2242
2243 #ifdef COMPILER2
2244 //------------------------------generate_uncommon_trap_blob--------------------
2245 void SharedRuntime::generate_uncommon_trap_blob() {
2246 // allocate space for the code
2247 ResourceMark rm;
2248 // setup code generation tools
2249 CodeBuffer buffer("uncommon_trap_blob", 512, 512);
2250 MacroAssembler* masm = new MacroAssembler(&buffer);
2251
2252 enum frame_layout {
2253 arg0_off, // thread sp + 0 // Arg location for
2254 arg1_off, // unloaded_class_index sp + 1 // calling C
2255 // The frame sender code expects that rbp will be in the "natural" place and
2256 // will override any oopMap setting for it. We must therefore force the layout
2257 // so that it agrees with the frame sender code.
2258 rbp_off, // callee saved register sp + 2
2259 return_off, // slot for return address sp + 3
2260 framesize
2261 };
2262
2263 address start = __ pc();
2264 // Push self-frame.
2265 __ subl(rsp, return_off*wordSize); // Prolog!
2266
2267 // rbp, is an implicitly saved callee saved register (i.e. the calling
2268 // convention will save/restore it in prolog/epilog). Other than that
2269 // there are no callee save registers now that adapter frames are gone.
2270 __ movl(Address(rsp, rbp_off*wordSize),rbp);
2271
2272 // Clear the floating point exception stack
2273 __ empty_FPU_stack();
2274
2275 // set last_Java_sp
2276 __ get_thread(rdx);
2277 __ set_last_Java_frame(rdx, noreg, noreg, NULL);
2278
2279 // Call C code. Need thread but NOT official VM entry
2280 // crud. We cannot block on this call, no GC can happen. Call should
2281 // capture callee-saved registers as well as return values.
2282 __ movl(Address(rsp, arg0_off*wordSize),rdx);
2283 // argument already in ECX
2284 __ movl(Address(rsp, arg1_off*wordSize),rcx);
2285 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
2286
2287 // Set an oopmap for the call site
2288 OopMapSet *oop_maps = new OopMapSet();
2289 OopMap* map = new OopMap( framesize, 0 );
2290 // No oopMap for rbp, it is known implicitly
2291
2292 oop_maps->add_gc_map( __ pc()-start, map);
2293
2294 __ get_thread(rcx);
2295
2296 __ reset_last_Java_frame(rcx, false, false);
2297
2298 // Load UnrollBlock into EDI
2299 __ movl(rdi, rax);
2300
2301 // Pop all the frames we must move/replace.
2302 //
2303 // Frame picture (youngest to oldest)
2304 // 1: self-frame (no frame link)
2305 // 2: deopting frame (no frame link)
2306 // 3: caller of deopting frame (could be compiled/interpreted).
2307
2308 // Pop self-frame. We have no frame, and must rely only on EAX and ESP.
2309 __ addl(rsp,(framesize-1)*wordSize); // Epilog!
2310
2311 // Pop deoptimized frame
2312 __ addl(rsp,Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2313
2314 // sp should be pointing at the return address to the caller (3)
2315
2316 // Stack bang to make sure there's enough room for these interpreter frames.
2317 if (UseStackBanging) {
2318 __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2319 __ bang_stack_size(rbx, rcx);
2320 }
2321
2322
2323 // Load array of frame pcs into ECX
2324 __ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2325
2326 __ popl(rsi); // trash the pc
2327
2328 // Load array of frame sizes into ESI
2329 __ movl(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2330
2331 Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
2332
2333 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2334 __ movl(counter, rbx);
2335
2336 // Pick up the initial fp we should save
2337 __ movl(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes()));
2338
2339 // Now adjust the caller's stack to make up for the extra locals
2340 // but record the original sp so that we can save it in the skeletal interpreter
2341 // frame and the stack walking of interpreter_sender will get the unextended sp
2342 // value and not the "real" sp value.
2343
2344 Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
2345 __ movl(sp_temp, rsp);
2346 __ subl(rsp, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
2347
2348 // Push interpreter frames in a loop
2349 Label loop;
2350 __ bind(loop);
2351 __ movl(rbx, Address(rsi, 0)); // Load frame size
2352 #ifdef CC_INTERP
2353 __ subl(rbx, 4*wordSize); // we'll push pc and ebp by hand and
2354 #ifdef ASSERT
2355 __ pushl(0xDEADDEAD); // Make a recognizable pattern
2356 __ pushl(0xDEADDEAD); // (parm to RecursiveInterpreter...)
2357 #else /* ASSERT */
2358 __ subl(rsp, 2*wordSize); // skip the "static long no_param"
2359 #endif /* ASSERT */
2360 #else /* CC_INTERP */
2361 __ subl(rbx, 2*wordSize); // we'll push pc and rbp, by hand
2362 #endif /* CC_INTERP */
2363 __ pushl(Address(rcx, 0)); // save return address
2364 __ enter(); // save old & set new rbp,
2365 __ subl(rsp, rbx); // Prolog!
2366 __ movl(rbx, sp_temp); // sender's sp
2367 #ifdef CC_INTERP
2368 __ movl(Address(rbp,
2369 -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
2370 rbx); // Make it walkable
2371 #else /* CC_INTERP */
2372 // This value is corrected by layout_activation_impl
2373 __ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD );
2374 __ movl(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
2375 #endif /* CC_INTERP */
2376 __ movl(sp_temp, rsp); // pass to next frame
2377 __ addl(rsi, 4); // Bump array pointer (sizes)
2378 __ addl(rcx, 4); // Bump array pointer (pcs)
2379 __ decrement(counter); // decrement counter
2380 __ jcc(Assembler::notZero, loop);
2381 __ pushl(Address(rcx, 0)); // save final return address
2382
2383 // Re-push self-frame
2384 __ enter(); // save old & set new rbp,
2385 __ subl(rsp, (framesize-2) * wordSize); // Prolog!
2386
2387
2388 // set last_Java_sp, last_Java_fp
2389 __ get_thread(rdi);
2390 __ set_last_Java_frame(rdi, noreg, rbp, NULL);
2391
2392 // Call C code. Need thread but NOT official VM entry
2393 // crud. We cannot block on this call, no GC can happen. Call should
2394 // restore return values to their stack-slots with the new SP.
2395 __ movl(Address(rsp,arg0_off*wordSize),rdi);
2396 __ movl(Address(rsp,arg1_off*wordSize), Deoptimization::Unpack_uncommon_trap);
2397 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2398 // Set an oopmap for the call site
2399 oop_maps->add_gc_map( __ pc()-start, new OopMap( framesize, 0 ) );
2400
2401 __ get_thread(rdi);
2402 __ reset_last_Java_frame(rdi, true, false);
2403
2404 // Pop self-frame.
2405 __ leave(); // Epilog!
2406
2407 // Jump to interpreter
2408 __ ret(0);
2409
2410 // -------------
2411 // make sure all code is generated
2412 masm->flush();
2413
2414 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, framesize);
2415 }
2416 #endif // COMPILER2
2417
2418 //------------------------------generate_handler_blob------
2419 //
2420 // Generate a special Compile2Runtime blob that saves all registers,
2421 // sets up an oopmap, and calls safepoint code to stop the compiled code for
2422 // a safepoint.
2423 //
2424 static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) {
2425
2426 // Account for thread arg in our frame
2427 const int additional_words = 1;
2428 int frame_size_in_words;
2429
2430 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
2431
2432 ResourceMark rm;
2433 OopMapSet *oop_maps = new OopMapSet();
2434 OopMap* map;
2435
2436 // allocate space for the code
2437 // setup code generation tools
2438 CodeBuffer buffer("handler_blob", 1024, 512);
2439 MacroAssembler* masm = new MacroAssembler(&buffer);
2440
2441 const Register java_thread = rdi; // callee-saved for VC++
2442 address start = __ pc();
2443 address call_pc = NULL;
2444
2445 // If cause_return is true we are at a poll_return and the return
2446 // address to the caller is already on the stack of the nmethod
2447 // being safepointed. We can leave this return address on the stack and
2448 // effectively complete the return and safepoint in the caller.
2449 // Otherwise we push space for a return address that the safepoint
2450 // handler will install later to make the stack walking sensible.
2451 if( !cause_return )
2452 __ pushl(rbx); // Make room for return address (or push it again)
2453
2454 map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2455
2456 // The following is basically a call_VM. However, we need the precise
2457 // address of the call in order to generate an oopmap. Hence, we do all the
2458 // work ourselves.
2459
2460 // Push thread argument and setup last_Java_sp
2461 __ get_thread(java_thread);
2462 __ pushl(java_thread);
2463 __ set_last_Java_frame(java_thread, noreg, noreg, NULL);
2464
2465 // if this was not a poll_return then we need to correct the return address now.
2466 if( !cause_return ) {
2467 __ movl(rax, Address(java_thread, JavaThread::saved_exception_pc_offset()));
2468 __ movl(Address(rbp, wordSize), rax);
2469 }
2470
2471 // do the call
2472 __ call(RuntimeAddress(call_ptr));
2473
2474 // Set an oopmap for the call site. This oopmap will map all
2475 // oop-registers and debug-info registers as callee-saved. This
2476 // will allow deoptimization at this safepoint to find all possible
2477 // debug-info recordings, as well as let GC find all oops.
2478
2479 oop_maps->add_gc_map( __ pc() - start, map);
2480
2481 // Discard arg
2482 __ popl(rcx);
2483
2484 Label noException;
2485
2486 // Clear last_Java_sp again
2487 __ get_thread(java_thread);
2488 __ reset_last_Java_frame(java_thread, false, false);
2489
2490 __ cmpl(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
2491 __ jcc(Assembler::equal, noException);
2492
2493 // Exception pending
2494
2495 RegisterSaver::restore_live_registers(masm);
2496
2497 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2498
2499 __ bind(noException);
2500
2501 // Normal exit, register restoring and exit
2502 RegisterSaver::restore_live_registers(masm);
2503
2504 __ ret(0);
2505
2506 // make sure all code is generated
2507 masm->flush();
2508
2509 // Fill-out other meta info
2510 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
2511 }
2512
2513 //
2514 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
2515 //
2516 // Generate a stub that calls into vm to find out the proper destination
2517 // of a java call. All the argument registers are live at this point
2518 // but since this is generic code we don't know what they are and the caller
2519 // must do any gc of the args.
2520 //
2521 static RuntimeStub* generate_resolve_blob(address destination, const char* name) {
2522 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
2523
2524 // allocate space for the code
2525 ResourceMark rm;
2526
2527 CodeBuffer buffer(name, 1000, 512);
2528 MacroAssembler* masm = new MacroAssembler(&buffer);
2529
2530 int frame_size_words;
2531 enum frame_layout {
2532 thread_off,
2533 extra_words };
2534
2535 OopMapSet *oop_maps = new OopMapSet();
2536 OopMap* map = NULL;
2537
2538 int start = __ offset();
2539
2540 map = RegisterSaver::save_live_registers(masm, extra_words, &frame_size_words);
2541
2542 int frame_complete = __ offset();
2543
2544 const Register thread = rdi;
2545 __ get_thread(rdi);
2546
2547 __ pushl(thread);
2548 __ set_last_Java_frame(thread, noreg, rbp, NULL);
2549
2550 __ call(RuntimeAddress(destination));
2551
2552
2553 // Set an oopmap for the call site.
2554 // We need this not only for callee-saved registers, but also for volatile
2555 // registers that the compiler might be keeping live across a safepoint.
2556
2557 oop_maps->add_gc_map( __ offset() - start, map);
2558
2559 // rax, contains the address we are going to jump to assuming no exception got installed
2560
2561 __ addl(rsp, wordSize);
2562
2563 // clear last_Java_sp
2564 __ reset_last_Java_frame(thread, true, false);
2565 // check for pending exceptions
2566 Label pending;
2567 __ cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
2568 __ jcc(Assembler::notEqual, pending);
2569
2570 // get the returned methodOop
2571 __ movl(rbx, Address(thread, JavaThread::vm_result_offset()));
2572 __ movl(Address(rsp, RegisterSaver::rbx_offset() * wordSize), rbx);
2573
2574 __ movl(Address(rsp, RegisterSaver::rax_offset() * wordSize), rax);
2575
2576 RegisterSaver::restore_live_registers(masm);
2577
2578 // We are back to the original state on entry and ready to go.
2579
2580 __ jmp(rax);
2581
2582 // Pending exception after the safepoint
2583
2584 __ bind(pending);
2585
2586 RegisterSaver::restore_live_registers(masm);
2587
2588 // exception pending => remove activation and forward to exception handler
2589
2590 __ get_thread(thread);
2591 __ movl(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
2592 __ movl(rax, Address(thread, Thread::pending_exception_offset()));
2593 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2594
2595 // -------------
2596 // make sure all code is generated
2597 masm->flush();
2598
2599 // return the blob
2600 // frame_size_words or bytes??
2601 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
2602 }
2603
2604 void SharedRuntime::generate_stubs() {
2605
2606 _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),
2607 "wrong_method_stub");
2608
2609 _ic_miss_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),
2610 "ic_miss_stub");
2611
2612 _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),
2613 "resolve_opt_virtual_call");
2614
2615 _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),
2616 "resolve_virtual_call");
2617
2618 _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
2619 "resolve_static_call");
2620
2621 _polling_page_safepoint_handler_blob =
2622 generate_handler_blob(CAST_FROM_FN_PTR(address,
2623 SafepointSynchronize::handle_polling_page_exception), false);
2624
2625 _polling_page_return_handler_blob =
2626 generate_handler_blob(CAST_FROM_FN_PTR(address,
2627 SafepointSynchronize::handle_polling_page_exception), true);
2628
2629 generate_deopt_blob();
2630 #ifdef COMPILER2
2631 generate_uncommon_trap_blob();
2632 #endif // COMPILER2
2633 }