comparison src/cpu/x86/vm/graalRuntime_x86.cpp @ 7125:1baf7f1e3f23

decoupled C++ Graal runtime from C1
author Doug Simon <doug.simon@oracle.com>
date Mon, 03 Dec 2012 15:32:17 +0100
parents
children 6c46172c04bf fcae6d960acd
1 /*
2 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/assembler.hpp"
27 #include "graal/graalRuntime.hpp"
28 #include "interpreter/interpreter.hpp"
29 #include "nativeInst_x86.hpp"
30 #include "oops/compiledICHolder.hpp"
31 #include "oops/oop.inline.hpp"
32 #include "prims/jvmtiExport.hpp"
33 #include "register_x86.hpp"
34 #include "runtime/sharedRuntime.hpp"
35 #include "runtime/signature.hpp"
36 #include "runtime/vframeArray.hpp"
37 #include "vmreg_x86.inline.hpp"
38
39 static void restore_live_registers(GraalStubAssembler* sasm, bool restore_fpu_registers = true);
40
41 // Implementation of GraalStubAssembler
42
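// call_RT is the bridge from generated stub code into the C++ runtime: it passes the current
// JavaThread as the implicit first C argument, records the last Java frame so the stack stays
// walkable across the call, performs the call, and on return checks the thread's pending
// exception slot (under GRAAL a pending exception triggers an uncommon-trap deoptimization
// instead of forwarding the exception). Any oop/metadata results are then fetched from the
// thread-local vm_result fields. The register-argument overloads further down only marshal
// one to three arguments into the C calling convention (or onto the stack on 32-bit) before
// delegating here.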
43 int GraalStubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
44 // setup registers
45 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
46 assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
47 assert(oop_result1 != thread && metadata_result != thread, "registers must be different");
48 assert(args_size >= 0, "illegal args_size");
49 bool align_stack = false;
50 #ifdef _LP64
51 // At a method handle call, the stack may not be properly aligned
52 // when returning with an exception.
53 align_stack = (stub_id() == false /*GraalRuntime::handle_exception_from_callee_id*/);
54 #endif
55
56 #ifdef _LP64
57 mov(c_rarg0, thread);
58 set_num_rt_args(0); // Nothing on stack
59 #else
60 set_num_rt_args(1 + args_size);
61
62 // push java thread (becomes first argument of C function)
63 get_thread(thread);
64 push(thread);
65 #endif // _LP64
66
67 int call_offset;
68 if (!align_stack) {
69 set_last_Java_frame(thread, noreg, rbp, NULL);
70 } else {
71 address the_pc = pc();
72 call_offset = offset();
73 set_last_Java_frame(thread, noreg, rbp, the_pc);
74 andptr(rsp, -(StackAlignmentInBytes)); // Align stack
75 }
76
77 // do the call
78 call(RuntimeAddress(entry));
79 if (!align_stack) {
80 call_offset = offset();
81 }
82 // verify callee-saved register
83 #ifdef ASSERT
84 guarantee(thread != rax, "change this code");
85 push(rax);
86 { Label L;
87 get_thread(rax);
88 cmpptr(thread, rax);
89 jcc(Assembler::equal, L);
90 int3();
91 stop("GraalStubAssembler::call_RT: rdi not callee saved?");
92 bind(L);
93 }
94 pop(rax);
95 #endif
96 reset_last_Java_frame(thread, true, align_stack);
97
98 // discard thread and arguments
99 NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));
100
101 // check for pending exceptions
102 { Label L;
103 cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
104 jcc(Assembler::equal, L);
105 // exception pending => remove activation and forward to exception handler
106 movptr(rax, Address(thread, Thread::pending_exception_offset()));
107 // make sure that the vm_results are cleared
108 if (oop_result1->is_valid()) {
109 movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
110 }
111 if (metadata_result->is_valid()) {
112 movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
113 }
114 #ifdef GRAAL
115 // (thomaswue) Deoptimize in case of an exception.
116 restore_live_registers(this, false);
117 movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
118 leave();
119 movl(rscratch1, Deoptimization::make_trap_request(Deoptimization::Reason_constraint, Deoptimization::Action_reinterpret));
120 jump(RuntimeAddress(SharedRuntime::deopt_blob()->uncommon_trap()));
121 #else
122 if (frame_size() == no_frame_size) {
123 leave();
124 jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
125 } else if (_stub_id == GraalRuntime::forward_exception_id) {
126 should_not_reach_here();
127 } else {
128 jump(RuntimeAddress(GraalRuntime::entry_for(GraalRuntime::forward_exception_id)));
129 }
130 #endif
131 bind(L);
132 }
133 // get oop results if there are any and reset the values in the thread
134 if (oop_result1->is_valid()) {
135 get_vm_result(oop_result1, thread);
136 }
137 if (metadata_result->is_valid()) {
138 get_vm_result_2(metadata_result, thread);
139 }
140 return call_offset;
141 }
142
143
144 int GraalStubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
145 #ifdef _LP64
146 mov(c_rarg1, arg1);
147 #else
148 push(arg1);
149 #endif // _LP64
150 return call_RT(oop_result1, metadata_result, entry, 1);
151 }
152
153
154 int GraalStubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
155 #ifdef _LP64
156 if (c_rarg1 == arg2) {
157 if (c_rarg2 == arg1) {
158 xchgq(arg1, arg2);
159 } else {
160 mov(c_rarg2, arg2);
161 mov(c_rarg1, arg1);
162 }
163 } else {
164 mov(c_rarg1, arg1);
165 mov(c_rarg2, arg2);
166 }
167 #else
168 push(arg2);
169 push(arg1);
170 #endif // _LP64
171 return call_RT(oop_result1, metadata_result, entry, 2);
172 }
173
174
175 int GraalStubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
176 #ifdef _LP64
177 // if there is any conflict use the stack
178 if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
179 arg2 == c_rarg1 || arg2 == c_rarg3 ||
180 arg3 == c_rarg1 || arg3 == c_rarg2) {
181 push(arg3);
182 push(arg2);
183 push(arg1);
184 pop(c_rarg1);
185 pop(c_rarg2);
186 pop(c_rarg3);
187 } else {
188 mov(c_rarg1, arg1);
189 mov(c_rarg2, arg2);
190 mov(c_rarg3, arg3);
191 }
192 #else
193 push(arg3);
194 push(arg2);
195 push(arg1);
196 #endif // _LP64
197 return call_RT(oop_result1, metadata_result, entry, 3);
198 }
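// The push/pop sequence above resolves any register shuffle without needing a scratch
// register: the values are pushed as arg3, arg2, arg1 and popped straight into c_rarg1,
// c_rarg2, c_rarg3, so for example a fully rotated request such as
// call_RT(noreg, noreg, entry, c_rarg3, c_rarg2, c_rarg1) still lands each value in the
// correct C argument register. The two-argument overload handles its only cyclic case
// (a straight swap of c_rarg1 and c_rarg2) with xchgq and otherwise just orders the two
// movs so that no source is clobbered before it is read.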
199
200 // Implementation of GraalStubFrame
201
202 class GraalStubFrame: public StackObj {
203 private:
204 GraalStubAssembler* _sasm;
205
206 public:
207 GraalStubFrame(GraalStubAssembler* sasm, const char* name, bool must_gc_arguments);
208 ~GraalStubFrame();
209 };
210
211
212 #define __ _sasm->
213
214 GraalStubFrame::GraalStubFrame(GraalStubAssembler* sasm, const char* name, bool must_gc_arguments) {
215 _sasm = sasm;
216 __ set_info(name, must_gc_arguments);
217 __ enter();
218 }
219
220 GraalStubFrame::~GraalStubFrame() {
221 __ leave();
222 __ ret(0);
223 }
224
225 #undef __
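// GraalStubFrame is an RAII helper: constructing it emits the stub prologue (set_info plus
// enter) and letting it go out of scope emits the epilogue (leave plus ret(0)). A typical
// stub body therefore looks roughly like the following sketch (the stub name and runtime
// entry are placeholders; the real uses appear in generate_code_for below):
//
//   { GraalStubFrame f(sasm, "some_stub", dont_gc_arguments);
//     OopMap* map = save_live_registers(sasm, num_rt_args);
//     int call_offset = sasm->call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, some_entry));
//     oop_maps->add_gc_map(call_offset, map);
//     restore_live_registers(sasm);
//   } // prologue/epilogue are emitted automatically at this closing brace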
226
227
228 // Implementation of GraalRuntime
229
230 const int float_regs_as_doubles_size_in_slots = FloatRegisterImpl::number_of_registers * 2;
231 const int xmm_regs_as_doubles_size_in_slots = XMMRegisterImpl::number_of_registers * 2;
232
233 // Stack layout for saving/restoring all the registers needed during a runtime
234 // call (this includes deoptimization)
235 // Note: users of this frame may well have arguments to some runtime call
236 // on the stack while these values are saved. These positions neglect those
237 // arguments, but the code in save_live_registers will take the argument
238 // count into account.
239 //
240 #ifdef _LP64
241 #define SLOT2(x) x,
242 #define SLOT_PER_WORD 2
243 #else
244 #define SLOT2(x)
245 #define SLOT_PER_WORD 1
246 #endif // _LP64
247
248 enum reg_save_layout {
249 // 64-bit needs to keep the stack 16-byte aligned, so we add some alignment dummies to make
250 // that happen and assert if the stack size we create is misaligned
251 #ifdef _LP64
252 align_dummy_0, align_dummy_1,
253 #endif // _LP64
254 #ifdef _WIN64
255 // Windows always allocates space for its argument registers (see
256 // frame::arg_reg_save_area_bytes).
257 arg_reg_save_1, arg_reg_save_1H, // 0, 4
258 arg_reg_save_2, arg_reg_save_2H, // 8, 12
259 arg_reg_save_3, arg_reg_save_3H, // 16, 20
260 arg_reg_save_4, arg_reg_save_4H, // 24, 28
261 #endif // _WIN64
262 xmm_regs_as_doubles_off, // 32
263 float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160
264 fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots, // 224
265 // fpu_state_end_off is exclusive
266 fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD), // 352
267 marker = fpu_state_end_off, SLOT2(markerH) // 352, 356
268 extra_space_offset, // 360
269 #ifdef _LP64
270 r15_off = extra_space_offset, r15H_off, // 360, 364
271 r14_off, r14H_off, // 368, 372
272 r13_off, r13H_off, // 376, 380
273 r12_off, r12H_off, // 384, 388
274 r11_off, r11H_off, // 392, 396
275 r10_off, r10H_off, // 400, 404
276 r9_off, r9H_off, // 408, 412
277 r8_off, r8H_off, // 416, 420
278 rdi_off, rdiH_off, // 424, 428
279 #else
280 rdi_off = extra_space_offset,
281 #endif // _LP64
282 rsi_off, SLOT2(rsiH_off) // 432, 436
283 rbp_off, SLOT2(rbpH_off) // 440, 444
284 rsp_off, SLOT2(rspH_off) // 448, 452
285 rbx_off, SLOT2(rbxH_off) // 456, 460
286 rdx_off, SLOT2(rdxH_off) // 464, 468
287 rcx_off, SLOT2(rcxH_off) // 472, 476
288 rax_off, SLOT2(raxH_off) // 480, 484
289 saved_rbp_off, SLOT2(saved_rbpH_off) // 488, 492
290 return_off, SLOT2(returnH_off) // 496, 500
291 reg_save_frame_size // As noted: neglects any parameters to runtime // 504
292 };
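// Each enumerator above is a VMReg stack-slot index rather than a byte offset. The
// save/restore code below converts a slot index into an rsp-relative byte address by
// multiplying with VMRegImpl::stack_slot_size, e.g. (taken from save_live_registers):
//
//   __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);  // reserve the save area
//   __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8), xmm1);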
293
294 // Save registers which might be killed by calls into the runtime.
295 // Tries to be smart about FP registers. In particular we separate
296 // saving and describing the FPU registers for deoptimization since we
297 // have to save the FPU registers twice if we describe them and on P4
298 // saving FPU registers which don't contain anything appears
299 // expensive. The deopt blob is the only thing which needs to
300 // describe FPU registers. In all other cases it should be sufficient
301 // to simply save their current value.
302
303 static OopMap* generate_oop_map(GraalStubAssembler* sasm, int num_rt_args,
304 bool save_fpu_registers = true) {
305
306 // In 64bit all the args are in regs so there are no additional stack slots
307 LP64_ONLY(num_rt_args = 0);
308 LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
309 int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread
310 sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word );
311
312 // record saved value locations in an OopMap
313 // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
314 OopMap* map = new OopMap(frame_size_in_slots, 0);
315 map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
316 map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
317 map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
318 map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
319 map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
320 map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
321 #ifdef _LP64
322 map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args), r8->as_VMReg());
323 map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args), r9->as_VMReg());
324 map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg());
325 map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg());
326 map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg());
327 map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg());
328 map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg());
329 map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg());
330
331 // This is stupid but needed.
332 map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next());
333 map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next());
334 map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next());
335 map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next());
336 map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next());
337 map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next());
338
339 map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args), r8->as_VMReg()->next());
340 map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args), r9->as_VMReg()->next());
341 map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next());
342 map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next());
343 map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next());
344 map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next());
345 map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next());
346 map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next());
347 #endif // _LP64
348
349 if (save_fpu_registers) {
350 if (UseSSE < 2) {
351 int fpu_off = float_regs_as_doubles_off;
352 for (int n = 0; n < FloatRegisterImpl::number_of_registers; n++) {
353 VMReg fpu_name_0 = as_FloatRegister(n)->as_VMReg();
354 map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0);
355 // %%% This is really a waste but we'll keep things as they were for now
356 if (true) {
357 map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next());
358 }
359 fpu_off += 2;
360 }
361 assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots");
362 }
363
364 if (UseSSE >= 2) {
365 int xmm_off = xmm_regs_as_doubles_off;
366 for (int n = 0; n < XMMRegisterImpl::number_of_registers; n++) {
367 VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
368 map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
369 // %%% This is really a waste but we'll keep things as they were for now
370 if (true) {
371 map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next());
372 }
373 xmm_off += 2;
374 }
375 assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
376
377 } else if (UseSSE == 1) {
378 int xmm_off = xmm_regs_as_doubles_off;
379 for (int n = 0; n < XMMRegisterImpl::number_of_registers; n++) {
380 VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
381 map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
382 xmm_off += 2;
383 }
384 assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
385 }
386 }
387
388 return map;
389 }
390
391 #define __ sasm->
392
393 static OopMap* save_live_registers(GraalStubAssembler* sasm, int num_rt_args,
394 bool save_fpu_registers = true) {
395 __ block_comment("save_live_registers");
396
397 __ pusha(); // integer registers
398
399 // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
400 // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");
401
402 __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
403
404 #ifdef ASSERT
405 __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
406 #endif
407
408 if (save_fpu_registers) {
409 if (UseSSE < 2) {
410 // save FPU stack
411 __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
412 __ fwait();
413
414 #ifdef ASSERT
415 Label ok;
416 __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
417 __ jccb(Assembler::equal, ok);
418 __ stop("corrupted control word detected");
419 __ bind(ok);
420 #endif
421
422 // Reset the control word to guard against exceptions being unmasked
423 // since fstp_d can cause FPU stack underflow exceptions. Write it
424 // into the on stack copy and then reload that to make sure that the
425 // current and future values are correct.
426 __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
427 __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
428
429 // Save the FPU registers in de-opt-able form
430 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0));
431 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8));
432 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
433 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
434 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
435 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
436 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
437 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
438 }
439
440 if (UseSSE >= 2) {
441 // save XMM registers
442 // XMM registers can contain float or double values, but this is not known here,
443 // so always save them as doubles.
444 // note that float values are _not_ converted automatically, so for float values
445 // the second word contains only garbage data.
446 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0), xmm0);
447 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8), xmm1);
448 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
449 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
450 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
451 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
452 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
453 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
454 #ifdef _LP64
455 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64), xmm8);
456 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72), xmm9);
457 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80), xmm10);
458 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88), xmm11);
459 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96), xmm12);
460 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104), xmm13);
461 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112), xmm14);
462 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120), xmm15);
463 #endif // _LP64
464 } else if (UseSSE == 1) {
465 // save XMM registers as floats because doubles are not supported without SSE2
466 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0), xmm0);
467 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8), xmm1);
468 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
469 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
470 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
471 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
472 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
473 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
474 }
475 }
476
477 // FPU stack must be empty now
478 __ verify_FPU(0, "save_live_registers");
479
480 return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
481 }
482
483
484 static void restore_fpu(GraalStubAssembler* sasm, bool restore_fpu_registers = true) {
485 if (restore_fpu_registers) {
486 if (UseSSE >= 2) {
487 // restore XMM registers
488 __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0));
489 __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8));
490 __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
491 __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
492 __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
493 __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
494 __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
495 __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
496 #ifdef _LP64
497 __ movdbl(xmm8, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64));
498 __ movdbl(xmm9, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72));
499 __ movdbl(xmm10, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80));
500 __ movdbl(xmm11, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88));
501 __ movdbl(xmm12, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96));
502 __ movdbl(xmm13, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104));
503 __ movdbl(xmm14, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112));
504 __ movdbl(xmm15, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120));
505 #endif // _LP64
506 } else if (UseSSE == 1) {
507 // restore XMM registers
508 __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0));
509 __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8));
510 __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
511 __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
512 __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
513 __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
514 __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
515 __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
516 }
517
518 if (UseSSE < 2) {
519 __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
520 } else {
521 // check that FPU stack is really empty
522 __ verify_FPU(0, "restore_live_registers");
523 }
524
525 } else {
526 // check that FPU stack is really empty
527 __ verify_FPU(0, "restore_live_registers");
528 }
529
530 #ifdef ASSERT
531 {
532 Label ok;
533 __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
534 __ jcc(Assembler::equal, ok);
535 __ stop("bad offsets in frame");
536 __ bind(ok);
537 }
538 #endif // ASSERT
539
540 __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
541 }
542
543
544 static void restore_live_registers(GraalStubAssembler* sasm, bool restore_fpu_registers/* = true*/) {
545 __ block_comment("restore_live_registers");
546
547 restore_fpu(sasm, restore_fpu_registers);
548 __ popa();
549 }
550
551
552 static void restore_live_registers_except_rax(GraalStubAssembler* sasm, bool restore_fpu_registers = true) {
553 __ block_comment("restore_live_registers_except_rax");
554
555 restore_fpu(sasm, restore_fpu_registers);
556
557 #ifdef _LP64
558 __ movptr(r15, Address(rsp, 0));
559 __ movptr(r14, Address(rsp, wordSize));
560 __ movptr(r13, Address(rsp, 2 * wordSize));
561 __ movptr(r12, Address(rsp, 3 * wordSize));
562 __ movptr(r11, Address(rsp, 4 * wordSize));
563 __ movptr(r10, Address(rsp, 5 * wordSize));
564 __ movptr(r9, Address(rsp, 6 * wordSize));
565 __ movptr(r8, Address(rsp, 7 * wordSize));
566 __ movptr(rdi, Address(rsp, 8 * wordSize));
567 __ movptr(rsi, Address(rsp, 9 * wordSize));
568 __ movptr(rbp, Address(rsp, 10 * wordSize));
569 // skip rsp
570 __ movptr(rbx, Address(rsp, 12 * wordSize));
571 __ movptr(rdx, Address(rsp, 13 * wordSize));
572 __ movptr(rcx, Address(rsp, 14 * wordSize));
573
574 __ addptr(rsp, 16 * wordSize);
575 #else
576
577 __ pop(rdi);
578 __ pop(rsi);
579 __ pop(rbp);
580 __ pop(rbx); // skip this value
581 __ pop(rbx);
582 __ pop(rdx);
583 __ pop(rcx);
584 __ addptr(rsp, BytesPerWord);
585 #endif // _LP64
586 }
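// The explicit loads above mirror the frame layout produced by pusha() in save_live_registers:
// the saved rsp slot is skipped (the stack pointer is simply advanced past the whole save area
// at the end), and the rax slot is deliberately not reloaded so that the value returned by the
// preceding runtime call survives in rax.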
587
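// generate_handle_exception: the live registers are saved, the incoming exception oop (rax)
// and issuing pc (rdx) are stored into the JavaThread, the throwing pc is patched into the
// frame's return address slot, exception_handler_for_pc is called to compute the handler, and
// the return address is then patched again so that returning from this stub lands directly in
// that handler before the saved registers are restored.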
588 OopMapSet* GraalRuntime::generate_handle_exception(StubID id, GraalStubAssembler *sasm) {
589 __ block_comment("generate_handle_exception");
590
591 // incoming parameters
592 const Register exception_oop = rax;
593 const Register exception_pc = rdx;
594 // other registers used in this stub
595 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
596
597 // Save registers, if required.
598 OopMapSet* oop_maps = new OopMapSet();
599 OopMap* oop_map = NULL;
600 switch (id) {
601 case graal_handle_exception_nofpu_id:
602 // At this point all registers MAY be live.
603 oop_map = save_live_registers(sasm, 1 /*thread*/, id == graal_handle_exception_nofpu_id);
604 break;
605 default: ShouldNotReachHere();
606 }
607
608 #ifdef TIERED
609 // C2 can leave the fpu stack dirty
610 if (UseSSE < 2) {
611 __ empty_FPU_stack();
612 }
613 #endif // TIERED
614
615 // verify that only rax and rdx are valid at this time
616 #ifdef ASSERT
617 __ movptr(rbx, 0xDEAD);
618 __ movptr(rcx, 0xDEAD);
619 __ movptr(rsi, 0xDEAD);
620 __ movptr(rdi, 0xDEAD);
621 #endif
622
623 // verify that rax contains a valid exception
624 __ verify_not_null_oop(exception_oop);
625
626 // load address of JavaThread object for thread-local data
627 NOT_LP64(__ get_thread(thread);)
628
629 #ifdef ASSERT
630 // check that fields in JavaThread for exception oop and issuing pc are
631 // empty before writing to them
632 Label oop_empty;
633 __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
634 __ jcc(Assembler::equal, oop_empty);
635 __ stop("exception oop already set");
636 __ bind(oop_empty);
637
638 Label pc_empty;
639 __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
640 __ jcc(Assembler::equal, pc_empty);
641 __ stop("exception pc already set");
642 __ bind(pc_empty);
643 #endif
644
645 // save exception oop and issuing pc into JavaThread
646 // (exception handler will load it from here)
647 __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
648 __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc);
649
650 // patch throwing pc into return address (has bci & oop map)
651 __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);
652
653 // compute the exception handler.
654 // the exception oop and the throwing pc are read from the fields in JavaThread
655 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
656 oop_maps->add_gc_map(call_offset, oop_map);
657
658 // rax: handler address
659 // will be the deopt blob if the nmethod was deoptimized while we looked up
660 // the handler, regardless of whether a handler existed in the nmethod.
661
662 // only rax is valid at this time; all other registers have been destroyed by the runtime call
663 #ifdef ASSERT
664 __ movptr(rbx, 0xDEAD);
665 __ movptr(rcx, 0xDEAD);
666 __ movptr(rdx, 0xDEAD);
667 __ movptr(rsi, 0xDEAD);
668 __ movptr(rdi, 0xDEAD);
669 #endif
670
671 // patch the return address, this stub will directly return to the exception handler
672 __ movptr(Address(rbp, 1*BytesPerWord), rax);
673
674 switch (id) {
675 case graal_handle_exception_nofpu_id:
676 // Restore the registers that were saved at the beginning.
677 restore_live_registers(sasm, id == graal_handle_exception_nofpu_id);
678 break;
679 default: ShouldNotReachHere();
680 }
681
682 return oop_maps;
683 }
684
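// generate_unwind_exception: removes the current activation and continues at the caller's
// exception handler. The exception oop is parked in a callee-saved register across the
// call_VM_leaf that looks up the handler, the throwing pc is popped off the stack (standing
// in for a ret(0)), and control then transfers to the handler with rax, rdx and rbx holding
// the exception oop, throwing pc and handler address respectively.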
685 void GraalRuntime::generate_unwind_exception(GraalStubAssembler *sasm) {
686 // incoming parameters
687 const Register exception_oop = rax;
688 // callee-saved copy of exception_oop during runtime call
689 const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
690 // other registers used in this stub
691 const Register exception_pc = rdx;
692 const Register handler_addr = rbx;
693 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
694
695 // verify that only rax is valid at this time
696 #ifdef ASSERT
697 __ movptr(rbx, 0xDEAD);
698 __ movptr(rcx, 0xDEAD);
699 __ movptr(rdx, 0xDEAD);
700 __ movptr(rsi, 0xDEAD);
701 __ movptr(rdi, 0xDEAD);
702 #endif
703
704 #ifdef ASSERT
705 // check that fields in JavaThread for exception oop and issuing pc are empty
706 NOT_LP64(__ get_thread(thread);)
707 Label oop_empty;
708 __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
709 __ jcc(Assembler::equal, oop_empty);
710 __ stop("exception oop must be empty");
711 __ bind(oop_empty);
712
713 Label pc_empty;
714 __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
715 __ jcc(Assembler::equal, pc_empty);
716 __ stop("exception pc must be empty");
717 __ bind(pc_empty);
718 #endif
719
720 // clear the FPU stack in case any FPU results are left behind
721 __ empty_FPU_stack();
722
723 // save exception_oop in callee-saved register to preserve it during runtime calls
724 __ verify_not_null_oop(exception_oop);
725 __ movptr(exception_oop_callee_saved, exception_oop);
726
727 NOT_LP64(__ get_thread(thread);)
728 // Get return address (is on top of stack after leave).
729 __ movptr(exception_pc, Address(rsp, 0));
730
731 // search the exception handler address of the caller (using the return address)
732 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
733 // rax: exception handler address of the caller
734
735 // Only rax and the callee-saved exception oop register (rsi/r14) are valid at this time; all other registers have been destroyed by the call.
736 #ifdef ASSERT
737 __ movptr(rbx, 0xDEAD);
738 __ movptr(rcx, 0xDEAD);
739 __ movptr(rdx, 0xDEAD);
740 __ movptr(rdi, 0xDEAD);
741 #endif
742
743 // move result of call into correct register
744 __ movptr(handler_addr, rax);
745
746 // Restore exception oop to RAX (required convention of exception handler).
747 __ movptr(exception_oop, exception_oop_callee_saved);
748
749 // verify that there is really a valid exception in rax
750 __ verify_not_null_oop(exception_oop);
751
752 // get throwing pc (= return address).
753 // rdx has been destroyed by the call, so it must be set again
754 // the pop is also necessary to simulate the effect of a ret(0)
755 __ pop(exception_pc);
756
757 // Restore SP from BP if the exception PC is a method handle call site.
758 NOT_LP64(__ get_thread(thread);)
759 __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
760 __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
761
762 // continue at exception handler (return address removed)
763 // note: do *not* remove arguments when unwinding the
764 // activation since the caller assumes having
765 // all arguments on the stack when entering the
766 // runtime to determine the exception handler
767 // (GC happens at call site with arguments!)
768 // rax: exception oop
769 // rdx: throwing pc
770 // rbx: exception handler
771 __ jmp(handler_addr);
772 }
773
774 OopMapSet* GraalRuntime::generate_code_for(StubID id, GraalStubAssembler* sasm) {
775
776 // for better readability
777 const bool must_gc_arguments = true;
778 const bool dont_gc_arguments = false;
779
780 // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
781 bool save_fpu_registers = true;
782
783 // stub code & info for the different stubs
784 OopMapSet* oop_maps = NULL;
785 switch (id) {
786
787 case graal_new_instance_id:
788 {
789 Register klass = rdx; // Incoming
790 Register obj = rax; // Result
791 __ set_info("new_instance", dont_gc_arguments);
792 __ enter();
793 OopMap* map = save_live_registers(sasm, 2);
794 int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
795 oop_maps = new OopMapSet();
796 oop_maps->add_gc_map(call_offset, map);
797 restore_live_registers_except_rax(sasm);
798 __ verify_oop(obj);
799 __ leave();
800 __ ret(0);
801
802 // rax: new instance
803 }
804
805 break;
806
807 case graal_new_type_array_id:
808 case graal_new_object_array_id:
809 {
810 Register length = rbx; // Incoming
811 Register klass = rdx; // Incoming
812 Register obj = rax; // Result
813
814 if (id == graal_new_type_array_id) {
815 __ set_info("new_type_array", dont_gc_arguments);
816 } else {
817 __ set_info("new_object_array", dont_gc_arguments);
818 }
819
820 #ifdef ASSERT
821 // assert object type is really an array of the proper kind
822 {
823 Label ok;
824 Register t0 = obj;
825 __ movl(t0, Address(klass, Klass::layout_helper_offset()));
826 __ sarl(t0, Klass::_lh_array_tag_shift);
827 int tag = ((id == graal_new_type_array_id)
828 ? Klass::_lh_array_tag_type_value
829 : Klass::_lh_array_tag_obj_value);
830 __ cmpl(t0, tag);
831 __ jcc(Assembler::equal, ok);
832 __ stop("assert(is an array klass)");
833 __ should_not_reach_here();
834 __ bind(ok);
835 }
836 #endif // ASSERT
837 __ enter();
838 OopMap* map = save_live_registers(sasm, 3);
839 int call_offset;
840 if (id == graal_new_type_array_id) {
841 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
842 } else {
843 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
844 }
845
846 oop_maps = new OopMapSet();
847 oop_maps->add_gc_map(call_offset, map);
848 restore_live_registers_except_rax(sasm);
849
850 __ verify_oop(obj);
851 __ leave();
852 __ ret(0);
853
854 // rax: new array
855 }
856 break;
857
858 case graal_new_multi_array_id:
859 { GraalStubFrame f(sasm, "new_multi_array", dont_gc_arguments);
860 // rax: klass
861 // rbx: rank
862 // rcx: address of 1st dimension
863 OopMap* map = save_live_registers(sasm, 4);
864 int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);
865
866 oop_maps = new OopMapSet();
867 oop_maps->add_gc_map(call_offset, map);
868 restore_live_registers_except_rax(sasm);
869
870 // rax: new multi array
871 __ verify_oop(rax);
872 }
873 break;
874
875 case graal_register_finalizer_id:
876 {
877 __ set_info("register_finalizer", dont_gc_arguments);
878
879 // This is called via call_runtime so the arguments
880 // will be placed in C ABI locations
881
882 #ifdef _LP64
883 __ verify_oop(j_rarg0);
884 __ mov(rax, j_rarg0);
885 #else
886 // The object is passed on the stack and we haven't pushed a
887 // frame yet, so it is one word away from the top of stack.
888 __ movptr(rax, Address(rsp, 1 * BytesPerWord));
889 __ verify_oop(rax);
890 #endif // _LP64
891
892 // load the klass and check the has-finalizer flag
893 Label register_finalizer;
894 Register t = rsi;
895 __ load_klass(t, rax);
896 __ movl(t, Address(t, Klass::access_flags_offset()));
897 __ testl(t, JVM_ACC_HAS_FINALIZER);
898 __ jcc(Assembler::notZero, register_finalizer);
899 __ ret(0);
900
901 __ bind(register_finalizer);
902 __ enter();
903 OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
904 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
905 oop_maps = new OopMapSet();
906 oop_maps->add_gc_map(call_offset, oop_map);
907
908 // Now restore all the live registers
909 restore_live_registers(sasm);
910
911 __ leave();
912 __ ret(0);
913 }
914 break;
915
916 case graal_handle_exception_nofpu_id:
917 { GraalStubFrame f(sasm, "handle_exception", dont_gc_arguments);
918 oop_maps = generate_handle_exception(id, sasm);
919 }
920 break;
921
922 case graal_slow_subtype_check_id:
923 {
924 // Typical calling sequence:
925 // __ push(klass_RInfo); // object klass or other subclass
926 // __ push(sup_k_RInfo); // array element klass or other superclass
927 // __ call(slow_subtype_check);
928 // Note that the subclass is pushed first, and is therefore deepest.
929 // Previous versions of this code reversed the names 'sub' and 'super'.
930 // This was operationally harmless but made the code unreadable.
931 enum layout {
932 rax_off, SLOT2(raxH_off)
933 rcx_off, SLOT2(rcxH_off)
934 rsi_off, SLOT2(rsiH_off)
935 rdi_off, SLOT2(rdiH_off)
936 // saved_rbp_off, SLOT2(saved_rbpH_off)
937 return_off, SLOT2(returnH_off)
938 sup_k_off, SLOT2(sup_kH_off)
939 klass_off, SLOT2(superH_off)
940 framesize,
941 result_off = klass_off // deepest argument is also the return value
942 };
943
944 __ set_info("slow_subtype_check", dont_gc_arguments);
945 __ push(rdi);
946 __ push(rsi);
947 __ push(rcx);
948 __ push(rax);
949
950 // This is called by pushing args and not with C abi
951 __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
952 __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass
953
954 Label miss;
955 Label success;
956 __ check_klass_subtype_fast_path(rsi, rax, rcx, &success, &miss, NULL);
957
958 __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);
959
960 // fallthrough on success:
961 __ bind(success);
962 __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
963 __ pop(rax);
964 __ pop(rcx);
965 __ pop(rsi);
966 __ pop(rdi);
967 __ ret(0);
968
969 __ bind(miss);
970 __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
971 __ pop(rax);
972 __ pop(rcx);
973 __ pop(rsi);
974 __ pop(rdi);
975 __ ret(0);
976 }
977 break;
978
979 case graal_unwind_exception_call_id: {
980 // remove the frame from the stack
981 __ movptr(rsp, rbp);
982 __ pop(rbp);
983 // exception_oop is passed using ordinary java calling conventions
984 __ movptr(rax, j_rarg0);
985
986 Label nonNullExceptionOop;
987 __ testptr(rax, rax);
988 __ jcc(Assembler::notZero, nonNullExceptionOop);
989 {
990 __ enter();
991 oop_maps = new OopMapSet();
992 OopMap* oop_map = save_live_registers(sasm, 0);
993 int call_offset = __ call_RT(rax, noreg, (address)graal_create_null_exception, 0);
994 oop_maps->add_gc_map(call_offset, oop_map);
995 __ leave();
996 }
997 __ bind(nonNullExceptionOop);
998
999 __ set_info("unwind_exception", dont_gc_arguments);
1000 // note: no stubframe since we are about to leave the current
1001 // activation and we are calling a leaf VM function only.
1002 generate_unwind_exception(sasm);
1003 __ should_not_reach_here();
1004 break;
1005 }
1006
1007 case graal_OSR_migration_end_id: {
1008 __ enter();
1009 save_live_registers(sasm, 0);
1010 __ movptr(c_rarg0, j_rarg0);
1011 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end)));
1012 restore_live_registers(sasm);
1013 __ leave();
1014 __ ret(0);
1015 break;
1016 }
1017
1018 case graal_set_deopt_info_id: {
1019 __ movptr(Address(r15_thread, JavaThread::graal_deopt_info_offset()), rscratch1);
1020 __ ret(0);
1021 break;
1022 }
1023
1024 case graal_create_null_pointer_exception_id: {
1025 __ enter();
1026 oop_maps = new OopMapSet();
1027 OopMap* oop_map = save_live_registers(sasm, 0);
1028 int call_offset = __ call_RT(rax, noreg, (address)graal_create_null_exception, 0);
1029 oop_maps->add_gc_map(call_offset, oop_map);
1030 __ leave();
1031 __ ret(0);
1032 break;
1033 }
1034
1035 case graal_create_out_of_bounds_exception_id: {
1036 __ enter();
1037 oop_maps = new OopMapSet();
1038 OopMap* oop_map = save_live_registers(sasm, 0);
1039 int call_offset = __ call_RT(rax, noreg, (address)graal_create_out_of_bounds_exception, j_rarg0);
1040 oop_maps->add_gc_map(call_offset, oop_map);
1041 __ leave();
1042 __ ret(0);
1043 break;
1044 }
1045
1046 case graal_vm_error_id: {
1047 __ enter();
1048 oop_maps = new OopMapSet();
1049 OopMap* oop_map = save_live_registers(sasm, 0);
1050 int call_offset = __ call_RT(noreg, noreg, (address)graal_vm_error, j_rarg0, j_rarg1, j_rarg2);
1051 oop_maps->add_gc_map(call_offset, oop_map);
1052 restore_live_registers(sasm);
1053 __ leave();
1054 __ ret(0);
1055 break;
1056 }
1057
1058 case graal_log_printf_id: {
1059 __ enter();
1060 oop_maps = new OopMapSet();
1061 OopMap* oop_map = save_live_registers(sasm, 0);
1062 int call_offset = __ call_RT(noreg, noreg, (address)graal_log_printf, j_rarg0, j_rarg1, j_rarg2);
1063 oop_maps->add_gc_map(call_offset, oop_map);
1064 restore_live_registers(sasm);
1065 __ leave();
1066 __ ret(0);
1067 break;
1068 }
1069
1070 case graal_log_primitive_id: {
1071 __ enter();
1072 oop_maps = new OopMapSet();
1073 OopMap* oop_map = save_live_registers(sasm, 0);
1074 int call_offset = __ call_RT(noreg, noreg, (address)graal_log_primitive, j_rarg0, j_rarg1, j_rarg2);
1075 oop_maps->add_gc_map(call_offset, oop_map);
1076 restore_live_registers(sasm);
1077 __ leave();
1078 __ ret(0);
1079 break;
1080 }
1081
1082 case graal_log_object_id: {
1083 __ enter();
1084 oop_maps = new OopMapSet();
1085 OopMap* oop_map = save_live_registers(sasm, 0);
1086 int call_offset = __ call_RT(noreg, noreg, (address)graal_log_object, j_rarg0, j_rarg1);
1087 oop_maps->add_gc_map(call_offset, oop_map);
1088 restore_live_registers(sasm);
1089 __ leave();
1090 __ ret(0);
1091 break;
1092 }
1093
1094 case graal_verify_oop_id: {
1095 // We use enter & leave so that a better stack trace is produced in the hs_err file
1096 __ enter();
1097 __ verify_oop(r13, "Graal verify oop");
1098 __ leave();
1099 __ ret(0);
1100 break;
1101 }
1102
1103 case graal_arithmetic_frem_id: {
1104 __ subptr(rsp, 8);
1105 __ movflt(Address(rsp, 0), xmm1);
1106 __ fld_s(Address(rsp, 0));
1107 __ movflt(Address(rsp, 0), xmm0);
1108 __ fld_s(Address(rsp, 0));
1109 Label L;
1110 __ bind(L);
1111 __ fprem();
1112 __ fwait();
1113 __ fnstsw_ax();
1114 __ testl(rax, 0x400);
1115 __ jcc(Assembler::notZero, L);
1116 __ fxch(1);
1117 __ fpop();
1118 __ fstp_s(Address(rsp, 0));
1119 __ movflt(xmm0, Address(rsp, 0));
1120 __ addptr(rsp, 8);
1121 __ ret(0);
1122 break;
1123 }
1124 case graal_arithmetic_drem_id: {
1125 __ subptr(rsp, 8);
1126 __ movdbl(Address(rsp, 0), xmm1);
1127 __ fld_d(Address(rsp, 0));
1128 __ movdbl(Address(rsp, 0), xmm0);
1129 __ fld_d(Address(rsp, 0));
1130 Label L;
1131 __ bind(L);
1132 __ fprem();
1133 __ fwait();
1134 __ fnstsw_ax();
1135 __ testl(rax, 0x400);
1136 __ jcc(Assembler::notZero, L);
1137 __ fxch(1);
1138 __ fpop();
1139 __ fstp_d(Address(rsp, 0));
1140 __ movdbl(xmm0, Address(rsp, 0));
1141 __ addptr(rsp, 8);
1142 __ ret(0);
1143 break;
1144 }
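// Both remainder stubs above use the x87 fprem instruction, which may deliver only a partial
// remainder per iteration: bit 0x400 of the FPU status word (the C2 condition flag, read via
// fnstsw_ax) stays set while the reduction is incomplete, so the loop re-executes fprem until
// C2 clears, then drops the divisor (fxch/fpop) and moves the finished remainder back into
// xmm0 through the stack slot.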
1145 case graal_monitorenter_id: {
1146 Register obj = j_rarg0;
1147 Register lock = j_rarg1;
1148 {
1149 GraalStubFrame f(sasm, "graal_monitorenter", dont_gc_arguments);
1150 OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);
1151
1152 // Arguments arrive in j_rarg0/j_rarg1 (Java calling convention), not via the C ABI
1153 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, graal_monitorenter), obj, lock);
1154
1155 oop_maps = new OopMapSet();
1156 oop_maps->add_gc_map(call_offset, map);
1157 restore_live_registers(sasm, save_fpu_registers);
1158 }
1159 __ ret(0);
1160 break;
1161 }
1162 case graal_monitorexit_id: {
1163 Register obj = j_rarg0;
1164 Register lock = j_rarg1;
1165 {
1166 GraalStubFrame f(sasm, "graal_monitorexit", dont_gc_arguments);
1167 OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);
1168
1169 // note: really a leaf routine but must set up last java sp
1170 // => use call_RT for now (speed can be improved by
1171 // doing last java sp setup manually)
1172 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, graal_monitorexit), obj, lock);
1173
1174 oop_maps = new OopMapSet();
1175 oop_maps->add_gc_map(call_offset, map);
1176 restore_live_registers(sasm, save_fpu_registers);
1177 }
1178 __ ret(0);
1179 break;
1180 }
1181
1182 default:
1183 { GraalStubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
1184 __ movptr(rax, (int)id);
1185 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
1186 __ should_not_reach_here();
1187 }
1188 break;
1189 }
1190 return oop_maps;
1191 }
1192
1193 #undef __