graal-jvmci-8: annotate src/cpu/x86/vm/c1_LIRAssembler_x86.cpp @ 1192:776fb94f33cc
6918006: G1: spill space must be reserved on the stack for barrier calls on Windows x64
Summary: Stub code generated to call G1 barriers does not allocate spill space on the stack as required by Windows x64 ABI. The fix is to use more ABI-friendly call_VM_leaf().
Reviewed-by: iveresov, never, kvn
author | apetrusenko
date | Thu, 21 Jan 2010 18:51:10 -0800
parents | 323bd24c6520
children | ba263cfb7611 6deeaebad47a
rev | line source
0 | 1 /* |
579 | 2 * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 # include "incls/_precompiled.incl" | |
26 # include "incls/_c1_LIRAssembler_x86.cpp.incl" | |
27 | |
28 | |
29 // These masks are used to provide 128-bit aligned bitmasks to the XMM | |
30 // instructions, to allow sign-masking or sign-bit flipping. They allow | |
31 // fast versions of NegF/NegD and AbsF/AbsD. | |
32 | |
33 // Note: 'double' and 'long long' have 32-bit alignment on x86. |
34 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) { | |
35 // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address |
36 // for the 128-bit operands of SSE instructions. |
37 jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF))); // intptr_t: plain 'long' is only 32 bits on Win64 |
38 // Store the value into the 128-bit operand. |
39 operand[0] = lo; | |
40 operand[1] = hi; | |
41 return operand; | |
42 } | |
43 | |
44 // Buffer for 128-bit masks used by SSE instructions. |
45 static jlong fp_signmask_pool[(4+1)*2]; // 4*128 bits (data) + 128 bits (alignment) |
46 | |
47 // Static initialization during VM startup. | |
48 static jlong *float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF)); | |
49 static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF)); | |
50 static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000)); | |
51 static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000)); | |
52 | |
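// A minimal sketch of the rounding done by double_quadword() above, using an
// assumed (hypothetical) start address for fp_signmask_pool:
//
//   &fp_signmask_pool[0] == 0x1008   // jlong-aligned only (8 bytes)
//   &fp_signmask_pool[2] == 0x1018   // argument for the first mask
//   0x1018 & ~0xF        == 0x1010   // rounded down, 16-byte aligned
//
// Rounding down moves an operand back by at most 8 bytes, which is why the
// pool reserves one extra 128-bit slot: the aligned operand always stays
// inside the buffer.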
53 | |
54 | |
55 NEEDS_CLEANUP // remove these definitions? |
56 const Register IC_Klass = rax; // where the IC klass is cached | |
57 const Register SYNC_header = rax; // synchronization header | |
58 const Register SHIFT_count = rcx; // where the count for shift operations must be |
59 | |
60 #define __ _masm-> | |
61 | |
62 | |
63 static void select_different_registers(Register preserve, | |
64 Register extra, | |
65 Register &tmp1, | |
66 Register &tmp2) { | |
67 if (tmp1 == preserve) { | |
68 assert_different_registers(tmp1, tmp2, extra); | |
69 tmp1 = extra; | |
70 } else if (tmp2 == preserve) { | |
71 assert_different_registers(tmp1, tmp2, extra); | |
72 tmp2 = extra; | |
73 } | |
74 assert_different_registers(preserve, tmp1, tmp2); | |
75 } | |
76 | |
77 | |
78 | |
79 static void select_different_registers(Register preserve, | |
80 Register extra, | |
81 Register &tmp1, | |
82 Register &tmp2, | |
83 Register &tmp3) { | |
84 if (tmp1 == preserve) { | |
85 assert_different_registers(tmp1, tmp2, tmp3, extra); | |
86 tmp1 = extra; | |
87 } else if (tmp2 == preserve) { | |
88 assert_different_registers(tmp1, tmp2, tmp3, extra); | |
89 tmp2 = extra; | |
90 } else if (tmp3 == preserve) { | |
91 assert_different_registers(tmp1, tmp2, tmp3, extra); | |
92 tmp3 = extra; | |
93 } | |
94 assert_different_registers(preserve, tmp1, tmp2, tmp3); | |
95 } | |
96 | |
97 | |
98 | |
99 bool LIR_Assembler::is_small_constant(LIR_Opr opr) { | |
100 if (opr->is_constant()) { | |
101 LIR_Const* constant = opr->as_constant_ptr(); | |
102 switch (constant->type()) { | |
103 case T_INT: { | |
104 return true; | |
105 } | |
106 | |
107 default: | |
108 return false; | |
109 } | |
110 } | |
111 return false; | |
112 } | |
113 | |
114 | |
115 LIR_Opr LIR_Assembler::receiverOpr() { | |
304 | 116 return FrameMap::receiver_opr; |
0 | 117 } |
118 | |
119 LIR_Opr LIR_Assembler::incomingReceiverOpr() { | |
120 return receiverOpr(); | |
121 } | |
122 | |
123 LIR_Opr LIR_Assembler::osrBufferPointer() { | |
304 | 124 return FrameMap::as_pointer_opr(receiverOpr()->as_register()); |
0 | 125 } |
126 | |
127 //--------------fpu register translations----------------------- | |
128 | |
129 | |
130 address LIR_Assembler::float_constant(float f) { | |
131 address const_addr = __ float_constant(f); | |
132 if (const_addr == NULL) { | |
133 bailout("const section overflow"); | |
134 return __ code()->consts()->start(); | |
135 } else { | |
136 return const_addr; | |
137 } | |
138 } | |
139 | |
140 | |
141 address LIR_Assembler::double_constant(double d) { | |
142 address const_addr = __ double_constant(d); | |
143 if (const_addr == NULL) { | |
144 bailout("const section overflow"); | |
145 return __ code()->consts()->start(); | |
146 } else { | |
147 return const_addr; | |
148 } | |
149 } | |
150 | |
151 | |
152 void LIR_Assembler::set_24bit_FPU() { | |
153 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24())); | |
154 } | |
155 | |
156 void LIR_Assembler::reset_FPU() { | |
157 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); | |
158 } | |
159 | |
160 void LIR_Assembler::fpop() { | |
161 __ fpop(); | |
162 } | |
163 | |
164 void LIR_Assembler::fxch(int i) { | |
165 __ fxch(i); | |
166 } | |
167 | |
168 void LIR_Assembler::fld(int i) { | |
169 __ fld_s(i); | |
170 } | |
171 | |
172 void LIR_Assembler::ffree(int i) { | |
173 __ ffree(i); | |
174 } | |
175 | |
176 void LIR_Assembler::breakpoint() { | |
177 __ int3(); | |
178 } | |
179 | |
180 void LIR_Assembler::push(LIR_Opr opr) { | |
181 if (opr->is_single_cpu()) { | |
182 __ push_reg(opr->as_register()); | |
183 } else if (opr->is_double_cpu()) { | |
304 | 184 NOT_LP64(__ push_reg(opr->as_register_hi())); |
0 | 185 __ push_reg(opr->as_register_lo()); |
186 } else if (opr->is_stack()) { | |
187 __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix())); | |
188 } else if (opr->is_constant()) { | |
189 LIR_Const* const_opr = opr->as_constant_ptr(); | |
190 if (const_opr->type() == T_OBJECT) { | |
191 __ push_oop(const_opr->as_jobject()); | |
192 } else if (const_opr->type() == T_INT) { | |
193 __ push_jint(const_opr->as_jint()); | |
194 } else { | |
195 ShouldNotReachHere(); | |
196 } | |
197 | |
198 } else { | |
199 ShouldNotReachHere(); | |
200 } | |
201 } | |
202 | |
203 void LIR_Assembler::pop(LIR_Opr opr) { | |
204 if (opr->is_single_cpu()) { | |
304 | 205 __ pop_reg(opr->as_register()); |
0 | 206 } else { |
207 ShouldNotReachHere(); | |
208 } | |
209 } | |
210 | |
304 | 211 bool LIR_Assembler::is_literal_address(LIR_Address* addr) { |
212 return addr->base()->is_illegal() && addr->index()->is_illegal(); | |
213 } | |
214 | |
0 | 215 //------------------------------------------- |
304 | 216 |
0 | 217 Address LIR_Assembler::as_Address(LIR_Address* addr) { |
304 | 218 return as_Address(addr, rscratch1); |
219 } | |
220 | |
221 Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) { | |
0 | 222 if (addr->base()->is_illegal()) { |
223 assert(addr->index()->is_illegal(), "must be illegal too"); | |
304 | 224 AddressLiteral laddr((address)addr->disp(), relocInfo::none); |
225 if (! __ reachable(laddr)) { | |
226 __ movptr(tmp, laddr.addr()); | |
227 Address res(tmp, 0); | |
228 return res; | |
229 } else { | |
230 return __ as_Address(laddr); | |
231 } | |
0 | 232 } |
233 | |
304 | 234 Register base = addr->base()->as_pointer_register(); |
0 | 235 |
236 if (addr->index()->is_illegal()) { | |
237 return Address( base, addr->disp()); | |
304 | 238 } else if (addr->index()->is_cpu_register()) { |
239 Register index = addr->index()->as_pointer_register(); | |
0 | 240 return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp()); |
241 } else if (addr->index()->is_constant()) { | |
304 | 242 intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp(); |
243 assert(Assembler::is_simm32(addr_offset), "must be"); | |
0 | 244 |
245 return Address(base, addr_offset); | |
246 } else { | |
247 Unimplemented(); | |
248 return Address(); | |
249 } | |
250 } | |
251 | |
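// Reachability note for the literal-address path above (an illustrative
// summary, assuming the usual x86_64 encoding constraints): an absolute
// address is only directly usable when it fits the 32-bit displacement
// reachable from the code. When reachable(laddr) is false, the address is
// first materialized into the scratch register (rscratch1 in the no-arg
// overload) and the returned Address is (tmp, 0) instead.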
252 | |
253 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) { | |
254 Address base = as_Address(addr); | |
255 return Address(base._base, base._index, base._scale, base._disp + BytesPerWord); | |
256 } | |
257 | |
258 | |
259 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) { | |
260 return as_Address(addr); | |
261 } | |
262 | |
263 | |
264 void LIR_Assembler::osr_entry() { | |
265 offsets()->set_value(CodeOffsets::OSR_Entry, code_offset()); | |
266 BlockBegin* osr_entry = compilation()->hir()->osr_entry(); | |
267 ValueStack* entry_state = osr_entry->state(); | |
268 int number_of_locks = entry_state->locks_size(); | |
269 | |
270 // we jump here if osr happens with the interpreter | |
271 // state set up to continue at the beginning of the | |
272 // loop that triggered osr - in particular, we have | |
273 // the following register setup: |
274 // | |
275 // rcx: osr buffer | |
276 // | |
277 | |
278 // build frame | |
279 ciMethod* m = compilation()->method(); | |
280 __ build_frame(initial_frame_size_in_bytes()); | |
281 | |
282 // OSR buffer is | |
283 // | |
284 // locals[nlocals-1..0] | |
285 // monitors[0..number_of_locks] | |
286 // | |
287 // locals is a direct copy of the interpreter frame, so the first slot |
288 // in the osr buffer's local array is the last local from the interpreter |
289 // and the last slot is local[0] (the receiver) from the interpreter |
290 // | |
291 // Similarly with locks. The first lock slot in the osr buffer is the nth lock |
292 // from the interpreter frame, and the nth lock slot in the osr buffer is the |
293 // 0th lock in the interpreter frame (the method lock, if the method is synchronized) |
294 | |
295 // Initialize monitors in the compiled activation. | |
296 // rcx: pointer to osr buffer | |
297 // | |
298 // All other registers are dead at this point and the locals will be | |
299 // copied into place by code emitted in the IR. | |
300 | |
304 | 301 Register OSR_buf = osrBufferPointer()->as_pointer_register(); |
0 | 302 { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below"); |
303 int monitor_offset = BytesPerWord * method()->max_locals() + | |
1060 | 304 (2 * BytesPerWord) * (number_of_locks - 1); |
305 // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in | |
306 // the OSR buffer using 2 word entries: first the lock and then | |
307 // the oop. | |
0 | 308 for (int i = 0; i < number_of_locks; i++) { |
1060 | 309 int slot_offset = monitor_offset - ((i * 2) * BytesPerWord); |
0 | 310 #ifdef ASSERT |
311 // verify the interpreter's monitor has a non-null object | |
312 { | |
313 Label L; | |
1060 | 314 __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD); |
0 | 315 __ jcc(Assembler::notZero, L); |
316 __ stop("locked object is NULL"); | |
317 __ bind(L); | |
318 } | |
319 #endif | |
1060 | 320 __ movptr(rbx, Address(OSR_buf, slot_offset + 0)); |
304 | 321 __ movptr(frame_map()->address_for_monitor_lock(i), rbx); |
1060 | 322 __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord)); |
304 | 323 __ movptr(frame_map()->address_for_monitor_object(i), rbx); |
0 | 324 } |
325 } | |
326 } | |
327 | |
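// Worked example of the monitor offset arithmetic above, with assumed values
// BytesPerWord == 8, max_locals() == 3 and number_of_locks == 2:
//
//   monitor_offset       = 8 * 3 + 16 * (2 - 1) = 40
//   slot_offset (i == 0) = 40 - 0  = 40   // lock word at 40, oop at 48
//   slot_offset (i == 1) = 40 - 16 = 24   // lock word at 24, oop at 32
//
// Each BasicObjectLock in the OSR buffer is two words: the displaced header
// first, then the object oop, matching the movptr pairs in the loop.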
328 | |
329 // inline cache check; done before the frame is built. | |
330 int LIR_Assembler::check_icache() { | |
331 Register receiver = FrameMap::receiver_opr->as_register(); | |
332 Register ic_klass = IC_Klass; | |
304 | 333 const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9); |
0 | 334 |
335 if (!VerifyOops) { | |
336 // insert some nops so that the verified entry point is aligned on CodeEntryAlignment | |
304 | 337 while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) { |
0 | 338 __ nop(); |
339 } | |
340 } | |
341 int offset = __ offset(); | |
342 __ inline_cache_check(receiver, IC_Klass); | |
343 assert(__ offset() % CodeEntryAlignment == 0 || VerifyOops, "alignment must be correct"); | |
344 if (VerifyOops) { | |
345 // force alignment after the cache check. | |
346 // It's been verified to be aligned if !VerifyOops | |
347 __ align(CodeEntryAlignment); | |
348 } | |
349 return offset; | |
350 } | |
351 | |
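// A small worked instance of the nop-padding loop above, assuming
// CodeEntryAlignment == 32 and the LP64 ic_cmp_size of 10:
//
//   offset() == 54: (54 + 10) % 32 == 0, no nops emitted
//   offset() == 50: four nops bring the offset to 54, so the inline cache
//                   check ends exactly on a 32-byte boundary
//
// The verified entry point that follows the check is then aligned without a
// separate align() in the !VerifyOops case.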
352 | |
353 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) { | |
354 jobject o = NULL; | |
355 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id); | |
356 __ movoop(reg, o); | |
357 patching_epilog(patch, lir_patch_normal, reg, info); | |
358 } | |
359 | |
360 | |
361 void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register new_hdr, int monitor_no, Register exception) { | |
362 if (exception->is_valid()) { | |
363 // preserve exception | |
364 // note: the monitor_exit runtime call is a leaf routine | |
365 // and cannot block => no GC can happen | |
366 // The slow case (MonitorAccessStub) uses the first two stack slots | |
367 // ([esp+0] and [esp+4]), therefore we store the exception at [esp+8] | |
304 | 368 __ movptr (Address(rsp, 2*wordSize), exception); |
0 | 369 } |
370 | |
371 Register obj_reg = obj_opr->as_register(); | |
372 Register lock_reg = lock_opr->as_register(); | |
373 | |
374 // set up registers (lock_reg must be rax for lock_object) |
375 assert(obj_reg != SYNC_header && lock_reg != SYNC_header, "rax, must be available here"); | |
376 Register hdr = lock_reg; | |
377 assert(new_hdr == SYNC_header, "wrong register"); | |
378 lock_reg = new_hdr; | |
379 // compute pointer to BasicLock | |
380 Address lock_addr = frame_map()->address_for_monitor_lock(monitor_no); | |
304 | 381 __ lea(lock_reg, lock_addr); |
0 | 382 // unlock object |
383 MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, true, monitor_no); | |
384 // _slow_case_stubs->append(slow_case); | |
385 // temporary fix: must be created after the exception handler, therefore as a call stub |
386 _slow_case_stubs->append(slow_case); | |
387 if (UseFastLocking) { | |
388 // try inlined fast unlocking first, revert to slow locking if it fails | |
389 // note: lock_reg points to the displaced header since the displaced header offset is 0! | |
390 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); | |
391 __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry()); | |
392 } else { | |
393 // always do slow unlocking | |
394 // note: the slow unlocking code could be inlined here, however if we use | |
395 // slow unlocking, speed doesn't matter anyway and this solution is | |
396 // simpler and requires less duplicated code - additionally, the | |
397 // slow unlocking code is the same in either case which simplifies | |
398 // debugging | |
399 __ jmp(*slow_case->entry()); | |
400 } | |
401 // done | |
402 __ bind(*slow_case->continuation()); | |
403 | |
404 if (exception->is_valid()) { | |
405 // restore exception | |
304 | 406 __ movptr (exception, Address(rsp, 2 * wordSize)); |
0 | 407 } |
408 } | |
409 | |
410 // This specifies the rsp decrement needed to build the frame | |
411 int LIR_Assembler::initial_frame_size_in_bytes() { | |
412 // if rounding, must let FrameMap know! | |
304 | 413 |
414 // The frame_map records the size in slots (32-bit words) |
415 | |
416 // subtract two words to account for return address and link | |
417 return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size; | |
0 | 418 } |
419 | |
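// Sketch of the slot-to-byte conversion above with assumed values: if
// framesize() is 32 slots, VMRegImpl::slots_per_word == 2 and
// VMRegImpl::stack_slot_size == 4, then (32 - 2*2) * 4 == 112 bytes. The two
// subtracted words are the return address and the saved rbp link, which are
// already on the stack at method entry and so are not part of the decrement.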
420 | |
421 void LIR_Assembler::emit_exception_handler() { | |
422 // if the last instruction is a call (typically to do a throw which | |
423 // is coming at the end after block reordering) the return address | |
424 // must still point into the code area in order to avoid assertion | |
425 // failures when searching for the corresponding bci => add a nop | |
426 // (was bug 5/14/1999 - gri) | |
427 | |
428 __ nop(); | |
429 | |
430 // generate code for exception handler | |
431 address handler_base = __ start_a_stub(exception_handler_size); | |
432 if (handler_base == NULL) { | |
433 // not enough space left for the handler | |
434 bailout("exception handler overflow"); | |
435 return; | |
436 } | |
437 #ifdef ASSERT | |
438 int offset = code_offset(); | |
439 #endif // ASSERT | |
440 | |
441 compilation()->offsets()->set_value(CodeOffsets::Exceptions, code_offset()); | |
442 | |
443 // if the method does not have an exception handler, then there is | |
444 // no reason to search for one | |
780 | 445 if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_exceptions()) { |
0 | 446 // the exception oop and pc are in rax, and rdx |
447 // no other registers need to be preserved, so invalidate them | |
448 __ invalidate_registers(false, true, true, false, true, true); | |
449 | |
450 // check that there is really an exception | |
451 __ verify_not_null_oop(rax); | |
452 | |
453 // search an exception handler (rax: exception oop, rdx: throwing pc) | |
454 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id))); | |
455 | |
456 // if the call returns here, then the exception handler for the particular |
457 // exception doesn't exist -> unwind activation and forward exception to caller | |
458 } | |
459 | |
460 // the exception oop is in rax, | |
461 // no other registers need to be preserved, so invalidate them | |
462 __ invalidate_registers(false, true, true, true, true, true); | |
463 | |
464 // check that there is really an exception | |
465 __ verify_not_null_oop(rax); | |
466 | |
467 // unlock the receiver/klass if necessary | |
468 // rax: exception |
469 ciMethod* method = compilation()->method(); | |
470 if (method->is_synchronized() && GenerateSynchronizationCode) { | |
471 monitorexit(FrameMap::rbx_oop_opr, FrameMap::rcx_opr, SYNC_header, 0, rax); | |
472 } | |
473 | |
474 // unwind activation and forward exception to caller | |
475 // rax: exception |
476 __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id))); | |
477 | |
478 assert(code_offset() - offset <= exception_handler_size, "overflow"); | |
479 | |
480 __ end_a_stub(); | |
481 } | |
482 | |
483 void LIR_Assembler::emit_deopt_handler() { | |
484 // if the last instruction is a call (typically to do a throw which | |
485 // is coming at the end after block reordering) the return address | |
486 // must still point into the code area in order to avoid assertion | |
487 // failures when searching for the corresponding bci => add a nop | |
488 // (was bug 5/14/1999 - gri) | |
489 | |
490 __ nop(); | |
491 | |
492 // generate code for deopt handler |
493 address handler_base = __ start_a_stub(deopt_handler_size); | |
494 if (handler_base == NULL) { | |
495 // not enough space left for the handler | |
496 bailout("deopt handler overflow"); | |
497 return; | |
498 } | |
499 #ifdef ASSERT | |
500 int offset = code_offset(); | |
501 #endif // ASSERT | |
502 | |
503 compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset()); | |
504 | |
505 InternalAddress here(__ pc()); | |
506 __ pushptr(here.addr()); | |
507 | |
508 __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack())); | |
509 | |
510 assert(code_offset() - offset <= deopt_handler_size, "overflow"); | |
511 | |
512 __ end_a_stub(); | |
513 | |
514 } | |
515 | |
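// The pushptr(here)/jump pair above fabricates a call frame: it pushes the
// address of the push instruction itself as the "return address" and then
// jumps to the deopt blob, so the unpacking code sees a pc inside this
// nmethod and can look up the matching debug information.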
516 | |
517 // This is the fast version of java.lang.String.compare; it has no |
518 // OSR entry and therefore we generate a slow version for OSRs |
519 void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) { | |
304 | 520 __ movptr (rbx, rcx); // receiver is in rcx |
521 __ movptr (rax, arg1->as_register()); | |
0 | 522 |
523 // Get addresses of first characters from both Strings | |
304 | 524 __ movptr (rsi, Address(rax, java_lang_String::value_offset_in_bytes())); |
525 __ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes())); | |
526 __ lea (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); | |
0 | 527 |
528 | |
529 // rbx may be NULL |
530 add_debug_info_for_null_check_here(info); | |
304 | 531 __ movptr (rdi, Address(rbx, java_lang_String::value_offset_in_bytes())); |
532 __ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes())); | |
533 __ lea (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); | |
0 | 534 |
535 // compute minimum length (in rax) and difference of lengths (on top of stack) | |
536 if (VM_Version::supports_cmov()) { | |
304 | 537 __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes())); |
538 __ movl (rax, Address(rax, java_lang_String::count_offset_in_bytes())); | |
539 __ mov (rcx, rbx); | |
540 __ subptr (rbx, rax); // subtract lengths | |
541 __ push (rbx); // result | |
542 __ cmov (Assembler::lessEqual, rax, rcx); | |
0 | 543 } else { |
544 Label L; | |
304 | 545 __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes())); |
546 __ movl (rcx, Address(rax, java_lang_String::count_offset_in_bytes())); | |
547 __ mov (rax, rbx); | |
548 __ subptr (rbx, rcx); | |
549 __ push (rbx); | |
550 __ jcc (Assembler::lessEqual, L); | |
551 __ mov (rax, rcx); | |
0 | 552 __ bind (L); |
553 } | |
554 // is minimum length 0? | |
555 Label noLoop, haveResult; | |
304 | 556 __ testptr (rax, rax); |
0 | 557 __ jcc (Assembler::zero, noLoop); |
558 | |
559 // compare first characters | |
622 | 560 __ load_unsigned_short(rcx, Address(rdi, 0)); |
561 __ load_unsigned_short(rbx, Address(rsi, 0)); |
0 | 562 __ subl(rcx, rbx); |
563 __ jcc(Assembler::notZero, haveResult); | |
564 // starting loop | |
565 __ decrement(rax); // we already tested index: skip one | |
566 __ jcc(Assembler::zero, noLoop); | |
567 | |
568 // set rsi/rdi to the end of the arrays (the arrays have the same length) |
569 // negate the index | |
570 | |
304 | 571 __ lea(rsi, Address(rsi, rax, Address::times_2, type2aelembytes(T_CHAR))); |
572 __ lea(rdi, Address(rdi, rax, Address::times_2, type2aelembytes(T_CHAR))); | |
573 __ negptr(rax); | |
0 | 574 |
575 // compare the strings in a loop | |
576 | |
577 Label loop; | |
578 __ align(wordSize); | |
579 __ bind(loop); | |
622 | 580 __ load_unsigned_short(rcx, Address(rdi, rax, Address::times_2, 0)); |
581 __ load_unsigned_short(rbx, Address(rsi, rax, Address::times_2, 0)); |
0 | 582 __ subl(rcx, rbx); |
583 __ jcc(Assembler::notZero, haveResult); | |
584 __ increment(rax); | |
585 __ jcc(Assembler::notZero, loop); | |
586 | |
587 // strings are equal up to min length | |
588 | |
589 __ bind(noLoop); | |
304 | 590 __ pop(rax); |
0 | 591 return_op(LIR_OprFact::illegalOpr); |
592 | |
593 __ bind(haveResult); | |
594 // leave instruction is going to discard the TOS value | |
304 | 595 __ mov (rax, rcx); // result of call is in rax |
0 | 596 } |
597 | |
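// The comparison loop above uses the "end pointer plus negative index" idiom
// so one increment both advances the cursor and tests for completion. A rough
// C++ equivalent, assuming (hypothetical) jchar arrays a and b with min_len
// characters left to compare:
//
//   const jchar* ea = a + min_len;   // like rsi after the lea
//   const jchar* eb = b + min_len;   // like rdi
//   ptrdiff_t i = -min_len;          // like the negated rax
//   do {
//     int d = eb[i] - ea[i];
//     if (d != 0) return d;          // haveResult
//   } while (++i != 0);              // index reaching 0 ends the loop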
598 | |
599 void LIR_Assembler::return_op(LIR_Opr result) { | |
600 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,"); | |
601 if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) { | |
602 assert(result->fpu() == 0, "result must already be on TOS"); | |
603 } | |
604 | |
605 // Pop the stack before the safepoint code | |
606 __ leave(); | |
607 | |
608 bool result_is_oop = result->is_valid() ? result->is_oop() : false; | |
609 | |
610 // Note: we do not need to round double result; float result has the right precision | |
611 // the poll sets the condition code, but no data registers | |
612 AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()), | |
613 relocInfo::poll_return_type); | |
304 | 614 |
615 // NOTE: this requires that the polling page be reachable; otherwise the reloc |
616 // goes to the movq that loads the address and not the faulting instruction | |
617 // which breaks the signal handler code | |
618 | |
0 | 619 __ test32(rax, polling_page); |
620 | |
621 __ ret(0); | |
622 } | |
623 | |
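// How the poll above works: test32(rax, polling_page) is a harmless read of a
// dedicated page. To request a safepoint the VM protects that page, the read
// faults, and the signal handler maps the faulting pc back to this
// instruction; that is why the relocation must be attached to the read itself
// and not to a preceding address-materializing movq.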
624 | |
625 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { | |
626 AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()), | |
627 relocInfo::poll_type); | |
628 | |
629 if (info != NULL) { | |
630 add_debug_info_for_branch(info); | |
631 } else { | |
632 ShouldNotReachHere(); | |
633 } | |
634 | |
635 int offset = __ offset(); | |
304 | 636 |
637 // NOTE: this requires that the polling page be reachable; otherwise the reloc |
638 // goes to the movq that loads the address and not the faulting instruction | |
639 // which breaks the signal handler code | |
640 | |
0 | 641 __ test32(rax, polling_page); |
642 return offset; | |
643 } | |
644 | |
645 | |
646 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) { | |
304 | 647 if (from_reg != to_reg) __ mov(to_reg, from_reg); |
0 | 648 } |
649 | |
650 void LIR_Assembler::swap_reg(Register a, Register b) { | |
304 | 651 __ xchgptr(a, b); |
0 | 652 } |
653 | |
654 | |
655 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { | |
656 assert(src->is_constant(), "should not call otherwise"); | |
657 assert(dest->is_register(), "should not call otherwise"); | |
658 LIR_Const* c = src->as_constant_ptr(); | |
659 | |
660 switch (c->type()) { | |
661 case T_INT: { | |
662 assert(patch_code == lir_patch_none, "no patching handled here"); | |
663 __ movl(dest->as_register(), c->as_jint()); | |
664 break; | |
665 } | |
666 | |
667 case T_LONG: { | |
668 assert(patch_code == lir_patch_none, "no patching handled here"); | |
304 | 669 #ifdef _LP64 |
670 __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong()); | |
671 #else | |
672 __ movptr(dest->as_register_lo(), c->as_jint_lo()); | |
673 __ movptr(dest->as_register_hi(), c->as_jint_hi()); | |
674 #endif // _LP64 | |
0 | 675 break; |
676 } | |
677 | |
678 case T_OBJECT: { | |
679 if (patch_code != lir_patch_none) { | |
680 jobject2reg_with_patching(dest->as_register(), info); | |
681 } else { | |
682 __ movoop(dest->as_register(), c->as_jobject()); | |
683 } | |
684 break; | |
685 } | |
686 | |
687 case T_FLOAT: { | |
688 if (dest->is_single_xmm()) { | |
689 if (c->is_zero_float()) { | |
690 __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg()); | |
691 } else { | |
692 __ movflt(dest->as_xmm_float_reg(), | |
693 InternalAddress(float_constant(c->as_jfloat()))); | |
694 } | |
695 } else { | |
696 assert(dest->is_single_fpu(), "must be"); | |
697 assert(dest->fpu_regnr() == 0, "dest must be TOS"); | |
698 if (c->is_zero_float()) { | |
699 __ fldz(); | |
700 } else if (c->is_one_float()) { | |
701 __ fld1(); | |
702 } else { | |
703 __ fld_s (InternalAddress(float_constant(c->as_jfloat()))); | |
704 } | |
705 } | |
706 break; | |
707 } | |
708 | |
709 case T_DOUBLE: { | |
710 if (dest->is_double_xmm()) { | |
711 if (c->is_zero_double()) { | |
712 __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg()); | |
713 } else { | |
714 __ movdbl(dest->as_xmm_double_reg(), | |
715 InternalAddress(double_constant(c->as_jdouble()))); | |
716 } | |
717 } else { | |
718 assert(dest->is_double_fpu(), "must be"); | |
719 assert(dest->fpu_regnrLo() == 0, "dest must be TOS"); | |
720 if (c->is_zero_double()) { | |
721 __ fldz(); | |
722 } else if (c->is_one_double()) { | |
723 __ fld1(); | |
724 } else { | |
725 __ fld_d (InternalAddress(double_constant(c->as_jdouble()))); | |
726 } | |
727 } | |
728 break; | |
729 } | |
730 | |
731 default: | |
732 ShouldNotReachHere(); | |
733 } | |
734 } | |
735 | |
736 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) { | |
737 assert(src->is_constant(), "should not call otherwise"); | |
738 assert(dest->is_stack(), "should not call otherwise"); | |
739 LIR_Const* c = src->as_constant_ptr(); | |
740 | |
741 switch (c->type()) { | |
742 case T_INT: // fall through | |
743 case T_FLOAT: | |
744 __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits()); | |
745 break; | |
746 | |
747 case T_OBJECT: | |
748 __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject()); | |
749 break; | |
750 | |
751 case T_LONG: // fall through | |
752 case T_DOUBLE: | |
304 | 753 #ifdef _LP64 |
754 __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(), | |
755 lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits()); | |
756 #else | |
757 __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(), | |
758 lo_word_offset_in_bytes), c->as_jint_lo_bits()); | |
759 __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(), | |
760 hi_word_offset_in_bytes), c->as_jint_hi_bits()); | |
761 #endif // _LP64 | |
0 | 762 break; |
763 | |
764 default: | |
765 ShouldNotReachHere(); | |
766 } | |
767 } | |
768 | |
769 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info ) { | |
770 assert(src->is_constant(), "should not call otherwise"); | |
771 assert(dest->is_address(), "should not call otherwise"); | |
772 LIR_Const* c = src->as_constant_ptr(); | |
773 LIR_Address* addr = dest->as_address_ptr(); | |
774 | |
304 | 775 int null_check_here = code_offset(); |
0 | 776 switch (type) { |
777 case T_INT: // fall through | |
778 case T_FLOAT: | |
779 __ movl(as_Address(addr), c->as_jint_bits()); | |
780 break; | |
781 | |
782 case T_OBJECT: // fall through | |
783 case T_ARRAY: | |
784 if (c->as_jobject() == NULL) { | |
512 | 785 __ movptr(as_Address(addr), NULL_WORD); |
0 | 786 } else { |
304 | 787 if (is_literal_address(addr)) { |
788 ShouldNotReachHere(); | |
789 __ movoop(as_Address(addr, noreg), c->as_jobject()); | |
790 } else { | |
1060 | 791 #ifdef _LP64 |
792 __ movoop(rscratch1, c->as_jobject()); | |
793 null_check_here = code_offset(); | |
794 __ movptr(as_Address_lo(addr), rscratch1); | |
795 #else | |
304 | 796 __ movoop(as_Address(addr), c->as_jobject()); |
1060 | 797 #endif |
304 | 798 } |
0 | 799 } |
800 break; | |
801 | |
802 case T_LONG: // fall through | |
803 case T_DOUBLE: | |
304 | 804 #ifdef _LP64 |
805 if (is_literal_address(addr)) { | |
806 ShouldNotReachHere(); | |
807 __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits()); | |
808 } else { | |
809 __ movptr(r10, (intptr_t)c->as_jlong_bits()); | |
810 null_check_here = code_offset(); | |
811 __ movptr(as_Address_lo(addr), r10); | |
812 } | |
813 #else | |
814 // Always reachable in 32-bit, so this doesn't produce a useless literal move |
815 __ movptr(as_Address_hi(addr), c->as_jint_hi_bits()); | |
816 __ movptr(as_Address_lo(addr), c->as_jint_lo_bits()); | |
817 #endif // _LP64 | |
0 | 818 break; |
819 | |
820 case T_BOOLEAN: // fall through | |
821 case T_BYTE: | |
822 __ movb(as_Address(addr), c->as_jint() & 0xFF); | |
823 break; | |
824 | |
825 case T_CHAR: // fall through | |
826 case T_SHORT: | |
827 __ movw(as_Address(addr), c->as_jint() & 0xFFFF); | |
828 break; | |
829 | |
830 default: | |
831 ShouldNotReachHere(); | |
832 }; | |
304 | 833 |
834 if (info != NULL) { | |
835 add_debug_info_for_null_check(null_check_here, info); | |
836 } | |
0 | 837 } |
838 | |
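// Why null_check_here is re-recorded in the LP64 paths above: the constant
// load into the scratch register cannot fault; only the following store
// through the address can. Debug info for an implicit null check must
// therefore point at the store instruction, not at the constant load.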
839 | |
840 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) { | |
841 assert(src->is_register(), "should not call otherwise"); | |
842 assert(dest->is_register(), "should not call otherwise"); | |
843 | |
844 // move between cpu-registers | |
845 if (dest->is_single_cpu()) { | |
304 | 846 #ifdef _LP64 |
847 if (src->type() == T_LONG) { | |
848 // Can do LONG -> OBJECT | |
849 move_regs(src->as_register_lo(), dest->as_register()); | |
850 return; | |
851 } | |
852 #endif | |
0 | 853 assert(src->is_single_cpu(), "must match"); |
854 if (src->type() == T_OBJECT) { | |
855 __ verify_oop(src->as_register()); | |
856 } | |
857 move_regs(src->as_register(), dest->as_register()); | |
858 | |
859 } else if (dest->is_double_cpu()) { | |
304 | 860 #ifdef _LP64 |
861 if (src->type() == T_OBJECT || src->type() == T_ARRAY) { | |
862 // Surprising, but we can see a move of a long to T_OBJECT |
863 __ verify_oop(src->as_register()); | |
864 move_regs(src->as_register(), dest->as_register_lo()); | |
865 return; | |
866 } | |
867 #endif | |
0 | 868 assert(src->is_double_cpu(), "must match"); |
869 Register f_lo = src->as_register_lo(); | |
870 Register f_hi = src->as_register_hi(); | |
871 Register t_lo = dest->as_register_lo(); | |
872 Register t_hi = dest->as_register_hi(); | |
304 | 873 #ifdef _LP64 |
874 assert(f_hi == f_lo, "must be same"); | |
875 assert(t_hi == t_lo, "must be same"); | |
876 move_regs(f_lo, t_lo); | |
877 #else | |
0 | 878 assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation"); |
879 | |
304 | 880 |
0 | 881 if (f_lo == t_hi && f_hi == t_lo) { |
882 swap_reg(f_lo, f_hi); | |
883 } else if (f_hi == t_lo) { | |
884 assert(f_lo != t_hi, "overwriting register"); | |
885 move_regs(f_hi, t_hi); | |
886 move_regs(f_lo, t_lo); | |
887 } else { | |
888 assert(f_hi != t_lo, "overwriting register"); | |
889 move_regs(f_lo, t_lo); | |
890 move_regs(f_hi, t_hi); | |
891 } | |
304 | 892 #endif // LP64 |
0 | 893 |
894 // special moves from fpu-register to xmm-register | |
895 // necessary for method results | |
896 } else if (src->is_single_xmm() && !dest->is_single_xmm()) { | |
897 __ movflt(Address(rsp, 0), src->as_xmm_float_reg()); | |
898 __ fld_s(Address(rsp, 0)); | |
899 } else if (src->is_double_xmm() && !dest->is_double_xmm()) { | |
900 __ movdbl(Address(rsp, 0), src->as_xmm_double_reg()); | |
901 __ fld_d(Address(rsp, 0)); | |
902 } else if (dest->is_single_xmm() && !src->is_single_xmm()) { | |
903 __ fstp_s(Address(rsp, 0)); | |
904 __ movflt(dest->as_xmm_float_reg(), Address(rsp, 0)); | |
905 } else if (dest->is_double_xmm() && !src->is_double_xmm()) { | |
906 __ fstp_d(Address(rsp, 0)); | |
907 __ movdbl(dest->as_xmm_double_reg(), Address(rsp, 0)); | |
908 | |
909 // move between xmm-registers | |
910 } else if (dest->is_single_xmm()) { | |
911 assert(src->is_single_xmm(), "must match"); | |
912 __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg()); | |
913 } else if (dest->is_double_xmm()) { | |
914 assert(src->is_double_xmm(), "must match"); | |
915 __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg()); | |
916 | |
917 // move between fpu-registers (no instruction necessary because of fpu-stack) | |
918 } else if (dest->is_single_fpu() || dest->is_double_fpu()) { | |
919 assert(src->is_single_fpu() || src->is_double_fpu(), "must match"); | |
920 assert(src->fpu() == dest->fpu(), "currently should be nothing to do"); | |
921 } else { | |
922 ShouldNotReachHere(); | |
923 } | |
924 } | |
925 | |
926 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) { | |
927 assert(src->is_register(), "should not call otherwise"); | |
928 assert(dest->is_stack(), "should not call otherwise"); | |
929 | |
930 if (src->is_single_cpu()) { | |
931 Address dst = frame_map()->address_for_slot(dest->single_stack_ix()); | |
932 if (type == T_OBJECT || type == T_ARRAY) { | |
933 __ verify_oop(src->as_register()); | |
304 | 934 __ movptr (dst, src->as_register()); |
935 } else { | |
936 __ movl (dst, src->as_register()); | |
0 | 937 } |
938 | |
939 } else if (src->is_double_cpu()) { | |
940 Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes); | |
941 Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes); | |
304 | 942 __ movptr (dstLO, src->as_register_lo()); |
943 NOT_LP64(__ movptr (dstHI, src->as_register_hi())); | |
0 | 944 |
945 } else if (src->is_single_xmm()) { | |
946 Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix()); | |
947 __ movflt(dst_addr, src->as_xmm_float_reg()); | |
948 | |
949 } else if (src->is_double_xmm()) { | |
950 Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix()); | |
951 __ movdbl(dst_addr, src->as_xmm_double_reg()); | |
952 | |
953 } else if (src->is_single_fpu()) { | |
954 assert(src->fpu_regnr() == 0, "argument must be on TOS"); | |
955 Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix()); | |
956 if (pop_fpu_stack) __ fstp_s (dst_addr); | |
957 else __ fst_s (dst_addr); | |
958 | |
959 } else if (src->is_double_fpu()) { | |
960 assert(src->fpu_regnrLo() == 0, "argument must be on TOS"); | |
961 Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix()); | |
962 if (pop_fpu_stack) __ fstp_d (dst_addr); | |
963 else __ fst_d (dst_addr); | |
964 | |
965 } else { | |
966 ShouldNotReachHere(); | |
967 } | |
968 } | |
969 | |
970 | |
971 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool /* unaligned */) { | |
972 LIR_Address* to_addr = dest->as_address_ptr(); | |
973 PatchingStub* patch = NULL; | |
974 | |
975 if (type == T_ARRAY || type == T_OBJECT) { | |
976 __ verify_oop(src->as_register()); | |
977 } | |
978 if (patch_code != lir_patch_none) { | |
979 patch = new PatchingStub(_masm, PatchingStub::access_field_id); | |
304 | 980 Address toa = as_Address(to_addr); |
981 assert(toa.disp() != 0, "must have"); | |
0 | 982 } |
983 if (info != NULL) { | |
984 add_debug_info_for_null_check_here(info); | |
985 } | |
986 | |
987 switch (type) { | |
988 case T_FLOAT: { | |
989 if (src->is_single_xmm()) { | |
990 __ movflt(as_Address(to_addr), src->as_xmm_float_reg()); | |
991 } else { | |
992 assert(src->is_single_fpu(), "must be"); | |
993 assert(src->fpu_regnr() == 0, "argument must be on TOS"); | |
994 if (pop_fpu_stack) __ fstp_s(as_Address(to_addr)); | |
995 else __ fst_s (as_Address(to_addr)); | |
996 } | |
997 break; | |
998 } | |
999 | |
1000 case T_DOUBLE: { | |
1001 if (src->is_double_xmm()) { | |
1002 __ movdbl(as_Address(to_addr), src->as_xmm_double_reg()); | |
1003 } else { | |
1004 assert(src->is_double_fpu(), "must be"); | |
1005 assert(src->fpu_regnrLo() == 0, "argument must be on TOS"); | |
1006 if (pop_fpu_stack) __ fstp_d(as_Address(to_addr)); | |
1007 else __ fst_d (as_Address(to_addr)); | |
1008 } | |
1009 break; | |
1010 } | |
1011 | |
1012 case T_ADDRESS: // fall through | |
1013 case T_ARRAY: // fall through | |
1014 case T_OBJECT: // fall through | |
304 | 1015 #ifdef _LP64 |
1016 __ movptr(as_Address(to_addr), src->as_register()); | |
1017 break; | |
1018 #endif // _LP64 | |
0 | 1019 case T_INT: |
1020 __ movl(as_Address(to_addr), src->as_register()); | |
1021 break; | |
1022 | |
1023 case T_LONG: { | |
1024 Register from_lo = src->as_register_lo(); | |
1025 Register from_hi = src->as_register_hi(); | |
304 | 1026 #ifdef _LP64 |
1027 __ movptr(as_Address_lo(to_addr), from_lo); | |
1028 #else | |
0 | 1029 Register base = to_addr->base()->as_register(); |
1030 Register index = noreg; | |
1031 if (to_addr->index()->is_register()) { | |
1032 index = to_addr->index()->as_register(); | |
1033 } | |
1034 if (base == from_lo || index == from_lo) { | |
1035 assert(base != from_hi, "can't be"); | |
1036 assert(index == noreg || (index != base && index != from_hi), "can't handle this"); | |
1037 __ movl(as_Address_hi(to_addr), from_hi); | |
1038 if (patch != NULL) { | |
1039 patching_epilog(patch, lir_patch_high, base, info); | |
1040 patch = new PatchingStub(_masm, PatchingStub::access_field_id); | |
1041 patch_code = lir_patch_low; | |
1042 } | |
1043 __ movl(as_Address_lo(to_addr), from_lo); | |
1044 } else { | |
1045 assert(index == noreg || (index != base && index != from_lo), "can't handle this"); | |
1046 __ movl(as_Address_lo(to_addr), from_lo); | |
1047 if (patch != NULL) { | |
1048 patching_epilog(patch, lir_patch_low, base, info); | |
1049 patch = new PatchingStub(_masm, PatchingStub::access_field_id); | |
1050 patch_code = lir_patch_high; | |
1051 } | |
1052 __ movl(as_Address_hi(to_addr), from_hi); | |
1053 } | |
304 | 1054 #endif // _LP64 |
0 | 1055 break; |
1056 } | |
1057 | |
1058 case T_BYTE: // fall through | |
1059 case T_BOOLEAN: { | |
1060 Register src_reg = src->as_register(); | |
1061 Address dst_addr = as_Address(to_addr); | |
1062 assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6"); | |
1063 __ movb(dst_addr, src_reg); | |
1064 break; | |
1065 } | |
1066 | |
1067 case T_CHAR: // fall through | |
1068 case T_SHORT: | |
1069 __ movw(as_Address(to_addr), src->as_register()); | |
1070 break; | |
1071 | |
1072 default: | |
1073 ShouldNotReachHere(); | |
1074 } | |
1075 | |
1076 if (patch_code != lir_patch_none) { | |
1077 patching_epilog(patch, patch_code, to_addr->base()->as_register(), info); | |
1078 } | |
1079 } | |
1080 | |
1081 | |
1082 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) { | |
1083 assert(src->is_stack(), "should not call otherwise"); | |
1084 assert(dest->is_register(), "should not call otherwise"); | |
1085 | |
1086 if (dest->is_single_cpu()) { | |
1087 if (type == T_ARRAY || type == T_OBJECT) { | |
304 | 1088 __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); |
0 | 1089 __ verify_oop(dest->as_register()); |
304 | 1090 } else { |
1091 __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); | |
0 | 1092 } |
1093 | |
1094 } else if (dest->is_double_cpu()) { | |
1095 Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes); | |
1096 Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes); | |
304 | 1097 __ movptr(dest->as_register_lo(), src_addr_LO); |
1098 NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI)); | |
0 | 1099 |
1100 } else if (dest->is_single_xmm()) { | |
1101 Address src_addr = frame_map()->address_for_slot(src->single_stack_ix()); | |
1102 __ movflt(dest->as_xmm_float_reg(), src_addr); | |
1103 | |
1104 } else if (dest->is_double_xmm()) { | |
1105 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix()); | |
1106 __ movdbl(dest->as_xmm_double_reg(), src_addr); | |
1107 | |
1108 } else if (dest->is_single_fpu()) { | |
1109 assert(dest->fpu_regnr() == 0, "dest must be TOS"); | |
1110 Address src_addr = frame_map()->address_for_slot(src->single_stack_ix()); | |
1111 __ fld_s(src_addr); | |
1112 | |
1113 } else if (dest->is_double_fpu()) { | |
1114 assert(dest->fpu_regnrLo() == 0, "dest must be TOS"); | |
1115 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix()); | |
1116 __ fld_d(src_addr); | |
1117 | |
1118 } else { | |
1119 ShouldNotReachHere(); | |
1120 } | |
1121 } | |
1122 | |
1123 | |
1124 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) { | |
1125 if (src->is_single_stack()) { | |
304 | 1126 if (type == T_OBJECT || type == T_ARRAY) { |
1127 __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix())); | |
1128 __ popptr (frame_map()->address_for_slot(dest->single_stack_ix())); | |
1129 } else { | |
1060 | 1130 #ifndef _LP64 |
304 | 1131 __ pushl(frame_map()->address_for_slot(src ->single_stack_ix())); |
1132 __ popl (frame_map()->address_for_slot(dest->single_stack_ix())); | |
1060 | 1133 #else |
1134 // no pushl on 64-bit |
1135 __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix())); | |
1136 __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1); | |
1137 #endif | |
304 | 1138 } |
0 | 1139 |
1140 } else if (src->is_double_stack()) { | |
304 | 1141 #ifdef _LP64 |
1142 __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix())); | |
1143 __ popptr (frame_map()->address_for_slot(dest->double_stack_ix())); | |
1144 #else | |
0 | 1145 __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0)); |
304 | 1146 // push and pop the part at src + wordSize, adding wordSize for the previous push |
321 | 1147 __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize)); |
1148 __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize)); |
0 | 1149 __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0)); |
304 | 1150 #endif // _LP64 |
0 | 1151 |
1152 } else { | |
1153 ShouldNotReachHere(); | |
1154 } | |
1155 } | |
1156 | |
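// Note on the 2 * wordSize above (32-bit path): frame slots are addressed
// relative to rsp, and the first pushl has already moved rsp down by one
// word, so the half of the slot logically at src + wordSize must be
// addressed as src + 2 * wordSize; the popl side compensates the same way.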
1157 | |
1158 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool /* unaligned */) { | |
1159 assert(src->is_address(), "should not call otherwise"); | |
1160 assert(dest->is_register(), "should not call otherwise"); | |
1161 | |
1162 LIR_Address* addr = src->as_address_ptr(); | |
1163 Address from_addr = as_Address(addr); | |
1164 | |
1165 switch (type) { | |
1166 case T_BOOLEAN: // fall through | |
1167 case T_BYTE: // fall through | |
1168 case T_CHAR: // fall through | |
1169 case T_SHORT: | |
1170 if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) { | |
1171 // on pre P6 processors we may get partial register stalls | |
1172 // so blow away the value of to_rinfo before loading a | |
1173 // partial word into it. Do it here so that it precedes | |
1174 // the potential patch point below. | |
304 | 1175 __ xorptr(dest->as_register(), dest->as_register()); |
0 | 1176 } |
1177 break; | |
1178 } | |
1179 | |
1180 PatchingStub* patch = NULL; | |
1181 if (patch_code != lir_patch_none) { | |
1182 patch = new PatchingStub(_masm, PatchingStub::access_field_id); | |
304 | 1183 assert(from_addr.disp() != 0, "must have"); |
0 | 1184 } |
1185 if (info != NULL) { | |
1186 add_debug_info_for_null_check_here(info); | |
1187 } | |
1188 | |
1189 switch (type) { | |
1190 case T_FLOAT: { | |
1191 if (dest->is_single_xmm()) { | |
1192 __ movflt(dest->as_xmm_float_reg(), from_addr); | |
1193 } else { | |
1194 assert(dest->is_single_fpu(), "must be"); | |
1195 assert(dest->fpu_regnr() == 0, "dest must be TOS"); | |
1196 __ fld_s(from_addr); | |
1197 } | |
1198 break; | |
1199 } | |
1200 | |
1201 case T_DOUBLE: { | |
1202 if (dest->is_double_xmm()) { | |
1203 __ movdbl(dest->as_xmm_double_reg(), from_addr); | |
1204 } else { | |
1205 assert(dest->is_double_fpu(), "must be"); | |
1206 assert(dest->fpu_regnrLo() == 0, "dest must be TOS"); | |
1207 __ fld_d(from_addr); | |
1208 } | |
1209 break; | |
1210 } | |
1211 | |
1212 case T_ADDRESS: // fall through | |
1213 case T_OBJECT: // fall through | |
1214 case T_ARRAY: // fall through | |
304 | 1215 #ifdef _LP64 |
1216 __ movptr(dest->as_register(), from_addr); | |
1217 break; | |
1218 #endif // _LP64 |
0 | 1219 case T_INT: |
304 | 1220 // %%% could this be a movl? this is safer but a longer instruction |
1221 __ movl2ptr(dest->as_register(), from_addr); | |
0 | 1222 break; |
1223 | |
1224 case T_LONG: { | |
1225 Register to_lo = dest->as_register_lo(); | |
1226 Register to_hi = dest->as_register_hi(); | |
304 | 1227 #ifdef _LP64 |
1228 __ movptr(to_lo, as_Address_lo(addr)); | |
1229 #else | |
0 | 1230 Register base = addr->base()->as_register(); |
1231 Register index = noreg; | |
1232 if (addr->index()->is_register()) { | |
1233 index = addr->index()->as_register(); | |
1234 } | |
1235 if ((base == to_lo && index == to_hi) || | |
1236 (base == to_hi && index == to_lo)) { | |
1237 // addresses with 2 registers are only formed as a result of | |
1238 // array access so this code will never have to deal with | |
1239 // patches or null checks. | |
1240 assert(info == NULL && patch == NULL, "must be"); | |
304 | 1241 __ lea(to_hi, as_Address(addr)); |
0 | 1242 __ movl(to_lo, Address(to_hi, 0)); |
1243 __ movl(to_hi, Address(to_hi, BytesPerWord)); | |
1244 } else if (base == to_lo || index == to_lo) { | |
1245 assert(base != to_hi, "can't be"); | |
1246 assert(index == noreg || (index != base && index != to_hi), "can't handle this"); | |
1247 __ movl(to_hi, as_Address_hi(addr)); | |
1248 if (patch != NULL) { | |
1249 patching_epilog(patch, lir_patch_high, base, info); | |
1250 patch = new PatchingStub(_masm, PatchingStub::access_field_id); | |
1251 patch_code = lir_patch_low; | |
1252 } | |
1253 __ movl(to_lo, as_Address_lo(addr)); | |
1254 } else { | |
1255 assert(index == noreg || (index != base && index != to_lo), "can't handle this"); | |
1256 __ movl(to_lo, as_Address_lo(addr)); | |
1257 if (patch != NULL) { | |
1258 patching_epilog(patch, lir_patch_low, base, info); | |
1259 patch = new PatchingStub(_masm, PatchingStub::access_field_id); | |
1260 patch_code = lir_patch_high; | |
1261 } | |
1262 __ movl(to_hi, as_Address_hi(addr)); | |
1263 } | |
304 | 1264 #endif // _LP64 |
0 | 1265 break; |
1266 } | |
1267 | |
1268 case T_BOOLEAN: // fall through | |
1269 case T_BYTE: { | |
1270 Register dest_reg = dest->as_register(); | |
1271 assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6"); | |
1272 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) { | |
304 | 1273 __ movsbl(dest_reg, from_addr); |
0 | 1274 } else { |
1275 __ movb(dest_reg, from_addr); | |
1276 __ shll(dest_reg, 24); | |
1277 __ sarl(dest_reg, 24); | |
1278 } | |
304 | 1279 // These are unsigned so the zero extension on 64bit is just what we need |
0 | 1280 break; |
1281 } | |
1282 | |
1283 case T_CHAR: { | |
1284 Register dest_reg = dest->as_register(); | |
1285 assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6"); | |
1286 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) { | |
304 | 1287 __ movzwl(dest_reg, from_addr); |
0 | 1288 } else { |
1289 __ movw(dest_reg, from_addr); | |
1290 } | |
304 | 1291 // This is unsigned so the zero extension on 64bit is just what we need |
1292 // __ movl2ptr(dest_reg, dest_reg); | |
0 | 1293 break; |
1294 } | |
1295 | |
1296 case T_SHORT: { | |
1297 Register dest_reg = dest->as_register(); | |
1298 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) { | |
304 | 1299 __ movswl(dest_reg, from_addr); |
0 | 1300 } else { |
1301 __ movw(dest_reg, from_addr); | |
1302 __ shll(dest_reg, 16); | |
1303 __ sarl(dest_reg, 16); | |
1304 } | |
304 | 1305 // Might not be needed in 64bit but certainly doesn't hurt (except for code size) |
1306 __ movl2ptr(dest_reg, dest_reg); | |
0 | 1307 break; |
1308 } | |
1309 | |
1310 default: | |
1311 ShouldNotReachHere(); | |
1312 } | |
1313 | |
1314 if (patch != NULL) { | |
1315 patching_epilog(patch, patch_code, addr->base()->as_register(), info); | |
1316 } | |
1317 | |
1318 if (type == T_ARRAY || type == T_OBJECT) { | |
1319 __ verify_oop(dest->as_register()); | |
1320 } | |
1321 } | |
1322 | |
1323 | |
1324 void LIR_Assembler::prefetchr(LIR_Opr src) { | |
1325 LIR_Address* addr = src->as_address_ptr(); | |
1326 Address from_addr = as_Address(addr); | |
1327 | |
1328 if (VM_Version::supports_sse()) { | |
1329 switch (ReadPrefetchInstr) { | |
1330 case 0: | |
1331 __ prefetchnta(from_addr); break; | |
1332 case 1: | |
1333 __ prefetcht0(from_addr); break; | |
1334 case 2: | |
1335 __ prefetcht2(from_addr); break; | |
1336 default: | |
1337 ShouldNotReachHere(); break; | |
1338 } | |
1339 } else if (VM_Version::supports_3dnow()) { | |
1340 __ prefetchr(from_addr); | |
1341 } | |
1342 } | |
1343 | |
1344 | |
1345 void LIR_Assembler::prefetchw(LIR_Opr src) { | |
1346 LIR_Address* addr = src->as_address_ptr(); | |
1347 Address from_addr = as_Address(addr); | |
1348 | |
1349 if (VM_Version::supports_sse()) { | |
1350 switch (AllocatePrefetchInstr) { | |
1351 case 0: | |
1352 __ prefetchnta(from_addr); break; | |
1353 case 1: | |
1354 __ prefetcht0(from_addr); break; | |
1355 case 2: | |
1356 __ prefetcht2(from_addr); break; | |
1357 case 3: | |
1358 __ prefetchw(from_addr); break; | |
1359 default: | |
1360 ShouldNotReachHere(); break; | |
1361 } | |
1362 } else if (VM_Version::supports_3dnow()) { | |
1363 __ prefetchw(from_addr); | |
1364 } | |
1365 } | |
1366 | |
1367 | |
1368 NEEDS_CLEANUP; // This could be static? | |
1369 Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const { | |
29 | 1370 int elem_size = type2aelembytes(type); |
0 | 1371 switch (elem_size) { |
1372 case 1: return Address::times_1; | |
1373 case 2: return Address::times_2; | |
1374 case 4: return Address::times_4; | |
1375 case 8: return Address::times_8; | |
1376 } | |
1377 ShouldNotReachHere(); | |
1378 return Address::no_scale; | |
1379 } | |
1380 | |
1381 | |
1382 void LIR_Assembler::emit_op3(LIR_Op3* op) { | |
1383 switch (op->code()) { | |
1384 case lir_idiv: | |
1385 case lir_irem: | |
1386 arithmetic_idiv(op->code(), | |
1387 op->in_opr1(), | |
1388 op->in_opr2(), | |
1389 op->in_opr3(), | |
1390 op->result_opr(), | |
1391 op->info()); | |
1392 break; | |
1393 default: ShouldNotReachHere(); break; | |
1394 } | |
1395 } | |
1396 | |
1397 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) { | |
1398 #ifdef ASSERT | |
1399 assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label"); | |
1400 if (op->block() != NULL) _branch_target_blocks.append(op->block()); | |
1401 if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock()); | |
1402 #endif | |
1403 | |
1404 if (op->cond() == lir_cond_always) { | |
1405 if (op->info() != NULL) add_debug_info_for_branch(op->info()); | |
1406 __ jmp (*(op->label())); | |
1407 } else { | |
1408 Assembler::Condition acond = Assembler::zero; | |
1409 if (op->code() == lir_cond_float_branch) { | |
1410 assert(op->ublock() != NULL, "must have unordered successor"); | |
1411 __ jcc(Assembler::parity, *(op->ublock()->label())); | |
1412 switch(op->cond()) { | |
1413 case lir_cond_equal: acond = Assembler::equal; break; | |
1414 case lir_cond_notEqual: acond = Assembler::notEqual; break; | |
1415 case lir_cond_less: acond = Assembler::below; break; | |
1416 case lir_cond_lessEqual: acond = Assembler::belowEqual; break; | |
1417 case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break; | |
1418 case lir_cond_greater: acond = Assembler::above; break; | |
1419 default: ShouldNotReachHere(); | |
1420 } | |
1421 } else { | |
1422 switch (op->cond()) { | |
1423 case lir_cond_equal: acond = Assembler::equal; break; | |
1424 case lir_cond_notEqual: acond = Assembler::notEqual; break; | |
1425 case lir_cond_less: acond = Assembler::less; break; | |
1426 case lir_cond_lessEqual: acond = Assembler::lessEqual; break; | |
1427 case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break; | |
1428 case lir_cond_greater: acond = Assembler::greater; break; | |
1429 case lir_cond_belowEqual: acond = Assembler::belowEqual; break; | |
1430 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break; | |
1431 default: ShouldNotReachHere(); | |
1432 } | |
1433 } | |
1434 __ jcc(acond,*(op->label())); | |
1435 } | |
1436 } | |
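// Note on the float branch path above (a sketch of the emitted shape):
// ucomiss/ucomisd set the parity flag for an unordered (NaN) compare, so
// the parity jump issued first routes NaN operands to the unordered block
// before the ordinary condition is tested:
//   __ jcc(Assembler::parity, *ublock_label);  // NaN -> unordered successor
//   __ jcc(acond, *taken_label);               // ordered outcome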
1437 | |
1438 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) { | |
1439 LIR_Opr src = op->in_opr(); | |
1440 LIR_Opr dest = op->result_opr(); | |
1441 | |
1442 switch (op->bytecode()) { | |
1443 case Bytecodes::_i2l: | |
304 | 1444 #ifdef _LP64 |
1445 __ movl2ptr(dest->as_register_lo(), src->as_register()); | |
1446 #else | |
0 | 1447 move_regs(src->as_register(), dest->as_register_lo()); |
1448 move_regs(src->as_register(), dest->as_register_hi()); | |
1449 __ sarl(dest->as_register_hi(), 31); | |
304 | 1450 #endif // LP64 |
0 | 1451 break; |
1452 | |
1453 case Bytecodes::_l2i: | |
1454 move_regs(src->as_register_lo(), dest->as_register()); | |
1455 break; | |
1456 | |
1457 case Bytecodes::_i2b: | |
1458 move_regs(src->as_register(), dest->as_register()); | |
1459 __ sign_extend_byte(dest->as_register()); | |
1460 break; | |
1461 | |
1462 case Bytecodes::_i2c: | |
1463 move_regs(src->as_register(), dest->as_register()); | |
1464 __ andl(dest->as_register(), 0xFFFF); | |
1465 break; | |
1466 | |
1467 case Bytecodes::_i2s: | |
1468 move_regs(src->as_register(), dest->as_register()); | |
1469 __ sign_extend_short(dest->as_register()); | |
1470 break; | |
1471 | |
1472 | |
1473 case Bytecodes::_f2d: | |
1474 case Bytecodes::_d2f: | |
1475 if (dest->is_single_xmm()) { | |
1476 __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg()); | |
1477 } else if (dest->is_double_xmm()) { | |
1478 __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg()); | |
1479 } else { | |
1480 assert(src->fpu() == dest->fpu(), "register must be equal"); | |
1481 // do nothing (float result is rounded later through spilling) | |
1482 } | |
1483 break; | |
1484 | |
1485 case Bytecodes::_i2f: | |
1486 case Bytecodes::_i2d: | |
1487 if (dest->is_single_xmm()) { | |
304 | 1488 __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register()); |
0 | 1489 } else if (dest->is_double_xmm()) { |
304 | 1490 __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register()); |
0 | 1491 } else { |
1492 assert(dest->fpu() == 0, "result must be on TOS"); | |
1493 __ movl(Address(rsp, 0), src->as_register()); | |
1494 __ fild_s(Address(rsp, 0)); | |
1495 } | |
1496 break; | |
1497 | |
1498 case Bytecodes::_f2i: | |
1499 case Bytecodes::_d2i: | |
1500 if (src->is_single_xmm()) { | |
304 | 1501 __ cvttss2sil(dest->as_register(), src->as_xmm_float_reg()); |
0 | 1502 } else if (src->is_double_xmm()) { |
304 | 1503 __ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg()); |
0 | 1504 } else { |
1505 assert(src->fpu() == 0, "input must be on TOS"); | |
1506 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_trunc())); | |
1507 __ fist_s(Address(rsp, 0)); | |
1508 __ movl(dest->as_register(), Address(rsp, 0)); | |
1509 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); | |
1510 } | |
1511 | |
1512 // IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub | |
1513 assert(op->stub() != NULL, "stub required"); | |
1514 __ cmpl(dest->as_register(), 0x80000000); | |
1515 __ jcc(Assembler::equal, *op->stub()->entry()); | |
1516 __ bind(*op->stub()->continuation()); | |
1517 break; | |
1518 | |
1519 case Bytecodes::_l2f: | |
1520 case Bytecodes::_l2d: | |
1521 assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)"); | |
1522 assert(dest->fpu() == 0, "result must be on TOS"); | |
1523 | |
304 | 1524 __ movptr(Address(rsp, 0), src->as_register_lo()); |
1525 NOT_LP64(__ movl(Address(rsp, BytesPerWord), src->as_register_hi())); | |
0 | 1526 __ fild_d(Address(rsp, 0)); |
1527 // float result is rounded later through spilling | |
1528 break; | |
1529 | |
1530 case Bytecodes::_f2l: | |
1531 case Bytecodes::_d2l: | |
1532 assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)"); | |
1533 assert(src->fpu() == 0, "input must be on TOS"); | |
304 | 1534 assert(dest == FrameMap::long0_opr, "runtime stub places result in these registers"); |
0 | 1535 |
1536 // instruction sequence too long to inline it here | |
1537 { | |
1538 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id))); | |
1539 } | |
1540 break; | |
1541 | |
1542 default: ShouldNotReachHere(); | |
1543 } | |
1544 } | |
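// The _f2i/_d2i sentinel test above works because cvttss2si/cvttsd2si
// produce the "integer indefinite" value 0x80000000 for NaN and
// out-of-range inputs; only that value takes the stub, which then applies
// the JLS-mandated result (0 for NaN, saturation on overflow). Emitted shape:
//   __ cvttss2sil(dest, src);          // NaN/overflow -> 0x80000000
//   __ cmpl(dest, 0x80000000);
//   __ jcc(Assembler::equal, *stub);   // rare path: fix up per JLS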
1545 | |
1546 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) { | |
1547 if (op->init_check()) { | |
1548 __ cmpl(Address(op->klass()->as_register(), | |
1549 instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), | |
1550 instanceKlass::fully_initialized); | |
1551 add_debug_info_for_null_check_here(op->stub()->info()); | |
1552 __ jcc(Assembler::notEqual, *op->stub()->entry()); | |
1553 } | |
1554 __ allocate_object(op->obj()->as_register(), | |
1555 op->tmp1()->as_register(), | |
1556 op->tmp2()->as_register(), | |
1557 op->header_size(), | |
1558 op->object_size(), | |
1559 op->klass()->as_register(), | |
1560 *op->stub()->entry()); | |
1561 __ bind(*op->stub()->continuation()); | |
1562 } | |
1563 | |
1564 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { | |
1565 if (UseSlowPath || | |
1566 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) || | |
1567 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) { | |
1568 __ jmp(*op->stub()->entry()); | |
1569 } else { | |
1570 Register len = op->len()->as_register(); | |
1571 Register tmp1 = op->tmp1()->as_register(); | |
1572 Register tmp2 = op->tmp2()->as_register(); | |
1573 Register tmp3 = op->tmp3()->as_register(); | |
1574 if (len == tmp1) { | |
1575 tmp1 = tmp3; | |
1576 } else if (len == tmp2) { | |
1577 tmp2 = tmp3; | |
1578 } else if (len == tmp3) { | |
1579 // everything is ok | |
1580 } else { | |
304 | 1581 __ mov(tmp3, len); |
0 | 1582 } |
1583 __ allocate_array(op->obj()->as_register(), | |
1584 len, | |
1585 tmp1, | |
1586 tmp2, | |
1587 arrayOopDesc::header_size(op->type()), | |
1588 array_element_size(op->type()), | |
1589 op->klass()->as_register(), | |
1590 *op->stub()->entry()); | |
1591 } | |
1592 __ bind(*op->stub()->continuation()); | |
1593 } | |
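// The tmp shuffling above just guarantees that 'len' and the two temps
// handed to allocate_array are pairwise distinct registers, with tmp3 as
// the spare used to break an alias (illustrative):
//   len == tmp1 -> allocate with {tmp3, tmp2}, len untouched
//   len == tmp2 -> allocate with {tmp1, tmp3}, len untouched
//   no alias    -> mov(tmp3, len) so a copy of the length survives in tmp3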
1594 | |
1595 | |
1596 | |
1597 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { | |
1598 LIR_Code code = op->code(); | |
1599 if (code == lir_store_check) { | |
1600 Register value = op->object()->as_register(); | |
1601 Register array = op->array()->as_register(); | |
1602 Register k_RInfo = op->tmp1()->as_register(); | |
1603 Register klass_RInfo = op->tmp2()->as_register(); | |
1604 Register Rtmp1 = op->tmp3()->as_register(); | |
1605 | |
1606 CodeStub* stub = op->stub(); | |
1607 Label done; | |
304 | 1608 __ cmpptr(value, (int32_t)NULL_WORD); |
0 | 1609 __ jcc(Assembler::equal, done); |
1610 add_debug_info_for_null_check_here(op->info_for_exception()); | |
304 | 1611 __ movptr(k_RInfo, Address(array, oopDesc::klass_offset_in_bytes())); |
1612 __ movptr(klass_RInfo, Address(value, oopDesc::klass_offset_in_bytes())); | |
0 | 1613 |
1614 // get instance klass | |
304 | 1615 __ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc))); |
644 | 1616 // perform the fast part of the checking logic |
1617 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, &done, stub->entry(), NULL); |
1618 // call out-of-line instance of __ check_klass_subtype_slow_path(...): |
304 | 1619 __ push(klass_RInfo); |
1620 __ push(k_RInfo); | |
0 | 1621 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); |
304 | 1622 __ pop(klass_RInfo); |
1623 __ pop(k_RInfo); | |
1624 // result is a boolean | |
0 | 1625 __ cmpl(k_RInfo, 0); |
1626 __ jcc(Assembler::equal, *stub->entry()); | |
1627 __ bind(done); | |
1628 } else if (op->code() == lir_checkcast) { | |
1629 // we always need a stub for the failure case. | |
1630 CodeStub* stub = op->stub(); | |
1631 Register obj = op->object()->as_register(); | |
1632 Register k_RInfo = op->tmp1()->as_register(); | |
1633 Register klass_RInfo = op->tmp2()->as_register(); | |
1634 Register dst = op->result_opr()->as_register(); | |
1635 ciKlass* k = op->klass(); | |
1636 Register Rtmp1 = noreg; | |
1637 | |
1638 Label done; | |
1639 if (obj == k_RInfo) { | |
1640 k_RInfo = dst; | |
1641 } else if (obj == klass_RInfo) { | |
1642 klass_RInfo = dst; | |
1643 } | |
1644 if (k->is_loaded()) { | |
1645 select_different_registers(obj, dst, k_RInfo, klass_RInfo); | |
1646 } else { | |
1647 Rtmp1 = op->tmp3()->as_register(); | |
1648 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1); | |
1649 } | |
1650 | |
1651 assert_different_registers(obj, k_RInfo, klass_RInfo); | |
1652 if (!k->is_loaded()) { | |
1653 jobject2reg_with_patching(k_RInfo, op->info_for_patch()); | |
1654 } else { | |
304 | 1655 #ifdef _LP64 |
989 | 1656 __ movoop(k_RInfo, k->constant_encoding()); |
304 | 1657 #else |
0 | 1658 k_RInfo = noreg; |
304 | 1659 #endif // _LP64 |
0 | 1660 } |
1661 assert(obj != k_RInfo, "must be different"); | |
304 | 1662 __ cmpptr(obj, (int32_t)NULL_WORD); |
0 | 1663 if (op->profiled_method() != NULL) { |
1664 ciMethod* method = op->profiled_method(); | |
1665 int bci = op->profiled_bci(); | |
1666 | |
1667 Label profile_done; | |
1668 __ jcc(Assembler::notEqual, profile_done); | |
1669 // Object is null; update methodDataOop | |
1670 ciMethodData* md = method->method_data(); | |
1671 if (md == NULL) { | |
1672 bailout("out of memory building methodDataOop"); | |
1673 return; | |
1674 } | |
1675 ciProfileData* data = md->bci_to_data(bci); | |
1676 assert(data != NULL, "need data for checkcast"); | |
1677 assert(data->is_BitData(), "need BitData for checkcast"); | |
1678 Register mdo = klass_RInfo; | |
989 | 1679 __ movoop(mdo, md->constant_encoding()); |
0 | 1680 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset())); |
1681 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant()); | |
1682 __ orl(data_addr, header_bits); | |
1683 __ jmp(done); | |
1684 __ bind(profile_done); | |
1685 } else { | |
1686 __ jcc(Assembler::equal, done); | |
1687 } | |
1688 __ verify_oop(obj); | |
1689 | |
1690 if (op->fast_check()) { | |
1691 // get object class | |
1692 // not a safepoint as obj null check happens earlier | |
1693 if (k->is_loaded()) { | |
304 | 1694 #ifdef _LP64 |
1695 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); | |
1696 #else | |
989 | 1697 __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding()); |
304 | 1698 #endif // _LP64 |
0 | 1699 } else { |
304 | 1700 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); |
0 | 1701 |
1702 } | |
1703 __ jcc(Assembler::notEqual, *stub->entry()); | |
1704 __ bind(done); | |
1705 } else { | |
1706 // get object class | |
1707 // not a safepoint as obj null check happens earlier | |
304 | 1708 __ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); |
0 | 1709 if (k->is_loaded()) { |
1710 // See if we get an immediate positive hit | |
304 | 1711 #ifdef _LP64 |
1712 __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset())); | |
1713 #else | |
989 | 1714 __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding()); |
304 | 1715 #endif // _LP64 |
0 | 1716 if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) { |
1717 __ jcc(Assembler::notEqual, *stub->entry()); | |
1718 } else { | |
1719 // See if we get an immediate positive hit | |
1720 __ jcc(Assembler::equal, done); | |
1721 // check for self | |
304 | 1722 #ifdef _LP64 |
1723 __ cmpptr(klass_RInfo, k_RInfo); | |
1724 #else | |
989 | 1725 __ cmpoop(klass_RInfo, k->constant_encoding()); |
304 | 1726 #endif // _LP64 |
0 | 1727 __ jcc(Assembler::equal, done); |
1728 | |
304 | 1729 __ push(klass_RInfo); |
1730 #ifdef _LP64 | |
1731 __ push(k_RInfo); | |
1732 #else | |
989 | 1733 __ pushoop(k->constant_encoding()); |
304 | 1734 #endif // _LP64 |
0 | 1735 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); |
304 | 1736 __ pop(klass_RInfo); |
1737 __ pop(klass_RInfo); | |
1738 // result is a boolean | |
0 | 1739 __ cmpl(klass_RInfo, 0); |
1740 __ jcc(Assembler::equal, *stub->entry()); | |
1741 } | |
1742 __ bind(done); | |
1743 } else { | |
644 | 1744 // perform the fast part of the checking logic |
1745 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, &done, stub->entry(), NULL); |
1746 // call out-of-line instance of __ check_klass_subtype_slow_path(...): |
304 | 1747 __ push(klass_RInfo); |
1748 __ push(k_RInfo); | |
0 | 1749 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); |
304 | 1750 __ pop(klass_RInfo); |
1751 __ pop(k_RInfo); | |
1752 // result is a boolean | |
0 | 1753 __ cmpl(k_RInfo, 0); |
1754 __ jcc(Assembler::equal, *stub->entry()); | |
1755 __ bind(done); | |
1756 } | |
1757 | |
1758 } | |
1759 if (dst != obj) { | |
304 | 1760 __ mov(dst, obj); |
0 | 1761 } |
1762 } else if (code == lir_instanceof) { | |
1763 Register obj = op->object()->as_register(); | |
1764 Register k_RInfo = op->tmp1()->as_register(); | |
1765 Register klass_RInfo = op->tmp2()->as_register(); | |
1766 Register dst = op->result_opr()->as_register(); | |
1767 ciKlass* k = op->klass(); | |
1768 | |
1769 Label done; | |
1770 Label zero; | |
1771 Label one; | |
1772 if (obj == k_RInfo) { | |
1773 k_RInfo = klass_RInfo; | |
1774 klass_RInfo = obj; | |
1775 } | |
1776 // patching may screw with our temporaries on sparc, | |
1777 // so let's do it before loading the class | |
1778 if (!k->is_loaded()) { | |
1779 jobject2reg_with_patching(k_RInfo, op->info_for_patch()); | |
304 | 1780 } else { |
989 | 1781 LP64_ONLY(__ movoop(k_RInfo, k->constant_encoding())); |
0 | 1782 } |
1783 assert(obj != k_RInfo, "must be different"); | |
1784 | |
1785 __ verify_oop(obj); | |
1786 if (op->fast_check()) { | |
304 | 1787 __ cmpptr(obj, (int32_t)NULL_WORD); |
0 | 1788 __ jcc(Assembler::equal, zero); |
1789 // get object class | |
1790 // not a safepoint as obj null check happens earlier | |
304 | 1791 if (LP64_ONLY(false &&) k->is_loaded()) { |
989 | 1792 NOT_LP64(__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding())); |
0 | 1793 k_RInfo = noreg; |
1794 } else { | |
304 | 1795 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); |
0 | 1796 |
1797 } | |
1798 __ jcc(Assembler::equal, one); | |
1799 } else { | |
1800 // get object class | |
1801 // not a safepoint as obj null check happens earlier | |
304 | 1802 __ cmpptr(obj, (int32_t)NULL_WORD); |
0 | 1803 __ jcc(Assembler::equal, zero); |
304 | 1804 __ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); |
1805 | |
1806 #ifndef _LP64 | |
0 | 1807 if (k->is_loaded()) { |
1808 // See if we get an immediate positive hit | |
989 | 1809 __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding()); |
0 | 1810 __ jcc(Assembler::equal, one); |
1811 if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() == k->super_check_offset()) { | |
1812 // check for self | |
989 | 1813 __ cmpoop(klass_RInfo, k->constant_encoding()); |
0 | 1814 __ jcc(Assembler::equal, one); |
304 | 1815 __ push(klass_RInfo); |
989 | 1816 __ pushoop(k->constant_encoding()); |
0 | 1817 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); |
304 | 1818 __ pop(klass_RInfo); |
1819 __ pop(dst); | |
0 | 1820 __ jmp(done); |
1821 } | |
644 | 1822 } |
1823 else // next block is unconditional if LP64: |
304 | 1824 #endif // LP64 |
644 | 1825 { |
0 | 1826 assert(dst != klass_RInfo && dst != k_RInfo, "need 3 registers"); |
1827 | |
644 | 1828 // perform the fast part of the checking logic |
1829 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, dst, &one, &zero, NULL); |
1830 // call out-of-line instance of __ check_klass_subtype_slow_path(...): |
304 | 1831 __ push(klass_RInfo); |
1832 __ push(k_RInfo); | |
0 | 1833 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); |
304 | 1834 __ pop(klass_RInfo); |
1835 __ pop(dst); | |
0 | 1836 __ jmp(done); |
1837 } | |
1838 } | |
1839 __ bind(zero); | |
304 | 1840 __ xorptr(dst, dst); |
0 | 1841 __ jmp(done); |
1842 __ bind(one); | |
304 | 1843 __ movptr(dst, 1); |
0 | 1844 __ bind(done); |
1845 } else { | |
1846 ShouldNotReachHere(); | |
1847 } | |
1848 | |
1849 } | |
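// Calling convention assumed by every slow subtype check emitted above
// (a sketch): both klasses are passed on the stack, the Runtime1 stub
// overwrites one slot with a 0/1 result, and the second pop retrieves it:
//   __ push(klass_RInfo);   // subklass
//   __ push(k_RInfo);       // superklass
//   __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
//   __ pop(klass_RInfo);
//   __ pop(k_RInfo);        // boolean result
//   __ cmpl(k_RInfo, 0);    // zero -> not a subtype -> failure stub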
1850 | |
1851 | |
1852 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { | |
304 | 1853 if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) { |
0 | 1854 assert(op->cmp_value()->as_register_lo() == rax, "wrong register"); |
1855 assert(op->cmp_value()->as_register_hi() == rdx, "wrong register"); | |
1856 assert(op->new_value()->as_register_lo() == rbx, "wrong register"); | |
1857 assert(op->new_value()->as_register_hi() == rcx, "wrong register"); | |
1858 Register addr = op->addr()->as_register(); | |
1859 if (os::is_MP()) { | |
1860 __ lock(); | |
1861 } | |
304 | 1862 NOT_LP64(__ cmpxchg8(Address(addr, 0))); |
1863 | |
1864 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) { | |
1865 NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");) | |
1866 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo()); | |
0 | 1867 Register newval = op->new_value()->as_register(); |
1868 Register cmpval = op->cmp_value()->as_register(); | |
1869 assert(cmpval == rax, "wrong register"); | |
1870 assert(newval != NULL, "new val must be register"); | |
1871 assert(cmpval != newval, "cmp and new values must be in different registers"); | |
1872 assert(cmpval != addr, "cmp and addr must be in different registers"); | |
1873 assert(newval != addr, "new value and addr must be in different registers"); | |
1874 if (os::is_MP()) { | |
1875 __ lock(); | |
1876 } | |
304 | 1877 if ( op->code() == lir_cas_obj) { |
1878 __ cmpxchgptr(newval, Address(addr, 0)); | |
1879 } else if (op->code() == lir_cas_int) { | |
1880 __ cmpxchgl(newval, Address(addr, 0)); | |
1881 } else { | |
1882 LP64_ONLY(__ cmpxchgq(newval, Address(addr, 0))); | |
1883 } | |
1884 #ifdef _LP64 | |
1885 } else if (op->code() == lir_cas_long) { | |
1886 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo()); | |
1887 Register newval = op->new_value()->as_register_lo(); | |
1888 Register cmpval = op->cmp_value()->as_register_lo(); | |
1889 assert(cmpval == rax, "wrong register"); | |
1890 assert(newval != NULL, "new val must be register"); | |
1891 assert(cmpval != newval, "cmp and new values must be in different registers"); | |
1892 assert(cmpval != addr, "cmp and addr must be in different registers"); | |
1893 assert(newval != addr, "new value and addr must be in different registers"); | |
1894 if (os::is_MP()) { | |
1895 __ lock(); | |
1896 } | |
1897 __ cmpxchgq(newval, Address(addr, 0)); | |
1898 #endif // _LP64 | |
0 | 1899 } else { |
1900 Unimplemented(); | |
1901 } | |
1902 } | |
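// lock/cmpxchg semantics relied on by the asserts above (sketch): the
// expected value must sit in rax; on success ZF is set and the new value
// is stored, on failure ZF is clear and the current memory value is
// loaded back into rax:
//   if (os::is_MP()) __ lock();
//   __ cmpxchgl(newval, Address(addr, 0));  // rax == [addr] ? [addr] = newval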
1903 | |
1904 | |
1905 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result) { | |
1906 Assembler::Condition acond, ncond; | |
1907 switch (condition) { | |
1908 case lir_cond_equal: acond = Assembler::equal; ncond = Assembler::notEqual; break; | |
1909 case lir_cond_notEqual: acond = Assembler::notEqual; ncond = Assembler::equal; break; | |
1910 case lir_cond_less: acond = Assembler::less; ncond = Assembler::greaterEqual; break; | |
1911 case lir_cond_lessEqual: acond = Assembler::lessEqual; ncond = Assembler::greater; break; | |
1912 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less; break; | |
1913 case lir_cond_greater: acond = Assembler::greater; ncond = Assembler::lessEqual; break; | |
1914 case lir_cond_belowEqual: acond = Assembler::belowEqual; ncond = Assembler::above; break; | |
1915 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; ncond = Assembler::below; break; | |
1916 default: ShouldNotReachHere(); | |
1917 } | |
1918 | |
1919 if (opr1->is_cpu_register()) { | |
1920 reg2reg(opr1, result); | |
1921 } else if (opr1->is_stack()) { | |
1922 stack2reg(opr1, result, result->type()); | |
1923 } else if (opr1->is_constant()) { | |
1924 const2reg(opr1, result, lir_patch_none, NULL); | |
1925 } else { | |
1926 ShouldNotReachHere(); | |
1927 } | |
1928 | |
1929 if (VM_Version::supports_cmov() && !opr2->is_constant()) { | |
1930 // optimized version that does not require a branch | |
1931 if (opr2->is_single_cpu()) { | |
1932 assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move"); | |
304 | 1933 __ cmov(ncond, result->as_register(), opr2->as_register()); |
0 | 1934 } else if (opr2->is_double_cpu()) { |
1935 assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); | |
1936 assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); | |
304 | 1937 __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo()); |
1938 NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());) | |
0 | 1939 } else if (opr2->is_single_stack()) { |
1940 __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix())); | |
1941 } else if (opr2->is_double_stack()) { | |
304 | 1942 __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes)); |
1943 NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));) | |
0 | 1944 } else { |
1945 ShouldNotReachHere(); | |
1946 } | |
1947 | |
1948 } else { | |
1949 Label skip; | |
1950 __ jcc (acond, skip); | |
1951 if (opr2->is_cpu_register()) { | |
1952 reg2reg(opr2, result); | |
1953 } else if (opr2->is_stack()) { | |
1954 stack2reg(opr2, result, result->type()); | |
1955 } else if (opr2->is_constant()) { | |
1956 const2reg(opr2, result, lir_patch_none, NULL); | |
1957 } else { | |
1958 ShouldNotReachHere(); | |
1959 } | |
1960 __ bind(skip); | |
1961 } | |
1962 } | |
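// Shape of the branch-free path above (sketch): result is loaded with opr1
// unconditionally, then conditionally replaced by opr2 under the *negated*
// condition, so no label or jump is needed:
//   reg2reg(opr1, result);                                       // result = opr1
//   __ cmov(ncond, result->as_register(), opr2->as_register());  // if !cond: result = opr2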
1963 | |
1964 | |
1965 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) { | |
1966 assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); | |
1967 | |
1968 if (left->is_single_cpu()) { | |
1969 assert(left == dest, "left and dest must be equal"); | |
1970 Register lreg = left->as_register(); | |
1971 | |
1972 if (right->is_single_cpu()) { | |
1973 // cpu register - cpu register | |
1974 Register rreg = right->as_register(); | |
1975 switch (code) { | |
1976 case lir_add: __ addl (lreg, rreg); break; | |
1977 case lir_sub: __ subl (lreg, rreg); break; | |
1978 case lir_mul: __ imull(lreg, rreg); break; | |
1979 default: ShouldNotReachHere(); | |
1980 } | |
1981 | |
1982 } else if (right->is_stack()) { | |
1983 // cpu register - stack | |
1984 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); | |
1985 switch (code) { | |
1986 case lir_add: __ addl(lreg, raddr); break; | |
1987 case lir_sub: __ subl(lreg, raddr); break; | |
1988 default: ShouldNotReachHere(); | |
1989 } | |
1990 | |
1991 } else if (right->is_constant()) { | |
1992 // cpu register - constant | |
1993 jint c = right->as_constant_ptr()->as_jint(); | |
1994 switch (code) { | |
1995 case lir_add: { | |
1996 __ increment(lreg, c); | |
1997 break; | |
1998 } | |
1999 case lir_sub: { | |
2000 __ decrement(lreg, c); | |
2001 break; | |
2002 } | |
2003 default: ShouldNotReachHere(); | |
2004 } | |
2005 | |
2006 } else { | |
2007 ShouldNotReachHere(); | |
2008 } | |
2009 | |
2010 } else if (left->is_double_cpu()) { | |
2011 assert(left == dest, "left and dest must be equal"); | |
2012 Register lreg_lo = left->as_register_lo(); | |
2013 Register lreg_hi = left->as_register_hi(); | |
2014 | |
2015 if (right->is_double_cpu()) { | |
2016 // cpu register - cpu register | |
2017 Register rreg_lo = right->as_register_lo(); | |
2018 Register rreg_hi = right->as_register_hi(); | |
304 | 2019 NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi)); |
2020 LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo)); | |
0 | 2021 switch (code) { |
2022 case lir_add: | |
304 | 2023 __ addptr(lreg_lo, rreg_lo); |
2024 NOT_LP64(__ adcl(lreg_hi, rreg_hi)); | |
0 | 2025 break; |
2026 case lir_sub: | |
304 | 2027 __ subptr(lreg_lo, rreg_lo); |
2028 NOT_LP64(__ sbbl(lreg_hi, rreg_hi)); | |
0 | 2029 break; |
2030 case lir_mul: | |
304 | 2031 #ifdef _LP64 |
2032 __ imulq(lreg_lo, rreg_lo); | |
2033 #else | |
0 | 2034 assert(lreg_lo == rax && lreg_hi == rdx, "must be"); |
2035 __ imull(lreg_hi, rreg_lo); | |
2036 __ imull(rreg_hi, lreg_lo); | |
2037 __ addl (rreg_hi, lreg_hi); | |
2038 __ mull (rreg_lo); | |
2039 __ addl (lreg_hi, rreg_hi); | |
304 | 2040 #endif // _LP64 |
0 | 2041 break; |
2042 default: | |
2043 ShouldNotReachHere(); | |
2044 } | |
2045 | |
2046 } else if (right->is_constant()) { | |
2047 // cpu register - constant | |
304 | 2048 #ifdef _LP64 |
2049 jlong c = right->as_constant_ptr()->as_jlong_bits(); | |
2050 __ movptr(r10, (intptr_t) c); | |
2051 switch (code) { | |
2052 case lir_add: | |
2053 __ addptr(lreg_lo, r10); | |
2054 break; | |
2055 case lir_sub: | |
2056 __ subptr(lreg_lo, r10); | |
2057 break; | |
2058 default: | |
2059 ShouldNotReachHere(); | |
2060 } | |
2061 #else | |
0 | 2062 jint c_lo = right->as_constant_ptr()->as_jint_lo(); |
2063 jint c_hi = right->as_constant_ptr()->as_jint_hi(); | |
2064 switch (code) { | |
2065 case lir_add: | |
304 | 2066 __ addptr(lreg_lo, c_lo); |
0 | 2067 __ adcl(lreg_hi, c_hi); |
2068 break; | |
2069 case lir_sub: | |
304 | 2070 __ subptr(lreg_lo, c_lo); |
0 | 2071 __ sbbl(lreg_hi, c_hi); |
2072 break; | |
2073 default: | |
2074 ShouldNotReachHere(); | |
2075 } | |
304 | 2076 #endif // _LP64 |
0 | 2077 |
2078 } else { | |
2079 ShouldNotReachHere(); | |
2080 } | |
2081 | |
2082 } else if (left->is_single_xmm()) { | |
2083 assert(left == dest, "left and dest must be equal"); | |
2084 XMMRegister lreg = left->as_xmm_float_reg(); | |
2085 | |
2086 if (right->is_single_xmm()) { | |
2087 XMMRegister rreg = right->as_xmm_float_reg(); | |
2088 switch (code) { | |
2089 case lir_add: __ addss(lreg, rreg); break; | |
2090 case lir_sub: __ subss(lreg, rreg); break; | |
2091 case lir_mul_strictfp: // fall through | |
2092 case lir_mul: __ mulss(lreg, rreg); break; | |
2093 case lir_div_strictfp: // fall through | |
2094 case lir_div: __ divss(lreg, rreg); break; | |
2095 default: ShouldNotReachHere(); | |
2096 } | |
2097 } else { | |
2098 Address raddr; | |
2099 if (right->is_single_stack()) { | |
2100 raddr = frame_map()->address_for_slot(right->single_stack_ix()); | |
2101 } else if (right->is_constant()) { | |
2102 // hack for now | |
2103 raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat()))); | |
2104 } else { | |
2105 ShouldNotReachHere(); | |
2106 } | |
2107 switch (code) { | |
2108 case lir_add: __ addss(lreg, raddr); break; | |
2109 case lir_sub: __ subss(lreg, raddr); break; | |
2110 case lir_mul_strictfp: // fall through | |
2111 case lir_mul: __ mulss(lreg, raddr); break; | |
2112 case lir_div_strictfp: // fall through | |
2113 case lir_div: __ divss(lreg, raddr); break; | |
2114 default: ShouldNotReachHere(); | |
2115 } | |
2116 } | |
2117 | |
2118 } else if (left->is_double_xmm()) { | |
2119 assert(left == dest, "left and dest must be equal"); | |
2120 | |
2121 XMMRegister lreg = left->as_xmm_double_reg(); | |
2122 if (right->is_double_xmm()) { | |
2123 XMMRegister rreg = right->as_xmm_double_reg(); | |
2124 switch (code) { | |
2125 case lir_add: __ addsd(lreg, rreg); break; | |
2126 case lir_sub: __ subsd(lreg, rreg); break; | |
2127 case lir_mul_strictfp: // fall through | |
2128 case lir_mul: __ mulsd(lreg, rreg); break; | |
2129 case lir_div_strictfp: // fall through | |
2130 case lir_div: __ divsd(lreg, rreg); break; | |
2131 default: ShouldNotReachHere(); | |
2132 } | |
2133 } else { | |
2134 Address raddr; | |
2135 if (right->is_double_stack()) { | |
2136 raddr = frame_map()->address_for_slot(right->double_stack_ix()); | |
2137 } else if (right->is_constant()) { | |
2138 // hack for now | |
2139 raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble()))); | |
2140 } else { | |
2141 ShouldNotReachHere(); | |
2142 } | |
2143 switch (code) { | |
2144 case lir_add: __ addsd(lreg, raddr); break; | |
2145 case lir_sub: __ subsd(lreg, raddr); break; | |
2146 case lir_mul_strictfp: // fall through | |
2147 case lir_mul: __ mulsd(lreg, raddr); break; | |
2148 case lir_div_strictfp: // fall through | |
2149 case lir_div: __ divsd(lreg, raddr); break; | |
2150 default: ShouldNotReachHere(); | |
2151 } | |
2152 } | |
2153 | |
2154 } else if (left->is_single_fpu()) { | |
2155 assert(dest->is_single_fpu(), "fpu stack allocation required"); | |
2156 | |
2157 if (right->is_single_fpu()) { | |
2158 arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack); | |
2159 | |
2160 } else { | |
2161 assert(left->fpu_regnr() == 0, "left must be on TOS"); | |
2162 assert(dest->fpu_regnr() == 0, "dest must be on TOS"); | |
2163 | |
2164 Address raddr; | |
2165 if (right->is_single_stack()) { | |
2166 raddr = frame_map()->address_for_slot(right->single_stack_ix()); | |
2167 } else if (right->is_constant()) { | |
2168 address const_addr = float_constant(right->as_jfloat()); | |
2169 assert(const_addr != NULL, "incorrect float/double constant maintenance"); | |
2170 // hack for now | |
2171 raddr = __ as_Address(InternalAddress(const_addr)); | |
2172 } else { | |
2173 ShouldNotReachHere(); | |
2174 } | |
2175 | |
2176 switch (code) { | |
2177 case lir_add: __ fadd_s(raddr); break; | |
2178 case lir_sub: __ fsub_s(raddr); break; | |
2179 case lir_mul_strictfp: // fall through | |
2180 case lir_mul: __ fmul_s(raddr); break; | |
2181 case lir_div_strictfp: // fall through | |
2182 case lir_div: __ fdiv_s(raddr); break; | |
2183 default: ShouldNotReachHere(); | |
2184 } | |
2185 } | |
2186 | |
2187 } else if (left->is_double_fpu()) { | |
2188 assert(dest->is_double_fpu(), "fpu stack allocation required"); | |
2189 | |
2190 if (code == lir_mul_strictfp || code == lir_div_strictfp) { | |
2191 // Double values require special handling for strictfp mul/div on x86 | |
2192 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1())); | |
2193 __ fmulp(left->fpu_regnrLo() + 1); | |
2194 } | |
2195 | |
2196 if (right->is_double_fpu()) { | |
2197 arith_fpu_implementation(code, left->fpu_regnrLo(), right->fpu_regnrLo(), dest->fpu_regnrLo(), pop_fpu_stack); | |
2198 | |
2199 } else { | |
2200 assert(left->fpu_regnrLo() == 0, "left must be on TOS"); | |
2201 assert(dest->fpu_regnrLo() == 0, "dest must be on TOS"); | |
2202 | |
2203 Address raddr; | |
2204 if (right->is_double_stack()) { | |
2205 raddr = frame_map()->address_for_slot(right->double_stack_ix()); | |
2206 } else if (right->is_constant()) { | |
2207 // hack for now | |
2208 raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble()))); | |
2209 } else { | |
2210 ShouldNotReachHere(); | |
2211 } | |
2212 | |
2213 switch (code) { | |
2214 case lir_add: __ fadd_d(raddr); break; | |
2215 case lir_sub: __ fsub_d(raddr); break; | |
2216 case lir_mul_strictfp: // fall through | |
2217 case lir_mul: __ fmul_d(raddr); break; | |
2218 case lir_div_strictfp: // fall through | |
2219 case lir_div: __ fdiv_d(raddr); break; | |
2220 default: ShouldNotReachHere(); | |
2221 } | |
2222 } | |
2223 | |
2224 if (code == lir_mul_strictfp || code == lir_div_strictfp) { | |
2225 // Double values require special handling for strictfp mul/div on x86 | |
2226 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2())); | |
2227 __ fmulp(dest->fpu_regnrLo() + 1); | |
2228 } | |
2229 | |
2230 } else if (left->is_single_stack() || left->is_address()) { | |
2231 assert(left == dest, "left and dest must be equal"); | |
2232 | |
2233 Address laddr; | |
2234 if (left->is_single_stack()) { | |
2235 laddr = frame_map()->address_for_slot(left->single_stack_ix()); | |
2236 } else if (left->is_address()) { | |
2237 laddr = as_Address(left->as_address_ptr()); | |
2238 } else { | |
2239 ShouldNotReachHere(); | |
2240 } | |
2241 | |
2242 if (right->is_single_cpu()) { | |
2243 Register rreg = right->as_register(); | |
2244 switch (code) { | |
2245 case lir_add: __ addl(laddr, rreg); break; | |
2246 case lir_sub: __ subl(laddr, rreg); break; | |
2247 default: ShouldNotReachHere(); | |
2248 } | |
2249 } else if (right->is_constant()) { | |
2250 jint c = right->as_constant_ptr()->as_jint(); | |
2251 switch (code) { | |
2252 case lir_add: { | |
304 | 2253 __ incrementl(laddr, c); |
0 | 2254 break; |
2255 } | |
2256 case lir_sub: { | |
304 | 2257 __ decrementl(laddr, c); |
0 | 2258 break; |
2259 } | |
2260 default: ShouldNotReachHere(); | |
2261 } | |
2262 } else { | |
2263 ShouldNotReachHere(); | |
2264 } | |
2265 | |
2266 } else { | |
2267 ShouldNotReachHere(); | |
2268 } | |
2269 } | |
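// The 32-bit lir_mul sequence above builds a 64x64->64 multiply from
// 32-bit halves (illustrative trace, a = lreg = rdx:rax, b = rreg):
//   imull lreg_hi, rreg_lo   // a_hi * b_lo
//   imull rreg_hi, lreg_lo   // b_hi * a_lo
//   addl  rreg_hi, lreg_hi   // sum of the cross terms
//   mull  rreg_lo            // rdx:rax = a_lo * b_lo (unsigned 32x32)
//   addl  lreg_hi, rreg_hi   // fold the cross terms into the high word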
2270 | |
2271 void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { | |
2272 assert(pop_fpu_stack || (left_index == dest_index || right_index == dest_index), "invalid LIR"); | |
2273 assert(!pop_fpu_stack || (left_index - 1 == dest_index || right_index - 1 == dest_index), "invalid LIR"); | |
2274 assert(left_index == 0 || right_index == 0, "either must be on top of stack"); | |
2275 | |
2276 bool left_is_tos = (left_index == 0); | |
2277 bool dest_is_tos = (dest_index == 0); | |
2278 int non_tos_index = (left_is_tos ? right_index : left_index); | |
2279 | |
2280 switch (code) { | |
2281 case lir_add: | |
2282 if (pop_fpu_stack) __ faddp(non_tos_index); | |
2283 else if (dest_is_tos) __ fadd (non_tos_index); | |
2284 else __ fadda(non_tos_index); | |
2285 break; | |
2286 | |
2287 case lir_sub: | |
2288 if (left_is_tos) { | |
2289 if (pop_fpu_stack) __ fsubrp(non_tos_index); | |
2290 else if (dest_is_tos) __ fsub (non_tos_index); | |
2291 else __ fsubra(non_tos_index); | |
2292 } else { | |
2293 if (pop_fpu_stack) __ fsubp (non_tos_index); | |
2294 else if (dest_is_tos) __ fsubr (non_tos_index); | |
2295 else __ fsuba (non_tos_index); | |
2296 } | |
2297 break; | |
2298 | |
2299 case lir_mul_strictfp: // fall through | |
2300 case lir_mul: | |
2301 if (pop_fpu_stack) __ fmulp(non_tos_index); | |
2302 else if (dest_is_tos) __ fmul (non_tos_index); | |
2303 else __ fmula(non_tos_index); | |
2304 break; | |
2305 | |
2306 case lir_div_strictfp: // fall through | |
2307 case lir_div: | |
2308 if (left_is_tos) { | |
2309 if (pop_fpu_stack) __ fdivrp(non_tos_index); | |
2310 else if (dest_is_tos) __ fdiv (non_tos_index); | |
2311 else __ fdivra(non_tos_index); | |
2312 } else { | |
2313 if (pop_fpu_stack) __ fdivp (non_tos_index); | |
2314 else if (dest_is_tos) __ fdivr (non_tos_index); | |
2315 else __ fdiva (non_tos_index); | |
2316 } | |
2317 break; | |
2318 | |
2319 case lir_rem: | |
2320 assert(left_is_tos && dest_is_tos && right_index == 1, "must be guaranteed by FPU stack allocation"); | |
2321 __ fremr(noreg); | |
2322 break; | |
2323 | |
2324 default: | |
2325 ShouldNotReachHere(); | |
2326 } | |
2327 } | |
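// TOS bookkeeping example for the table above (illustrative): to compute
// left - right with left on TOS (left_index == 0, right_index == 1) and a
// popping result, fsubrp(1) is emitted: ST(1) = ST(0) - ST(1), then pop,
// which leaves the difference on the new top of stack.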
2328 | |
2329 | |
2330 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) { | |
2331 if (value->is_double_xmm()) { | |
2332 switch(code) { | |
2333 case lir_abs : | |
2334 { | |
2335 if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) { | |
2336 __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); | |
2337 } | |
2338 __ andpd(dest->as_xmm_double_reg(), | |
2339 ExternalAddress((address)double_signmask_pool)); | |
2340 } | |
2341 break; | |
2342 | |
2343 case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break; | |
2344 // all other intrinsics are not available in the SSE instruction set, so FPU is used | |
2345 default : ShouldNotReachHere(); | |
2346 } | |
2347 | |
2348 } else if (value->is_double_fpu()) { | |
2349 assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS"); | |
2350 switch(code) { | |
2351 case lir_log : __ flog() ; break; | |
2352 case lir_log10 : __ flog10() ; break; | |
2353 case lir_abs : __ fabs() ; break; | |
2354 case lir_sqrt : __ fsqrt(); break; | |
2355 case lir_sin : | |
2356 // Should consider not saving rbx, if not necessary | |
2357 __ trigfunc('s', op->as_Op2()->fpu_stack_size()); | |
2358 break; | |
2359 case lir_cos : | |
2360 // Should consider not saving rbx, if not necessary | |
2361 assert(op->as_Op2()->fpu_stack_size() <= 6, "sin and cos need two free stack slots"); | |
2362 __ trigfunc('c', op->as_Op2()->fpu_stack_size()); | |
2363 break; | |
2364 case lir_tan : | |
2365 // Should consider not saving rbx, if not necessary | |
2366 __ trigfunc('t', op->as_Op2()->fpu_stack_size()); | |
2367 break; | |
2368 default : ShouldNotReachHere(); | |
2369 } | |
2370 } else { | |
2371 Unimplemented(); | |
2372 } | |
2373 } | |
2374 | |
2375 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) { | |
2376 // assert(left->destroys_register(), "check"); | |
2377 if (left->is_single_cpu()) { | |
2378 Register reg = left->as_register(); | |
2379 if (right->is_constant()) { | |
2380 int val = right->as_constant_ptr()->as_jint(); | |
2381 switch (code) { | |
2382 case lir_logic_and: __ andl (reg, val); break; | |
2383 case lir_logic_or: __ orl (reg, val); break; | |
2384 case lir_logic_xor: __ xorl (reg, val); break; | |
2385 default: ShouldNotReachHere(); | |
2386 } | |
2387 } else if (right->is_stack()) { | |
2388 // added support for stack operands | |
2389 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); | |
2390 switch (code) { | |
2391 case lir_logic_and: __ andl (reg, raddr); break; | |
2392 case lir_logic_or: __ orl (reg, raddr); break; | |
2393 case lir_logic_xor: __ xorl (reg, raddr); break; | |
2394 default: ShouldNotReachHere(); | |
2395 } | |
2396 } else { | |
2397 Register rright = right->as_register(); | |
2398 switch (code) { | |
304 | 2399 case lir_logic_and: __ andptr (reg, rright); break; |
2400 case lir_logic_or : __ orptr (reg, rright); break; | |
2401 case lir_logic_xor: __ xorptr (reg, rright); break; | |
0 | 2402 default: ShouldNotReachHere(); |
2403 } | |
2404 } | |
2405 move_regs(reg, dst->as_register()); | |
2406 } else { | |
2407 Register l_lo = left->as_register_lo(); | |
2408 Register l_hi = left->as_register_hi(); | |
2409 if (right->is_constant()) { | |
304 | 2410 #ifdef _LP64 |
2411 __ mov64(rscratch1, right->as_constant_ptr()->as_jlong()); | |
2412 switch (code) { | |
2413 case lir_logic_and: | |
2414 __ andq(l_lo, rscratch1); | |
2415 break; | |
2416 case lir_logic_or: | |
2417 __ orq(l_lo, rscratch1); | |
2418 break; | |
2419 case lir_logic_xor: | |
2420 __ xorq(l_lo, rscratch1); | |
2421 break; | |
2422 default: ShouldNotReachHere(); | |
2423 } | |
2424 #else | |
0 | 2425 int r_lo = right->as_constant_ptr()->as_jint_lo(); |
2426 int r_hi = right->as_constant_ptr()->as_jint_hi(); | |
2427 switch (code) { | |
2428 case lir_logic_and: | |
2429 __ andl(l_lo, r_lo); | |
2430 __ andl(l_hi, r_hi); | |
2431 break; | |
2432 case lir_logic_or: | |
2433 __ orl(l_lo, r_lo); | |
2434 __ orl(l_hi, r_hi); | |
2435 break; | |
2436 case lir_logic_xor: | |
2437 __ xorl(l_lo, r_lo); | |
2438 __ xorl(l_hi, r_hi); | |
2439 break; | |
2440 default: ShouldNotReachHere(); | |
2441 } | |
304 | 2442 #endif // _LP64 |
0 | 2443 } else { |
2444 Register r_lo = right->as_register_lo(); | |
2445 Register r_hi = right->as_register_hi(); | |
2446 assert(l_lo != r_hi, "overwriting registers"); | |
2447 switch (code) { | |
2448 case lir_logic_and: | |
304 | 2449 __ andptr(l_lo, r_lo); |
2450 NOT_LP64(__ andptr(l_hi, r_hi);) | |
0 | 2451 break; |
2452 case lir_logic_or: | |
304 | 2453 __ orptr(l_lo, r_lo); |
2454 NOT_LP64(__ orptr(l_hi, r_hi);) | |
0 | 2455 break; |
2456 case lir_logic_xor: | |
304 | 2457 __ xorptr(l_lo, r_lo); |
2458 NOT_LP64(__ xorptr(l_hi, r_hi);) | |
0 | 2459 break; |
2460 default: ShouldNotReachHere(); | |
2461 } | |
2462 } | |
2463 | |
2464 Register dst_lo = dst->as_register_lo(); | |
2465 Register dst_hi = dst->as_register_hi(); | |
2466 | |
304 | 2467 #ifdef _LP64 |
2468 move_regs(l_lo, dst_lo); | |
2469 #else | |
0 | 2470 if (dst_lo == l_hi) { |
2471 assert(dst_hi != l_lo, "overwriting registers"); | |
2472 move_regs(l_hi, dst_hi); | |
2473 move_regs(l_lo, dst_lo); | |
2474 } else { | |
2475 assert(dst_lo != l_hi, "overwriting registers"); | |
2476 move_regs(l_lo, dst_lo); | |
2477 move_regs(l_hi, dst_hi); | |
2478 } | |
304 | 2479 #endif // _LP64 |
0 | 2480 } |
2481 } | |
2482 | |
2483 | |
2484 // we assume that rax and rdx can be overwritten | |
2485 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { | |
2486 | |
2487 assert(left->is_single_cpu(), "left must be register"); | |
2488 assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant"); | |
2489 assert(result->is_single_cpu(), "result must be register"); | |
2490 | |
2491 // assert(left->destroys_register(), "check"); | |
2492 // assert(right->destroys_register(), "check"); | |
2493 | |
2494 Register lreg = left->as_register(); | |
2495 Register dreg = result->as_register(); | |
2496 | |
2497 if (right->is_constant()) { | |
2498 int divisor = right->as_constant_ptr()->as_jint(); | |
2499 assert(divisor > 0 && is_power_of_2(divisor), "must be"); | |
2500 if (code == lir_idiv) { | |
2501 assert(lreg == rax, "must be rax"); | |
2502 assert(temp->as_register() == rdx, "tmp register must be rdx"); | |
2503 __ cdql(); // sign extend into rdx:rax | |
2504 if (divisor == 2) { | |
2505 __ subl(lreg, rdx); | |
2506 } else { | |
2507 __ andl(rdx, divisor - 1); | |
2508 __ addl(lreg, rdx); | |
2509 } | |
2510 __ sarl(lreg, log2_intptr(divisor)); | |
2511 move_regs(lreg, dreg); | |
2512 } else if (code == lir_irem) { | |
2513 Label done; | |
304 | 2514 __ mov(dreg, lreg); |
0 | 2515 __ andl(dreg, 0x80000000 | (divisor - 1)); |
2516 __ jcc(Assembler::positive, done); | |
2517 __ decrement(dreg); | |
2518 __ orl(dreg, ~(divisor - 1)); | |
2519 __ increment(dreg); | |
2520 __ bind(done); | |
2521 } else { | |
2522 ShouldNotReachHere(); | |
2523 } | |
2524 } else { | |
2525 Register rreg = right->as_register(); | |
2526 assert(lreg == rax, "left register must be rax"); | |
2527 assert(rreg != rdx, "right register must not be rdx"); | |
2528 assert(temp->as_register() == rdx, "tmp register must be rdx"); | |
2529 | |
2530 move_regs(lreg, rax); | |
2531 | |
2532 int idivl_offset = __ corrected_idivl(rreg); | |
2533 add_debug_info_for_div0(idivl_offset, info); | |
2534 if (code == lir_irem) { | |
2535 move_regs(rdx, dreg); // result is in rdx | |
2536 } else { | |
2537 move_regs(rax, dreg); | |
2538 } | |
2539 } | |
2540 } | |
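// Worked example of the power-of-two constant path above (illustrative):
// lir_idiv by 8 with lreg == rax == -13:
//   cdql            // rdx = 0xFFFFFFFF (sign of rax)
//   andl rdx, 7     // rdx = 7 (bias applies only to negative dividends)
//   addl rax, rdx   // rax = -6
//   sarl rax, 3     // rax = -1, i.e. -13 / 8 truncated toward zero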
2541 | |
2542 | |
2543 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { | |
2544 if (opr1->is_single_cpu()) { | |
2545 Register reg1 = opr1->as_register(); | |
2546 if (opr2->is_single_cpu()) { | |
2547 // cpu register - cpu register | |
304 | 2548 if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) { |
2549 __ cmpptr(reg1, opr2->as_register()); | |
2550 } else { | |
2551 assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?"); | |
2552 __ cmpl(reg1, opr2->as_register()); | |
2553 } | |
0 | 2554 } else if (opr2->is_stack()) { |
2555 // cpu register - stack | |
304 | 2556 if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) { |
2557 __ cmpptr(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); | |
2558 } else { | |
2559 __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); | |
2560 } | |
0 | 2561 } else if (opr2->is_constant()) { |
2562 // cpu register - constant | |
2563 LIR_Const* c = opr2->as_constant_ptr(); | |
2564 if (c->type() == T_INT) { | |
2565 __ cmpl(reg1, c->as_jint()); | |
304 | 2566 } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) { |
2567 // In 64-bit, oops fit in a single register | |
0 | 2568 jobject o = c->as_jobject(); |
2569 if (o == NULL) { | |
304 | 2570 __ cmpptr(reg1, (int32_t)NULL_WORD); |
0 | 2571 } else { |
304 | 2572 #ifdef _LP64 |
2573 __ movoop(rscratch1, o); | |
2574 __ cmpptr(reg1, rscratch1); | |
2575 #else | |
0 | 2576 __ cmpoop(reg1, c->as_jobject()); |
304 | 2577 #endif // _LP64 |
0 | 2578 } |
2579 } else { | |
2580 ShouldNotReachHere(); | |
2581 } | |
2582 // cpu register - address | |
2583 } else if (opr2->is_address()) { | |
2584 if (op->info() != NULL) { | |
2585 add_debug_info_for_null_check_here(op->info()); | |
2586 } | |
2587 __ cmpl(reg1, as_Address(opr2->as_address_ptr())); | |
2588 } else { | |
2589 ShouldNotReachHere(); | |
2590 } | |
2591 | |
2592 } else if(opr1->is_double_cpu()) { | |
2593 Register xlo = opr1->as_register_lo(); | |
2594 Register xhi = opr1->as_register_hi(); | |
2595 if (opr2->is_double_cpu()) { | |
304 | 2596 #ifdef _LP64 |
2597 __ cmpptr(xlo, opr2->as_register_lo()); | |
2598 #else | |
0 | 2599 // cpu register - cpu register |
2600 Register ylo = opr2->as_register_lo(); | |
2601 Register yhi = opr2->as_register_hi(); | |
2602 __ subl(xlo, ylo); | |
2603 __ sbbl(xhi, yhi); | |
2604 if (condition == lir_cond_equal || condition == lir_cond_notEqual) { | |
2605 __ orl(xhi, xlo); | |
2606 } | |
304 | 2607 #endif // _LP64 |
0 | 2608 } else if (opr2->is_constant()) { |
2609 // cpu register - constant 0 | |
2610 assert(opr2->as_jlong() == (jlong)0, "only handles zero"); | |
304 | 2611 #ifdef _LP64 |
2612 __ cmpptr(xlo, (int32_t)opr2->as_jlong()); | |
2613 #else | |
0 | 2614 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case"); |
2615 __ orl(xhi, xlo); | |
304 | 2616 #endif // _LP64 |
0 | 2617 } else { |
2618 ShouldNotReachHere(); | |
2619 } | |
2620 | |
2621 } else if (opr1->is_single_xmm()) { | |
2622 XMMRegister reg1 = opr1->as_xmm_float_reg(); | |
2623 if (opr2->is_single_xmm()) { | |
2624 // xmm register - xmm register | |
2625 __ ucomiss(reg1, opr2->as_xmm_float_reg()); | |
2626 } else if (opr2->is_stack()) { | |
2627 // xmm register - stack | |
2628 __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); | |
2629 } else if (opr2->is_constant()) { | |
2630 // xmm register - constant | |
2631 __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat()))); | |
2632 } else if (opr2->is_address()) { | |
2633 // xmm register - address | |
2634 if (op->info() != NULL) { | |
2635 add_debug_info_for_null_check_here(op->info()); | |
2636 } | |
2637 __ ucomiss(reg1, as_Address(opr2->as_address_ptr())); | |
2638 } else { | |
2639 ShouldNotReachHere(); | |
2640 } | |
2641 | |
2642 } else if (opr1->is_double_xmm()) { | |
2643 XMMRegister reg1 = opr1->as_xmm_double_reg(); | |
2644 if (opr2->is_double_xmm()) { | |
2645 // xmm register - xmm register | |
2646 __ ucomisd(reg1, opr2->as_xmm_double_reg()); | |
2647 } else if (opr2->is_stack()) { | |
2648 // xmm register - stack | |
2649 __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix())); | |
2650 } else if (opr2->is_constant()) { | |
2651 // xmm register - constant | |
2652 __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble()))); | |
2653 } else if (opr2->is_address()) { | |
2654 // xmm register - address | |
2655 if (op->info() != NULL) { | |
2656 add_debug_info_for_null_check_here(op->info()); | |
2657 } | |
2658 __ ucomisd(reg1, as_Address(opr2->pointer()->as_address())); | |
2659 } else { | |
2660 ShouldNotReachHere(); | |
2661 } | |
2662 | |
2663 } else if(opr1->is_single_fpu() || opr1->is_double_fpu()) { | |
2664 assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)"); | |
2665 assert(opr2->is_fpu_register(), "both must be registers"); | |
2666 __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1); | |
2667 | |
2668 } else if (opr1->is_address() && opr2->is_constant()) { | |
304 | 2669 LIR_Const* c = opr2->as_constant_ptr(); |
2670 #ifdef _LP64 | |
2671 if (c->type() == T_OBJECT || c->type() == T_ARRAY) { | |
2672 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse"); | |
2673 __ movoop(rscratch1, c->as_jobject()); | |
2674 } | |
2675 #endif // LP64 | |
0 | 2676 if (op->info() != NULL) { |
2677 add_debug_info_for_null_check_here(op->info()); | |
2678 } | |
2679 // special case: address - constant | |
2680 LIR_Address* addr = opr1->as_address_ptr(); | |
2681 if (c->type() == T_INT) { | |
2682 __ cmpl(as_Address(addr), c->as_jint()); | |
304 | 2683 } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) { |
2684 #ifdef _LP64 | |
2685 // %%% Make this explode if addr isn't reachable until we figure out a | |
2686 // better strategy by giving noreg as the temp for as_Address | |
2687 __ cmpptr(rscratch1, as_Address(addr, noreg)); | |
2688 #else | |
0 | 2689 __ cmpoop(as_Address(addr), c->as_jobject()); |
304 | 2690 #endif // _LP64 |
0 | 2691 } else { |
2692 ShouldNotReachHere(); | |
2693 } | |
2694 | |
2695 } else { | |
2696 ShouldNotReachHere(); | |
2697 } | |
2698 } | |
2699 | |
2700 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) { | |
2701 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) { | |
2702 if (left->is_single_xmm()) { | |
2703 assert(right->is_single_xmm(), "must match"); | |
2704 __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i); | |
2705 } else if (left->is_double_xmm()) { | |
2706 assert(right->is_double_xmm(), "must match"); | |
2707 __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i); | |
2708 | |
2709 } else { | |
2710 assert(left->is_single_fpu() || left->is_double_fpu(), "must be"); | |
2711 assert(right->is_single_fpu() || right->is_double_fpu(), "must match"); | |
2712 | |
2713 assert(left->fpu() == 0, "left must be on TOS"); | |
2714 __ fcmp2int(dst->as_register(), code == lir_ucmp_fd2i, right->fpu(), | |
2715 op->fpu_pop_count() > 0, op->fpu_pop_count() > 1); | |
2716 } | |
2717 } else { | |
2718 assert(code == lir_cmp_l2i, "check"); | |
304 | 2719 #ifdef _LP64 |
2720 Register dest = dst->as_register(); | |
2721 __ xorptr(dest, dest); | |
2722 Label high, done; | |
2723 __ cmpptr(left->as_register_lo(), right->as_register_lo()); | |
2724 __ jcc(Assembler::equal, done); | |
2725 __ jcc(Assembler::greater, high); | |
2726 __ decrement(dest); | |
2727 __ jmp(done); | |
2728 __ bind(high); | |
2729 __ increment(dest); | |
2730 | |
2731 __ bind(done); | |
2732 | |
2733 #else | |
0 | 2734 __ lcmp2int(left->as_register_hi(), |
2735 left->as_register_lo(), | |
2736 right->as_register_hi(), | |
2737 right->as_register_lo()); | |
2738 move_regs(left->as_register_hi(), dst->as_register()); | |
304 | 2739 #endif // _LP64 |
0 | 2740 } |
2741 } | |
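// The 64-bit lir_cmp_l2i block above materializes the Java lcmp result
// without lcmp2int: dest starts at 0, becomes -1 via decrement when
// left < right, and 1 via increment when left > right. E.g. for
// left == 5, right == 9 neither jcc is taken and dest ends up as -1.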
2742 | |
2743 | |
2744 void LIR_Assembler::align_call(LIR_Code code) { | |
2745 if (os::is_MP()) { | |
2746 // make sure that the displacement word of the call ends up word aligned | |
2747 int offset = __ offset(); | |
2748 switch (code) { | |
2749 case lir_static_call: | |
2750 case lir_optvirtual_call: | |
2751 offset += NativeCall::displacement_offset; | |
2752 break; | |
2753 case lir_icvirtual_call: | |
2754 offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size; | |
2755 break; | |
2756 case lir_virtual_call: // currently, sparc-specific for niagara | |
2757 default: ShouldNotReachHere(); | |
2758 } | |
2759 while (offset++ % BytesPerWord != 0) { | |
2760 __ nop(); | |
2761 } | |
2762 } | |
2763 } | |
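// Alignment example (illustrative): a lir_static_call starting at code
// offset 13, with NativeCall::displacement_offset == 1 and BytesPerWord
// == 4, puts the displacement at offset 14; the loop above then emits two
// nops so the call begins at 15 and its 4-byte displacement at 16, the
// word boundary that MP-safe call-site patching requires.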
2764 | |
2765 | |
2766 void LIR_Assembler::call(address entry, relocInfo::relocType rtype, CodeEmitInfo* info) { | |
2767 assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, | |
2768 "must be aligned"); | |
2769 __ call(AddressLiteral(entry, rtype)); | |
2770 add_call_info(code_offset(), info); | |
2771 } | |
2772 | |
2773 | |
2774 void LIR_Assembler::ic_call(address entry, CodeEmitInfo* info) { | |
2775 RelocationHolder rh = virtual_call_Relocation::spec(pc()); | |
2776 __ movoop(IC_Klass, (jobject)Universe::non_oop_word()); | |
2777 assert(!os::is_MP() || | |
2778 (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, | |
2779 "must be aligned"); | |
2780 __ call(AddressLiteral(entry, rh)); | |
2781 add_call_info(code_offset(), info); | |
2782 } | |
2783 | |
2784 | |
2785 /* Currently, vtable-dispatch is only enabled for sparc platforms */ | |
2786 void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) { | |
2787 ShouldNotReachHere(); | |
2788 } | |
2789 | |
2790 void LIR_Assembler::emit_static_call_stub() { | |
2791 address call_pc = __ pc(); | |
2792 address stub = __ start_a_stub(call_stub_size); | |
2793 if (stub == NULL) { | |
2794 bailout("static call stub overflow"); | |
2795 return; | |
2796 } | |
2797 | |
2798 int start = __ offset(); | |
2799 if (os::is_MP()) { | |
2800 // make sure that the displacement word of the call ends up word aligned | |
2801 int offset = __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset; | |
2802 while (offset++ % BytesPerWord != 0) { | |
2803 __ nop(); | |
2804 } | |
2805 } | |
2806 __ relocate(static_stub_Relocation::spec(call_pc)); | |
  __ movoop(rbx, (jobject)NULL);
  // must be set to -1 at code generation time
  assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
  // On 64bit this will die since it will take a movq & jmp, must be only a jmp
  __ jump(RuntimeAddress(__ pc()));

  assert(__ offset() - start <= call_stub_size, "stub too big");
  __ end_a_stub();
}


void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind) {
  assert(exceptionOop->as_register() == rax, "must match");
  assert(unwind || exceptionPC->as_register() == rdx, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  Runtime1::StubID unwind_id;

  if (!unwind) {
    // get current pc information
    // pc is only needed if the method has an exception handler, the unwind code does not need it.
    int pc_for_athrow_offset = __ offset();
    InternalAddress pc_for_athrow(__ pc());
    __ lea(exceptionPC->as_register(), pc_for_athrow);
    add_call_info(pc_for_athrow_offset, info); // for exception handler

    __ verify_not_null_oop(rax);
    // search an exception handler (rax: exception oop, rdx: throwing pc)
    if (compilation()->has_fpu_code()) {
      unwind_id = Runtime1::handle_exception_id;
    } else {
      unwind_id = Runtime1::handle_exception_nofpu_id;
    }
  } else {
    unwind_id = Runtime1::unwind_exception_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));

  // enough room for two byte trap
  __ nop();
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {

  // optimized version for linear scan:
  // * count must be already in ECX (guaranteed by LinearScan)
  // * left and dest must be equal
  // * tmp must be unused
  assert(count->as_register() == SHIFT_count, "count must be in ECX");
  assert(left == dest, "left and dest must be equal");
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
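  // (x86 variable-shift instructions take their count implicitly in CL,
  // which is why the count operand is pinned to ECX)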

  if (left->is_single_cpu()) {
    Register value = left->as_register();
    assert(value != SHIFT_count, "left cannot be ECX");

    switch (code) {
      case lir_shl:  __ shll(value); break;
      case lir_shr:  __ sarl(value); break;
      case lir_ushr: __ shrl(value); break;
      default: ShouldNotReachHere();
    }
  } else if (left->is_double_cpu()) {
    Register lo = left->as_register_lo();
    Register hi = left->as_register_hi();
    assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
#ifdef _LP64
    switch (code) {
      case lir_shl:  __ shlptr(lo); break;
      case lir_shr:  __ sarptr(lo); break;
      case lir_ushr: __ shrptr(lo); break;
      default: ShouldNotReachHere();
    }
#else
    switch (code) {
      case lir_shl:  __ lshl(hi, lo);        break;
      case lir_shr:  __ lshr(hi, lo, true);  break;
      case lir_ushr: __ lshr(hi, lo, false); break;
      default: ShouldNotReachHere();
    }
#endif // _LP64
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  if (dest->is_single_cpu()) {
    // first move left into dest so that left is not destroyed by the shift
    Register value = dest->as_register();
    count = count & 0x1F; // Java spec

    move_regs(left->as_register(), value);
    switch (code) {
      case lir_shl:  __ shll(value, count); break;
      case lir_shr:  __ sarl(value, count); break;
      case lir_ushr: __ shrl(value, count); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_cpu()) {
#ifndef _LP64
    Unimplemented();
#else
    // first move left into dest so that left is not destroyed by the shift
    Register value = dest->as_register_lo();
    count = count & 0x1F; // Java spec

    move_regs(left->as_register_lo(), value);
    switch (code) {
      case lir_shl:  __ shlptr(value, count); break;
      case lir_shr:  __ sarptr(value, count); break;
      case lir_ushr: __ shrptr(value, count); break;
      default: ShouldNotReachHere();
    }
#endif // _LP64
  } else {
    ShouldNotReachHere();
  }
}


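// store_parameter writes an outgoing argument into the reserved argument
// area at the bottom of the frame; offsets are given in words from rsp.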
void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movptr(Address(rsp, offset_from_rsp_in_bytes), r);
}


void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movptr(Address(rsp, offset_from_rsp_in_bytes), c);
}


void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movoop(Address(rsp, offset_from_rsp_in_bytes), o);
}


// This code replaces a call to arraycopy; no exceptions may be thrown
// here: they must be raised in the System.arraycopy activation frame
// instead. We could save some checks if this were not the case.
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length  = op->length()->as_register();
  Register tmp = op->tmp()->as_register();

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // if we don't know anything, or it's an object array, just go through the generic arraycopy
  if (default_type == NULL) {
    Label done;
    // save outgoing arguments on stack in case call to System.arraycopy is needed
    // HACK ALERT. This code used to push the parameters in a hardwired fashion
    // for interpreter calling conventions. Now we have to do it in new style conventions.
    // For the moment, until C1 gets the new register allocator, we just force all the
    // args to the right place (except the register args) and then on the back side
    // reload the register args properly if we go the slow path. Yuck.

    // These are proper for the calling convention
    store_parameter(length, 2);
    store_parameter(dst_pos, 1);
    store_parameter(dst, 0);

    // these are just temporary placements until we need to reload
    store_parameter(src_pos, 3);
    store_parameter(src, 4);
    NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)

    address entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);

    // pass arguments: may push as this is not a safepoint; SP must be fixed at each safepoint
#ifdef _LP64
    // The arguments are in java calling convention so we can trivially shift them to C
    // convention
    assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg0, j_rarg0);
    assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg1, j_rarg1);
    assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg2, j_rarg2);
    assert_different_registers(c_rarg3, j_rarg4);
    __ mov(c_rarg3, j_rarg3);
#ifdef _WIN64
    // Allocate abi space for args but be sure to keep stack aligned
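    // (the Windows x64 ABI requires the caller to reserve 32 bytes of
    // shadow space; the fifth argument goes just above it, at word offset 4)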
    __ subptr(rsp, 6*wordSize);
    store_parameter(j_rarg4, 4);
    __ call(RuntimeAddress(entry));
    __ addptr(rsp, 6*wordSize);
#else
    __ mov(c_rarg4, j_rarg4);
    __ call(RuntimeAddress(entry));
#endif // _WIN64
#else
    __ push(length);
    __ push(dst_pos);
    __ push(dst);
    __ push(src_pos);
    __ push(src);
    __ call_VM_leaf(entry, 5); // removes pushed parameters from the stack

#endif // _LP64

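    // Runtime1::arraycopy returns 0 if it performed the copy; otherwise the
    // register arguments may have been clobbered, so reload them and take
    // the slow path.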
    __ cmpl(rax, 0);
    __ jcc(Assembler::equal, *stub->continuation());

    // Reload values from the stack so they are where the stub
    // expects them.
    __ movptr(dst,     Address(rsp, 0*BytesPerWord));
    __ movptr(dst_pos, Address(rsp, 1*BytesPerWord));
    __ movptr(length,  Address(rsp, 2*BytesPerWord));
    __ movptr(src_pos, Address(rsp, 3*BytesPerWord));
    __ movptr(src,     Address(rsp, 4*BytesPerWord));
    __ jmp(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  int shift_amount;
  Address::ScaleFactor scale;

  switch (elem_size) {
    case 1 :
      shift_amount = 0;
      scale = Address::times_1;
      break;
    case 2 :
      shift_amount = 1;
      scale = Address::times_2;
      break;
    case 4 :
      shift_amount = 2;
      scale = Address::times_4;
      break;
    case 8 :
      shift_amount = 3;
      scale = Address::times_8;
      break;
    default:
      ShouldNotReachHere();
  }

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // length and pos's are all sign extended at this point on 64bit

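  // The optimizer omits whichever of the following guards it could prove
  // unnecessary; each remaining guard branches to the slow-path stub on
  // failure.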
  // test for NULL
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ testptr(src, src);
    __ jcc(Assembler::zero, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ testptr(dst, dst);
    __ jcc(Assembler::zero, *stub->entry());
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ testl(src_pos, src_pos);
    __ jcc(Assembler::less, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ testl(dst_pos, dst_pos);
    __ jcc(Assembler::less, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ testl(length, length);
    __ jcc(Assembler::less, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ lea(tmp, Address(src_pos, length, Address::times_1, 0));
    __ cmpl(tmp, src_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
    __ cmpl(tmp, dst_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::type_check) {
    __ movptr(tmp, src_klass_addr);
    __ cmpptr(tmp, dst_klass_addr);
    __ jcc(Assembler::notEqual, *stub->entry());
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check, or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ movoop(tmp, default_type->constant_encoding());
    if (basic_type != T_OBJECT) {
      __ cmpptr(tmp, dst_klass_addr);
      __ jcc(Assembler::notEqual, halt);
      __ cmpptr(tmp, src_klass_addr);
      __ jcc(Assembler::equal, known_ok);
    } else {
      __ cmpptr(tmp, dst_klass_addr);
      __ jcc(Assembler::equal, known_ok);
      __ cmpptr(src, dst);
      __ jcc(Assembler::equal, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

  if (shift_amount > 0 && basic_type != T_OBJECT) {
    __ shlptr(length, shift_amount);
  }

#ifdef _LP64
  assert_different_registers(c_rarg0, dst, dst_pos, length);
  __ movl2ptr(src_pos, src_pos); // higher 32 bits must be zero
  __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  assert_different_registers(c_rarg1, length);
  __ movl2ptr(dst_pos, dst_pos); // higher 32 bits must be zero
  __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  __ mov(c_rarg2, length);

#else
  __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  store_parameter(tmp, 0);
  __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  store_parameter(tmp, 1);
  store_parameter(length, 2);
#endif // _LP64
  if (basic_type == T_OBJECT) {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy), 0);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::primitive_arraycopy), 0);
  }

  __ bind(*stub->continuation());
}


void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (!UseFastLocking) {
    __ jmp(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
      scratch = op->scratch_opr()->as_register();
    }
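    // lock points at the on-stack BasicLock whose first word receives the
    // displaced mark word; hdr serves as a scratch register for the header.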
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
    if (op->info() != NULL) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}


void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();

  // Update counter for all call types
  ciMethodData* md = method->method_data();
  if (md == NULL) {
    bailout("out of memory building methodDataOop");
    return;
  }
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  __ movoop(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  __ addl(counter_addr, DataLayout::counter_increment);
  Bytecodes::Code bc = method->java_code_at_bci(bci);
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      Tier1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (Tier1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the methodDataOop rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addl(data_addr, DataLayout::counter_increment);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ movoop(recv_addr, known_klass->constant_encoding());
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addl(data_addr, DataLayout::counter_increment);
          return;
        }
      }
    } else {
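      // Dynamic receiver-type profiling: load the receiver's klass and bump
      // the count of the matching row in the VirtualCallData, claiming an
      // empty row if this receiver type has not been seen here before.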
      __ movptr(recv, Address(recv, oopDesc::klass_offset_in_bytes()));
      Label update_done;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        Label next_test;
        // See if the receiver is receiver[n].
        __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))));
        __ jcc(Assembler::notEqual, next_test);
        Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
        __ addl(data_addr, DataLayout::counter_increment);
        __ jmp(update_done);
        __ bind(next_test);
      }

      // Didn't find receiver; find next empty slot and fill it in
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        Label next_test;
        Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
        __ cmpptr(recv_addr, (int32_t)NULL_WORD);
        __ jcc(Assembler::notEqual, next_test);
        __ movptr(recv_addr, recv);
        __ movl(Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))), DataLayout::counter_increment);
        if (i < (VirtualCallData::row_limit() - 1)) {
          __ jmp(update_done);
        }
        __ bind(next_test);
      }

      __ bind(update_done);
    }
  }
}


void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}


void LIR_Assembler::align_backward_branch_target() {
  __ align(BytesPerWord);
}

void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  if (left->is_single_cpu()) {
    __ negl(left->as_register());
    move_regs(left->as_register(), dest->as_register());

  } else if (left->is_double_cpu()) {
    Register lo = left->as_register_lo();
#ifdef _LP64
    Register dst = dest->as_register_lo();
    __ movptr(dst, lo);
    __ negptr(dst);
#else
    Register hi = left->as_register_hi();
    __ lneg(hi, lo);
    if (dest->as_register_lo() == hi) {
      assert(dest->as_register_hi() != lo, "destroying register");
      move_regs(hi, dest->as_register_hi());
      move_regs(lo, dest->as_register_lo());
    } else {
      move_regs(lo, dest->as_register_lo());
      move_regs(hi, dest->as_register_hi());
    }
#endif // _LP64

  } else if (dest->is_single_xmm()) {
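    // flip the sign bit by xor'ing with a 128-bit sign-flip mask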
    if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
      __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
    }
    __ xorps(dest->as_xmm_float_reg(),
             ExternalAddress((address)float_signflip_pool));

  } else if (dest->is_double_xmm()) {
    if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
      __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
    }
    __ xorpd(dest->as_xmm_double_reg(),
             ExternalAddress((address)double_signflip_pool));

  } else if (left->is_single_fpu() || left->is_double_fpu()) {
    assert(left->fpu() == 0, "arg must be on TOS");
    assert(dest->fpu() == 0, "dest must be TOS");
    __ fchs();

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
  assert(addr->is_address() && dest->is_register(), "check");
  Register reg = dest->as_pointer_register();
  __ lea(reg, as_Address(addr->as_address_ptr()));
}


void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  __ call(RuntimeAddress(dest));
  if (info != NULL) {
    add_call_info_here(info);
  }
}


void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  assert(type == T_LONG, "only for volatile long fields");
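  // On 32-bit x86 a pair of 32-bit moves would not be atomic; routing the
  // value through an XMM or FPU register yields a single 64-bit access.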

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  if (src->is_double_xmm()) {
    if (dest->is_double_cpu()) {
#ifdef _LP64
      __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
#else
      __ movdl(dest->as_register_lo(), src->as_xmm_double_reg());
      __ psrlq(src->as_xmm_double_reg(), 32);
      __ movdl(dest->as_register_hi(), src->as_xmm_double_reg());
#endif // _LP64
    } else if (dest->is_double_stack()) {
      __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
    } else if (dest->is_address()) {
      __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_xmm()) {
    if (src->is_double_stack()) {
      __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_double_fpu()) {
    assert(src->fpu_regnrLo() == 0, "must be TOS");
    if (dest->is_double_stack()) {
      __ fistp_d(frame_map()->address_for_slot(dest->double_stack_ix()));
    } else if (dest->is_address()) {
      __ fistp_d(as_Address(dest->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_fpu()) {
    assert(dest->fpu_regnrLo() == 0, "must be TOS");
    if (src->is_double_stack()) {
      __ fild_d(frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ fild_d(as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::membar() {
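  // x86's TSO memory model already enforces every ordering except StoreLoad.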
  // QQQ sparc TSO uses this
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::membar_acquire() {
  // No x86 machines currently require load fences
  // __ load_fence();
}

void LIR_Assembler::membar_release() {
  // No x86 machines currently require store fences
  // __ store_fence();
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
#ifdef _LP64
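  // in 64-bit compiled code r15 is dedicated to the current JavaThread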
  // __ get_thread(result_reg->as_register_lo());
  __ mov(result_reg->as_register(), r15_thread);
#else
  __ get_thread(result_reg->as_register());
#endif // _LP64
}


void LIR_Assembler::peephole(LIR_List*) {
  // do nothing for now
}


#undef __