annotate src/cpu/x86/vm/c1_LIRAssembler_x86.cpp @ 1204:18a389214829
6921352: JSR 292 needs its own deopt handler
Summary: We need to introduce a new MH deopt handler so we can easily determine if the deopt happened at a MH call site or not.
Reviewed-by: never, jrose
author | twisti |
---|---|
date | Mon, 01 Feb 2010 19:29:46 +0100 |
parents | 24128c2ffa87 |
children | 87684f1a88b5 |
rev | line source |
---|---|
0 | 1 /* |
1204 | 2 * Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 # include "incls/_precompiled.incl" | |
26 # include "incls/_c1_LIRAssembler_x86.cpp.incl" | |
27 | |
28 | |
29 // These masks are used to provide 128-bit aligned bitmasks to the XMM | |
30 // instructions, to allow sign-masking or sign-bit flipping. They allow | |
31 // fast versions of NegF/NegD and AbsF/AbsD. | |
32 | |
33 // Note: 'double' and 'long long' have 32-bits alignment on x86. | |
34 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) { | |
35 // Use the expression (adr)&(~0xF) to provide 128-bits aligned address | |
36 // of 128-bits operands for SSE instructions. | |
37 jlong *operand = (jlong*)(((long)adr)&((long)(~0xF))); | |
38 // Store the value to a 128-bits operand. | |
39 operand[0] = lo; | |
40 operand[1] = hi; | |
41 return operand; | |
42 } | |
43 | |
44 // Buffer for 128-bits masks used by SSE instructions. | |
45 static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment) | |
46 | |
47 // Static initialization during VM startup. | |
48 static jlong *float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF)); | |
49 static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF)); | |
50 static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000)); | |
51 static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000)); | |
52 | |
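The `double_quadword` helper above relies on masking the low four address bits to round a pointer down to a 16-byte boundary, so an over-allocated static buffer is guaranteed to contain a properly aligned 128-bit slot for SSE operands. A minimal, self-contained sketch of that trick (illustrative only, not HotSpot code; `pool` and the stored pattern are stand-ins for `fp_signmask_pool` and its mask values):

```cpp
#include <cstdint>
#include <cstdio>

// One 128-bit data slot plus 128 bits of slack, mirroring fp_signmask_pool.
static int64_t pool[(1 + 1) * 2];

int main() {
  // Round the address down to the next 16-byte boundary, as (adr)&(~0xF) does above.
  int64_t* slot = (int64_t*)(((uintptr_t)&pool[1]) & ~(uintptr_t)0xF);
  slot[0] = 0x7FFFFFFF7FFFFFFFLL;   // e.g. the float sign-mask pattern
  slot[1] = 0x7FFFFFFF7FFFFFFFLL;
  std::printf("16-byte aligned: %s\n", ((uintptr_t)slot & 0xF) == 0 ? "yes" : "no");
  return 0;
}
```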
53 | |
54 | |
55 NEEDS_CLEANUP // remove these definitions? | |
56 const Register IC_Klass = rax; // where the IC klass is cached | |
57 const Register SYNC_header = rax; // synchronization header | |
58 const Register SHIFT_count = rcx; // where count for shift operations must be | |
59 | |
60 #define __ _masm-> | |
61 | |
62 | |
63 static void select_different_registers(Register preserve, | |
64 Register extra, | |
65 Register &tmp1, | |
66 Register &tmp2) { | |
67 if (tmp1 == preserve) { | |
68 assert_different_registers(tmp1, tmp2, extra); | |
69 tmp1 = extra; | |
70 } else if (tmp2 == preserve) { | |
71 assert_different_registers(tmp1, tmp2, extra); | |
72 tmp2 = extra; | |
73 } | |
74 assert_different_registers(preserve, tmp1, tmp2); | |
75 } | |
76 | |
77 | |
78 | |
79 static void select_different_registers(Register preserve, | |
80 Register extra, | |
81 Register &tmp1, | |
82 Register &tmp2, | |
83 Register &tmp3) { | |
84 if (tmp1 == preserve) { | |
85 assert_different_registers(tmp1, tmp2, tmp3, extra); | |
86 tmp1 = extra; | |
87 } else if (tmp2 == preserve) { | |
88 assert_different_registers(tmp1, tmp2, tmp3, extra); | |
89 tmp2 = extra; | |
90 } else if (tmp3 == preserve) { | |
91 assert_different_registers(tmp1, tmp2, tmp3, extra); | |
92 tmp3 = extra; | |
93 } | |
94 assert_different_registers(preserve, tmp1, tmp2, tmp3); | |
95 } | |
96 | |
97 | |
98 | |
99 bool LIR_Assembler::is_small_constant(LIR_Opr opr) { | |
100 if (opr->is_constant()) { | |
101 LIR_Const* constant = opr->as_constant_ptr(); | |
102 switch (constant->type()) { | |
103 case T_INT: { | |
104 return true; | |
105 } | |
106 | |
107 default: | |
108 return false; | |
109 } | |
110 } | |
111 return false; | |
112 } | |
113 | |
114 | |
115 LIR_Opr LIR_Assembler::receiverOpr() { | |
304 | 116 return FrameMap::receiver_opr; |
0 | 117 } |
118 | |
119 LIR_Opr LIR_Assembler::incomingReceiverOpr() { | |
120 return receiverOpr(); | |
121 } | |
122 | |
123 LIR_Opr LIR_Assembler::osrBufferPointer() { | |
304 | 124 return FrameMap::as_pointer_opr(receiverOpr()->as_register()); |
0 | 125 } |
126 | |
127 //--------------fpu register translations----------------------- | |
128 | |
129 | |
130 address LIR_Assembler::float_constant(float f) { | |
131 address const_addr = __ float_constant(f); | |
132 if (const_addr == NULL) { | |
133 bailout("const section overflow"); | |
134 return __ code()->consts()->start(); | |
135 } else { | |
136 return const_addr; | |
137 } | |
138 } | |
139 | |
140 | |
141 address LIR_Assembler::double_constant(double d) { | |
142 address const_addr = __ double_constant(d); | |
143 if (const_addr == NULL) { | |
144 bailout("const section overflow"); | |
145 return __ code()->consts()->start(); | |
146 } else { | |
147 return const_addr; | |
148 } | |
149 } | |
150 | |
151 | |
152 void LIR_Assembler::set_24bit_FPU() { | |
153 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24())); | |
154 } | |
155 | |
156 void LIR_Assembler::reset_FPU() { | |
157 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); | |
158 } | |
159 | |
160 void LIR_Assembler::fpop() { | |
161 __ fpop(); | |
162 } | |
163 | |
164 void LIR_Assembler::fxch(int i) { | |
165 __ fxch(i); | |
166 } | |
167 | |
168 void LIR_Assembler::fld(int i) { | |
169 __ fld_s(i); | |
170 } | |
171 | |
172 void LIR_Assembler::ffree(int i) { | |
173 __ ffree(i); | |
174 } | |
175 | |
176 void LIR_Assembler::breakpoint() { | |
177 __ int3(); | |
178 } | |
179 | |
180 void LIR_Assembler::push(LIR_Opr opr) { | |
181 if (opr->is_single_cpu()) { | |
182 __ push_reg(opr->as_register()); | |
183 } else if (opr->is_double_cpu()) { | |
304 | 184 NOT_LP64(__ push_reg(opr->as_register_hi())); |
0 | 185 __ push_reg(opr->as_register_lo()); |
186 } else if (opr->is_stack()) { | |
187 __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix())); | |
188 } else if (opr->is_constant()) { | |
189 LIR_Const* const_opr = opr->as_constant_ptr(); | |
190 if (const_opr->type() == T_OBJECT) { | |
191 __ push_oop(const_opr->as_jobject()); | |
192 } else if (const_opr->type() == T_INT) { | |
193 __ push_jint(const_opr->as_jint()); | |
194 } else { | |
195 ShouldNotReachHere(); | |
196 } | |
197 | |
198 } else { | |
199 ShouldNotReachHere(); | |
200 } | |
201 } | |
202 | |
203 void LIR_Assembler::pop(LIR_Opr opr) { | |
204 if (opr->is_single_cpu()) { | |
304 | 205 __ pop_reg(opr->as_register()); |
0 | 206 } else { |
207 ShouldNotReachHere(); | |
208 } | |
209 } | |
210 | |
304 | 211 bool LIR_Assembler::is_literal_address(LIR_Address* addr) { |
212 return addr->base()->is_illegal() && addr->index()->is_illegal(); | |
213 } | |
214 | |
0 | 215 //------------------------------------------- |
304 | 216 |
0 | 217 Address LIR_Assembler::as_Address(LIR_Address* addr) { |
304 | 218 return as_Address(addr, rscratch1); |
219 } | |
220 | |
221 Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) { | |
0 | 222 if (addr->base()->is_illegal()) { |
223 assert(addr->index()->is_illegal(), "must be illegal too"); | |
304 | 224 AddressLiteral laddr((address)addr->disp(), relocInfo::none); |
225 if (! __ reachable(laddr)) { | |
226 __ movptr(tmp, laddr.addr()); | |
227 Address res(tmp, 0); | |
228 return res; | |
229 } else { | |
230 return __ as_Address(laddr); | |
231 } | |
0 | 232 } |
233 | |
304 | 234 Register base = addr->base()->as_pointer_register(); |
0 | 235 |
236 if (addr->index()->is_illegal()) { | |
237 return Address( base, addr->disp()); | |
304 | 238 } else if (addr->index()->is_cpu_register()) { |
239 Register index = addr->index()->as_pointer_register(); | |
0 | 240 return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp()); |
241 } else if (addr->index()->is_constant()) { | |
304 | 242 intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp(); |
243 assert(Assembler::is_simm32(addr_offset), "must be"); | |
0 | 244 |
245 return Address(base, addr_offset); | |
246 } else { | |
247 Unimplemented(); | |
248 return Address(); | |
249 } | |
250 } | |
251 | |
252 | |
253 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) { | |
254 Address base = as_Address(addr); | |
255 return Address(base._base, base._index, base._scale, base._disp + BytesPerWord); | |
256 } | |
257 | |
258 | |
259 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) { | |
260 return as_Address(addr); | |
261 } | |
262 | |
263 | |
264 void LIR_Assembler::osr_entry() { | |
265 offsets()->set_value(CodeOffsets::OSR_Entry, code_offset()); | |
266 BlockBegin* osr_entry = compilation()->hir()->osr_entry(); | |
267 ValueStack* entry_state = osr_entry->state(); | |
268 int number_of_locks = entry_state->locks_size(); | |
269 | |
270 // we jump here if osr happens with the interpreter | |
271 // state set up to continue at the beginning of the | |
272 // loop that triggered osr - in particular, we have | |
273 // the following registers setup: | |
274 // | |
275 // rcx: osr buffer | |
276 // | |
277 | |
278 // build frame | |
279 ciMethod* m = compilation()->method(); | |
280 __ build_frame(initial_frame_size_in_bytes()); | |
281 | |
282 // OSR buffer is | |
283 // | |
284 // locals[nlocals-1..0] | |
285 // monitors[0..number_of_locks] | |
286 // | |
287 // locals is a direct copy of the interpreter frame in the osr buffer, | |
288 // so the first slot in the local array is the last local from the interpreter | |
289 // and the last slot is local[0] (the receiver) from the interpreter | |
290 // | |
291 // Similarly with locks. The first lock slot in the osr buffer is the nth lock | |
292 // from the interpreter frame, and the nth lock slot in the osr buffer is the 0th lock | |
293 // in the interpreter frame (the method lock for a synchronized method) | |
294 | |
295 // Initialize monitors in the compiled activation. | |
296 // rcx: pointer to osr buffer | |
297 // | |
298 // All other registers are dead at this point and the locals will be | |
299 // copied into place by code emitted in the IR. | |
300 | |
304 | 301 Register OSR_buf = osrBufferPointer()->as_pointer_register(); |
0 | 302 { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below"); |
303 int monitor_offset = BytesPerWord * method()->max_locals() + | |
1060 | 304 (2 * BytesPerWord) * (number_of_locks - 1); |
305 // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in | |
306 // the OSR buffer using 2 word entries: first the lock and then | |
307 // the oop. | |
0 | 308 for (int i = 0; i < number_of_locks; i++) { |
1060 | 309 int slot_offset = monitor_offset - ((i * 2) * BytesPerWord); |
0 | 310 #ifdef ASSERT |
311 // verify the interpreter's monitor has a non-null object | |
312 { | |
313 Label L; | |
1060 | 314 __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD); |
0 | 315 __ jcc(Assembler::notZero, L); |
316 __ stop("locked object is NULL"); | |
317 __ bind(L); | |
318 } | |
319 #endif | |
1060 | 320 __ movptr(rbx, Address(OSR_buf, slot_offset + 0)); |
304 | 321 __ movptr(frame_map()->address_for_monitor_lock(i), rbx); |
1060 | 322 __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord)); |
304 | 323 __ movptr(frame_map()->address_for_monitor_object(i), rbx); |
0 | 324 } |
325 } | |
326 } | |
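The monitor-copy loop above walks the OSR buffer backwards from `monitor_offset`, with each monitor occupying two words (the lock, then the oop). A hedged worked example of that arithmetic, using hypothetical 32-bit values (BytesPerWord = 4, 3 locals, 2 locks) purely for illustration:

```cpp
#include <cstdio>

int main() {
  const int BytesPerWord    = 4;   // assuming a 32-bit VM for this example
  const int max_locals      = 3;   // hypothetical method
  const int number_of_locks = 2;

  // Offset of the outermost monitor: locals come first, then 2-word monitor entries.
  int monitor_offset = BytesPerWord * max_locals +
                       (2 * BytesPerWord) * (number_of_locks - 1);     // 12 + 8 = 20

  for (int i = 0; i < number_of_locks; i++) {
    int slot_offset = monitor_offset - (i * 2) * BytesPerWord;         // 20, then 12
    std::printf("monitor %d: lock at OSR_buf+%d, oop at OSR_buf+%d\n",
                i, slot_offset, slot_offset + 1 * BytesPerWord);
  }
  return 0;
}
```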
327 | |
328 | |
329 // inline cache check; done before the frame is built. | |
330 int LIR_Assembler::check_icache() { | |
331 Register receiver = FrameMap::receiver_opr->as_register(); | |
332 Register ic_klass = IC_Klass; | |
304 | 333 const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9); |
0 | 334 |
335 if (!VerifyOops) { | |
336 // insert some nops so that the verified entry point is aligned on CodeEntryAlignment | |
304 | 337 while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) { |
0 | 338 __ nop(); |
339 } | |
340 } | |
341 int offset = __ offset(); | |
342 __ inline_cache_check(receiver, IC_Klass); | |
343 assert(__ offset() % CodeEntryAlignment == 0 || VerifyOops, "alignment must be correct"); | |
344 if (VerifyOops) { | |
345 // force alignment after the cache check. | |
346 // It's been verified to be aligned if !VerifyOops | |
347 __ align(CodeEntryAlignment); | |
348 } | |
349 return offset; | |
350 } | |
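The nop padding in `check_icache` above makes the inline-cache check end exactly on a `CodeEntryAlignment` boundary, so the verified entry point that follows it is aligned. A small sketch of the same padding computation with made-up numbers (the alignment, check size and starting offset are illustrative assumptions, not real values from a build):

```cpp
#include <cstdio>

int main() {
  const int CodeEntryAlignment = 32;   // hypothetical alignment
  const int ic_cmp_size        = 10;   // the LP64 size used above
  int offset = 7;                      // hypothetical current code offset
  int nops = 0;
  // Same condition as the loop above: pad until the check's end hits the boundary.
  while ((offset + ic_cmp_size) % CodeEntryAlignment != 0) {
    offset++;                          // each nop is one byte
    nops++;
  }
  std::printf("emit %d nops; verified entry at offset %d\n",
              nops, offset + ic_cmp_size);
  return 0;
}
```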
351 | |
352 | |
353 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) { | |
354 jobject o = NULL; | |
355 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id); | |
356 __ movoop(reg, o); | |
357 patching_epilog(patch, lir_patch_normal, reg, info); | |
358 } | |
359 | |
360 | |
361 void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register new_hdr, int monitor_no, Register exception) { | |
362 if (exception->is_valid()) { | |
363 // preserve exception | |
364 // note: the monitor_exit runtime call is a leaf routine | |
365 // and cannot block => no GC can happen | |
366 // The slow case (MonitorAccessStub) uses the first two stack slots | |
367 // ([esp+0] and [esp+4]), therefore we store the exception at [esp+8] | |
304 | 368 __ movptr (Address(rsp, 2*wordSize), exception); |
0 | 369 } |
370 | |
371 Register obj_reg = obj_opr->as_register(); | |
372 Register lock_reg = lock_opr->as_register(); | |
373 | |
374 // setup registers (lock_reg must be rax, for lock_object) | |
375 assert(obj_reg != SYNC_header && lock_reg != SYNC_header, "rax, must be available here"); | |
376 Register hdr = lock_reg; | |
377 assert(new_hdr == SYNC_header, "wrong register"); | |
378 lock_reg = new_hdr; | |
379 // compute pointer to BasicLock | |
380 Address lock_addr = frame_map()->address_for_monitor_lock(monitor_no); | |
304 | 381 __ lea(lock_reg, lock_addr); |
0 | 382 // unlock object |
383 MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, true, monitor_no); | |
384 // _slow_case_stubs->append(slow_case); | |
385 // temporary fix: must be created after the exception handler, therefore as a call stub | |
386 _slow_case_stubs->append(slow_case); | |
387 if (UseFastLocking) { | |
388 // try inlined fast unlocking first, revert to slow locking if it fails | |
389 // note: lock_reg points to the displaced header since the displaced header offset is 0! | |
390 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); | |
391 __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry()); | |
392 } else { | |
393 // always do slow unlocking | |
394 // note: the slow unlocking code could be inlined here, however if we use | |
395 // slow unlocking, speed doesn't matter anyway and this solution is | |
396 // simpler and requires less duplicated code - additionally, the | |
397 // slow unlocking code is the same in either case which simplifies | |
398 // debugging | |
399 __ jmp(*slow_case->entry()); | |
400 } | |
401 // done | |
402 __ bind(*slow_case->continuation()); | |
403 | |
404 if (exception->is_valid()) { | |
405 // restore exception | |
304 | 406 __ movptr (exception, Address(rsp, 2 * wordSize)); |
0 | 407 } |
408 } | |
409 | |
410 // This specifies the rsp decrement needed to build the frame | |
411 int LIR_Assembler::initial_frame_size_in_bytes() { | |
412 // if rounding, must let FrameMap know! | |
304 | 413 |
414 // The frame_map records size in slots (32bit word) | |
415 | |
416 // subtract two words to account for return address and link | |
417 return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size; | |
0 | 418 } |
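The frame-size formula above converts the frame map's slot count into a byte decrement after dropping the two words already pushed by the call (return address and saved link). Purely illustrative arithmetic, assuming a 32-bit VM and a hypothetical 10-slot frame:

```cpp
#include <cstdio>

int main() {
  const int slots_per_word  = 1;    // VMRegImpl::slots_per_word on 32-bit (assumption)
  const int stack_slot_size = 4;    // VMRegImpl::stack_slot_size on 32-bit (assumption)
  const int framesize       = 10;   // hypothetical frame_map()->framesize() in slots

  // Return address and link account for 2 words that are not part of the decrement.
  int rsp_decrement = (framesize - 2 * slots_per_word) * stack_slot_size;  // 32 bytes
  std::printf("rsp decrement: %d bytes\n", rsp_decrement);
  return 0;
}
```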
419 | |
420 | |
1204 | 421 int LIR_Assembler::emit_exception_handler() { |
0 | 422 // if the last instruction is a call (typically to do a throw which |
423 // is coming at the end after block reordering) the return address | |
424 // must still point into the code area in order to avoid assertion | |
425 // failures when searching for the corresponding bci => add a nop | |
426 // (was bug 5/14/1999 - gri) | |
427 __ nop(); | |
428 | |
429 // generate code for exception handler | |
430 address handler_base = __ start_a_stub(exception_handler_size); | |
431 if (handler_base == NULL) { | |
432 // not enough space left for the handler | |
433 bailout("exception handler overflow"); | |
1204 | 434 return -1; |
0 | 435 } |
1204 | 436 |
0 | 437 int offset = code_offset(); |
438 | |
439 // if the method does not have an exception handler, then there is | |
440 // no reason to search for one | |
780 | 441 if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_exceptions()) { |
0 | 442 // the exception oop and pc are in rax, and rdx |
443 // no other registers need to be preserved, so invalidate them | |
444 __ invalidate_registers(false, true, true, false, true, true); | |
445 | |
446 // check that there is really an exception | |
447 __ verify_not_null_oop(rax); | |
448 | |
449 // search an exception handler (rax: exception oop, rdx: throwing pc) | |
450 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id))); | |
451 | |
452 // if the call returns here, then the exception handler for particular | |
453 // exception doesn't exist -> unwind activation and forward exception to caller | |
454 } | |
455 | |
456 // the exception oop is in rax, | |
457 // no other registers need to be preserved, so invalidate them | |
458 __ invalidate_registers(false, true, true, true, true, true); | |
459 | |
460 // check that there is really an exception | |
461 __ verify_not_null_oop(rax); | |
462 | |
463 // unlock the receiver/klass if necessary | |
464 // rax,: exception | |
465 ciMethod* method = compilation()->method(); | |
466 if (method->is_synchronized() && GenerateSynchronizationCode) { | |
467 monitorexit(FrameMap::rbx_oop_opr, FrameMap::rcx_opr, SYNC_header, 0, rax); | |
468 } | |
469 | |
470 // unwind activation and forward exception to caller | |
471 // rax,: exception | |
472 __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id))); | |
473 assert(code_offset() - offset <= exception_handler_size, "overflow"); | |
474 __ end_a_stub(); | |
1204 | 475 |
476 return offset; | |
0 | 477 } |
478 | |
1204 | 479 |
480 int LIR_Assembler::emit_deopt_handler() { | |
0 | 481 // if the last instruction is a call (typically to do a throw which |
482 // is coming at the end after block reordering) the return address | |
483 // must still point into the code area in order to avoid assertion | |
484 // failures when searching for the corresponding bci => add a nop | |
485 // (was bug 5/14/1999 - gri) | |
486 __ nop(); | |
487 | |
488 // generate code for deopt handler | |
489 address handler_base = __ start_a_stub(deopt_handler_size); | |
490 if (handler_base == NULL) { | |
491 // not enough space left for the handler | |
492 bailout("deopt handler overflow"); | |
1204 | 493 return -1; |
0 | 494 } |
1204 | 495 |
0 | 496 int offset = code_offset(); |
497 InternalAddress here(__ pc()); | |
498 __ pushptr(here.addr()); | |
499 __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack())); | |
500 assert(code_offset() - offset <= deopt_handler_size, "overflow"); | |
501 __ end_a_stub(); | |
502 | |
1204 | 503 return offset; |
0 | 504 } |
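The deopt handler emitted above pushes its own address as the return pc and then jumps to the deopt blob's unpack entry; per the changeset summary, giving MethodHandle call sites a handler of their own means the saved pc alone tells the runtime which kind of site deoptimized. A purely conceptual sketch of that classification (all names below are hypothetical, not HotSpot APIs):

```cpp
#include <cstdint>

// Illustrative only: compare the pc that a handler stub pushed against the
// known start addresses of the two handlers emitted for this compiled method.
static bool deopt_happened_at_mh_call_site(uintptr_t saved_pc,
                                           uintptr_t deopt_handler_begin,
                                           uintptr_t mh_deopt_handler_begin) {
  (void)deopt_handler_begin;                 // the "ordinary" handler's address
  return saved_pc == mh_deopt_handler_begin; // MH handler pushed its own address
}
```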
505 | |
506 | |
507 // This is the fast version of java.lang.String.compare; it has no | |
508 // OSR entry and therefore we generate a slow version for OSRs | |
509 void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) { | |
304 | 510 __ movptr (rbx, rcx); // receiver is in rcx |
511 __ movptr (rax, arg1->as_register()); | |
0 | 512 |
513 // Get addresses of first characters from both Strings | |
304 | 514 __ movptr (rsi, Address(rax, java_lang_String::value_offset_in_bytes())); |
515 __ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes())); | |
516 __ lea (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); | |
0 | 517 |
518 | |
519 // rbx, may be NULL | |
520 add_debug_info_for_null_check_here(info); | |
304 | 521 __ movptr (rdi, Address(rbx, java_lang_String::value_offset_in_bytes())); |
522 __ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes())); | |
523 __ lea (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); | |
0 | 524 |
525 // compute minimum length (in rax) and difference of lengths (on top of stack) | |
526 if (VM_Version::supports_cmov()) { | |
304 | 527 __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes())); |
528 __ movl (rax, Address(rax, java_lang_String::count_offset_in_bytes())); | |
529 __ mov (rcx, rbx); | |
530 __ subptr (rbx, rax); // subtract lengths | |
531 __ push (rbx); // result | |
532 __ cmov (Assembler::lessEqual, rax, rcx); | |
0 | 533 } else { |
534 Label L; | |
304 | 535 __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes())); |
536 __ movl (rcx, Address(rax, java_lang_String::count_offset_in_bytes())); | |
537 __ mov (rax, rbx); | |
538 __ subptr (rbx, rcx); | |
539 __ push (rbx); | |
540 __ jcc (Assembler::lessEqual, L); | |
541 __ mov (rax, rcx); | |
0 | 542 __ bind (L); |
543 } | |
544 // is minimum length 0? | |
545 Label noLoop, haveResult; | |
304 | 546 __ testptr (rax, rax); |
0 | 547 __ jcc (Assembler::zero, noLoop); |
548 | |
549 // compare first characters | |
622 | 550 __ load_unsigned_short(rcx, Address(rdi, 0)); |
622 | 551 __ load_unsigned_short(rbx, Address(rsi, 0)); |
0 | 552 __ subl(rcx, rbx); |
553 __ jcc(Assembler::notZero, haveResult); | |
554 // starting loop | |
555 __ decrement(rax); // we already tested index: skip one | |
556 __ jcc(Assembler::zero, noLoop); | |
557 | |
558 // set rsi/rdi to the end of the arrays (arrays have same length) | |
559 // negate the index | |
560 | |
304 | 561 __ lea(rsi, Address(rsi, rax, Address::times_2, type2aelembytes(T_CHAR))); |
562 __ lea(rdi, Address(rdi, rax, Address::times_2, type2aelembytes(T_CHAR))); | |
563 __ negptr(rax); | |
0 | 564 |
565 // compare the strings in a loop | |
566 | |
567 Label loop; | |
568 __ align(wordSize); | |
569 __ bind(loop); | |
622 | 570 __ load_unsigned_short(rcx, Address(rdi, rax, Address::times_2, 0)); |
622 | 571 __ load_unsigned_short(rbx, Address(rsi, rax, Address::times_2, 0)); |
0 | 572 __ subl(rcx, rbx); |
573 __ jcc(Assembler::notZero, haveResult); | |
574 __ increment(rax); | |
575 __ jcc(Assembler::notZero, loop); | |
576 | |
577 // strings are equal up to min length | |
578 | |
579 __ bind(noLoop); | |
304 | 580 __ pop(rax); |
0 | 581 return_op(LIR_OprFact::illegalOpr); |
582 | |
583 __ bind(haveResult); | |
584 // leave instruction is going to discard the TOS value | |
304 | 585 __ mov (rax, rcx); // result of call is in rax, |
0 | 586 } |
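For readability, here is a high-level C++ sketch of the algorithm the hand-written assembly above implements over the String fields it loads (`value[]`, `offset`, `count`): compare characters up to the shorter length, otherwise return the length difference. It is an illustration, not HotSpot code, and the function name and signature are invented:

```cpp
#include <algorithm>
#include <cstdint>

int string_compare(const uint16_t* recv_value, int recv_offset, int recv_count,
                   const uint16_t* arg_value,  int arg_offset,  int arg_count) {
  const uint16_t* a = recv_value + recv_offset;  // first char of the receiver
  const uint16_t* b = arg_value  + arg_offset;   // first char of the argument
  int min_len = std::min(recv_count, arg_count);
  for (int i = 0; i < min_len; i++) {
    int diff = (int)a[i] - (int)b[i];            // first differing character decides
    if (diff != 0) return diff;
  }
  return recv_count - arg_count;                 // strings equal up to min length
}
```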
587 | |
588 | |
589 void LIR_Assembler::return_op(LIR_Opr result) { | |
590 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,"); | |
591 if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) { | |
592 assert(result->fpu() == 0, "result must already be on TOS"); | |
593 } | |
594 | |
595 // Pop the stack before the safepoint code | |
596 __ leave(); | |
597 | |
598 bool result_is_oop = result->is_valid() ? result->is_oop() : false; | |
599 | |
600 // Note: we do not need to round double result; float result has the right precision | |
601 // the poll sets the condition code, but no data registers | |
602 AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()), | |
603 relocInfo::poll_return_type); | |
304 | 604 |
605 // NOTE: this requires that the polling page be reachable else the reloc | |
606 // goes to the movq that loads the address and not the faulting instruction | |
607 // which breaks the signal handler code | |
608 | |
0 | 609 __ test32(rax, polling_page); |
610 | |
611 __ ret(0); | |
612 } | |
613 | |
614 | |
615 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { | |
616 AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()), | |
617 relocInfo::poll_type); | |
618 | |
619 if (info != NULL) { | |
620 add_debug_info_for_branch(info); | |
621 } else { | |
622 ShouldNotReachHere(); | |
623 } | |
624 | |
625 int offset = __ offset(); | |
304 | 626 |
627 // NOTE: this requires that the polling page be reachable else the reloc | |
628 // goes to the movq that loads the address and not the faulting instruction | |
629 // which breaks the signal handler code | |
630 | |
0 | 631 __ test32(rax, polling_page); |
632 return offset; | |
633 } | |
634 | |
635 | |
636 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) { | |
304 | 637 if (from_reg != to_reg) __ mov(to_reg, from_reg); |
0 | 638 } |
639 | |
640 void LIR_Assembler::swap_reg(Register a, Register b) { | |
304 | 641 __ xchgptr(a, b); |
0 | 642 } |
643 | |
644 | |
645 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { | |
646 assert(src->is_constant(), "should not call otherwise"); | |
647 assert(dest->is_register(), "should not call otherwise"); | |
648 LIR_Const* c = src->as_constant_ptr(); | |
649 | |
650 switch (c->type()) { | |
651 case T_INT: { | |
652 assert(patch_code == lir_patch_none, "no patching handled here"); | |
653 __ movl(dest->as_register(), c->as_jint()); | |
654 break; | |
655 } | |
656 | |
657 case T_LONG: { | |
658 assert(patch_code == lir_patch_none, "no patching handled here"); | |
304 | 659 #ifdef _LP64 |
660 __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong()); | |
661 #else | |
662 __ movptr(dest->as_register_lo(), c->as_jint_lo()); | |
663 __ movptr(dest->as_register_hi(), c->as_jint_hi()); | |
664 #endif // _LP64 | |
0 | 665 break; |
666 } | |
667 | |
668 case T_OBJECT: { | |
669 if (patch_code != lir_patch_none) { | |
670 jobject2reg_with_patching(dest->as_register(), info); | |
671 } else { | |
672 __ movoop(dest->as_register(), c->as_jobject()); | |
673 } | |
674 break; | |
675 } | |
676 | |
677 case T_FLOAT: { | |
678 if (dest->is_single_xmm()) { | |
679 if (c->is_zero_float()) { | |
680 __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg()); | |
681 } else { | |
682 __ movflt(dest->as_xmm_float_reg(), | |
683 InternalAddress(float_constant(c->as_jfloat()))); | |
684 } | |
685 } else { | |
686 assert(dest->is_single_fpu(), "must be"); | |
687 assert(dest->fpu_regnr() == 0, "dest must be TOS"); | |
688 if (c->is_zero_float()) { | |
689 __ fldz(); | |
690 } else if (c->is_one_float()) { | |
691 __ fld1(); | |
692 } else { | |
693 __ fld_s (InternalAddress(float_constant(c->as_jfloat()))); | |
694 } | |
695 } | |
696 break; | |
697 } | |
698 | |
699 case T_DOUBLE: { | |
700 if (dest->is_double_xmm()) { | |
701 if (c->is_zero_double()) { | |
702 __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg()); | |
703 } else { | |
704 __ movdbl(dest->as_xmm_double_reg(), | |
705 InternalAddress(double_constant(c->as_jdouble()))); | |
706 } | |
707 } else { | |
708 assert(dest->is_double_fpu(), "must be"); | |
709 assert(dest->fpu_regnrLo() == 0, "dest must be TOS"); | |
710 if (c->is_zero_double()) { | |
711 __ fldz(); | |
712 } else if (c->is_one_double()) { | |
713 __ fld1(); | |
714 } else { | |
715 __ fld_d (InternalAddress(double_constant(c->as_jdouble()))); | |
716 } | |
717 } | |
718 break; | |
719 } | |
720 | |
721 default: | |
722 ShouldNotReachHere(); | |
723 } | |
724 } | |
725 | |
726 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) { | |
727 assert(src->is_constant(), "should not call otherwise"); | |
728 assert(dest->is_stack(), "should not call otherwise"); | |
729 LIR_Const* c = src->as_constant_ptr(); | |
730 | |
731 switch (c->type()) { | |
732 case T_INT: // fall through | |
733 case T_FLOAT: | |
734 __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits()); | |
735 break; | |
736 | |
737 case T_OBJECT: | |
738 __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject()); | |
739 break; | |
740 | |
741 case T_LONG: // fall through | |
742 case T_DOUBLE: | |
304 | 743 #ifdef _LP64 |
744 __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(), | |
745 lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits()); | |
746 #else | |
747 __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(), | |
748 lo_word_offset_in_bytes), c->as_jint_lo_bits()); | |
749 __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(), | |
750 hi_word_offset_in_bytes), c->as_jint_hi_bits()); | |
751 #endif // _LP64 | |
0 | 752 break; |
753 | |
754 default: | |
755 ShouldNotReachHere(); | |
756 } | |
757 } | |
758 | |
759 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info ) { | |
760 assert(src->is_constant(), "should not call otherwise"); | |
761 assert(dest->is_address(), "should not call otherwise"); | |
762 LIR_Const* c = src->as_constant_ptr(); | |
763 LIR_Address* addr = dest->as_address_ptr(); | |
764 | |
304 | 765 int null_check_here = code_offset(); |
0 | 766 switch (type) { |
767 case T_INT: // fall through | |
768 case T_FLOAT: | |
769 __ movl(as_Address(addr), c->as_jint_bits()); | |
770 break; | |
771 | |
772 case T_OBJECT: // fall through | |
773 case T_ARRAY: | |
774 if (c->as_jobject() == NULL) { | |
512 | 775 __ movptr(as_Address(addr), NULL_WORD); |
0 | 776 } else { |
304 | 777 if (is_literal_address(addr)) { |
778 ShouldNotReachHere(); | |
779 __ movoop(as_Address(addr, noreg), c->as_jobject()); | |
780 } else { | |
1060 | 781 #ifdef _LP64 |
782 __ movoop(rscratch1, c->as_jobject()); | |
783 null_check_here = code_offset(); | |
784 __ movptr(as_Address_lo(addr), rscratch1); | |
785 #else | |
304 | 786 __ movoop(as_Address(addr), c->as_jobject()); |
1060 | 787 #endif |
304 | 788 } |
0 | 789 } |
790 break; | |
791 | |
792 case T_LONG: // fall through | |
793 case T_DOUBLE: | |
304 | 794 #ifdef _LP64 |
795 if (is_literal_address(addr)) { | |
796 ShouldNotReachHere(); | |
797 __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits()); | |
798 } else { | |
799 __ movptr(r10, (intptr_t)c->as_jlong_bits()); | |
800 null_check_here = code_offset(); | |
801 __ movptr(as_Address_lo(addr), r10); | |
802 } | |
803 #else | |
804 // Always reachable in 32bit so this doesn't produce useless move literal | |
805 __ movptr(as_Address_hi(addr), c->as_jint_hi_bits()); | |
806 __ movptr(as_Address_lo(addr), c->as_jint_lo_bits()); | |
807 #endif // _LP64 | |
0 | 808 break; |
809 | |
810 case T_BOOLEAN: // fall through | |
811 case T_BYTE: | |
812 __ movb(as_Address(addr), c->as_jint() & 0xFF); | |
813 break; | |
814 | |
815 case T_CHAR: // fall through | |
816 case T_SHORT: | |
817 __ movw(as_Address(addr), c->as_jint() & 0xFFFF); | |
818 break; | |
819 | |
820 default: | |
821 ShouldNotReachHere(); | |
822 }; | |
304 | 823 |
824 if (info != NULL) { | |
825 add_debug_info_for_null_check(null_check_here, info); | |
826 } | |
0 | 827 } |
828 | |
829 | |
830 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) { | |
831 assert(src->is_register(), "should not call otherwise"); | |
832 assert(dest->is_register(), "should not call otherwise"); | |
833 | |
834 // move between cpu-registers | |
835 if (dest->is_single_cpu()) { | |
304 | 836 #ifdef _LP64 |
837 if (src->type() == T_LONG) { | |
838 // Can do LONG -> OBJECT | |
839 move_regs(src->as_register_lo(), dest->as_register()); | |
840 return; | |
841 } | |
842 #endif | |
0 | 843 assert(src->is_single_cpu(), "must match"); |
844 if (src->type() == T_OBJECT) { | |
845 __ verify_oop(src->as_register()); | |
846 } | |
847 move_regs(src->as_register(), dest->as_register()); | |
848 | |
849 } else if (dest->is_double_cpu()) { | |
304 | 850 #ifdef _LP64 |
851 if (src->type() == T_OBJECT || src->type() == T_ARRAY) { | |
852 // Surprising to me but we can see move of a long to t_object | |
853 __ verify_oop(src->as_register()); | |
854 move_regs(src->as_register(), dest->as_register_lo()); | |
855 return; | |
856 } | |
857 #endif | |
0 | 858 assert(src->is_double_cpu(), "must match"); |
859 Register f_lo = src->as_register_lo(); | |
860 Register f_hi = src->as_register_hi(); | |
861 Register t_lo = dest->as_register_lo(); | |
862 Register t_hi = dest->as_register_hi(); | |
304 | 863 #ifdef _LP64 |
864 assert(f_hi == f_lo, "must be same"); | |
865 assert(t_hi == t_lo, "must be same"); | |
866 move_regs(f_lo, t_lo); | |
867 #else | |
0 | 868 assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation"); |
869 | |
304 | 870 |
0 | 871 if (f_lo == t_hi && f_hi == t_lo) { |
872 swap_reg(f_lo, f_hi); | |
873 } else if (f_hi == t_lo) { | |
874 assert(f_lo != t_hi, "overwriting register"); | |
875 move_regs(f_hi, t_hi); | |
876 move_regs(f_lo, t_lo); | |
877 } else { | |
878 assert(f_hi != t_lo, "overwriting register"); | |
879 move_regs(f_lo, t_lo); | |
880 move_regs(f_hi, t_hi); | |
881 } | |
304 | 882 #endif // LP64 |
0 | 883 |
884 // special moves from fpu-register to xmm-register | |
885 // necessary for method results | |
886 } else if (src->is_single_xmm() && !dest->is_single_xmm()) { | |
887 __ movflt(Address(rsp, 0), src->as_xmm_float_reg()); | |
888 __ fld_s(Address(rsp, 0)); | |
889 } else if (src->is_double_xmm() && !dest->is_double_xmm()) { | |
890 __ movdbl(Address(rsp, 0), src->as_xmm_double_reg()); | |
891 __ fld_d(Address(rsp, 0)); | |
892 } else if (dest->is_single_xmm() && !src->is_single_xmm()) { | |
893 __ fstp_s(Address(rsp, 0)); | |
894 __ movflt(dest->as_xmm_float_reg(), Address(rsp, 0)); | |
895 } else if (dest->is_double_xmm() && !src->is_double_xmm()) { | |
896 __ fstp_d(Address(rsp, 0)); | |
897 __ movdbl(dest->as_xmm_double_reg(), Address(rsp, 0)); | |
898 | |
899 // move between xmm-registers | |
900 } else if (dest->is_single_xmm()) { | |
901 assert(src->is_single_xmm(), "must match"); | |
902 __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg()); | |
903 } else if (dest->is_double_xmm()) { | |
904 assert(src->is_double_xmm(), "must match"); | |
905 __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg()); | |
906 | |
907 // move between fpu-registers (no instruction necessary because of fpu-stack) | |
908 } else if (dest->is_single_fpu() || dest->is_double_fpu()) { | |
909 assert(src->is_single_fpu() || src->is_double_fpu(), "must match"); | |
910 assert(src->fpu() == dest->fpu(), "currently should be nothing to do"); | |
911 } else { | |
912 ShouldNotReachHere(); | |
913 } | |
914 } | |
915 | |
916 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) { | |
917 assert(src->is_register(), "should not call otherwise"); | |
918 assert(dest->is_stack(), "should not call otherwise"); | |
919 | |
920 if (src->is_single_cpu()) { | |
921 Address dst = frame_map()->address_for_slot(dest->single_stack_ix()); | |
922 if (type == T_OBJECT || type == T_ARRAY) { | |
923 __ verify_oop(src->as_register()); | |
304 | 924 __ movptr (dst, src->as_register()); |
925 } else { | |
926 __ movl (dst, src->as_register()); | |
0 | 927 } |
928 | |
929 } else if (src->is_double_cpu()) { | |
930 Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes); | |
931 Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes); | |
304 | 932 __ movptr (dstLO, src->as_register_lo()); |
933 NOT_LP64(__ movptr (dstHI, src->as_register_hi())); | |
0 | 934 |
935 } else if (src->is_single_xmm()) { | |
936 Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix()); | |
937 __ movflt(dst_addr, src->as_xmm_float_reg()); | |
938 | |
939 } else if (src->is_double_xmm()) { | |
940 Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix()); | |
941 __ movdbl(dst_addr, src->as_xmm_double_reg()); | |
942 | |
943 } else if (src->is_single_fpu()) { | |
944 assert(src->fpu_regnr() == 0, "argument must be on TOS"); | |
945 Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix()); | |
946 if (pop_fpu_stack) __ fstp_s (dst_addr); | |
947 else __ fst_s (dst_addr); | |
948 | |
949 } else if (src->is_double_fpu()) { | |
950 assert(src->fpu_regnrLo() == 0, "argument must be on TOS"); | |
951 Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix()); | |
952 if (pop_fpu_stack) __ fstp_d (dst_addr); | |
953 else __ fst_d (dst_addr); | |
954 | |
955 } else { | |
956 ShouldNotReachHere(); | |
957 } | |
958 } | |
959 | |
960 | |
961 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool /* unaligned */) { | |
962 LIR_Address* to_addr = dest->as_address_ptr(); | |
963 PatchingStub* patch = NULL; | |
964 | |
965 if (type == T_ARRAY || type == T_OBJECT) { | |
966 __ verify_oop(src->as_register()); | |
967 } | |
968 if (patch_code != lir_patch_none) { | |
969 patch = new PatchingStub(_masm, PatchingStub::access_field_id); | |
304 | 970 Address toa = as_Address(to_addr); |
971 assert(toa.disp() != 0, "must have"); | |
0 | 972 } |
973 if (info != NULL) { | |
974 add_debug_info_for_null_check_here(info); | |
975 } | |
976 | |
977 switch (type) { | |
978 case T_FLOAT: { | |
979 if (src->is_single_xmm()) { | |
980 __ movflt(as_Address(to_addr), src->as_xmm_float_reg()); | |
981 } else { | |
982 assert(src->is_single_fpu(), "must be"); | |
983 assert(src->fpu_regnr() == 0, "argument must be on TOS"); | |
984 if (pop_fpu_stack) __ fstp_s(as_Address(to_addr)); | |
985 else __ fst_s (as_Address(to_addr)); | |
986 } | |
987 break; | |
988 } | |
989 | |
990 case T_DOUBLE: { | |
991 if (src->is_double_xmm()) { | |
992 __ movdbl(as_Address(to_addr), src->as_xmm_double_reg()); | |
993 } else { | |
994 assert(src->is_double_fpu(), "must be"); | |
995 assert(src->fpu_regnrLo() == 0, "argument must be on TOS"); | |
996 if (pop_fpu_stack) __ fstp_d(as_Address(to_addr)); | |
997 else __ fst_d (as_Address(to_addr)); | |
998 } | |
999 break; | |
1000 } | |
1001 | |
1002 case T_ADDRESS: // fall through | |
1003 case T_ARRAY: // fall through | |
1004 case T_OBJECT: // fall through | |
304 | 1005 #ifdef _LP64 |
1006 __ movptr(as_Address(to_addr), src->as_register()); | |
1007 break; | |
1008 #endif // _LP64 | |
0 | 1009 case T_INT: |
1010 __ movl(as_Address(to_addr), src->as_register()); | |
1011 break; | |
1012 | |
1013 case T_LONG: { | |
1014 Register from_lo = src->as_register_lo(); | |
1015 Register from_hi = src->as_register_hi(); | |
304 | 1016 #ifdef _LP64 |
1017 __ movptr(as_Address_lo(to_addr), from_lo); | |
1018 #else | |
0 | 1019 Register base = to_addr->base()->as_register(); |
1020 Register index = noreg; | |
1021 if (to_addr->index()->is_register()) { | |
1022 index = to_addr->index()->as_register(); | |
1023 } | |
1024 if (base == from_lo || index == from_lo) { | |
1025 assert(base != from_hi, "can't be"); | |
1026 assert(index == noreg || (index != base && index != from_hi), "can't handle this"); | |
1027 __ movl(as_Address_hi(to_addr), from_hi); | |
1028 if (patch != NULL) { | |
1029 patching_epilog(patch, lir_patch_high, base, info); | |
1030 patch = new PatchingStub(_masm, PatchingStub::access_field_id); | |
1031 patch_code = lir_patch_low; | |
1032 } | |
1033 __ movl(as_Address_lo(to_addr), from_lo); | |
1034 } else { | |
1035 assert(index == noreg || (index != base && index != from_lo), "can't handle this"); | |
1036 __ movl(as_Address_lo(to_addr), from_lo); | |
1037 if (patch != NULL) { | |
1038 patching_epilog(patch, lir_patch_low, base, info); | |
1039 patch = new PatchingStub(_masm, PatchingStub::access_field_id); | |
1040 patch_code = lir_patch_high; | |
1041 } | |
1042 __ movl(as_Address_hi(to_addr), from_hi); | |
1043 } | |
304 | 1044 #endif // _LP64 |
0 | 1045 break; |
1046 } | |
1047 | |
1048 case T_BYTE: // fall through | |
1049 case T_BOOLEAN: { | |
1050 Register src_reg = src->as_register(); | |
1051 Address dst_addr = as_Address(to_addr); | |
1052 assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6"); | |
1053 __ movb(dst_addr, src_reg); | |
1054 break; | |
1055 } | |
1056 | |
1057 case T_CHAR: // fall through | |
1058 case T_SHORT: | |
1059 __ movw(as_Address(to_addr), src->as_register()); | |
1060 break; | |
1061 | |
1062 default: | |
1063 ShouldNotReachHere(); | |
1064 } | |
1065 | |
1066 if (patch_code != lir_patch_none) { | |
1067 patching_epilog(patch, patch_code, to_addr->base()->as_register(), info); | |
1068 } | |
1069 } | |
1070 | |
1071 | |
1072 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) { | |
1073 assert(src->is_stack(), "should not call otherwise"); | |
1074 assert(dest->is_register(), "should not call otherwise"); | |
1075 | |
1076 if (dest->is_single_cpu()) { | |
1077 if (type == T_ARRAY || type == T_OBJECT) { | |
304 | 1078 __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); |
0 | 1079 __ verify_oop(dest->as_register()); |
304 | 1080 } else { |
1081 __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); | |
0 | 1082 } |
1083 | |
1084 } else if (dest->is_double_cpu()) { | |
1085 Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes); | |
1086 Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes); | |
304 | 1087 __ movptr(dest->as_register_lo(), src_addr_LO); |
1088 NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI)); | |
0 | 1089 |
1090 } else if (dest->is_single_xmm()) { | |
1091 Address src_addr = frame_map()->address_for_slot(src->single_stack_ix()); | |
1092 __ movflt(dest->as_xmm_float_reg(), src_addr); | |
1093 | |
1094 } else if (dest->is_double_xmm()) { | |
1095 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix()); | |
1096 __ movdbl(dest->as_xmm_double_reg(), src_addr); | |
1097 | |
1098 } else if (dest->is_single_fpu()) { | |
1099 assert(dest->fpu_regnr() == 0, "dest must be TOS"); | |
1100 Address src_addr = frame_map()->address_for_slot(src->single_stack_ix()); | |
1101 __ fld_s(src_addr); | |
1102 | |
1103 } else if (dest->is_double_fpu()) { | |
1104 assert(dest->fpu_regnrLo() == 0, "dest must be TOS"); | |
1105 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix()); | |
1106 __ fld_d(src_addr); | |
1107 | |
1108 } else { | |
1109 ShouldNotReachHere(); | |
1110 } | |
1111 } | |
1112 | |
1113 | |
1114 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) { | |
1115 if (src->is_single_stack()) { | |
304 | 1116 if (type == T_OBJECT || type == T_ARRAY) { |
1117 __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix())); | |
1118 __ popptr (frame_map()->address_for_slot(dest->single_stack_ix())); | |
1119 } else { | |
1060 | 1120 #ifndef _LP64 |
304 | 1121 __ pushl(frame_map()->address_for_slot(src ->single_stack_ix())); |
1122 __ popl (frame_map()->address_for_slot(dest->single_stack_ix())); | |
1060 | 1123 #else |
1124 //no pushl on 64bits | |
1125 __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix())); | |
1126 __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1); | |
1127 #endif | |
304 | 1128 } |
0 | 1129 |
1130 } else if (src->is_double_stack()) { | |
304 | 1131 #ifdef _LP64 |
1132 __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix())); | |
1133 __ popptr (frame_map()->address_for_slot(dest->double_stack_ix())); | |
1134 #else | |
0 | 1135 __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0)); |
304 | 1136 // push and pop the part at src + wordSize, adding wordSize for the previous push |
321 | 1137 __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize)); |
321 | 1138 __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize)); |
0 | 1139 __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0)); |
304 | 1140 #endif // _LP64 |
0 | 1141 |
1142 } else { | |
1143 ShouldNotReachHere(); | |
1144 } | |
1145 } | |
1146 | |
1147 | |
1148 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool /* unaligned */) { | |
1149 assert(src->is_address(), "should not call otherwise"); | |
1150 assert(dest->is_register(), "should not call otherwise"); | |
1151 | |
1152 LIR_Address* addr = src->as_address_ptr(); | |
1153 Address from_addr = as_Address(addr); | |
1154 | |
1155 switch (type) { | |
1156 case T_BOOLEAN: // fall through | |
1157 case T_BYTE: // fall through | |
1158 case T_CHAR: // fall through | |
1159 case T_SHORT: | |
1160 if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) { | |
1161 // on pre P6 processors we may get partial register stalls | |
1162 // so blow away the value of to_rinfo before loading a | |
1163 // partial word into it. Do it here so that it precedes | |
1164 // the potential patch point below. | |
304 | 1165 __ xorptr(dest->as_register(), dest->as_register()); |
0 | 1166 } |
1167 break; | |
1168 } | |
1169 | |
1170 PatchingStub* patch = NULL; | |
1171 if (patch_code != lir_patch_none) { | |
1172 patch = new PatchingStub(_masm, PatchingStub::access_field_id); | |
304 | 1173 assert(from_addr.disp() != 0, "must have"); |
0 | 1174 } |
1175 if (info != NULL) { | |
1176 add_debug_info_for_null_check_here(info); | |
1177 } | |
1178 | |
1179 switch (type) { | |
1180 case T_FLOAT: { | |
1181 if (dest->is_single_xmm()) { | |
1182 __ movflt(dest->as_xmm_float_reg(), from_addr); | |
1183 } else { | |
1184 assert(dest->is_single_fpu(), "must be"); | |
1185 assert(dest->fpu_regnr() == 0, "dest must be TOS"); | |
1186 __ fld_s(from_addr); | |
1187 } | |
1188 break; | |
1189 } | |
1190 | |
1191 case T_DOUBLE: { | |
1192 if (dest->is_double_xmm()) { | |
1193 __ movdbl(dest->as_xmm_double_reg(), from_addr); | |
1194 } else { | |
1195 assert(dest->is_double_fpu(), "must be"); | |
1196 assert(dest->fpu_regnrLo() == 0, "dest must be TOS"); | |
1197 __ fld_d(from_addr); | |
1198 } | |
1199 break; | |
1200 } | |
1201 | |
1202 case T_ADDRESS: // fall through | |
1203 case T_OBJECT: // fall through | |
1204 case T_ARRAY: // fall through | |
304 | 1205 #ifdef _LP64 |
1206 __ movptr(dest->as_register(), from_addr); | |
1207 break; | |
1208 #endif // _L64 | |
0 | 1209 case T_INT: |
304 | 1210 // %%% could this be a movl? this is safer but longer instruction |
1211 __ movl2ptr(dest->as_register(), from_addr); | |
0 | 1212 break; |
1213 | |
1214 case T_LONG: { | |
1215 Register to_lo = dest->as_register_lo(); | |
1216 Register to_hi = dest->as_register_hi(); | |
304 | 1217 #ifdef _LP64 |
1218 __ movptr(to_lo, as_Address_lo(addr)); | |
1219 #else | |
0 | 1220 Register base = addr->base()->as_register(); |
1221 Register index = noreg; | |
1222 if (addr->index()->is_register()) { | |
1223 index = addr->index()->as_register(); | |
1224 } | |
1225 if ((base == to_lo && index == to_hi) || | |
1226 (base == to_hi && index == to_lo)) { | |
1227 // addresses with 2 registers are only formed as a result of | |
1228 // array access so this code will never have to deal with | |
1229 // patches or null checks. | |
1230 assert(info == NULL && patch == NULL, "must be"); | |
304 | 1231 __ lea(to_hi, as_Address(addr)); |
0 | 1232 __ movl(to_lo, Address(to_hi, 0)); |
1233 __ movl(to_hi, Address(to_hi, BytesPerWord)); | |
1234 } else if (base == to_lo || index == to_lo) { | |
1235 assert(base != to_hi, "can't be"); | |
1236 assert(index == noreg || (index != base && index != to_hi), "can't handle this"); | |
1237 __ movl(to_hi, as_Address_hi(addr)); | |
1238 if (patch != NULL) { | |
1239 patching_epilog(patch, lir_patch_high, base, info); | |
1240 patch = new PatchingStub(_masm, PatchingStub::access_field_id); | |
1241 patch_code = lir_patch_low; | |
1242 } | |
1243 __ movl(to_lo, as_Address_lo(addr)); | |
1244 } else { | |
1245 assert(index == noreg || (index != base && index != to_lo), "can't handle this"); | |
1246 __ movl(to_lo, as_Address_lo(addr)); | |
1247 if (patch != NULL) { | |
1248 patching_epilog(patch, lir_patch_low, base, info); | |
1249 patch = new PatchingStub(_masm, PatchingStub::access_field_id); | |
1250 patch_code = lir_patch_high; | |
1251 } | |
1252 __ movl(to_hi, as_Address_hi(addr)); | |
1253 } | |
304 | 1254 #endif // _LP64 |
0 | 1255 break; |
1256 } | |
1257 | |
1258 case T_BOOLEAN: // fall through | |
1259 case T_BYTE: { | |
1260 Register dest_reg = dest->as_register(); | |
1261 assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6"); | |
1262 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) { | |
304 | 1263 __ movsbl(dest_reg, from_addr); |
0 | 1264 } else { |
1265 __ movb(dest_reg, from_addr); | |
1266 __ shll(dest_reg, 24); | |
1267 __ sarl(dest_reg, 24); | |
1268 } | |
304 | 1269 // These are unsigned so the zero extension on 64bit is just what we need |
0 | 1270 break; |
1271 } | |
1272 | |
1273 case T_CHAR: { | |
1274 Register dest_reg = dest->as_register(); | |
1275 assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6"); | |
1276 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) { | |
304 | 1277 __ movzwl(dest_reg, from_addr); |
0 | 1278 } else { |
1279 __ movw(dest_reg, from_addr); | |
1280 } | |
304 | 1281 // This is unsigned so the zero extension on 64bit is just what we need |
1282 // __ movl2ptr(dest_reg, dest_reg); | |
0 | 1283 break; |
1284 } | |
1285 | |
1286 case T_SHORT: { | |
1287 Register dest_reg = dest->as_register(); | |
1288 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) { | |
304 | 1289 __ movswl(dest_reg, from_addr); |
0 | 1290 } else { |
1291 __ movw(dest_reg, from_addr); | |
1292 __ shll(dest_reg, 16); | |
1293 __ sarl(dest_reg, 16); | |
1294 } | |
304 | 1295 // Might not be needed in 64bit but certainly doesn't hurt (except for code size) |
1296 __ movl2ptr(dest_reg, dest_reg); | |
0 | 1297 break; |
1298 } | |
1299 | |
1300 default: | |
1301 ShouldNotReachHere(); | |
1302 } | |
1303 | |
1304 if (patch != NULL) { | |
1305 patching_epilog(patch, patch_code, addr->base()->as_register(), info); | |
1306 } | |
1307 | |
1308 if (type == T_ARRAY || type == T_OBJECT) { | |
1309 __ verify_oop(dest->as_register()); | |
1310 } | |
1311 } | |
1312 | |
1313 | |
1314 void LIR_Assembler::prefetchr(LIR_Opr src) { | |
1315 LIR_Address* addr = src->as_address_ptr(); | |
1316 Address from_addr = as_Address(addr); | |
1317 | |
1318 if (VM_Version::supports_sse()) { | |
1319 switch (ReadPrefetchInstr) { | |
1320 case 0: | |
1321 __ prefetchnta(from_addr); break; | |
1322 case 1: | |
1323 __ prefetcht0(from_addr); break; | |
1324 case 2: | |
1325 __ prefetcht2(from_addr); break; | |
1326 default: | |
1327 ShouldNotReachHere(); break; | |
1328 } | |
1329 } else if (VM_Version::supports_3dnow()) { | |
1330 __ prefetchr(from_addr); | |
1331 } | |
1332 } | |
1333 | |
1334 | |
1335 void LIR_Assembler::prefetchw(LIR_Opr src) { | |
1336 LIR_Address* addr = src->as_address_ptr(); | |
1337 Address from_addr = as_Address(addr); | |
1338 | |
1339 if (VM_Version::supports_sse()) { | |
1340 switch (AllocatePrefetchInstr) { | |
1341 case 0: | |
1342 __ prefetchnta(from_addr); break; | |
1343 case 1: | |
1344 __ prefetcht0(from_addr); break; | |
1345 case 2: | |
1346 __ prefetcht2(from_addr); break; | |
1347 case 3: | |
1348 __ prefetchw(from_addr); break; | |
1349 default: | |
1350 ShouldNotReachHere(); break; | |
1351 } | |
1352 } else if (VM_Version::supports_3dnow()) { | |
1353 __ prefetchw(from_addr); | |
1354 } | |
1355 } | |
1356 | |
1357 | |
1358 NEEDS_CLEANUP; // This could be static? | |
1359 Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const { | |
29 | 1360 int elem_size = type2aelembytes(type); |
0 | 1361 switch (elem_size) { |
1362 case 1: return Address::times_1; | |
1363 case 2: return Address::times_2; | |
1364 case 4: return Address::times_4; | |
1365 case 8: return Address::times_8; | |
1366 } | |
1367 ShouldNotReachHere(); | |
1368 return Address::no_scale; | |
1369 } | |
1370 | |
1371 | |
1372 void LIR_Assembler::emit_op3(LIR_Op3* op) { | |
1373 switch (op->code()) { | |
1374 case lir_idiv: | |
1375 case lir_irem: | |
1376 arithmetic_idiv(op->code(), | |
1377 op->in_opr1(), | |
1378 op->in_opr2(), | |
1379 op->in_opr3(), | |
1380 op->result_opr(), | |
1381 op->info()); | |
1382 break; | |
1383 default: ShouldNotReachHere(); break; | |
1384 } | |
1385 } | |
1386 | |
1387 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) { | |
1388 #ifdef ASSERT | |
1389 assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label"); | |
1390 if (op->block() != NULL) _branch_target_blocks.append(op->block()); | |
1391 if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock()); | |
1392 #endif | |
1393 | |
1394 if (op->cond() == lir_cond_always) { | |
1395 if (op->info() != NULL) add_debug_info_for_branch(op->info()); | |
1396 __ jmp (*(op->label())); | |
1397 } else { | |
1398 Assembler::Condition acond = Assembler::zero; | |
1399 if (op->code() == lir_cond_float_branch) { | |
1400 assert(op->ublock() != NULL, "must have unordered successor"); | |
1401 __ jcc(Assembler::parity, *(op->ublock()->label())); | |
1402 switch(op->cond()) { | |
1403 case lir_cond_equal: acond = Assembler::equal; break; | |
1404 case lir_cond_notEqual: acond = Assembler::notEqual; break; | |
1405 case lir_cond_less: acond = Assembler::below; break; | |
1406 case lir_cond_lessEqual: acond = Assembler::belowEqual; break; | |
1407 case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break; | |
1408 case lir_cond_greater: acond = Assembler::above; break; | |
1409 default: ShouldNotReachHere(); | |
1410 } | |
1411 } else { | |
1412 switch (op->cond()) { | |
1413 case lir_cond_equal: acond = Assembler::equal; break; | |
1414 case lir_cond_notEqual: acond = Assembler::notEqual; break; | |
1415 case lir_cond_less: acond = Assembler::less; break; | |
1416 case lir_cond_lessEqual: acond = Assembler::lessEqual; break; | |
1417 case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break; | |
1418 case lir_cond_greater: acond = Assembler::greater; break; | |
1419 case lir_cond_belowEqual: acond = Assembler::belowEqual; break; | |
1420 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break; | |
1421 default: ShouldNotReachHere(); | |
1422 } | |
1423 } | |
1424 __ jcc(acond,*(op->label())); | |
1425 } | |
1426 } | |
1427 | |
1428 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) { | |
1429 LIR_Opr src = op->in_opr(); | |
1430 LIR_Opr dest = op->result_opr(); | |
1431 | |
1432 switch (op->bytecode()) { | |
1433 case Bytecodes::_i2l: | |
304 | 1434 #ifdef _LP64 |
1435 __ movl2ptr(dest->as_register_lo(), src->as_register()); | |
1436 #else | |
0 | 1437 move_regs(src->as_register(), dest->as_register_lo()); |
1438 move_regs(src->as_register(), dest->as_register_hi()); | |
1439 __ sarl(dest->as_register_hi(), 31); | |
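// the arithmetic shift by 31 replicates the sign bit into the high word, completing the 32-bit sign extension to long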
304 | 1440 #endif // LP64 |
0 | 1441 break; |
1442 | |
1443 case Bytecodes::_l2i: | |
1444 move_regs(src->as_register_lo(), dest->as_register()); | |
1445 break; | |
1446 | |
1447 case Bytecodes::_i2b: | |
1448 move_regs(src->as_register(), dest->as_register()); | |
1449 __ sign_extend_byte(dest->as_register()); | |
1450 break; | |
1451 | |
1452 case Bytecodes::_i2c: | |
1453 move_regs(src->as_register(), dest->as_register()); | |
1454 __ andl(dest->as_register(), 0xFFFF); | |
1455 break; | |
1456 | |
1457 case Bytecodes::_i2s: | |
1458 move_regs(src->as_register(), dest->as_register()); | |
1459 __ sign_extend_short(dest->as_register()); | |
1460 break; | |
1461 | |
1462 | |
1463 case Bytecodes::_f2d: | |
1464 case Bytecodes::_d2f: | |
1465 if (dest->is_single_xmm()) { | |
1466 __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg()); | |
1467 } else if (dest->is_double_xmm()) { | |
1468 __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg()); | |
1469 } else { | |
1470 assert(src->fpu() == dest->fpu(), "registers must be equal"); |
1471 // do nothing (float result is rounded later through spilling) | |
1472 } | |
1473 break; | |
1474 | |
1475 case Bytecodes::_i2f: | |
1476 case Bytecodes::_i2d: | |
1477 if (dest->is_single_xmm()) { | |
304 | 1478 __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register()); |
0 | 1479 } else if (dest->is_double_xmm()) { |
304 | 1480 __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register()); |
0 | 1481 } else { |
1482 assert(dest->fpu() == 0, "result must be on TOS"); | |
1483 __ movl(Address(rsp, 0), src->as_register()); | |
1484 __ fild_s(Address(rsp, 0)); | |
1485 } | |
1486 break; | |
1487 | |
1488 case Bytecodes::_f2i: | |
1489 case Bytecodes::_d2i: | |
1490 if (src->is_single_xmm()) { | |
304 | 1491 __ cvttss2sil(dest->as_register(), src->as_xmm_float_reg()); |
0 | 1492 } else if (src->is_double_xmm()) { |
304 | 1493 __ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg()); |
0 | 1494 } else { |
1495 assert(src->fpu() == 0, "input must be on TOS"); | |
1496 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_trunc())); | |
1497 __ fist_s(Address(rsp, 0)); | |
1498 __ movl(dest->as_register(), Address(rsp, 0)); | |
1499 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); | |
1500 } | |
1501 | |
1502 // IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub | |
1503 assert(op->stub() != NULL, "stub required"); | |
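// cvttss2si/cvttsd2si produce the "integer indefinite" value 0x80000000 for NaN and for out-of-range inputs,
// so that value is used to detect when the stub must redo the conversion according to the JLS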
1504 __ cmpl(dest->as_register(), 0x80000000); | |
1505 __ jcc(Assembler::equal, *op->stub()->entry()); | |
1506 __ bind(*op->stub()->continuation()); | |
1507 break; | |
1508 | |
1509 case Bytecodes::_l2f: | |
1510 case Bytecodes::_l2d: | |
1511 assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)"); | |
1512 assert(dest->fpu() == 0, "result must be on TOS"); | |
1513 | |
304 | 1514 __ movptr(Address(rsp, 0), src->as_register_lo()); |
1515 NOT_LP64(__ movl(Address(rsp, BytesPerWord), src->as_register_hi())); | |
0 | 1516 __ fild_d(Address(rsp, 0)); |
1517 // float result is rounded later through spilling | |
1518 break; | |
1519 | |
1520 case Bytecodes::_f2l: | |
1521 case Bytecodes::_d2l: | |
1522 assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)"); | |
1523 assert(src->fpu() == 0, "input must be on TOS"); | |
304 | 1524 assert(dest == FrameMap::long0_opr, "runtime stub places result in these registers"); |
0 | 1525 |
1526 // instruction sequence too long to inline it here | |
1527 { | |
1528 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id))); | |
1529 } | |
1530 break; | |
1531 | |
1532 default: ShouldNotReachHere(); | |
1533 } | |
1534 } | |
1535 | |
1536 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) { | |
1537 if (op->init_check()) { | |
1538 __ cmpl(Address(op->klass()->as_register(), | |
1539 instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), | |
1540 instanceKlass::fully_initialized); | |
1541 add_debug_info_for_null_check_here(op->stub()->info()); | |
1542 __ jcc(Assembler::notEqual, *op->stub()->entry()); | |
1543 } | |
1544 __ allocate_object(op->obj()->as_register(), | |
1545 op->tmp1()->as_register(), | |
1546 op->tmp2()->as_register(), | |
1547 op->header_size(), | |
1548 op->object_size(), | |
1549 op->klass()->as_register(), | |
1550 *op->stub()->entry()); | |
1551 __ bind(*op->stub()->continuation()); | |
1552 } | |
1553 | |
1554 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { | |
1555 if (UseSlowPath || | |
1556 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) || | |
1557 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) { | |
1558 __ jmp(*op->stub()->entry()); | |
1559 } else { | |
1560 Register len = op->len()->as_register(); | |
1561 Register tmp1 = op->tmp1()->as_register(); | |
1562 Register tmp2 = op->tmp2()->as_register(); | |
1563 Register tmp3 = op->tmp3()->as_register(); | |
1564 if (len == tmp1) { | |
1565 tmp1 = tmp3; | |
1566 } else if (len == tmp2) { | |
1567 tmp2 = tmp3; | |
1568 } else if (len == tmp3) { | |
1569 // everything is ok | |
1570 } else { | |
304 | 1571 __ mov(tmp3, len); |
0 | 1572 } |
1573 __ allocate_array(op->obj()->as_register(), | |
1574 len, | |
1575 tmp1, | |
1576 tmp2, | |
1577 arrayOopDesc::header_size(op->type()), | |
1578 array_element_size(op->type()), | |
1579 op->klass()->as_register(), | |
1580 *op->stub()->entry()); | |
1581 } | |
1582 __ bind(*op->stub()->continuation()); | |
1583 } | |
1584 | |
1585 | |
1586 | |
1587 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { | |
1588 LIR_Code code = op->code(); | |
1589 if (code == lir_store_check) { | |
1590 Register value = op->object()->as_register(); | |
1591 Register array = op->array()->as_register(); | |
1592 Register k_RInfo = op->tmp1()->as_register(); | |
1593 Register klass_RInfo = op->tmp2()->as_register(); | |
1594 Register Rtmp1 = op->tmp3()->as_register(); | |
1595 | |
1596 CodeStub* stub = op->stub(); | |
1597 Label done; | |
304 | 1598 __ cmpptr(value, (int32_t)NULL_WORD); |
0 | 1599 __ jcc(Assembler::equal, done); |
1600 add_debug_info_for_null_check_here(op->info_for_exception()); | |
304 | 1601 __ movptr(k_RInfo, Address(array, oopDesc::klass_offset_in_bytes())); |
1602 __ movptr(klass_RInfo, Address(value, oopDesc::klass_offset_in_bytes())); | |
0 | 1603 |
1604 // get instance klass | |
304 | 1605 __ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc))); |
644 | 1606 // perform the fast part of the checking logic |
1607 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, &done, stub->entry(), NULL); |
1608 // call out-of-line instance of __ check_klass_subtype_slow_path(...): |
304 | 1609 __ push(klass_RInfo); |
1610 __ push(k_RInfo); | |
0 | 1611 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); |
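// the stub returns its 0/1 answer on the stack, in the slot of the first push, so the value obtained by the
// second pop below is the result that gets tested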
304 | 1612 __ pop(klass_RInfo); |
1613 __ pop(k_RInfo); | |
1614 // result is a boolean | |
0 | 1615 __ cmpl(k_RInfo, 0); |
1616 __ jcc(Assembler::equal, *stub->entry()); | |
1617 __ bind(done); | |
1618 } else if (op->code() == lir_checkcast) { | |
1619 // we always need a stub for the failure case. | |
1620 CodeStub* stub = op->stub(); | |
1621 Register obj = op->object()->as_register(); | |
1622 Register k_RInfo = op->tmp1()->as_register(); | |
1623 Register klass_RInfo = op->tmp2()->as_register(); | |
1624 Register dst = op->result_opr()->as_register(); | |
1625 ciKlass* k = op->klass(); | |
1626 Register Rtmp1 = noreg; | |
1627 | |
1628 Label done; | |
1629 if (obj == k_RInfo) { | |
1630 k_RInfo = dst; | |
1631 } else if (obj == klass_RInfo) { | |
1632 klass_RInfo = dst; | |
1633 } | |
1634 if (k->is_loaded()) { | |
1635 select_different_registers(obj, dst, k_RInfo, klass_RInfo); | |
1636 } else { | |
1637 Rtmp1 = op->tmp3()->as_register(); | |
1638 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1); | |
1639 } | |
1640 | |
1641 assert_different_registers(obj, k_RInfo, klass_RInfo); | |
1642 if (!k->is_loaded()) { | |
1643 jobject2reg_with_patching(k_RInfo, op->info_for_patch()); | |
1644 } else { | |
304 | 1645 #ifdef _LP64 |
989 | 1646 __ movoop(k_RInfo, k->constant_encoding()); |
304 | 1647 #else |
0 | 1648 k_RInfo = noreg; |
304 | 1649 #endif // _LP64 |
0 | 1650 } |
1651 assert(obj != k_RInfo, "must be different"); | |
304 | 1652 __ cmpptr(obj, (int32_t)NULL_WORD); |
0 | 1653 if (op->profiled_method() != NULL) { |
1654 ciMethod* method = op->profiled_method(); | |
1655 int bci = op->profiled_bci(); | |
1656 | |
1657 Label profile_done; | |
1658 __ jcc(Assembler::notEqual, profile_done); | |
1659 // Object is null; update methodDataOop | |
1660 ciMethodData* md = method->method_data(); | |
1661 if (md == NULL) { | |
1662 bailout("out of memory building methodDataOop"); | |
1663 return; | |
1664 } | |
1665 ciProfileData* data = md->bci_to_data(bci); | |
1666 assert(data != NULL, "need data for checkcast"); | |
1667 assert(data->is_BitData(), "need BitData for checkcast"); | |
1668 Register mdo = klass_RInfo; | |
989 | 1669 __ movoop(mdo, md->constant_encoding()); |
0 | 1670 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset())); |
1671 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant()); | |
1672 __ orl(data_addr, header_bits); | |
1673 __ jmp(done); | |
1674 __ bind(profile_done); | |
1675 } else { | |
1676 __ jcc(Assembler::equal, done); | |
1677 } | |
1678 __ verify_oop(obj); | |
1679 | |
1680 if (op->fast_check()) { | |
1681 // get object class |
1682 // not a safepoint as obj null check happens earlier | |
1683 if (k->is_loaded()) { | |
304 | 1684 #ifdef _LP64 |
1685 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); | |
1686 #else | |
989 | 1687 __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding()); |
304 | 1688 #endif // _LP64 |
0 | 1689 } else { |
304 | 1690 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); |
0 | 1691 |
1692 } | |
1693 __ jcc(Assembler::notEqual, *stub->entry()); | |
1694 __ bind(done); | |
1695 } else { | |
1696 // get object class | |
1697 // not a safepoint as obj null check happens earlier | |
304 | 1698 __ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); |
0 | 1699 if (k->is_loaded()) { |
1700 // See if we get an immediate positive hit | |
304 | 1701 #ifdef _LP64 |
1702 __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset())); | |
1703 #else | |
989 | 1704 __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding()); |
304 | 1705 #endif // _LP64 |
0 | 1706 if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) { |
1707 __ jcc(Assembler::notEqual, *stub->entry()); | |
1708 } else { | |
1709 // See if we get an immediate positive hit | |
1710 __ jcc(Assembler::equal, done); | |
1711 // check for self | |
304 | 1712 #ifdef _LP64 |
1713 __ cmpptr(klass_RInfo, k_RInfo); | |
1714 #else | |
989 | 1715 __ cmpoop(klass_RInfo, k->constant_encoding()); |
304 | 1716 #endif // _LP64 |
0 | 1717 __ jcc(Assembler::equal, done); |
1718 | |
304 | 1719 __ push(klass_RInfo); |
1720 #ifdef _LP64 | |
1721 __ push(k_RInfo); | |
1722 #else | |
989 | 1723 __ pushoop(k->constant_encoding()); |
304 | 1724 #endif // _LP64 |
0 | 1725 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); |
304 | 1726 __ pop(klass_RInfo); |
1727 __ pop(klass_RInfo); | |
1728 // result is a boolean | |
0 | 1729 __ cmpl(klass_RInfo, 0); |
1730 __ jcc(Assembler::equal, *stub->entry()); | |
1731 } | |
1732 __ bind(done); | |
1733 } else { | |
644 | 1734 // perform the fast part of the checking logic |
1735 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, &done, stub->entry(), NULL); |
1736 // call out-of-line instance of __ check_klass_subtype_slow_path(...): |
304 | 1737 __ push(klass_RInfo); |
1738 __ push(k_RInfo); | |
0 | 1739 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); |
304 | 1740 __ pop(klass_RInfo); |
1741 __ pop(k_RInfo); | |
1742 // result is a boolean | |
0 | 1743 __ cmpl(k_RInfo, 0); |
1744 __ jcc(Assembler::equal, *stub->entry()); | |
1745 __ bind(done); | |
1746 } | |
1747 | |
1748 } | |
1749 if (dst != obj) { | |
304 | 1750 __ mov(dst, obj); |
0 | 1751 } |
1752 } else if (code == lir_instanceof) { | |
1753 Register obj = op->object()->as_register(); | |
1754 Register k_RInfo = op->tmp1()->as_register(); | |
1755 Register klass_RInfo = op->tmp2()->as_register(); | |
1756 Register dst = op->result_opr()->as_register(); | |
1757 ciKlass* k = op->klass(); | |
1758 | |
1759 Label done; | |
1760 Label zero; | |
1761 Label one; | |
1762 if (obj == k_RInfo) { | |
1763 k_RInfo = klass_RInfo; | |
1764 klass_RInfo = obj; | |
1765 } | |
1766 // patching may screw with our temporaries on sparc, | |
1767 // so let's do it before loading the class | |
1768 if (!k->is_loaded()) { | |
1769 jobject2reg_with_patching(k_RInfo, op->info_for_patch()); | |
304 | 1770 } else { |
989 | 1771 LP64_ONLY(__ movoop(k_RInfo, k->constant_encoding())); |
0 | 1772 } |
1773 assert(obj != k_RInfo, "must be different"); | |
1774 | |
1775 __ verify_oop(obj); | |
1776 if (op->fast_check()) { | |
304 | 1777 __ cmpptr(obj, (int32_t)NULL_WORD); |
0 | 1778 __ jcc(Assembler::equal, zero); |
1779 // get object class | |
1780 // not a safepoint as obj null check happens earlier | |
304 | 1781 if (LP64_ONLY(false &&) k->is_loaded()) { |
989 | 1782 NOT_LP64(__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding())); |
0 | 1783 k_RInfo = noreg; |
1784 } else { | |
304 | 1785 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); |
0 | 1786 |
1787 } | |
1788 __ jcc(Assembler::equal, one); | |
1789 } else { | |
1790 // get object class | |
1791 // not a safepoint as obj null check happens earlier | |
304 | 1792 __ cmpptr(obj, (int32_t)NULL_WORD); |
0 | 1793 __ jcc(Assembler::equal, zero); |
304 | 1794 __ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); |
1795 | |
1796 #ifndef _LP64 | |
0 | 1797 if (k->is_loaded()) { |
1798 // See if we get an immediate positive hit | |
989 | 1799 __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding()); |
0 | 1800 __ jcc(Assembler::equal, one); |
1801 if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() == k->super_check_offset()) { | |
1802 // check for self | |
989 | 1803 __ cmpoop(klass_RInfo, k->constant_encoding()); |
0 | 1804 __ jcc(Assembler::equal, one); |
304 | 1805 __ push(klass_RInfo); |
989 | 1806 __ pushoop(k->constant_encoding()); |
0 | 1807 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); |
304 | 1808 __ pop(klass_RInfo); |
1809 __ pop(dst); | |
0 | 1810 __ jmp(done); |
1811 } | |
644 | 1812 } |
1813 else // next block is unconditional if LP64: |
304 | 1814 #endif // LP64 |
644 | 1815 { |
0 | 1816 assert(dst != klass_RInfo && dst != k_RInfo, "need 3 registers"); |
1817 | |
644 | 1818 // perform the fast part of the checking logic |
1819 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, dst, &one, &zero, NULL); |
1820 // call out-of-line instance of __ check_klass_subtype_slow_path(...): |
304 | 1821 __ push(klass_RInfo); |
1822 __ push(k_RInfo); | |
0 | 1823 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); |
304 | 1824 __ pop(klass_RInfo); |
1825 __ pop(dst); | |
0 | 1826 __ jmp(done); |
1827 } | |
1828 } | |
1829 __ bind(zero); | |
304 | 1830 __ xorptr(dst, dst); |
0 | 1831 __ jmp(done); |
1832 __ bind(one); | |
304 | 1833 __ movptr(dst, 1); |
0 | 1834 __ bind(done); |
1835 } else { | |
1836 ShouldNotReachHere(); | |
1837 } | |
1838 | |
1839 } | |
1840 | |
1841 | |
1842 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { | |
304 | 1843 if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) { |
0 | 1844 assert(op->cmp_value()->as_register_lo() == rax, "wrong register"); |
1845 assert(op->cmp_value()->as_register_hi() == rdx, "wrong register"); | |
1846 assert(op->new_value()->as_register_lo() == rbx, "wrong register"); | |
1847 assert(op->new_value()->as_register_hi() == rcx, "wrong register"); | |
1848 Register addr = op->addr()->as_register(); | |
1849 if (os::is_MP()) { | |
1850 __ lock(); | |
1851 } | |
304 | 1852 NOT_LP64(__ cmpxchg8(Address(addr, 0))); |
1853 | |
1854 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) { | |
1855 NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");) | |
1856 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo()); | |
0 | 1857 Register newval = op->new_value()->as_register(); |
1858 Register cmpval = op->cmp_value()->as_register(); | |
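// cmpxchg implicitly compares against rax/eax and writes the old memory value back into it,
// hence the fixed-register constraints asserted below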
1859 assert(cmpval == rax, "wrong register"); | |
1860 assert(newval != NULL, "new val must be register"); | |
1861 assert(cmpval != newval, "cmp and new values must be in different registers"); | |
1862 assert(cmpval != addr, "cmp and addr must be in different registers"); | |
1863 assert(newval != addr, "new value and addr must be in different registers"); | |
1864 if (os::is_MP()) { | |
1865 __ lock(); | |
1866 } | |
304 | 1867 if ( op->code() == lir_cas_obj) { |
1868 __ cmpxchgptr(newval, Address(addr, 0)); | |
1869 } else if (op->code() == lir_cas_int) { | |
1870 __ cmpxchgl(newval, Address(addr, 0)); | |
1871 } else { | |
1872 LP64_ONLY(__ cmpxchgq(newval, Address(addr, 0))); | |
1873 } | |
1874 #ifdef _LP64 | |
1875 } else if (op->code() == lir_cas_long) { | |
1876 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo()); | |
1877 Register newval = op->new_value()->as_register_lo(); | |
1878 Register cmpval = op->cmp_value()->as_register_lo(); | |
1879 assert(cmpval == rax, "wrong register"); | |
1880 assert(newval != NULL, "new val must be register"); | |
1881 assert(cmpval != newval, "cmp and new values must be in different registers"); | |
1882 assert(cmpval != addr, "cmp and addr must be in different registers"); | |
1883 assert(newval != addr, "new value and addr must be in different registers"); | |
1884 if (os::is_MP()) { | |
1885 __ lock(); | |
1886 } | |
1887 __ cmpxchgq(newval, Address(addr, 0)); | |
1888 #endif // _LP64 | |
0 | 1889 } else { |
1890 Unimplemented(); | |
1891 } | |
1892 } | |
1893 | |
1894 | |
1895 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result) { | |
1896 Assembler::Condition acond, ncond; | |
1897 switch (condition) { | |
1898 case lir_cond_equal: acond = Assembler::equal; ncond = Assembler::notEqual; break; | |
1899 case lir_cond_notEqual: acond = Assembler::notEqual; ncond = Assembler::equal; break; | |
1900 case lir_cond_less: acond = Assembler::less; ncond = Assembler::greaterEqual; break; | |
1901 case lir_cond_lessEqual: acond = Assembler::lessEqual; ncond = Assembler::greater; break; | |
1902 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less; break; | |
1903 case lir_cond_greater: acond = Assembler::greater; ncond = Assembler::lessEqual; break; | |
1904 case lir_cond_belowEqual: acond = Assembler::belowEqual; ncond = Assembler::above; break; | |
1905 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; ncond = Assembler::below; break; | |
1906 default: ShouldNotReachHere(); | |
1907 } | |
1908 | |
1909 if (opr1->is_cpu_register()) { | |
1910 reg2reg(opr1, result); | |
1911 } else if (opr1->is_stack()) { | |
1912 stack2reg(opr1, result, result->type()); | |
1913 } else if (opr1->is_constant()) { | |
1914 const2reg(opr1, result, lir_patch_none, NULL); | |
1915 } else { | |
1916 ShouldNotReachHere(); | |
1917 } | |
1918 | |
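// result already holds opr1; on the branch-free path below a cmov with the negated condition overwrites it
// with opr2, which is why opr2 must not have been clobbered by that first move (see the asserts)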
1919 if (VM_Version::supports_cmov() && !opr2->is_constant()) { | |
1920 // optimized version that does not require a branch | |
1921 if (opr2->is_single_cpu()) { | |
1922 assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move"); | |
304 | 1923 __ cmov(ncond, result->as_register(), opr2->as_register()); |
0 | 1924 } else if (opr2->is_double_cpu()) { |
1925 assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); | |
1926 assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); | |
304 | 1927 __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo()); |
1928 NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());) | |
0 | 1929 } else if (opr2->is_single_stack()) { |
1930 __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix())); | |
1931 } else if (opr2->is_double_stack()) { | |
304 | 1932 __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes)); |
1933 NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));) | |
0 | 1934 } else { |
1935 ShouldNotReachHere(); | |
1936 } | |
1937 | |
1938 } else { | |
1939 Label skip; | |
1940 __ jcc (acond, skip); | |
1941 if (opr2->is_cpu_register()) { | |
1942 reg2reg(opr2, result); | |
1943 } else if (opr2->is_stack()) { | |
1944 stack2reg(opr2, result, result->type()); | |
1945 } else if (opr2->is_constant()) { | |
1946 const2reg(opr2, result, lir_patch_none, NULL); | |
1947 } else { | |
1948 ShouldNotReachHere(); | |
1949 } | |
1950 __ bind(skip); | |
1951 } | |
1952 } | |
1953 | |
1954 | |
1955 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) { | |
1956 assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); | |
1957 | |
1958 if (left->is_single_cpu()) { | |
1959 assert(left == dest, "left and dest must be equal"); | |
1960 Register lreg = left->as_register(); | |
1961 | |
1962 if (right->is_single_cpu()) { | |
1963 // cpu register - cpu register | |
1964 Register rreg = right->as_register(); | |
1965 switch (code) { | |
1966 case lir_add: __ addl (lreg, rreg); break; | |
1967 case lir_sub: __ subl (lreg, rreg); break; | |
1968 case lir_mul: __ imull(lreg, rreg); break; | |
1969 default: ShouldNotReachHere(); | |
1970 } | |
1971 | |
1972 } else if (right->is_stack()) { | |
1973 // cpu register - stack | |
1974 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); | |
1975 switch (code) { | |
1976 case lir_add: __ addl(lreg, raddr); break; | |
1977 case lir_sub: __ subl(lreg, raddr); break; | |
1978 default: ShouldNotReachHere(); | |
1979 } | |
1980 | |
1981 } else if (right->is_constant()) { | |
1982 // cpu register - constant | |
1983 jint c = right->as_constant_ptr()->as_jint(); | |
1984 switch (code) { | |
1985 case lir_add: { | |
1986 __ increment(lreg, c); | |
1987 break; | |
1988 } | |
1989 case lir_sub: { | |
1990 __ decrement(lreg, c); | |
1991 break; | |
1992 } | |
1993 default: ShouldNotReachHere(); | |
1994 } | |
1995 | |
1996 } else { | |
1997 ShouldNotReachHere(); | |
1998 } | |
1999 | |
2000 } else if (left->is_double_cpu()) { | |
2001 assert(left == dest, "left and dest must be equal"); | |
2002 Register lreg_lo = left->as_register_lo(); | |
2003 Register lreg_hi = left->as_register_hi(); | |
2004 | |
2005 if (right->is_double_cpu()) { | |
2006 // cpu register - cpu register | |
2007 Register rreg_lo = right->as_register_lo(); | |
2008 Register rreg_hi = right->as_register_hi(); | |
304 | 2009 NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi)); |
2010 LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo)); | |
0 | 2011 switch (code) { |
2012 case lir_add: | |
304 | 2013 __ addptr(lreg_lo, rreg_lo); |
2014 NOT_LP64(__ adcl(lreg_hi, rreg_hi)); | |
0 | 2015 break; |
2016 case lir_sub: | |
304 | 2017 __ subptr(lreg_lo, rreg_lo); |
2018 NOT_LP64(__ sbbl(lreg_hi, rreg_hi)); | |
0 | 2019 break; |
2020 case lir_mul: | |
304 | 2021 #ifdef _LP64 |
2022 __ imulq(lreg_lo, rreg_lo); | |
2023 #else | |
0 | 2024 assert(lreg_lo == rax && lreg_hi == rdx, "must be"); |
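// 64x64->64 bit multiply on 32-bit x86: mull leaves lo*lo in edx:eax, the two imull cross products
// (hi*lo and lo*hi) are added into edx, and bits above 2^64 are discarded as Java semantics allow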
2025 __ imull(lreg_hi, rreg_lo); | |
2026 __ imull(rreg_hi, lreg_lo); | |
2027 __ addl (rreg_hi, lreg_hi); | |
2028 __ mull (rreg_lo); | |
2029 __ addl (lreg_hi, rreg_hi); | |
304 | 2030 #endif // _LP64 |
0 | 2031 break; |
2032 default: | |
2033 ShouldNotReachHere(); | |
2034 } | |
2035 | |
2036 } else if (right->is_constant()) { | |
2037 // cpu register - constant | |
304 | 2038 #ifdef _LP64 |
2039 jlong c = right->as_constant_ptr()->as_jlong_bits(); | |
2040 __ movptr(r10, (intptr_t) c); | |
2041 switch (code) { | |
2042 case lir_add: | |
2043 __ addptr(lreg_lo, r10); | |
2044 break; | |
2045 case lir_sub: | |
2046 __ subptr(lreg_lo, r10); | |
2047 break; | |
2048 default: | |
2049 ShouldNotReachHere(); | |
2050 } | |
2051 #else | |
0 | 2052 jint c_lo = right->as_constant_ptr()->as_jint_lo(); |
2053 jint c_hi = right->as_constant_ptr()->as_jint_hi(); | |
2054 switch (code) { | |
2055 case lir_add: | |
304 | 2056 __ addptr(lreg_lo, c_lo); |
0 | 2057 __ adcl(lreg_hi, c_hi); |
2058 break; | |
2059 case lir_sub: | |
304 | 2060 __ subptr(lreg_lo, c_lo); |
0 | 2061 __ sbbl(lreg_hi, c_hi); |
2062 break; | |
2063 default: | |
2064 ShouldNotReachHere(); | |
2065 } | |
304 | 2066 #endif // _LP64 |
0 | 2067 |
2068 } else { | |
2069 ShouldNotReachHere(); | |
2070 } | |
2071 | |
2072 } else if (left->is_single_xmm()) { | |
2073 assert(left == dest, "left and dest must be equal"); | |
2074 XMMRegister lreg = left->as_xmm_float_reg(); | |
2075 | |
2076 if (right->is_single_xmm()) { | |
2077 XMMRegister rreg = right->as_xmm_float_reg(); | |
2078 switch (code) { | |
2079 case lir_add: __ addss(lreg, rreg); break; | |
2080 case lir_sub: __ subss(lreg, rreg); break; | |
2081 case lir_mul_strictfp: // fall through | |
2082 case lir_mul: __ mulss(lreg, rreg); break; | |
2083 case lir_div_strictfp: // fall through | |
2084 case lir_div: __ divss(lreg, rreg); break; | |
2085 default: ShouldNotReachHere(); | |
2086 } | |
2087 } else { | |
2088 Address raddr; | |
2089 if (right->is_single_stack()) { | |
2090 raddr = frame_map()->address_for_slot(right->single_stack_ix()); | |
2091 } else if (right->is_constant()) { | |
2092 // hack for now | |
2093 raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat()))); | |
2094 } else { | |
2095 ShouldNotReachHere(); | |
2096 } | |
2097 switch (code) { | |
2098 case lir_add: __ addss(lreg, raddr); break; | |
2099 case lir_sub: __ subss(lreg, raddr); break; | |
2100 case lir_mul_strictfp: // fall through | |
2101 case lir_mul: __ mulss(lreg, raddr); break; | |
2102 case lir_div_strictfp: // fall through | |
2103 case lir_div: __ divss(lreg, raddr); break; | |
2104 default: ShouldNotReachHere(); | |
2105 } | |
2106 } | |
2107 | |
2108 } else if (left->is_double_xmm()) { | |
2109 assert(left == dest, "left and dest must be equal"); | |
2110 | |
2111 XMMRegister lreg = left->as_xmm_double_reg(); | |
2112 if (right->is_double_xmm()) { | |
2113 XMMRegister rreg = right->as_xmm_double_reg(); | |
2114 switch (code) { | |
2115 case lir_add: __ addsd(lreg, rreg); break; | |
2116 case lir_sub: __ subsd(lreg, rreg); break; | |
2117 case lir_mul_strictfp: // fall through | |
2118 case lir_mul: __ mulsd(lreg, rreg); break; | |
2119 case lir_div_strictfp: // fall through | |
2120 case lir_div: __ divsd(lreg, rreg); break; | |
2121 default: ShouldNotReachHere(); | |
2122 } | |
2123 } else { | |
2124 Address raddr; | |
2125 if (right->is_double_stack()) { | |
2126 raddr = frame_map()->address_for_slot(right->double_stack_ix()); | |
2127 } else if (right->is_constant()) { | |
2128 // hack for now | |
2129 raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble()))); | |
2130 } else { | |
2131 ShouldNotReachHere(); | |
2132 } | |
2133 switch (code) { | |
2134 case lir_add: __ addsd(lreg, raddr); break; | |
2135 case lir_sub: __ subsd(lreg, raddr); break; | |
2136 case lir_mul_strictfp: // fall through | |
2137 case lir_mul: __ mulsd(lreg, raddr); break; | |
2138 case lir_div_strictfp: // fall through | |
2139 case lir_div: __ divsd(lreg, raddr); break; | |
2140 default: ShouldNotReachHere(); | |
2141 } | |
2142 } | |
2143 | |
2144 } else if (left->is_single_fpu()) { | |
2145 assert(dest->is_single_fpu(), "fpu stack allocation required"); | |
2146 | |
2147 if (right->is_single_fpu()) { | |
2148 arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack); | |
2149 | |
2150 } else { | |
2151 assert(left->fpu_regnr() == 0, "left must be on TOS"); | |
2152 assert(dest->fpu_regnr() == 0, "dest must be on TOS"); | |
2153 | |
2154 Address raddr; | |
2155 if (right->is_single_stack()) { | |
2156 raddr = frame_map()->address_for_slot(right->single_stack_ix()); | |
2157 } else if (right->is_constant()) { | |
2158 address const_addr = float_constant(right->as_jfloat()); | |
2159 assert(const_addr != NULL, "incorrect float/double constant maintenance"); |
2160 // hack for now | |
2161 raddr = __ as_Address(InternalAddress(const_addr)); | |
2162 } else { | |
2163 ShouldNotReachHere(); | |
2164 } | |
2165 | |
2166 switch (code) { | |
2167 case lir_add: __ fadd_s(raddr); break; | |
2168 case lir_sub: __ fsub_s(raddr); break; | |
2169 case lir_mul_strictfp: // fall through | |
2170 case lir_mul: __ fmul_s(raddr); break; | |
2171 case lir_div_strictfp: // fall through | |
2172 case lir_div: __ fdiv_s(raddr); break; | |
2173 default: ShouldNotReachHere(); | |
2174 } | |
2175 } | |
2176 | |
2177 } else if (left->is_double_fpu()) { | |
2178 assert(dest->is_double_fpu(), "fpu stack allocation required"); | |
2179 | |
2180 if (code == lir_mul_strictfp || code == lir_div_strictfp) { | |
2181 // Double values require special handling for strictfp mul/div on x86 | |
2182 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1())); | |
2183 __ fmulp(left->fpu_regnrLo() + 1); | |
2184 } | |
2185 | |
2186 if (right->is_double_fpu()) { | |
2187 arith_fpu_implementation(code, left->fpu_regnrLo(), right->fpu_regnrLo(), dest->fpu_regnrLo(), pop_fpu_stack); | |
2188 | |
2189 } else { | |
2190 assert(left->fpu_regnrLo() == 0, "left must be on TOS"); | |
2191 assert(dest->fpu_regnrLo() == 0, "dest must be on TOS"); | |
2192 | |
2193 Address raddr; | |
2194 if (right->is_double_stack()) { | |
2195 raddr = frame_map()->address_for_slot(right->double_stack_ix()); | |
2196 } else if (right->is_constant()) { | |
2197 // hack for now | |
2198 raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble()))); | |
2199 } else { | |
2200 ShouldNotReachHere(); | |
2201 } | |
2202 | |
2203 switch (code) { | |
2204 case lir_add: __ fadd_d(raddr); break; | |
2205 case lir_sub: __ fsub_d(raddr); break; | |
2206 case lir_mul_strictfp: // fall through | |
2207 case lir_mul: __ fmul_d(raddr); break; | |
2208 case lir_div_strictfp: // fall through | |
2209 case lir_div: __ fdiv_d(raddr); break; | |
2210 default: ShouldNotReachHere(); | |
2211 } | |
2212 } | |
2213 | |
2214 if (code == lir_mul_strictfp || code == lir_div_strictfp) { | |
2215 // Double values require special handling for strictfp mul/div on x86 | |
2216 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2())); | |
2217 __ fmulp(dest->fpu_regnrLo() + 1); | |
2218 } | |
2219 | |
2220 } else if (left->is_single_stack() || left->is_address()) { | |
2221 assert(left == dest, "left and dest must be equal"); | |
2222 | |
2223 Address laddr; | |
2224 if (left->is_single_stack()) { | |
2225 laddr = frame_map()->address_for_slot(left->single_stack_ix()); | |
2226 } else if (left->is_address()) { | |
2227 laddr = as_Address(left->as_address_ptr()); | |
2228 } else { | |
2229 ShouldNotReachHere(); | |
2230 } | |
2231 | |
2232 if (right->is_single_cpu()) { | |
2233 Register rreg = right->as_register(); | |
2234 switch (code) { | |
2235 case lir_add: __ addl(laddr, rreg); break; | |
2236 case lir_sub: __ subl(laddr, rreg); break; | |
2237 default: ShouldNotReachHere(); | |
2238 } | |
2239 } else if (right->is_constant()) { | |
2240 jint c = right->as_constant_ptr()->as_jint(); | |
2241 switch (code) { | |
2242 case lir_add: { | |
304 | 2243 __ incrementl(laddr, c); |
0 | 2244 break; |
2245 } | |
2246 case lir_sub: { | |
304 | 2247 __ decrementl(laddr, c); |
0 | 2248 break; |
2249 } | |
2250 default: ShouldNotReachHere(); | |
2251 } | |
2252 } else { | |
2253 ShouldNotReachHere(); | |
2254 } | |
2255 | |
2256 } else { | |
2257 ShouldNotReachHere(); | |
2258 } | |
2259 } | |
2260 | |
2261 void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { | |
2262 assert(pop_fpu_stack || (left_index == dest_index || right_index == dest_index), "invalid LIR"); | |
2263 assert(!pop_fpu_stack || (left_index - 1 == dest_index || right_index - 1 == dest_index), "invalid LIR"); | |
2264 assert(left_index == 0 || right_index == 0, "either must be on top of stack"); | |
2265 | |
2266 bool left_is_tos = (left_index == 0); | |
2267 bool dest_is_tos = (dest_index == 0); | |
2268 int non_tos_index = (left_is_tos ? right_index : left_index); | |
2269 | |
2270 switch (code) { | |
2271 case lir_add: | |
2272 if (pop_fpu_stack) __ faddp(non_tos_index); | |
2273 else if (dest_is_tos) __ fadd (non_tos_index); | |
2274 else __ fadda(non_tos_index); | |
2275 break; | |
2276 | |
2277 case lir_sub: | |
2278 if (left_is_tos) { | |
2279 if (pop_fpu_stack) __ fsubrp(non_tos_index); | |
2280 else if (dest_is_tos) __ fsub (non_tos_index); | |
2281 else __ fsubra(non_tos_index); | |
2282 } else { | |
2283 if (pop_fpu_stack) __ fsubp (non_tos_index); | |
2284 else if (dest_is_tos) __ fsubr (non_tos_index); | |
2285 else __ fsuba (non_tos_index); | |
2286 } | |
2287 break; | |
2288 | |
2289 case lir_mul_strictfp: // fall through | |
2290 case lir_mul: | |
2291 if (pop_fpu_stack) __ fmulp(non_tos_index); | |
2292 else if (dest_is_tos) __ fmul (non_tos_index); | |
2293 else __ fmula(non_tos_index); | |
2294 break; | |
2295 | |
2296 case lir_div_strictfp: // fall through | |
2297 case lir_div: | |
2298 if (left_is_tos) { | |
2299 if (pop_fpu_stack) __ fdivrp(non_tos_index); | |
2300 else if (dest_is_tos) __ fdiv (non_tos_index); | |
2301 else __ fdivra(non_tos_index); | |
2302 } else { | |
2303 if (pop_fpu_stack) __ fdivp (non_tos_index); | |
2304 else if (dest_is_tos) __ fdivr (non_tos_index); | |
2305 else __ fdiva (non_tos_index); | |
2306 } | |
2307 break; | |
2308 | |
2309 case lir_rem: | |
2310 assert(left_is_tos && dest_is_tos && right_index == 1, "must be guaranteed by FPU stack allocation"); | |
2311 __ fremr(noreg); | |
2312 break; | |
2313 | |
2314 default: | |
2315 ShouldNotReachHere(); | |
2316 } | |
2317 } | |
2318 | |
2319 | |
2320 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) { | |
2321 if (value->is_double_xmm()) { | |
2322 switch(code) { | |
2323 case lir_abs : | |
2324 { | |
2325 if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) { | |
2326 __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); | |
2327 } | |
2328 __ andpd(dest->as_xmm_double_reg(), | |
2329 ExternalAddress((address)double_signmask_pool)); | |
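// andpd with the sign-mask constant clears only the sign bit of the double, giving the absolute value without a branch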
2330 } | |
2331 break; | |
2332 | |
2333 case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break; | |
2334 // all other intrinsics are not available in the SSE instruction set, so FPU is used | |
2335 default : ShouldNotReachHere(); | |
2336 } | |
2337 | |
2338 } else if (value->is_double_fpu()) { | |
2339 assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS"); | |
2340 switch(code) { | |
2341 case lir_log : __ flog() ; break; | |
2342 case lir_log10 : __ flog10() ; break; | |
2343 case lir_abs : __ fabs() ; break; | |
2344 case lir_sqrt : __ fsqrt(); break; | |
2345 case lir_sin : | |
2346 // Should consider not saving rbx if not necessary |
2347 __ trigfunc('s', op->as_Op2()->fpu_stack_size()); | |
2348 break; | |
2349 case lir_cos : | |
2350 // Should consider not saving rbx if not necessary |
2351 assert(op->as_Op2()->fpu_stack_size() <= 6, "sin and cos need two free stack slots"); | |
2352 __ trigfunc('c', op->as_Op2()->fpu_stack_size()); | |
2353 break; | |
2354 case lir_tan : | |
2355 // Should consider not saving rbx if not necessary |
2356 __ trigfunc('t', op->as_Op2()->fpu_stack_size()); | |
2357 break; | |
2358 default : ShouldNotReachHere(); | |
2359 } | |
2360 } else { | |
2361 Unimplemented(); | |
2362 } | |
2363 } | |
2364 | |
2365 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) { | |
2366 // assert(left->destroys_register(), "check"); | |
2367 if (left->is_single_cpu()) { | |
2368 Register reg = left->as_register(); | |
2369 if (right->is_constant()) { | |
2370 int val = right->as_constant_ptr()->as_jint(); | |
2371 switch (code) { | |
2372 case lir_logic_and: __ andl (reg, val); break; | |
2373 case lir_logic_or: __ orl (reg, val); break; | |
2374 case lir_logic_xor: __ xorl (reg, val); break; | |
2375 default: ShouldNotReachHere(); | |
2376 } | |
2377 } else if (right->is_stack()) { | |
2378 // added support for stack operands | |
2379 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); | |
2380 switch (code) { | |
2381 case lir_logic_and: __ andl (reg, raddr); break; | |
2382 case lir_logic_or: __ orl (reg, raddr); break; | |
2383 case lir_logic_xor: __ xorl (reg, raddr); break; | |
2384 default: ShouldNotReachHere(); | |
2385 } | |
2386 } else { | |
2387 Register rright = right->as_register(); | |
2388 switch (code) { | |
304 | 2389 case lir_logic_and: __ andptr (reg, rright); break; |
2390 case lir_logic_or : __ orptr (reg, rright); break; | |
2391 case lir_logic_xor: __ xorptr (reg, rright); break; | |
0 | 2392 default: ShouldNotReachHere(); |
2393 } | |
2394 } | |
2395 move_regs(reg, dst->as_register()); | |
2396 } else { | |
2397 Register l_lo = left->as_register_lo(); | |
2398 Register l_hi = left->as_register_hi(); | |
2399 if (right->is_constant()) { | |
304 | 2400 #ifdef _LP64 |
2401 __ mov64(rscratch1, right->as_constant_ptr()->as_jlong()); | |
2402 switch (code) { | |
2403 case lir_logic_and: | |
2404 __ andq(l_lo, rscratch1); | |
2405 break; | |
2406 case lir_logic_or: | |
2407 __ orq(l_lo, rscratch1); | |
2408 break; | |
2409 case lir_logic_xor: | |
2410 __ xorq(l_lo, rscratch1); | |
2411 break; | |
2412 default: ShouldNotReachHere(); | |
2413 } | |
2414 #else | |
0 | 2415 int r_lo = right->as_constant_ptr()->as_jint_lo(); |
2416 int r_hi = right->as_constant_ptr()->as_jint_hi(); | |
2417 switch (code) { | |
2418 case lir_logic_and: | |
2419 __ andl(l_lo, r_lo); | |
2420 __ andl(l_hi, r_hi); | |
2421 break; | |
2422 case lir_logic_or: | |
2423 __ orl(l_lo, r_lo); | |
2424 __ orl(l_hi, r_hi); | |
2425 break; | |
2426 case lir_logic_xor: | |
2427 __ xorl(l_lo, r_lo); | |
2428 __ xorl(l_hi, r_hi); | |
2429 break; | |
2430 default: ShouldNotReachHere(); | |
2431 } | |
304 | 2432 #endif // _LP64 |
0 | 2433 } else { |
2434 Register r_lo = right->as_register_lo(); | |
2435 Register r_hi = right->as_register_hi(); | |
2436 assert(l_lo != r_hi, "overwriting registers"); | |
2437 switch (code) { | |
2438 case lir_logic_and: | |
304 | 2439 __ andptr(l_lo, r_lo); |
2440 NOT_LP64(__ andptr(l_hi, r_hi);) | |
0 | 2441 break; |
2442 case lir_logic_or: | |
304 | 2443 __ orptr(l_lo, r_lo); |
2444 NOT_LP64(__ orptr(l_hi, r_hi);) | |
0 | 2445 break; |
2446 case lir_logic_xor: | |
304 | 2447 __ xorptr(l_lo, r_lo); |
2448 NOT_LP64(__ xorptr(l_hi, r_hi);) | |
0 | 2449 break; |
2450 default: ShouldNotReachHere(); | |
2451 } | |
2452 } | |
2453 | |
2454 Register dst_lo = dst->as_register_lo(); | |
2455 Register dst_hi = dst->as_register_hi(); | |
2456 | |
304 | 2457 #ifdef _LP64 |
2458 move_regs(l_lo, dst_lo); | |
2459 #else | |
0 | 2460 if (dst_lo == l_hi) { |
2461 assert(dst_hi != l_lo, "overwriting registers"); | |
2462 move_regs(l_hi, dst_hi); | |
2463 move_regs(l_lo, dst_lo); | |
2464 } else { | |
2465 assert(dst_lo != l_hi, "overwriting registers"); | |
2466 move_regs(l_lo, dst_lo); | |
2467 move_regs(l_hi, dst_hi); | |
2468 } | |
304 | 2469 #endif // _LP64 |
0 | 2470 } |
2471 } | |
2472 | |
2473 | |
0 | 2474 // we assume that rax and rdx can be overwritten |
2475 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { | |
2476 | |
2477 assert(left->is_single_cpu(), "left must be register"); | |
2478 assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant"); | |
2479 assert(result->is_single_cpu(), "result must be register"); | |
2480 | |
2481 // assert(left->destroys_register(), "check"); | |
2482 // assert(right->destroys_register(), "check"); | |
2483 | |
2484 Register lreg = left->as_register(); | |
2485 Register dreg = result->as_register(); | |
2486 | |
2487 if (right->is_constant()) { | |
2488 int divisor = right->as_constant_ptr()->as_jint(); | |
2489 assert(divisor > 0 && is_power_of_2(divisor), "must be"); | |
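// idiv by 2^n: cdql leaves rdx = 0 for a non-negative dividend and -1 for a negative one; adding divisor-1
// only in the negative case (via rdx) before the sarl turns the floor shift into the round-toward-zero
// division Java requires, e.g. -7/2 == -3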
2490 if (code == lir_idiv) { | |
2491 assert(lreg == rax, "must be rax,"); | |
2492 assert(temp->as_register() == rdx, "tmp register must be rdx"); | |
2493 __ cdql(); // sign extend into rdx:rax | |
2494 if (divisor == 2) { | |
2495 __ subl(lreg, rdx); | |
2496 } else { | |
2497 __ andl(rdx, divisor - 1); | |
2498 __ addl(lreg, rdx); | |
2499 } | |
2500 __ sarl(lreg, log2_intptr(divisor)); | |
2501 move_regs(lreg, dreg); | |
2502 } else if (code == lir_irem) { | |
2503 Label done; | |
304 | 2504 __ mov(dreg, lreg); |
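// keep the sign bit and the low log2(divisor) bits; a non-negative dividend is already the remainder, a
// negative one is fixed up below to (low bits) - divisor, or 0 when the low bits are zero, e.g. -7 % 4 == -3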
0 | 2505 __ andl(dreg, 0x80000000 | (divisor - 1)); |
2506 __ jcc(Assembler::positive, done); | |
2507 __ decrement(dreg); | |
2508 __ orl(dreg, ~(divisor - 1)); | |
2509 __ increment(dreg); | |
2510 __ bind(done); | |
2511 } else { | |
2512 ShouldNotReachHere(); | |
2513 } | |
2514 } else { | |
2515 Register rreg = right->as_register(); | |
2516 assert(lreg == rax, "left register must be rax,"); | |
2517 assert(rreg != rdx, "right register must not be rdx"); | |
2518 assert(temp->as_register() == rdx, "tmp register must be rdx"); | |
2519 | |
2520 move_regs(lreg, rax); | |
2521 | |
2522 int idivl_offset = __ corrected_idivl(rreg); | |
2523 add_debug_info_for_div0(idivl_offset, info); | |
2524 if (code == lir_irem) { | |
2525 move_regs(rdx, dreg); // result is in rdx | |
2526 } else { | |
2527 move_regs(rax, dreg); | |
2528 } | |
2529 } | |
2530 } | |
2531 | |
2532 | |
2533 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { | |
2534 if (opr1->is_single_cpu()) { | |
2535 Register reg1 = opr1->as_register(); | |
2536 if (opr2->is_single_cpu()) { | |
2537 // cpu register - cpu register | |
304 | 2538 if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) { |
2539 __ cmpptr(reg1, opr2->as_register()); | |
2540 } else { | |
2541 assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?"); | |
2542 __ cmpl(reg1, opr2->as_register()); | |
2543 } | |
0 | 2544 } else if (opr2->is_stack()) { |
2545 // cpu register - stack | |
304 | 2546 if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) { |
2547 __ cmpptr(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); | |
2548 } else { | |
2549 __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); | |
2550 } | |
0 | 2551 } else if (opr2->is_constant()) { |
2552 // cpu register - constant | |
2553 LIR_Const* c = opr2->as_constant_ptr(); | |
2554 if (c->type() == T_INT) { | |
2555 __ cmpl(reg1, c->as_jint()); | |
304 | 2556 } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) { |
2557 // In 64bit oops are single register | |
0 | 2558 jobject o = c->as_jobject(); |
2559 if (o == NULL) { | |
304 | 2560 __ cmpptr(reg1, (int32_t)NULL_WORD); |
0 | 2561 } else { |
304 | 2562 #ifdef _LP64 |
2563 __ movoop(rscratch1, o); | |
2564 __ cmpptr(reg1, rscratch1); | |
2565 #else | |
0 | 2566 __ cmpoop(reg1, c->as_jobject()); |
304 | 2567 #endif // _LP64 |
0 | 2568 } |
2569 } else { | |
2570 ShouldNotReachHere(); | |
2571 } | |
2572 // cpu register - address | |
2573 } else if (opr2->is_address()) { | |
2574 if (op->info() != NULL) { | |
2575 add_debug_info_for_null_check_here(op->info()); | |
2576 } | |
2577 __ cmpl(reg1, as_Address(opr2->as_address_ptr())); | |
2578 } else { | |
2579 ShouldNotReachHere(); | |
2580 } | |
2581 | |
2582 } else if(opr1->is_double_cpu()) { | |
2583 Register xlo = opr1->as_register_lo(); | |
2584 Register xhi = opr1->as_register_hi(); | |
2585 if (opr2->is_double_cpu()) { | |
304 | 2586 #ifdef _LP64 |
2587 __ cmpptr(xlo, opr2->as_register_lo()); | |
2588 #else | |
0 | 2589 // cpu register - cpu register |
2590 Register ylo = opr2->as_register_lo(); | |
2591 Register yhi = opr2->as_register_hi(); | |
2592 __ subl(xlo, ylo); | |
2593 __ sbbl(xhi, yhi); | |
2594 if (condition == lir_cond_equal || condition == lir_cond_notEqual) { | |
2595 __ orl(xhi, xlo); | |
2596 } | |
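// sub/sbb performs the full 64-bit signed compare through the flags; for equal/notEqual the or of both
// halves reduces it to a single test for zero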
304 | 2597 #endif // _LP64 |
0 | 2598 } else if (opr2->is_constant()) { |
2599 // cpu register - constant 0 | |
2600 assert(opr2->as_jlong() == (jlong)0, "only handles zero"); | |
304 | 2601 #ifdef _LP64 |
2602 __ cmpptr(xlo, (int32_t)opr2->as_jlong()); | |
2603 #else | |
0 | 2604 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case"); |
2605 __ orl(xhi, xlo); | |
304 | 2606 #endif // _LP64 |
0 | 2607 } else { |
2608 ShouldNotReachHere(); | |
2609 } | |
2610 | |
2611 } else if (opr1->is_single_xmm()) { | |
2612 XMMRegister reg1 = opr1->as_xmm_float_reg(); | |
2613 if (opr2->is_single_xmm()) { | |
2614 // xmm register - xmm register | |
2615 __ ucomiss(reg1, opr2->as_xmm_float_reg()); | |
2616 } else if (opr2->is_stack()) { | |
2617 // xmm register - stack | |
2618 __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); | |
2619 } else if (opr2->is_constant()) { | |
2620 // xmm register - constant | |
2621 __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat()))); | |
2622 } else if (opr2->is_address()) { | |
2623 // xmm register - address | |
2624 if (op->info() != NULL) { | |
2625 add_debug_info_for_null_check_here(op->info()); | |
2626 } | |
2627 __ ucomiss(reg1, as_Address(opr2->as_address_ptr())); | |
2628 } else { | |
2629 ShouldNotReachHere(); | |
2630 } | |
2631 | |
2632 } else if (opr1->is_double_xmm()) { | |
2633 XMMRegister reg1 = opr1->as_xmm_double_reg(); | |
2634 if (opr2->is_double_xmm()) { | |
2635 // xmm register - xmm register | |
2636 __ ucomisd(reg1, opr2->as_xmm_double_reg()); | |
2637 } else if (opr2->is_stack()) { | |
2638 // xmm register - stack | |
2639 __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix())); | |
2640 } else if (opr2->is_constant()) { | |
2641 // xmm register - constant | |
2642 __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble()))); | |
2643 } else if (opr2->is_address()) { | |
2644 // xmm register - address | |
2645 if (op->info() != NULL) { | |
2646 add_debug_info_for_null_check_here(op->info()); | |
2647 } | |
2648 __ ucomisd(reg1, as_Address(opr2->pointer()->as_address())); | |
2649 } else { | |
2650 ShouldNotReachHere(); | |
2651 } | |
2652 | |
2653 } else if(opr1->is_single_fpu() || opr1->is_double_fpu()) { | |
2654 assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)"); | |
2655 assert(opr2->is_fpu_register(), "both must be registers"); | |
2656 __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1); | |
2657 | |
2658 } else if (opr1->is_address() && opr2->is_constant()) { | |
304 | 2659 LIR_Const* c = opr2->as_constant_ptr(); |
2660 #ifdef _LP64 | |
2661 if (c->type() == T_OBJECT || c->type() == T_ARRAY) { | |
2662 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse"); | |
2663 __ movoop(rscratch1, c->as_jobject()); | |
2664 } | |
2665 #endif // LP64 | |
0 | 2666 if (op->info() != NULL) { |
2667 add_debug_info_for_null_check_here(op->info()); | |
2668 } | |
2669 // special case: address - constant | |
2670 LIR_Address* addr = opr1->as_address_ptr(); | |
2671 if (c->type() == T_INT) { | |
2672 __ cmpl(as_Address(addr), c->as_jint()); | |
304 | 2673 } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) { |
2674 #ifdef _LP64 | |
2675 // %%% Make this explode if addr isn't reachable until we figure out a | |
2676 // better strategy by giving noreg as the temp for as_Address | |
2677 __ cmpptr(rscratch1, as_Address(addr, noreg)); | |
2678 #else | |
0 | 2679 __ cmpoop(as_Address(addr), c->as_jobject()); |
304 | 2680 #endif // _LP64 |
0 | 2681 } else { |
2682 ShouldNotReachHere(); | |
2683 } | |
2684 | |
2685 } else { | |
2686 ShouldNotReachHere(); | |
2687 } | |
2688 } | |
2689 | |
2690 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) { | |
2691 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) { | |
2692 if (left->is_single_xmm()) { | |
2693 assert(right->is_single_xmm(), "must match"); | |
2694 __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i); | |
2695 } else if (left->is_double_xmm()) { | |
2696 assert(right->is_double_xmm(), "must match"); | |
2697 __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i); | |
2698 | |
2699 } else { | |
2700 assert(left->is_single_fpu() || left->is_double_fpu(), "must be"); | |
2701 assert(right->is_single_fpu() || right->is_double_fpu(), "must match"); | |
2702 | |
2703 assert(left->fpu() == 0, "left must be on TOS"); | |
2704 __ fcmp2int(dst->as_register(), code == lir_ucmp_fd2i, right->fpu(), | |
2705 op->fpu_pop_count() > 0, op->fpu_pop_count() > 1); | |
2706 } | |
2707 } else { | |
2708 assert(code == lir_cmp_l2i, "check"); | |
304 | 2709 #ifdef _LP64 |
2710 Register dest = dst->as_register(); | |
2711 __ xorptr(dest, dest); | |
2712 Label high, done; | |
2713 __ cmpptr(left->as_register_lo(), right->as_register_lo()); | |
2714 __ jcc(Assembler::equal, done); | |
2715 __ jcc(Assembler::greater, high); | |
2716 __ decrement(dest); | |
2717 __ jmp(done); | |
2718 __ bind(high); | |
2719 __ increment(dest); | |
2720 | |
2721 __ bind(done); | |
2722 | |
2723 #else | |
0 | 2724 __ lcmp2int(left->as_register_hi(), |
2725 left->as_register_lo(), | |
2726 right->as_register_hi(), | |
2727 right->as_register_lo()); | |
2728 move_regs(left->as_register_hi(), dst->as_register()); | |
304 | 2729 #endif // _LP64 |
0 | 2730 } |
2731 } | |
2732 | |
2733 | |
2734 void LIR_Assembler::align_call(LIR_Code code) { | |
2735 if (os::is_MP()) { | |
2736 // make sure that the displacement word of the call ends up word aligned | |
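// (the 32-bit displacement is patched at run time while other CPUs may be executing the call,
//  so it must not straddle a word boundary for the patch to be atomic)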
2737 int offset = __ offset(); | |
2738 switch (code) { | |
2739 case lir_static_call: | |
2740 case lir_optvirtual_call: | |
2741 offset += NativeCall::displacement_offset; | |
2742 break; | |
2743 case lir_icvirtual_call: | |
2744 offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size; | |
2745 break; | |
2746 case lir_virtual_call: // currently, sparc-specific for niagara | |
2747 default: ShouldNotReachHere(); | |
2748 } | |
2749 while (offset++ % BytesPerWord != 0) { | |
2750 __ nop(); | |
2751 } | |
2752 } | |
2753 } | |
2754 | |
2755 | |
2756 void LIR_Assembler::call(address entry, relocInfo::relocType rtype, CodeEmitInfo* info) { | |
2757 assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, | |
2758 "must be aligned"); | |
2759 __ call(AddressLiteral(entry, rtype)); | |
2760 add_call_info(code_offset(), info); | |
2761 } | |
2762 | |
2763 | |
2764 void LIR_Assembler::ic_call(address entry, CodeEmitInfo* info) { | |
2765 RelocationHolder rh = virtual_call_Relocation::spec(pc()); | |
2766 __ movoop(IC_Klass, (jobject)Universe::non_oop_word()); | |
2767 assert(!os::is_MP() || | |
2768 (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, | |
2769 "must be aligned"); | |
2770 __ call(AddressLiteral(entry, rh)); | |
2771 add_call_info(code_offset(), info); | |
2772 } | |
2773 | |
2774 | |
2775 /* Currently, vtable-dispatch is only enabled for sparc platforms */ | |
2776 void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) { | |
2777 ShouldNotReachHere(); | |
2778 } | |
2779 | |
2780 void LIR_Assembler::emit_static_call_stub() { | |
2781 address call_pc = __ pc(); | |
2782 address stub = __ start_a_stub(call_stub_size); | |
2783 if (stub == NULL) { | |
2784 bailout("static call stub overflow"); | |
2785 return; | |
2786 } | |
2787 | |
2788 int start = __ offset(); | |
2789 if (os::is_MP()) { | |
2790 // make sure that the displacement word of the call ends up word aligned | |
2791 int offset = __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset; | |
2792 while (offset++ % BytesPerWord != 0) { | |
2793 __ nop(); | |
2794 } | |
2795 } | |
2796 __ relocate(static_stub_Relocation::spec(call_pc)); | |
2797 __ movoop(rbx, (jobject)NULL); | |
2798 // must be set to -1 at code generation time | |
2799 assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP"); | |
304 | 2800 // On 64bit this will die since it will take a movq & jmp, must be only a jmp |
2801 __ jump(RuntimeAddress(__ pc())); | |
0 | 2802 |
2803 assert(__ offset() - start <= call_stub_size, "stub too big"); |
2804 __ end_a_stub(); | |
2805 } | |
2806 | |
2807 | |
2808 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind) { | |
2809 assert(exceptionOop->as_register() == rax, "must match"); | |
2810 assert(unwind || exceptionPC->as_register() == rdx, "must match"); | |
2811 | |
2812 // exception object is not added to oop map by LinearScan | |
2813 // (LinearScan assumes that no oops are in fixed registers) | |
2814 info->add_register_oop(exceptionOop); | |
2815 Runtime1::StubID unwind_id; | |
2816 | |
2817 if (!unwind) { | |
2818 // get current pc information | |
2819 // pc is only needed if the method has an exception handler, the unwind code does not need it. | |
2820 int pc_for_athrow_offset = __ offset(); | |
2821 InternalAddress pc_for_athrow(__ pc()); | |
2822 __ lea(exceptionPC->as_register(), pc_for_athrow); | |
2823 add_call_info(pc_for_athrow_offset, info); // for exception handler | |
2824 | |
2825 __ verify_not_null_oop(rax); | |
2826 // search an exception handler (rax: exception oop, rdx: throwing pc) | |
2827 if (compilation()->has_fpu_code()) { | |
2828 unwind_id = Runtime1::handle_exception_id; | |
2829 } else { | |
2830 unwind_id = Runtime1::handle_exception_nofpu_id; | |
2831 } | |
2832 } else { | |
2833 unwind_id = Runtime1::unwind_exception_id; | |
2834 } | |
2835 __ call(RuntimeAddress(Runtime1::entry_for(unwind_id))); | |
2836 | |
2837 // enough room for two byte trap | |
2838 __ nop(); | |
2839 } | |
2840 | |
2841 | |
2842 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { | |
2843 | |
2844 // optimized version for linear scan: | |
2845 // * count must be already in ECX (guaranteed by LinearScan) | |
2846 // * left and dest must be equal | |
2847 // * tmp must be unused | |
2848 assert(count->as_register() == SHIFT_count, "count must be in ECX"); | |
2849 assert(left == dest, "left and dest must be equal"); | |
2850 assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); | |
2851 | |
2852 if (left->is_single_cpu()) { | |
2853 Register value = left->as_register(); | |
2854 assert(value != SHIFT_count, "left cannot be ECX"); | |
2855 | |
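      // The one-operand shll/sarl/shrl forms shift by the count in CL,
      // which LinearScan has already placed there (see asserts above).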
2856 switch (code) { | |
2857 case lir_shl: __ shll(value); break; | |
2858 case lir_shr: __ sarl(value); break; | |
2859 case lir_ushr: __ shrl(value); break; | |
2860 default: ShouldNotReachHere(); | |
2861 } | |
2862 } else if (left->is_double_cpu()) { | |
2863 Register lo = left->as_register_lo(); | |
2864 Register hi = left->as_register_hi(); | |
2865 assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX"); | |
304 | 2866 #ifdef _LP64 |
2867 switch (code) { | |
2868 case lir_shl: __ shlptr(lo); break; | |
2869 case lir_shr: __ sarptr(lo); break; | |
2870 case lir_ushr: __ shrptr(lo); break; | |
2871 default: ShouldNotReachHere(); | |
2872 } | |
2873 #else | |
0 | 2874 |
2875 switch (code) { | |
2876 case lir_shl: __ lshl(hi, lo); break; | |
2877 case lir_shr: __ lshr(hi, lo, true); break; | |
2878 case lir_ushr: __ lshr(hi, lo, false); break; | |
2879 default: ShouldNotReachHere(); | |
2880 } | |
304 | 2881 #endif // _LP64 | 
0 | 2882 } else { |
2883 ShouldNotReachHere(); | |
2884 } | |
2885 } | |
2886 | |
2887 | |
2888 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { | |
2889 if (dest->is_single_cpu()) { | |
2890 // first move left into dest so that left is not destroyed by the shift | |
2891 Register value = dest->as_register(); | |
2892 count = count & 0x1F; // Java spec | |
2893 | |
2894 move_regs(left->as_register(), value); | |
2895 switch (code) { | |
2896 case lir_shl: __ shll(value, count); break; | |
2897 case lir_shr: __ sarl(value, count); break; | |
2898 case lir_ushr: __ shrl(value, count); break; | |
2899 default: ShouldNotReachHere(); | |
2900 } | |
2901 } else if (dest->is_double_cpu()) { | |
304 | 2902 #ifndef _LP64 |
0 | 2903 Unimplemented(); |
304 | 2904 #else |
2905 // first move left into dest so that left is not destroyed by the shift | |
2906 Register value = dest->as_register_lo(); | |
2907 count = count & 0x1F; // Java spec | |
2908 | |
2909 move_regs(left->as_register_lo(), value); | |
2910 switch (code) { | |
2911 case lir_shl: __ shlptr(value, count); break; | |
2912 case lir_shr: __ sarptr(value, count); break; | |
2913 case lir_ushr: __ shrptr(value, count); break; | |
2914 default: ShouldNotReachHere(); | |
2915 } | |
2916 #endif // _LP64 | |
0 | 2917 } else { |
2918 ShouldNotReachHere(); | |
2919 } | |
2920 } | |
2921 | |
2922 | |
2923 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) { | |
2924 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); | |
2925 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; | |
2926 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); | |
304 | 2927 __ movptr (Address(rsp, offset_from_rsp_in_bytes), r); |
0 | 2928 } |
2929 | |
2930 | |
2931 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) { | |
2932 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); | |
2933 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; | |
2934 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); | |
304 | 2935 __ movptr (Address(rsp, offset_from_rsp_in_bytes), c); |
0 | 2936 } |
2937 | |
2938 | |
2939 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) { | |
2940 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); | |
2941 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; | |
2942 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); | |
2943 __ movoop (Address(rsp, offset_from_rsp_in_bytes), o); | |
2944 } | |
2945 | |
2946 | |
2947 // This code replaces a call to arraycopy; no exception may | 
2948 // be thrown in this code: exceptions must be thrown in the System.arraycopy | 
2949 // activation frame. We could save some checks if that were not the case. | 
2950 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { | |
2951 ciArrayKlass* default_type = op->expected_type(); | |
2952 Register src = op->src()->as_register(); | |
2953 Register dst = op->dst()->as_register(); | |
2954 Register src_pos = op->src_pos()->as_register(); | |
2955 Register dst_pos = op->dst_pos()->as_register(); | |
2956 Register length = op->length()->as_register(); | |
2957 Register tmp = op->tmp()->as_register(); | |
2958 | |
2959 CodeStub* stub = op->stub(); | |
2960 int flags = op->flags(); | |
2961 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL; | |
2962 if (basic_type == T_ARRAY) basic_type = T_OBJECT; | |
2963 | |
2964 // if we don't know anything or it's an object array, just go through the generic arraycopy | |
2965 if (default_type == NULL) { | |
2966 Label done; | |
2967 // save outgoing arguments on stack in case call to System.arraycopy is needed | |
2968 // HACK ALERT. This code used to push the parameters in a hardwired fashion | |
2969 // for interpreter calling conventions. Now we have to do it in new style conventions. | |
2970 // For the moment until C1 gets the new register allocator I just force all the | |
2971 // args to the right place (except the register args) and then on the back side | |
2972 // reload the register args properly if we go slow path. Yuck | |
2973 | |
2974 // These are proper for the calling convention | |
2975 | |
2976 store_parameter(length, 2); | |
2977 store_parameter(dst_pos, 1); | |
2978 store_parameter(dst, 0); | |
2979 | |
2980 // these are just temporary placements until we need to reload | |
2981 store_parameter(src_pos, 3); | |
2982 store_parameter(src, 4); | |
304 | 2983 NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");) |
2984 | |
2985 address entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy); | |
0 | 2986 |
2987 // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint | |
304 | 2988 #ifdef _LP64 |
2989 // The arguments are in java calling convention so we can trivially shift them to C | |
2990 // convention | |
2991 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4); | |
2992 __ mov(c_rarg0, j_rarg0); | |
2993 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4); | |
2994 __ mov(c_rarg1, j_rarg1); | |
2995 assert_different_registers(c_rarg2, j_rarg3, j_rarg4); | |
2996 __ mov(c_rarg2, j_rarg2); | |
2997 assert_different_registers(c_rarg3, j_rarg4); | |
2998 __ mov(c_rarg3, j_rarg3); | |
2999 #ifdef _WIN64 | |
3000 // Allocate abi space for args but be sure to keep stack aligned | |
3001 __ subptr(rsp, 6*wordSize); | |
3002 store_parameter(j_rarg4, 4); | |
3003 __ call(RuntimeAddress(entry)); | |
3004 __ addptr(rsp, 6*wordSize); | |
3005 #else | |
3006 __ mov(c_rarg4, j_rarg4); | |
3007 __ call(RuntimeAddress(entry)); | |
3008 #endif // _WIN64 | |
3009 #else | |
3010 __ push(length); | |
3011 __ push(dst_pos); | |
3012 __ push(dst); | |
3013 __ push(src_pos); | |
3014 __ push(src); | |
0 | 3015 __ call_VM_leaf(entry, 5); // removes pushed parameter from the stack |
3016 | |
304 | 3017 #endif // _LP64 |
3018 | |
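    // Runtime1::arraycopy returns 0 if it performed the copy; otherwise
    // reload the saved arguments and take the slow-path stub.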
0 | 3019 __ cmpl(rax, 0); |
3020 __ jcc(Assembler::equal, *stub->continuation()); | |
3021 | |
3022 // Reload values from the stack so they are where the stub | |
3023 // expects them. | |
304 | 3024 __ movptr (dst, Address(rsp, 0*BytesPerWord)); |
3025 __ movptr (dst_pos, Address(rsp, 1*BytesPerWord)); | |
3026 __ movptr (length, Address(rsp, 2*BytesPerWord)); | |
3027 __ movptr (src_pos, Address(rsp, 3*BytesPerWord)); | |
3028 __ movptr (src, Address(rsp, 4*BytesPerWord)); | |
0 | 3029 __ jmp(*stub->entry()); |
3030 | |
3031 __ bind(*stub->continuation()); | |
3032 return; | |
3033 } | |
3034 | |
3035 assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point"); | |
3036 | |
29 | 3037 int elem_size = type2aelembytes(basic_type); | 
0 | 3038 int shift_amount; |
3039 Address::ScaleFactor scale; | |
3040 | |
3041 switch (elem_size) { | |
3042 case 1 : | |
3043 shift_amount = 0; | |
3044 scale = Address::times_1; | |
3045 break; | |
3046 case 2 : | |
3047 shift_amount = 1; | |
3048 scale = Address::times_2; | |
3049 break; | |
3050 case 4 : | |
3051 shift_amount = 2; | |
3052 scale = Address::times_4; | |
3053 break; | |
3054 case 8 : | |
3055 shift_amount = 3; | |
3056 scale = Address::times_8; | |
3057 break; | |
3058 default: | |
3059 ShouldNotReachHere(); | |
3060 } | |
3061 | |
3062 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes()); | |
3063 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes()); | |
3064 Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes()); | |
3065 Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); | |
3066 | |
304 | 3067 // length and the pos arguments are all sign-extended at this point on 64-bit | 
3068 | |
0 | 3069 // test for NULL |
3070 if (flags & LIR_OpArrayCopy::src_null_check) { | |
304 | 3071 __ testptr(src, src); |
0 | 3072 __ jcc(Assembler::zero, *stub->entry()); |
3073 } | |
3074 if (flags & LIR_OpArrayCopy::dst_null_check) { | |
304 | 3075 __ testptr(dst, dst); |
0 | 3076 __ jcc(Assembler::zero, *stub->entry()); |
3077 } | |
3078 | |
3079 // check if negative | |
3080 if (flags & LIR_OpArrayCopy::src_pos_positive_check) { | |
3081 __ testl(src_pos, src_pos); | |
3082 __ jcc(Assembler::less, *stub->entry()); | |
3083 } | |
3084 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) { | |
3085 __ testl(dst_pos, dst_pos); | |
3086 __ jcc(Assembler::less, *stub->entry()); | |
3087 } | |
3088 if (flags & LIR_OpArrayCopy::length_positive_check) { | |
3089 __ testl(length, length); | |
3090 __ jcc(Assembler::less, *stub->entry()); | |
3091 } | |
3092 | |
3093 if (flags & LIR_OpArrayCopy::src_range_check) { | |
304 | 3094 __ lea(tmp, Address(src_pos, length, Address::times_1, 0)); |
0 | 3095 __ cmpl(tmp, src_length_addr); |
3096 __ jcc(Assembler::above, *stub->entry()); | |
3097 } | |
3098 if (flags & LIR_OpArrayCopy::dst_range_check) { | |
304 | 3099 __ lea(tmp, Address(dst_pos, length, Address::times_1, 0)); |
0 | 3100 __ cmpl(tmp, dst_length_addr); |
3101 __ jcc(Assembler::above, *stub->entry()); | |
3102 } | |
3103 | |
3104 if (flags & LIR_OpArrayCopy::type_check) { | |
304 | 3105 __ movptr(tmp, src_klass_addr); |
3106 __ cmpptr(tmp, dst_klass_addr); | |
0 | 3107 __ jcc(Assembler::notEqual, *stub->entry()); |
3108 } | |
3109 | |
3110 #ifdef ASSERT | |
3111 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { | |
3112 // Sanity check the known type with the incoming class. For the | |
3113 // primitive case the types must match exactly with src.klass and | |
3114 // dst.klass each exactly matching the default type. For the | |
3115 // object array case, if no type check is needed then either the | |
3116 // dst type is exactly the expected type and the src type is a | |
3117 // subtype which we can't check or src is the same array as dst | |
3118 // but not necessarily exactly of type default_type. | |
3119 Label known_ok, halt; | |
989 | 3120 __ movoop(tmp, default_type->constant_encoding()); | 
0 | 3121 if (basic_type != T_OBJECT) { |
304 | 3122 __ cmpptr(tmp, dst_klass_addr); |
0 | 3123 __ jcc(Assembler::notEqual, halt); |
304 | 3124 __ cmpptr(tmp, src_klass_addr); |
0 | 3125 __ jcc(Assembler::equal, known_ok); |
3126 } else { | |
304 | 3127 __ cmpptr(tmp, dst_klass_addr); |
0 | 3128 __ jcc(Assembler::equal, known_ok); |
304 | 3129 __ cmpptr(src, dst); |
0 | 3130 __ jcc(Assembler::equal, known_ok); |
3131 } | |
3132 __ bind(halt); | |
3133 __ stop("incorrect type information in arraycopy"); | |
3134 __ bind(known_ok); | |
3135 } | |
3136 #endif | |
3137 | |
304 | 3138 if (shift_amount > 0 && basic_type != T_OBJECT) { |
3139 __ shlptr(length, shift_amount); | |
3140 } | |
3141 | |
3142 #ifdef _LP64 | |
3143 assert_different_registers(c_rarg0, dst, dst_pos, length); | |
1060 | 3144 __ movl2ptr(src_pos, src_pos); // upper 32 bits must be zero | 
304 | 3145 __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); |
3146 assert_different_registers(c_rarg1, length); | |
1060 | 3147 __ movl2ptr(dst_pos, dst_pos); // upper 32 bits must be zero | 
304 | 3148 __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); |
3149 __ mov(c_rarg2, length); | |
3150 | |
3151 #else | |
3152 __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); | |
0 | 3153 store_parameter(tmp, 0); |
304 | 3154 __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); |
0 | 3155 store_parameter(tmp, 1); |
3156 store_parameter(length, 2); | |
304 | 3157 #endif // _LP64 |
0 | 3158 if (basic_type == T_OBJECT) { |
3159 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy), 0); | |
3160 } else { | |
3161 __ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::primitive_arraycopy), 0); | |
3162 } | |
3163 | |
3164 __ bind(*stub->continuation()); | |
3165 } | |
3166 | |
3167 | |
3168 void LIR_Assembler::emit_lock(LIR_OpLock* op) { | |
3169 Register obj = op->obj_opr()->as_register(); // may not be an oop | |
3170 Register hdr = op->hdr_opr()->as_register(); | |
3171 Register lock = op->lock_opr()->as_register(); | |
3172 if (!UseFastLocking) { | |
3173 __ jmp(*op->stub()->entry()); | |
3174 } else if (op->code() == lir_lock) { | |
3175 Register scratch = noreg; | |
3176 if (UseBiasedLocking) { | |
3177 scratch = op->scratch_opr()->as_register(); | |
3178 } | |
3179 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); | |
3180 // add debug info for NullPointerException only if one is possible | |
3181 int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry()); | |
3182 if (op->info() != NULL) { | |
3183 add_debug_info_for_null_check(null_check_offset, op->info()); | |
3184 } | |
3185 // done | |
3186 } else if (op->code() == lir_unlock) { | |
3187 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); | |
3188 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); | |
3189 } else { | |
3190 Unimplemented(); | |
3191 } | |
3192 __ bind(*op->stub()->continuation()); | |
3193 } | |
3194 | |
3195 | |
3196 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { | |
3197 ciMethod* method = op->profiled_method(); | |
3198 int bci = op->profiled_bci(); | |
3199 | |
3200 // Update counter for all call types | |
3201 ciMethodData* md = method->method_data(); | |
3202 if (md == NULL) { | |
3203 bailout("out of memory building methodDataOop"); | |
3204 return; | |
3205 } | |
3206 ciProfileData* data = md->bci_to_data(bci); | |
3207 assert(data->is_CounterData(), "need CounterData for calls"); | |
3208 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); | |
3209 Register mdo = op->mdo()->as_register(); | |
989 | 3210 __ movoop(mdo, md->constant_encoding()); | 
0 | 3211 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); |
3212 __ addl(counter_addr, DataLayout::counter_increment); | |
3213 Bytecodes::Code bc = method->java_code_at_bci(bci); | |
3214 // Perform additional virtual call profiling for invokevirtual and | |
3215 // invokeinterface bytecodes | |
3216 if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) && | |
3217 Tier1ProfileVirtualCalls) { | |
3218 assert(op->recv()->is_single_cpu(), "recv must be allocated"); | |
3219 Register recv = op->recv()->as_register(); | |
3220 assert_different_registers(mdo, recv); | |
3221 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); | |
3222 ciKlass* known_klass = op->known_holder(); | |
3223 if (Tier1OptimizeVirtualCallProfiling && known_klass != NULL) { | |
3224 // We know the type that will be seen at this call site; we can | |
3225 // statically update the methodDataOop rather than needing to do | |
3226 // dynamic tests on the receiver type | |
3227 | |
3228 // NOTE: we should probably put a lock around this search to | |
3229 // avoid collisions by concurrent compilations | |
3230 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; | |
3231 uint i; | |
3232 for (i = 0; i < VirtualCallData::row_limit(); i++) { | |
3233 ciKlass* receiver = vc_data->receiver(i); | |
3234 if (known_klass->equals(receiver)) { | |
3235 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); | |
3236 __ addl(data_addr, DataLayout::counter_increment); | |
3237 return; | |
3238 } | |
3239 } | |
3240 | |
3241 // Receiver type not found in profile data; select an empty slot | |
3242 | |
3243 // Note that this is less efficient than it should be because it | |
3244 // always does a write to the receiver part of the | |
3245 // VirtualCallData rather than just the first time | |
3246 for (i = 0; i < VirtualCallData::row_limit(); i++) { | |
3247 ciKlass* receiver = vc_data->receiver(i); | |
3248 if (receiver == NULL) { | |
3249 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))); | |
989 | 3250 __ movoop(recv_addr, known_klass->constant_encoding()); | 
0 | 3251 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); |
3252 __ addl(data_addr, DataLayout::counter_increment); | |
3253 return; | |
3254 } | |
3255 } | |
3256 } else { | |
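      // Receiver type not known statically: load the receiver's klass and
      // compare it against the rows already recorded in the MDO.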
304 | 3257 __ movptr(recv, Address(recv, oopDesc::klass_offset_in_bytes())); |
0 | 3258 Label update_done; |
3259 uint i; | |
3260 for (i = 0; i < VirtualCallData::row_limit(); i++) { | |
3261 Label next_test; | |
3262 // See if the receiver is receiver[n]. | |
304 | 3263 __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)))); |
0 | 3264 __ jcc(Assembler::notEqual, next_test); |
3265 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); | |
3266 __ addl(data_addr, DataLayout::counter_increment); | |
3267 __ jmp(update_done); | |
3268 __ bind(next_test); | |
3269 } | |
3270 | |
3271 // Didn't find receiver; find next empty slot and fill it in | |
3272 for (i = 0; i < VirtualCallData::row_limit(); i++) { | |
3273 Label next_test; | |
3274 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))); | |
304 | 3275 __ cmpptr(recv_addr, (int32_t)NULL_WORD); |
0 | 3276 __ jcc(Assembler::notEqual, next_test); |
304 | 3277 __ movptr(recv_addr, recv); |
0 | 3278 __ movl(Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))), DataLayout::counter_increment); |
3279 if (i < (VirtualCallData::row_limit() - 1)) { | |
3280 __ jmp(update_done); | |
3281 } | |
3282 __ bind(next_test); | |
3283 } | |
3284 | |
3285 __ bind(update_done); | |
3286 } | |
3287 } | |
3288 } | |
3289 | |
3290 | |
3291 void LIR_Assembler::emit_delay(LIR_OpDelay*) { | |
3292 Unimplemented(); | |
3293 } | |
3294 | |
3295 | |
3296 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) { | |
304 | 3297 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no)); |
0 | 3298 } |
3299 | |
3300 | |
3301 void LIR_Assembler::align_backward_branch_target() { | |
3302 __ align(BytesPerWord); | |
3303 } | |
3304 | |
3305 | |
3306 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) { | |
3307 if (left->is_single_cpu()) { | |
3308 __ negl(left->as_register()); | |
3309 move_regs(left->as_register(), dest->as_register()); | |
3310 | |
3311 } else if (left->is_double_cpu()) { | |
3312 Register lo = left->as_register_lo(); | |
304 | 3313 #ifdef _LP64 |
3314 Register dst = dest->as_register_lo(); | |
3315 __ movptr(dst, lo); | |
3316 __ negptr(dst); | |
3317 #else | |
0 | 3318 Register hi = left->as_register_hi(); |
3319 __ lneg(hi, lo); | |
3320 if (dest->as_register_lo() == hi) { | |
3321 assert(dest->as_register_hi() != lo, "destroying register"); | |
3322 move_regs(hi, dest->as_register_hi()); | |
3323 move_regs(lo, dest->as_register_lo()); | |
3324 } else { | |
3325 move_regs(lo, dest->as_register_lo()); | |
3326 move_regs(hi, dest->as_register_hi()); | |
3327 } | |
304 | 3328 #endif // _LP64 |
0 | 3329 |
3330 } else if (dest->is_single_xmm()) { | |
3331 if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) { | |
3332 __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg()); | |
3333 } | |
3334 __ xorps(dest->as_xmm_float_reg(), | |
3335 ExternalAddress((address)float_signflip_pool)); | |
3336 | |
3337 } else if (dest->is_double_xmm()) { | |
3338 if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) { | |
3339 __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg()); | |
3340 } | |
3341 __ xorpd(dest->as_xmm_double_reg(), | |
3342 ExternalAddress((address)double_signflip_pool)); | |
3343 | |
3344 } else if (left->is_single_fpu() || left->is_double_fpu()) { | |
3345 assert(left->fpu() == 0, "arg must be on TOS"); | |
3346 assert(dest->fpu() == 0, "dest must be TOS"); | |
3347 __ fchs(); | |
3348 | |
3349 } else { | |
3350 ShouldNotReachHere(); | |
3351 } | |
3352 } | |
3353 | |
3354 | |
3355 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) { | |
3356 assert(addr->is_address() && dest->is_register(), "check"); | |
304 | 3357 Register reg; |
3358 reg = dest->as_pointer_register(); | |
3359 __ lea(reg, as_Address(addr->as_address_ptr())); | |
0 | 3360 } |
3361 | |
3362 | |
3363 | |
3364 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { | |
3365 assert(!tmp->is_valid(), "don't need temporary"); | |
3366 __ call(RuntimeAddress(dest)); | |
3367 if (info != NULL) { | |
3368 add_call_info_here(info); | |
3369 } | |
3370 } | |
3371 | |
3372 | |
3373 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { | |
3374 assert(type == T_LONG, "only for volatile long fields"); | |
3375 | |
3376 if (info != NULL) { | |
3377 add_debug_info_for_null_check_here(info); | |
3378 } | |
3379 | |
3380 if (src->is_double_xmm()) { | |
3381 if (dest->is_double_cpu()) { | |
304 | 3382 #ifdef _LP64 |
3383 __ movdq(dest->as_register_lo(), src->as_xmm_double_reg()); | |
3384 #else | |
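      // 32-bit: copy the low word, shift the xmm value right by 32 bits,
      // then copy the high word.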
3385 __ movdl(dest->as_register_lo(), src->as_xmm_double_reg()); | |
0 | 3386 __ psrlq(src->as_xmm_double_reg(), 32); |
304 | 3387 __ movdl(dest->as_register_hi(), src->as_xmm_double_reg()); |
3388 #endif // _LP64 | |
0 | 3389 } else if (dest->is_double_stack()) { |
3390 __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg()); | |
3391 } else if (dest->is_address()) { | |
3392 __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg()); | |
3393 } else { | |
3394 ShouldNotReachHere(); | |
3395 } | |
3396 | |
3397 } else if (dest->is_double_xmm()) { | |
3398 if (src->is_double_stack()) { | |
3399 __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix())); | |
3400 } else if (src->is_address()) { | |
3401 __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr())); | |
3402 } else { | |
3403 ShouldNotReachHere(); | |
3404 } | |
3405 | |
3406 } else if (src->is_double_fpu()) { | |
3407 assert(src->fpu_regnrLo() == 0, "must be TOS"); | |
3408 if (dest->is_double_stack()) { | |
3409 __ fistp_d(frame_map()->address_for_slot(dest->double_stack_ix())); | |
3410 } else if (dest->is_address()) { | |
3411 __ fistp_d(as_Address(dest->as_address_ptr())); | |
3412 } else { | |
3413 ShouldNotReachHere(); | |
3414 } | |
3415 | |
3416 } else if (dest->is_double_fpu()) { | |
3417 assert(dest->fpu_regnrLo() == 0, "must be TOS"); | |
3418 if (src->is_double_stack()) { | |
3419 __ fild_d(frame_map()->address_for_slot(src->double_stack_ix())); | |
3420 } else if (src->is_address()) { | |
3421 __ fild_d(as_Address(src->as_address_ptr())); | |
3422 } else { | |
3423 ShouldNotReachHere(); | |
3424 } | |
3425 } else { | |
3426 ShouldNotReachHere(); | |
3427 } | |
3428 } | |
3429 | |
3430 | |
3431 void LIR_Assembler::membar() { | |
304 | 3432 // QQQ sparc TSO uses this, |
3433 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad)); | |
0 | 3434 } |
3435 | |
3436 void LIR_Assembler::membar_acquire() { | |
3437 // No x86 machines currently require load fences | |
3438 // __ load_fence(); | |
3439 } | |
3440 | |
3441 void LIR_Assembler::membar_release() { | |
3442 // No x86 machines currently require store fences | |
3443 // __ store_fence(); | |
3444 } | |
3445 | |
3446 void LIR_Assembler::get_thread(LIR_Opr result_reg) { | |
3447 assert(result_reg->is_register(), "check"); | |
304 | 3448 #ifdef _LP64 |
3449 // __ get_thread(result_reg->as_register_lo()); | |
3450 __ mov(result_reg->as_register(), r15_thread); | |
3451 #else | |
0 | 3452 __ get_thread(result_reg->as_register()); |
304 | 3453 #endif // _LP64 |
0 | 3454 } |
3455 | |
3456 | |
3457 void LIR_Assembler::peephole(LIR_List*) { | |
3458 // do nothing for now | |
3459 } | |
3460 | |
3461 | |
3462 #undef __ |