src/cpu/x86/vm/macroAssembler_x86.cpp @ 7199:cd3d6a6b95d9

8003240: x86: move MacroAssembler into separate file
Reviewed-by: kvn
author twisti
date Fri, 30 Nov 2012 15:23:16 -0800
parents 7198:6ab62ad83507
children f0c2369fda5a
1 /*
2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/assembler.hpp"
27 #include "asm/assembler.inline.hpp"
28 #include "compiler/disassembler.hpp"
29 #include "gc_interface/collectedHeap.inline.hpp"
30 #include "interpreter/interpreter.hpp"
31 #include "memory/cardTableModRefBS.hpp"
32 #include "memory/resourceArea.hpp"
33 #include "prims/methodHandles.hpp"
34 #include "runtime/biasedLocking.hpp"
35 #include "runtime/interfaceSupport.hpp"
36 #include "runtime/objectMonitor.hpp"
37 #include "runtime/os.hpp"
38 #include "runtime/sharedRuntime.hpp"
39 #include "runtime/stubRoutines.hpp"
40 #ifndef SERIALGC
41 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
42 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
43 #include "gc_implementation/g1/heapRegion.hpp"
44 #endif
45
46 #ifdef PRODUCT
47 #define BLOCK_COMMENT(str) /* nothing */
48 #define STOP(error) stop(error)
49 #else
50 #define BLOCK_COMMENT(str) block_comment(str)
51 #define STOP(error) block_comment(error); stop(error)
52 #endif
53
54 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
55
56
57 static Assembler::Condition reverse[] = {
58 Assembler::noOverflow /* overflow = 0x0 */ ,
59 Assembler::overflow /* noOverflow = 0x1 */ ,
60 Assembler::aboveEqual /* carrySet = 0x2, below = 0x2 */ ,
61 Assembler::below /* aboveEqual = 0x3, carryClear = 0x3 */ ,
62 Assembler::notZero /* zero = 0x4, equal = 0x4 */ ,
63 Assembler::zero /* notZero = 0x5, notEqual = 0x5 */ ,
64 Assembler::above /* belowEqual = 0x6 */ ,
65 Assembler::belowEqual /* above = 0x7 */ ,
66 Assembler::positive /* negative = 0x8 */ ,
67 Assembler::negative /* positive = 0x9 */ ,
68 Assembler::noParity /* parity = 0xa */ ,
69 Assembler::parity /* noParity = 0xb */ ,
70 Assembler::greaterEqual /* less = 0xc */ ,
71 Assembler::less /* greaterEqual = 0xd */ ,
72 Assembler::greater /* lessEqual = 0xe */ ,
73 Assembler::lessEqual /* greater = 0xf */
74
75 };
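// Illustrative note (not part of the original change): the table maps a
// condition's encoding to its negation, so a helper can branch over an
// instruction when the condition fails, e.g.:
//
//   reverse[Assembler::equal /* 0x4 */] == Assembler::notZero
//   reverse[Assembler::less  /* 0xc */] == Assembler::greaterEqual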
76
77
78 // Implementation of MacroAssembler
79
80 // First, all the versions that are distinct for 32 vs. 64 bit,
81 // unless the difference is trivial (a line or so).
82
83 #ifndef _LP64
84
85 // 32bit versions
86
87 Address MacroAssembler::as_Address(AddressLiteral adr) {
88 return Address(adr.target(), adr.rspec());
89 }
90
91 Address MacroAssembler::as_Address(ArrayAddress adr) {
92 return Address::make_array(adr);
93 }
94
95 int MacroAssembler::biased_locking_enter(Register lock_reg,
96 Register obj_reg,
97 Register swap_reg,
98 Register tmp_reg,
99 bool swap_reg_contains_mark,
100 Label& done,
101 Label* slow_case,
102 BiasedLockingCounters* counters) {
103 assert(UseBiasedLocking, "why call this otherwise?");
104 assert(swap_reg == rax, "swap_reg must be rax, for cmpxchg");
105 assert_different_registers(lock_reg, obj_reg, swap_reg);
106
107 if (PrintBiasedLockingStatistics && counters == NULL)
108 counters = BiasedLocking::counters();
109
110 bool need_tmp_reg = false;
111 if (tmp_reg == noreg) {
112 need_tmp_reg = true;
113 tmp_reg = lock_reg;
114 } else {
115 assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
116 }
117 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
118 Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
119 Address klass_addr (obj_reg, oopDesc::klass_offset_in_bytes());
120 Address saved_mark_addr(lock_reg, 0);
121
122 // Biased locking
123 // See whether the lock is currently biased toward our thread and
124 // whether the epoch is still valid
125 // Note that the runtime guarantees sufficient alignment of JavaThread
126 // pointers to allow age to be placed into low bits
127 // First check to see whether biasing is even enabled for this object
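// Illustrative sketch (an assumption, per markOop.hpp of this era): the
// 32-bit mark word this code inspects is laid out as
//
//   [ JavaThread* : 23 | epoch : 2 | age : 4 | biased_lock : 1 | lock : 2 ]
//
// and markOopDesc::biased_lock_pattern is the low three bits 101, i.e.
// biased_lock = 1 with lock = 01, which is what the and/cmp below test for.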
128 Label cas_label;
129 int null_check_offset = -1;
130 if (!swap_reg_contains_mark) {
131 null_check_offset = offset();
132 movl(swap_reg, mark_addr);
133 }
134 if (need_tmp_reg) {
135 push(tmp_reg);
136 }
137 movl(tmp_reg, swap_reg);
138 andl(tmp_reg, markOopDesc::biased_lock_mask_in_place);
139 cmpl(tmp_reg, markOopDesc::biased_lock_pattern);
140 if (need_tmp_reg) {
141 pop(tmp_reg);
142 }
143 jcc(Assembler::notEqual, cas_label);
144 // The bias pattern is present in the object's header. Need to check
145 // whether the bias owner and the epoch are both still current.
146 // Note that because there is no current thread register on x86 we
147 // need to store off the mark word we read out of the object to
148 // avoid reloading it and needing to recheck invariants below. This
149 // store is unfortunate but it makes the overall code shorter and
150 // simpler.
151 movl(saved_mark_addr, swap_reg);
152 if (need_tmp_reg) {
153 push(tmp_reg);
154 }
155 get_thread(tmp_reg);
156 xorl(swap_reg, tmp_reg);
157 if (swap_reg_contains_mark) {
158 null_check_offset = offset();
159 }
160 movl(tmp_reg, klass_addr);
161 xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset()));
162 andl(swap_reg, ~((int) markOopDesc::age_mask_in_place));
163 if (need_tmp_reg) {
164 pop(tmp_reg);
165 }
166 if (counters != NULL) {
167 cond_inc32(Assembler::zero,
168 ExternalAddress((address)counters->biased_lock_entry_count_addr()));
169 }
170 jcc(Assembler::equal, done);
171
172 Label try_revoke_bias;
173 Label try_rebias;
174
175 // At this point we know that the header has the bias pattern and
176 // that we are not the bias owner in the current epoch. We need to
177 // figure out more details about the state of the header in order to
178 // know what operations can be legally performed on the object's
179 // header.
180
181 // If the low three bits in the xor result aren't clear, that means
182 // the prototype header is no longer biased and we have to revoke
183 // the bias on this object.
184 testl(swap_reg, markOopDesc::biased_lock_mask_in_place);
185 jcc(Assembler::notZero, try_revoke_bias);
186
187 // Biasing is still enabled for this data type. See whether the
188 // epoch of the current bias is still valid, meaning that the epoch
189 // bits of the mark word are equal to the epoch bits of the
190 // prototype header. (Note that the prototype header's epoch bits
191 // only change at a safepoint.) If not, attempt to rebias the object
192 // toward the current thread. Note that we must be absolutely sure
193 // that the current epoch is invalid in order to do this because
194 // otherwise the manipulations it performs on the mark word are
195 // illegal.
196 testl(swap_reg, markOopDesc::epoch_mask_in_place);
197 jcc(Assembler::notZero, try_rebias);
198
199 // The epoch of the current bias is still valid but we know nothing
200 // about the owner; it might be set or it might be clear. Try to
201 // acquire the bias of the object using an atomic operation. If this
202 // fails we will go in to the runtime to revoke the object's bias.
203 // Note that we first construct the presumed unbiased header so we
204 // don't accidentally blow away another thread's valid bias.
205 movl(swap_reg, saved_mark_addr);
206 andl(swap_reg,
207 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
208 if (need_tmp_reg) {
209 push(tmp_reg);
210 }
211 get_thread(tmp_reg);
212 orl(tmp_reg, swap_reg);
213 if (os::is_MP()) {
214 lock();
215 }
216 cmpxchgptr(tmp_reg, Address(obj_reg, 0));
217 if (need_tmp_reg) {
218 pop(tmp_reg);
219 }
220 // If the biasing toward our thread failed, this means that
221 // another thread succeeded in biasing it toward itself and we
222 // need to revoke that bias. The revocation will occur in the
223 // interpreter runtime in the slow case.
224 if (counters != NULL) {
225 cond_inc32(Assembler::zero,
226 ExternalAddress((address)counters->anonymously_biased_lock_entry_count_addr()));
227 }
228 if (slow_case != NULL) {
229 jcc(Assembler::notZero, *slow_case);
230 }
231 jmp(done);
232
233 bind(try_rebias);
234 // At this point we know the epoch has expired, meaning that the
235 // current "bias owner", if any, is actually invalid. Under these
236 // circumstances _only_, we are allowed to use the current header's
237 // value as the comparison value when doing the cas to acquire the
238 // bias in the current epoch. In other words, we allow transfer of
239 // the bias from one thread to another directly in this situation.
240 //
241 // FIXME: due to a lack of registers we currently blow away the age
242 // bits in this situation. Should attempt to preserve them.
243 if (need_tmp_reg) {
244 push(tmp_reg);
245 }
246 get_thread(tmp_reg);
247 movl(swap_reg, klass_addr);
248 orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset()));
249 movl(swap_reg, saved_mark_addr);
250 if (os::is_MP()) {
251 lock();
252 }
253 cmpxchgptr(tmp_reg, Address(obj_reg, 0));
254 if (need_tmp_reg) {
255 pop(tmp_reg);
256 }
257 // If the biasing toward our thread failed, then another thread
258 // succeeded in biasing it toward itself and we need to revoke that
259 // bias. The revocation will occur in the runtime in the slow case.
260 if (counters != NULL) {
261 cond_inc32(Assembler::zero,
262 ExternalAddress((address)counters->rebiased_lock_entry_count_addr()));
263 }
264 if (slow_case != NULL) {
265 jcc(Assembler::notZero, *slow_case);
266 }
267 jmp(done);
268
269 bind(try_revoke_bias);
270 // The prototype mark in the klass doesn't have the bias bit set any
271 // more, indicating that objects of this data type are not supposed
272 // to be biased any more. We are going to try to reset the mark of
273 // this object to the prototype value and fall through to the
274 // CAS-based locking scheme. Note that if our CAS fails, it means
275 // that another thread raced us for the privilege of revoking the
276 // bias of this particular object, so it's okay to continue in the
277 // normal locking code.
278 //
279 // FIXME: due to a lack of registers we currently blow away the age
280 // bits in this situation. Should attempt to preserve them.
281 movl(swap_reg, saved_mark_addr);
282 if (need_tmp_reg) {
283 push(tmp_reg);
284 }
285 movl(tmp_reg, klass_addr);
286 movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset()));
287 if (os::is_MP()) {
288 lock();
289 }
290 cmpxchgptr(tmp_reg, Address(obj_reg, 0));
291 if (need_tmp_reg) {
292 pop(tmp_reg);
293 }
294 // Fall through to the normal CAS-based lock, because no matter what
295 // the result of the above CAS, some thread must have succeeded in
296 // removing the bias bit from the object's header.
297 if (counters != NULL) {
298 cond_inc32(Assembler::zero,
299 ExternalAddress((address)counters->revoked_lock_entry_count_addr()));
300 }
301
302 bind(cas_label);
303
304 return null_check_offset;
305 }
306 void MacroAssembler::call_VM_leaf_base(address entry_point,
307 int number_of_arguments) {
308 call(RuntimeAddress(entry_point));
309 increment(rsp, number_of_arguments * wordSize);
310 }
311
312 void MacroAssembler::cmpklass(Address src1, Metadata* obj) {
313 cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
314 }
315
316 void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
317 cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
318 }
319
320 void MacroAssembler::cmpoop(Address src1, jobject obj) {
321 cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
322 }
323
324 void MacroAssembler::cmpoop(Register src1, jobject obj) {
325 cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
326 }
327
328 void MacroAssembler::extend_sign(Register hi, Register lo) {
329 // According to Intel Doc. AP-526, "Integer Divide", p.18.
330 if (VM_Version::is_P6() && hi == rdx && lo == rax) {
331 cdql();
332 } else {
333 movl(hi, lo);
334 sarl(hi, 31);
335 }
336 }
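// Worked example (illustrative): cdql sign-extends eax into edx:eax, so for
// lo = eax = 0xFFFFFFFE (-2) it yields hi = edx = 0xFFFFFFFF; the fallback
// movl(hi, lo); sarl(hi, 31) computes the same fill of sign bits.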
337
338 void MacroAssembler::jC2(Register tmp, Label& L) {
339 // set parity bit if FPU flag C2 is set (via rax)
340 save_rax(tmp);
341 fwait(); fnstsw_ax();
342 sahf();
343 restore_rax(tmp);
344 // branch
345 jcc(Assembler::parity, L);
346 }
347
348 void MacroAssembler::jnC2(Register tmp, Label& L) {
349 // set parity bit if FPU flag C2 is set (via rax)
350 save_rax(tmp);
351 fwait(); fnstsw_ax();
352 sahf();
353 restore_rax(tmp);
354 // branch
355 jcc(Assembler::noParity, L);
356 }
357
358 // 32bit can do a case table jump in one instruction but we no longer allow the base
359 // to be installed in the Address class
360 void MacroAssembler::jump(ArrayAddress entry) {
361 jmp(as_Address(entry));
362 }
363
364 // Note: y_lo will be destroyed
365 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
366 // Long compare for Java (semantics as described in JVM spec.)
367 Label high, low, done;
368
369 cmpl(x_hi, y_hi);
370 jcc(Assembler::less, low);
371 jcc(Assembler::greater, high);
372 // x_hi is the return register
373 xorl(x_hi, x_hi);
374 cmpl(x_lo, y_lo);
375 jcc(Assembler::below, low);
376 jcc(Assembler::equal, done);
377
378 bind(high);
379 xorl(x_hi, x_hi);
380 increment(x_hi);
381 jmp(done);
382
383 bind(low);
384 xorl(x_hi, x_hi);
385 decrementl(x_hi);
386
387 bind(done);
388 }
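// Worked example (illustrative): like Java's lcmp, the result in x_hi is
// -1, 0 or +1. For x = 0x00000001_00000000 and y = 0x00000000_FFFFFFFF the
// high words differ (1 > 0), so the high path yields +1; when the high
// words are equal, the low words are compared unsigned (below), which is
// correct for the magnitude of the lower 32 bits.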
389
390 void MacroAssembler::lea(Register dst, AddressLiteral src) {
391 mov_literal32(dst, (int32_t)src.target(), src.rspec());
392 }
393
394 void MacroAssembler::lea(Address dst, AddressLiteral adr) {
395 // leal(dst, as_Address(adr));
396 // see note in movl as to why we must use a move
397 mov_literal32(dst, (int32_t) adr.target(), adr.rspec());
398 }
399
400 void MacroAssembler::leave() {
401 mov(rsp, rbp);
402 pop(rbp);
403 }
404
405 void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
406 // Multiplication of two Java long values stored on the stack
407 // as illustrated below. Result is in rdx:rax.
408 //
409 // rsp ---> [ ?? ] \ \
410 // .... | y_rsp_offset |
411 // [ y_lo ] / (in bytes) | x_rsp_offset
412 // [ y_hi ] | (in bytes)
413 // .... |
414 // [ x_lo ] /
415 // [ x_hi ]
416 // ....
417 //
418 // Basic idea: lo(result) = lo(x_lo * y_lo)
419 // hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
420 Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
421 Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
422 Label quick;
423 // load x_hi, y_hi and check if quick
424 // multiplication is possible
425 movl(rbx, x_hi);
426 movl(rcx, y_hi);
427 movl(rax, rbx);
428 orl(rbx, rcx); // rbx = 0 <=> x_hi = 0 and y_hi = 0
429 jcc(Assembler::zero, quick); // if rbx = 0 do quick multiply
430 // do full multiplication
431 // 1st step
432 mull(y_lo); // x_hi * y_lo
433 movl(rbx, rax); // save lo(x_hi * y_lo) in rbx
434 // 2nd step
435 movl(rax, x_lo);
436 mull(rcx); // x_lo * y_hi
437 addl(rbx, rax); // add lo(x_lo * y_hi) to rbx
438 // 3rd step
439 bind(quick); // note: rbx = 0 if quick multiply!
440 movl(rax, x_lo);
441 mull(y_lo); // x_lo * y_lo
442 addl(rdx, rbx); // correct hi(x_lo * y_lo)
443 }
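// Worked example (illustrative) of the identity above, for
// x = 0x00000001_00000002 and y = 0x00000003_00000004:
//   lo(result) = lo(2 * 4)                         = 0x00000008
//   hi(result) = hi(2 * 4) + lo(1 * 4) + lo(2 * 3) = 0 + 4 + 6 = 0x0000000A
// so rdx:rax = 0x0000000A_00000008, which is x * y mod 2^64.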
444
445 void MacroAssembler::lneg(Register hi, Register lo) {
446 negl(lo);
447 adcl(hi, 0);
448 negl(hi);
449 }
450
451 void MacroAssembler::lshl(Register hi, Register lo) {
452 // Java shift left long support (semantics as described in JVM spec., p.305)
453 // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
454 // shift value is in rcx !
455 assert(hi != rcx, "must not use rcx");
456 assert(lo != rcx, "must not use rcx");
457 const Register s = rcx; // shift count
458 const int n = BitsPerWord;
459 Label L;
460 andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
461 cmpl(s, n); // if (s < n)
462 jcc(Assembler::less, L); // else (s >= n)
463 movl(hi, lo); // x := x << n
464 xorl(lo, lo);
465 // Note: subl(s, n) is not needed since the Intel shift instructions use the count rcx mod n!
466 bind(L); // s (mod n) < n
467 shldl(hi, lo); // x := x << s
468 shll(lo);
469 }
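// Worked example (illustrative): for a shift count s = 40 in rcx, 40 >= 32,
// so hi := lo and lo := 0 first; shldl/shll then shift by 40 mod 32 = 8
// (the hardware masks the count), giving (x << 32) << 8 == x << 40.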
470
471
472 void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
473 // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
474 // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
475 assert(hi != rcx, "must not use rcx");
476 assert(lo != rcx, "must not use rcx");
477 const Register s = rcx; // shift count
478 const int n = BitsPerWord;
479 Label L;
480 andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
481 cmpl(s, n); // if (s < n)
482 jcc(Assembler::less, L); // else (s >= n)
483 movl(lo, hi); // x := x >> n
484 if (sign_extension) sarl(hi, 31);
485 else xorl(hi, hi);
486 // Note: subl(s, n) is not needed since the Intel shift instructions use the count rcx mod n!
487 bind(L); // s (mod n) < n
488 shrdl(lo, hi); // x := x >> s
489 if (sign_extension) sarl(hi);
490 else shrl(hi);
491 }
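// Worked example (illustrative): for s = 48 with sign_extension, lo := hi
// and hi is filled with sign bits; shrdl/sarl then shift by 48 mod 32 = 16,
// giving (x >> 32) >> 16 == x >> 48 arithmetically.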
492
493 void MacroAssembler::movoop(Register dst, jobject obj) {
494 mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
495 }
496
497 void MacroAssembler::movoop(Address dst, jobject obj) {
498 mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
499 }
500
501 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
502 mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
503 }
504
505 void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
506 mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
507 }
508
509 void MacroAssembler::movptr(Register dst, AddressLiteral src) {
510 if (src.is_lval()) {
511 mov_literal32(dst, (intptr_t)src.target(), src.rspec());
512 } else {
513 movl(dst, as_Address(src));
514 }
515 }
516
517 void MacroAssembler::movptr(ArrayAddress dst, Register src) {
518 movl(as_Address(dst), src);
519 }
520
521 void MacroAssembler::movptr(Register dst, ArrayAddress src) {
522 movl(dst, as_Address(src));
523 }
524
525 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
526 void MacroAssembler::movptr(Address dst, intptr_t src) {
527 movl(dst, src);
528 }
529
530
531 void MacroAssembler::pop_callee_saved_registers() {
532 pop(rcx);
533 pop(rdx);
534 pop(rdi);
535 pop(rsi);
536 }
537
538 void MacroAssembler::pop_fTOS() {
539 fld_d(Address(rsp, 0));
540 addl(rsp, 2 * wordSize);
541 }
542
543 void MacroAssembler::push_callee_saved_registers() {
544 push(rsi);
545 push(rdi);
546 push(rdx);
547 push(rcx);
548 }
549
550 void MacroAssembler::push_fTOS() {
551 subl(rsp, 2 * wordSize);
552 fstp_d(Address(rsp, 0));
553 }
554
555
556 void MacroAssembler::pushoop(jobject obj) {
557 push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
558 }
559
560 void MacroAssembler::pushklass(Metadata* obj) {
561 push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate());
562 }
563
564 void MacroAssembler::pushptr(AddressLiteral src) {
565 if (src.is_lval()) {
566 push_literal32((int32_t)src.target(), src.rspec());
567 } else {
568 pushl(as_Address(src));
569 }
570 }
571
572 void MacroAssembler::set_word_if_not_zero(Register dst) {
573 xorl(dst, dst);
574 set_byte_if_not_zero(dst);
575 }
576
577 static void pass_arg0(MacroAssembler* masm, Register arg) {
578 masm->push(arg);
579 }
580
581 static void pass_arg1(MacroAssembler* masm, Register arg) {
582 masm->push(arg);
583 }
584
585 static void pass_arg2(MacroAssembler* masm, Register arg) {
586 masm->push(arg);
587 }
588
589 static void pass_arg3(MacroAssembler* masm, Register arg) {
590 masm->push(arg);
591 }
592
593 #ifndef PRODUCT
594 extern "C" void findpc(intptr_t x);
595 #endif
596
597 void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
598 // In order to get locks to work, we need to fake an in_VM state
599 JavaThread* thread = JavaThread::current();
600 JavaThreadState saved_state = thread->thread_state();
601 thread->set_thread_state(_thread_in_vm);
602 if (ShowMessageBoxOnError) {
606 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
607 ttyLocker ttyl;
608 BytecodeCounter::print();
609 }
610 // To see where a verify_oop failed, get $ebx+40/X for this frame.
611 // This is the value of eip which points to where verify_oop will return.
612 if (os::message_box(msg, "Execution stopped, print registers?")) {
613 print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
614 BREAKPOINT;
615 }
616 } else {
617 ttyLocker ttyl;
618 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
619 }
620 // Don't assert holding the ttyLock
621 assert(false, err_msg("DEBUG MESSAGE: %s", msg));
622 ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
623 }
624
625 void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
626 ttyLocker ttyl;
627 FlagSetting fs(Debugging, true);
628 tty->print_cr("eip = 0x%08x", eip);
629 #ifndef PRODUCT
630 if ((WizardMode || Verbose) && PrintMiscellaneous) {
631 tty->cr();
632 findpc(eip);
633 tty->cr();
634 }
635 #endif
636 #define PRINT_REG(rax) \
637 { tty->print("%s = ", #rax); os::print_location(tty, rax); }
638 PRINT_REG(rax);
639 PRINT_REG(rbx);
640 PRINT_REG(rcx);
641 PRINT_REG(rdx);
642 PRINT_REG(rdi);
643 PRINT_REG(rsi);
644 PRINT_REG(rbp);
645 PRINT_REG(rsp);
646 #undef PRINT_REG
647 // Print some words near the top of the stack.
648 int* dump_sp = (int*) rsp;
649 for (int col1 = 0; col1 < 8; col1++) {
650 tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
651 os::print_location(tty, *dump_sp++);
652 }
653 for (int row = 0; row < 16; row++) {
654 tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
655 for (int col = 0; col < 8; col++) {
656 tty->print(" 0x%08x", *dump_sp++);
657 }
658 tty->cr();
659 }
660 // Print some instructions around pc:
661 Disassembler::decode((address)eip-64, (address)eip);
662 tty->print_cr("--------");
663 Disassembler::decode((address)eip, (address)eip+32);
664 }
665
666 void MacroAssembler::stop(const char* msg) {
667 ExternalAddress message((address)msg);
668 // push address of message
669 pushptr(message.addr());
670 { Label L; call(L, relocInfo::none); bind(L); } // push eip
671 pusha(); // push registers
672 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
673 hlt();
674 }
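// Illustrative note (inferred from the code above): after pushptr(msg), the
// call-to-next-instruction trick (which pushes eip) and pusha (which pushes
// eax, ecx, edx, ebx, esp, ebp, esi, edi), the stack from the top down is
//
//   edi, esi, ebp, esp, ebx, edx, ecx, eax, eip, msg
//
// which is exactly the cdecl argument order debug32 declares.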
675
676 void MacroAssembler::warn(const char* msg) {
677 push_CPU_state();
678
679 ExternalAddress message((address) msg);
680 // push address of message
681 pushptr(message.addr());
682
683 call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
684 addl(rsp, wordSize); // discard argument
685 pop_CPU_state();
686 }
687
688 void MacroAssembler::print_state() {
689 { Label L; call(L, relocInfo::none); bind(L); } // push eip
690 pusha(); // push registers
691
692 push_CPU_state();
693 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
694 pop_CPU_state();
695
696 popa();
697 addl(rsp, wordSize);
698 }
699
700 #else // _LP64
701
702 // 64 bit versions
703
704 Address MacroAssembler::as_Address(AddressLiteral adr) {
705 // amd64 always does this as a pc-rel
706 // we can be absolute or disp based on the instruction type
707 // jmp/call use displacements; others are absolute
708 assert(!adr.is_lval(), "must be rval");
709 assert(reachable(adr), "must be");
710 return Address((int32_t)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc());
711
712 }
713
714 Address MacroAssembler::as_Address(ArrayAddress adr) {
715 AddressLiteral base = adr.base();
716 lea(rscratch1, base);
717 Address index = adr.index();
718 assert(index._disp == 0, "must not have disp"); // maybe it can?
719 Address array(rscratch1, index._index, index._scale, index._disp);
720 return array;
721 }
722
723 int MacroAssembler::biased_locking_enter(Register lock_reg,
724 Register obj_reg,
725 Register swap_reg,
726 Register tmp_reg,
727 bool swap_reg_contains_mark,
728 Label& done,
729 Label* slow_case,
730 BiasedLockingCounters* counters) {
731 assert(UseBiasedLocking, "why call this otherwise?");
732 assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
733 assert(tmp_reg != noreg, "tmp_reg must be supplied");
734 assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
735 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
736 Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
737 Address saved_mark_addr(lock_reg, 0);
738
739 if (PrintBiasedLockingStatistics && counters == NULL)
740 counters = BiasedLocking::counters();
741
742 // Biased locking
743 // See whether the lock is currently biased toward our thread and
744 // whether the epoch is still valid
745 // Note that the runtime guarantees sufficient alignment of JavaThread
746 // pointers to allow age to be placed into low bits
747 // First check to see whether biasing is even enabled for this object
748 Label cas_label;
749 int null_check_offset = -1;
750 if (!swap_reg_contains_mark) {
751 null_check_offset = offset();
752 movq(swap_reg, mark_addr);
753 }
754 movq(tmp_reg, swap_reg);
755 andq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
756 cmpq(tmp_reg, markOopDesc::biased_lock_pattern);
757 jcc(Assembler::notEqual, cas_label);
758 // The bias pattern is present in the object's header. Need to check
759 // whether the bias owner and the epoch are both still current.
760 load_prototype_header(tmp_reg, obj_reg);
761 orq(tmp_reg, r15_thread);
762 xorq(tmp_reg, swap_reg);
763 andq(tmp_reg, ~((int) markOopDesc::age_mask_in_place));
764 if (counters != NULL) {
765 cond_inc32(Assembler::zero,
766 ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
767 }
768 jcc(Assembler::equal, done);
769
770 Label try_revoke_bias;
771 Label try_rebias;
772
773 // At this point we know that the header has the bias pattern and
774 // that we are not the bias owner in the current epoch. We need to
775 // figure out more details about the state of the header in order to
776 // know what operations can be legally performed on the object's
777 // header.
778
779 // If the low three bits in the xor result aren't clear, that means
780 // the prototype header is no longer biased and we have to revoke
781 // the bias on this object.
782 testq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
783 jcc(Assembler::notZero, try_revoke_bias);
784
785 // Biasing is still enabled for this data type. See whether the
786 // epoch of the current bias is still valid, meaning that the epoch
787 // bits of the mark word are equal to the epoch bits of the
788 // prototype header. (Note that the prototype header's epoch bits
789 // only change at a safepoint.) If not, attempt to rebias the object
790 // toward the current thread. Note that we must be absolutely sure
791 // that the current epoch is invalid in order to do this because
792 // otherwise the manipulations it performs on the mark word are
793 // illegal.
794 testq(tmp_reg, markOopDesc::epoch_mask_in_place);
795 jcc(Assembler::notZero, try_rebias);
796
797 // The epoch of the current bias is still valid but we know nothing
798 // about the owner; it might be set or it might be clear. Try to
799 // acquire the bias of the object using an atomic operation. If this
800 // fails we will go in to the runtime to revoke the object's bias.
801 // Note that we first construct the presumed unbiased header so we
802 // don't accidentally blow away another thread's valid bias.
803 andq(swap_reg,
804 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
805 movq(tmp_reg, swap_reg);
806 orq(tmp_reg, r15_thread);
807 if (os::is_MP()) {
808 lock();
809 }
810 cmpxchgq(tmp_reg, Address(obj_reg, 0));
811 // If the biasing toward our thread failed, this means that
812 // another thread succeeded in biasing it toward itself and we
813 // need to revoke that bias. The revocation will occur in the
814 // interpreter runtime in the slow case.
815 if (counters != NULL) {
816 cond_inc32(Assembler::zero,
817 ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
818 }
819 if (slow_case != NULL) {
820 jcc(Assembler::notZero, *slow_case);
821 }
822 jmp(done);
823
824 bind(try_rebias);
825 // At this point we know the epoch has expired, meaning that the
826 // current "bias owner", if any, is actually invalid. Under these
827 // circumstances _only_, we are allowed to use the current header's
828 // value as the comparison value when doing the cas to acquire the
829 // bias in the current epoch. In other words, we allow transfer of
830 // the bias from one thread to another directly in this situation.
831 //
832 // FIXME: due to a lack of registers we currently blow away the age
833 // bits in this situation. Should attempt to preserve them.
834 load_prototype_header(tmp_reg, obj_reg);
835 orq(tmp_reg, r15_thread);
836 if (os::is_MP()) {
837 lock();
838 }
839 cmpxchgq(tmp_reg, Address(obj_reg, 0));
840 // If the biasing toward our thread failed, then another thread
841 // succeeded in biasing it toward itself and we need to revoke that
842 // bias. The revocation will occur in the runtime in the slow case.
843 if (counters != NULL) {
844 cond_inc32(Assembler::zero,
845 ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
846 }
847 if (slow_case != NULL) {
848 jcc(Assembler::notZero, *slow_case);
849 }
850 jmp(done);
851
852 bind(try_revoke_bias);
853 // The prototype mark in the klass doesn't have the bias bit set any
854 // more, indicating that objects of this data type are not supposed
855 // to be biased any more. We are going to try to reset the mark of
856 // this object to the prototype value and fall through to the
857 // CAS-based locking scheme. Note that if our CAS fails, it means
858 // that another thread raced us for the privilege of revoking the
859 // bias of this particular object, so it's okay to continue in the
860 // normal locking code.
861 //
862 // FIXME: due to a lack of registers we currently blow away the age
863 // bits in this situation. Should attempt to preserve them.
864 load_prototype_header(tmp_reg, obj_reg);
865 if (os::is_MP()) {
866 lock();
867 }
868 cmpxchgq(tmp_reg, Address(obj_reg, 0));
869 // Fall through to the normal CAS-based lock, because no matter what
870 // the result of the above CAS, some thread must have succeeded in
871 // removing the bias bit from the object's header.
872 if (counters != NULL) {
873 cond_inc32(Assembler::zero,
874 ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
875 }
876
877 bind(cas_label);
878
879 return null_check_offset;
880 }
881
882 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
883 Label L, E;
884
885 #ifdef _WIN64
886 // Windows always allocates space for its register args
887 assert(num_args <= 4, "only register arguments supported");
888 subq(rsp, frame::arg_reg_save_area_bytes);
889 #endif
890
891 // Align stack if necessary
892 testl(rsp, 15);
893 jcc(Assembler::zero, L);
894
895 subq(rsp, 8);
896 {
897 call(RuntimeAddress(entry_point));
898 }
899 addq(rsp, 8);
900 jmp(E);
901
902 bind(L);
903 {
904 call(RuntimeAddress(entry_point));
905 }
906
907 bind(E);
908
909 #ifdef _WIN64
910 // restore stack pointer
911 addq(rsp, frame::arg_reg_save_area_bytes);
912 #endif
913
914 }
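// Illustrative note: the stack is always 8-byte aligned here, so only the
// 8-mod-16 case needs fixing: if rsp % 16 == 8, testl(rsp, 15) is nonzero
// and the subq(rsp, 8) makes rsp 16-byte aligned at the call, as the ABI
// requires at call sites; otherwise the call is made directly.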
915
916 void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
917 assert(!src2.is_lval(), "should use cmpptr");
918
919 if (reachable(src2)) {
920 cmpq(src1, as_Address(src2));
921 } else {
922 lea(rscratch1, src2);
923 Assembler::cmpq(src1, Address(rscratch1, 0));
924 }
925 }
926
927 int MacroAssembler::corrected_idivq(Register reg) {
928 // Full implementation of Java ldiv and lrem; checks for special
929 // case as described in JVM spec., p.243 & p.271. The function
930 // returns the (pc) offset of the idivl instruction - may be needed
931 // for implicit exceptions.
932 //
933 // normal case special case
934 //
935 // input : rax: dividend min_long
936 // reg: divisor (may not be eax/edx) -1
937 //
938 // output: rax: quotient (= rax idiv reg) min_long
939 // rdx: remainder (= rax irem reg) 0
940 assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
941 static const int64_t min_long = 0x8000000000000000;
942 Label normal_case, special_case;
943
944 // check for special case
945 cmp64(rax, ExternalAddress((address) &min_long));
946 jcc(Assembler::notEqual, normal_case);
947 xorl(rdx, rdx); // prepare rdx for possible special case (where
948 // remainder = 0)
949 cmpq(reg, -1);
950 jcc(Assembler::equal, special_case);
951
952 // handle normal case
953 bind(normal_case);
954 cdqq();
955 int idivq_offset = offset();
956 idivq(reg);
957
958 // normal and special case exit
959 bind(special_case);
960
961 return idivq_offset;
962 }
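// Worked example (illustrative): idivq raises #DE when the quotient
// overflows, and min_long / -1 would produce 2^63. The special case skips
// the divide and leaves rax = min_long, rdx = 0, matching Java's
// Long.MIN_VALUE / -1 == Long.MIN_VALUE and Long.MIN_VALUE % -1 == 0.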
963
964 void MacroAssembler::decrementq(Register reg, int value) {
965 if (value == min_jint) { subq(reg, value); return; }
966 if (value < 0) { incrementq(reg, -value); return; }
967 if (value == 0) { ; return; }
968 if (value == 1 && UseIncDec) { decq(reg) ; return; }
969 /* else */ { subq(reg, value) ; return; }
970 }
971
972 void MacroAssembler::decrementq(Address dst, int value) {
973 if (value == min_jint) { subq(dst, value); return; }
974 if (value < 0) { incrementq(dst, -value); return; }
975 if (value == 0) { ; return; }
976 if (value == 1 && UseIncDec) { decq(dst) ; return; }
977 /* else */ { subq(dst, value) ; return; }
978 }
979
980 void MacroAssembler::incrementq(Register reg, int value) {
981 if (value == min_jint) { addq(reg, value); return; }
982 if (value < 0) { decrementq(reg, -value); return; }
983 if (value == 0) { ; return; }
984 if (value == 1 && UseIncDec) { incq(reg) ; return; }
985 /* else */ { addq(reg, value) ; return; }
986 }
987
988 void MacroAssembler::incrementq(Address dst, int value) {
989 if (value == min_jint) { addq(dst, value); return; }
990 if (value < 0) { decrementq(dst, -value); return; }
991 if (value == 0) { ; return; }
992 if (value == 1 && UseIncDec) { incq(dst) ; return; }
993 /* else */ { addq(dst, value) ; return; }
994 }
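// Illustrative note: the value == min_jint cases above exist because
// -min_jint overflows a 32-bit int, so the negate-and-delegate path
// (e.g. incrementq(reg, -value)) cannot be used for that one value.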
995
996 // 32bit can do a case table jump in one instruction but we no longer allow the base
997 // to be installed in the Address class
998 void MacroAssembler::jump(ArrayAddress entry) {
999 lea(rscratch1, entry.base());
1000 Address dispatch = entry.index();
1001 assert(dispatch._base == noreg, "must be");
1002 dispatch._base = rscratch1;
1003 jmp(dispatch);
1004 }
1005
1006 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
1007 ShouldNotReachHere(); // 64bit doesn't use two regs
1008 cmpq(x_lo, y_lo);
1009 }
1010
1011 void MacroAssembler::lea(Register dst, AddressLiteral src) {
1012 mov_literal64(dst, (intptr_t)src.target(), src.rspec());
1013 }
1014
1015 void MacroAssembler::lea(Address dst, AddressLiteral adr) {
1016 mov_literal64(rscratch1, (intptr_t)adr.target(), adr.rspec());
1017 movptr(dst, rscratch1);
1018 }
1019
1020 void MacroAssembler::leave() {
1021 // %%% is this really better? Why not on 32bit too?
1022 emit_byte(0xC9); // LEAVE
1023 }
1024
1025 void MacroAssembler::lneg(Register hi, Register lo) {
1026 ShouldNotReachHere(); // 64bit doesn't use two regs
1027 negq(lo);
1028 }
1029
1030 void MacroAssembler::movoop(Register dst, jobject obj) {
1031 mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
1032 }
1033
1034 void MacroAssembler::movoop(Address dst, jobject obj) {
1035 mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate());
1036 movq(dst, rscratch1);
1037 }
1038
1039 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
1040 mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
1041 }
1042
1043 void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
1044 mov_literal64(rscratch1, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
1045 movq(dst, rscratch1);
1046 }
1047
1048 void MacroAssembler::movptr(Register dst, AddressLiteral src) {
1049 if (src.is_lval()) {
1050 mov_literal64(dst, (intptr_t)src.target(), src.rspec());
1051 } else {
1052 if (reachable(src)) {
1053 movq(dst, as_Address(src));
1054 } else {
1055 lea(rscratch1, src);
1056 movq(dst, Address(rscratch1,0));
1057 }
1058 }
1059 }
1060
1061 void MacroAssembler::movptr(ArrayAddress dst, Register src) {
1062 movq(as_Address(dst), src);
1063 }
1064
1065 void MacroAssembler::movptr(Register dst, ArrayAddress src) {
1066 movq(dst, as_Address(src));
1067 }
1068
1069 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
1070 void MacroAssembler::movptr(Address dst, intptr_t src) {
1071 mov64(rscratch1, src);
1072 movq(dst, rscratch1);
1073 }
1074
1075 // These are mostly for initializing NULL
1076 void MacroAssembler::movptr(Address dst, int32_t src) {
1077 movslq(dst, src);
1078 }
1079
1080 void MacroAssembler::movptr(Register dst, int32_t src) {
1081 mov64(dst, (intptr_t)src);
1082 }
1083
1084 void MacroAssembler::pushoop(jobject obj) {
1085 movoop(rscratch1, obj);
1086 push(rscratch1);
1087 }
1088
1089 void MacroAssembler::pushklass(Metadata* obj) {
1090 mov_metadata(rscratch1, obj);
1091 push(rscratch1);
1092 }
1093
1094 void MacroAssembler::pushptr(AddressLiteral src) {
1095 lea(rscratch1, src);
1096 if (src.is_lval()) {
1097 push(rscratch1);
1098 } else {
1099 pushq(Address(rscratch1, 0));
1100 }
1101 }
1102
1103 void MacroAssembler::reset_last_Java_frame(bool clear_fp,
1104 bool clear_pc) {
1105 // we must set sp to zero to clear frame
1106 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
1107 // must clear fp, so that compiled frames are not confused; it is
1108 // possible that we need it only for debugging
1109 if (clear_fp) {
1110 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
1111 }
1112
1113 if (clear_pc) {
1114 movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
1115 }
1116 }
1117
1118 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
1119 Register last_java_fp,
1120 address last_java_pc) {
1121 // determine last_java_sp register
1122 if (!last_java_sp->is_valid()) {
1123 last_java_sp = rsp;
1124 }
1125
1126 // last_java_fp is optional
1127 if (last_java_fp->is_valid()) {
1128 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
1129 last_java_fp);
1130 }
1131
1132 // last_java_pc is optional
1133 if (last_java_pc != NULL) {
1134 Address java_pc(r15_thread,
1135 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
1136 lea(rscratch1, InternalAddress(last_java_pc));
1137 movptr(java_pc, rscratch1);
1138 }
1139
1140 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
1141 }
1142
1143 static void pass_arg0(MacroAssembler* masm, Register arg) {
1144 if (c_rarg0 != arg ) {
1145 masm->mov(c_rarg0, arg);
1146 }
1147 }
1148
1149 static void pass_arg1(MacroAssembler* masm, Register arg) {
1150 if (c_rarg1 != arg ) {
1151 masm->mov(c_rarg1, arg);
1152 }
1153 }
1154
1155 static void pass_arg2(MacroAssembler* masm, Register arg) {
1156 if (c_rarg2 != arg ) {
1157 masm->mov(c_rarg2, arg);
1158 }
1159 }
1160
1161 static void pass_arg3(MacroAssembler* masm, Register arg) {
1162 if (c_rarg3 != arg ) {
1163 masm->mov(c_rarg3, arg);
1164 }
1165 }
1166
1167 void MacroAssembler::stop(const char* msg) {
1168 address rip = pc();
1169 pusha(); // get regs on stack
1170 lea(c_rarg0, ExternalAddress((address) msg));
1171 lea(c_rarg1, InternalAddress(rip));
1172 movq(c_rarg2, rsp); // pass pointer to regs array
1173 andq(rsp, -16); // align stack as required by ABI
1174 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
1175 hlt();
1176 }
1177
1178 void MacroAssembler::warn(const char* msg) {
1179 push(rbp);
1180 movq(rbp, rsp);
1181 andq(rsp, -16); // align stack as required by push_CPU_state and call
1182 push_CPU_state(); // keeps alignment at 16 bytes
1183 lea(c_rarg0, ExternalAddress((address) msg));
1184 call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
1185 pop_CPU_state();
1186 mov(rsp, rbp);
1187 pop(rbp);
1188 }
1189
1190 void MacroAssembler::print_state() {
1191 address rip = pc();
1192 pusha(); // get regs on stack
1193 push(rbp);
1194 movq(rbp, rsp);
1195 andq(rsp, -16); // align stack as required by push_CPU_state and call
1196 push_CPU_state(); // keeps alignment at 16 bytes
1197
1198 lea(c_rarg0, InternalAddress(rip));
1199 lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
1200 call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);
1201
1202 pop_CPU_state();
1203 mov(rsp, rbp);
1204 pop(rbp);
1205 popa();
1206 }
1207
1208 #ifndef PRODUCT
1209 extern "C" void findpc(intptr_t x);
1210 #endif
1211
1212 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
1213 // In order to get locks to work, we need to fake an in_VM state
1214 if (ShowMessageBoxOnError) {
1215 JavaThread* thread = JavaThread::current();
1216 JavaThreadState saved_state = thread->thread_state();
1217 thread->set_thread_state(_thread_in_vm);
1218 #ifndef PRODUCT
1219 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
1220 ttyLocker ttyl;
1221 BytecodeCounter::print();
1222 }
1223 #endif
1224 // To see where a verify_oop failed, get $ebx+40/X for this frame.
1225 // XXX correct this offset for amd64
1226 // This is the value of eip which points to where verify_oop will return.
1227 if (os::message_box(msg, "Execution stopped, print registers?")) {
1228 print_state64(pc, regs);
1229 BREAKPOINT;
1230 assert(false, "start up GDB");
1231 }
1232 ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
1233 } else {
1234 ttyLocker ttyl;
1235 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
1236 msg);
1237 assert(false, err_msg("DEBUG MESSAGE: %s", msg));
1238 }
1239 }
1240
1241 void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
1242 ttyLocker ttyl;
1243 FlagSetting fs(Debugging, true);
1244 tty->print_cr("rip = 0x%016lx", pc);
1245 #ifndef PRODUCT
1246 tty->cr();
1247 findpc(pc);
1248 tty->cr();
1249 #endif
1250 #define PRINT_REG(rax, value) \
1251 { tty->print("%s = ", #rax); os::print_location(tty, value); }
1252 PRINT_REG(rax, regs[15]);
1253 PRINT_REG(rbx, regs[12]);
1254 PRINT_REG(rcx, regs[14]);
1255 PRINT_REG(rdx, regs[13]);
1256 PRINT_REG(rdi, regs[8]);
1257 PRINT_REG(rsi, regs[9]);
1258 PRINT_REG(rbp, regs[10]);
1259 PRINT_REG(rsp, regs[11]);
1260 PRINT_REG(r8 , regs[7]);
1261 PRINT_REG(r9 , regs[6]);
1262 PRINT_REG(r10, regs[5]);
1263 PRINT_REG(r11, regs[4]);
1264 PRINT_REG(r12, regs[3]);
1265 PRINT_REG(r13, regs[2]);
1266 PRINT_REG(r14, regs[1]);
1267 PRINT_REG(r15, regs[0]);
1268 #undef PRINT_REG
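// Illustrative note (as the indices above imply): regs[] is the pusha()
// save area, with rax stored at the highest slot (regs[15]) down to r15 at
// the lowest (regs[0]); regs[11] holds the saved rsp, which is used below
// as the base for the stack dump.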
1269 // Print some words near the top of the stack.
1270 int64_t* rsp = (int64_t*) regs[11];
1271 int64_t* dump_sp = rsp;
1272 for (int col1 = 0; col1 < 8; col1++) {
1273 tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
1274 os::print_location(tty, *dump_sp++);
1275 }
1276 for (int row = 0; row < 25; row++) {
1277 tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
1278 for (int col = 0; col < 4; col++) {
1279 tty->print(" 0x%016lx", *dump_sp++);
1280 }
1281 tty->cr();
1282 }
1283 // Print some instructions around pc:
1284 Disassembler::decode((address)pc-64, (address)pc);
1285 tty->print_cr("--------");
1286 Disassembler::decode((address)pc, (address)pc+32);
1287 }
1288
1289 #endif // _LP64
1290
1291 // Now versions that are common to 32/64 bit
1292
1293 void MacroAssembler::addptr(Register dst, int32_t imm32) {
1294 LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
1295 }
1296
1297 void MacroAssembler::addptr(Register dst, Register src) {
1298 LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
1299 }
1300
1301 void MacroAssembler::addptr(Address dst, Register src) {
1302 LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
1303 }
1304
1305 void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src) {
1306 if (reachable(src)) {
1307 Assembler::addsd(dst, as_Address(src));
1308 } else {
1309 lea(rscratch1, src);
1310 Assembler::addsd(dst, Address(rscratch1, 0));
1311 }
1312 }
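// Illustrative note on the pattern above (and repeated below): on x86_64 an
// AddressLiteral is "reachable" when it fits in a RIP-relative disp32, i.e.
// within +/-2GB of the code; otherwise the address is materialized into
// rscratch1 and used through an indirect operand. On 32-bit every address
// is reachable.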
1313
1314 void MacroAssembler::addss(XMMRegister dst, AddressLiteral src) {
1315 if (reachable(src)) {
1316 addss(dst, as_Address(src));
1317 } else {
1318 lea(rscratch1, src);
1319 addss(dst, Address(rscratch1, 0));
1320 }
1321 }
1322
1323 void MacroAssembler::align(int modulus) {
1324 if (offset() % modulus != 0) {
1325 nop(modulus - (offset() % modulus));
1326 }
1327 }
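// Worked example (illustrative): align(16) with offset() == 10 emits
// nop(6), padding the code position up to the next multiple of 16.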
1328
1329 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
1330 // Used in sign-masking with aligned address.
1331 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
1332 if (reachable(src)) {
1333 Assembler::andpd(dst, as_Address(src));
1334 } else {
1335 lea(rscratch1, src);
1336 Assembler::andpd(dst, Address(rscratch1, 0));
1337 }
1338 }
1339
1340 void MacroAssembler::andps(XMMRegister dst, AddressLiteral src) {
1341 // Used in sign-masking with aligned address.
1342 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
1343 if (reachable(src)) {
1344 Assembler::andps(dst, as_Address(src));
1345 } else {
1346 lea(rscratch1, src);
1347 Assembler::andps(dst, Address(rscratch1, 0));
1348 }
1349 }
1350
1351 void MacroAssembler::andptr(Register dst, int32_t imm32) {
1352 LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
1353 }
1354
1355 void MacroAssembler::atomic_incl(AddressLiteral counter_addr) {
1356 pushf();
1357 if (os::is_MP())
1358 lock();
1359 incrementl(counter_addr);
1360 popf();
1361 }
1362
1363 // Writes to stack successive pages until offset reached to check for
1364 // stack overflow + shadow pages. This clobbers tmp.
1365 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
1366 movptr(tmp, rsp);
1367 // Bang stack for total size given plus shadow page size.
1368 // Bang one page at a time because large size can bang beyond yellow and
1369 // red zones.
1370 Label loop;
1371 bind(loop);
1372 movl(Address(tmp, (-os::vm_page_size())), size );
1373 subptr(tmp, os::vm_page_size());
1374 subl(size, os::vm_page_size());
1375 jcc(Assembler::greater, loop);
1376
1377 // Bang down shadow pages too.
1378 // The -1 because we already subtracted 1 page.
1379 for (int i = 0; i< StackShadowPages-1; i++) {
1380 // this could be any sized move, but since it can serve as a debugging
1381 // crumb the bigger the better.
1382 movptr(Address(tmp, (-i*os::vm_page_size())), size );
1383 }
1384 }
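// Illustrative trace (assuming a 4K page): for size = 2 pages the loop
// stores at rsp-4K and rsp-8K, decrementing tmp and size by a page each
// time; the second loop then re-touches the last banged page and the
// shadow pages below it.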
1385
1386 void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
1387 assert(UseBiasedLocking, "why call this otherwise?");
1388
1389 // Check for biased locking unlock case, which is a no-op
1390 // Note: we do not have to check the thread ID for two reasons.
1391 // First, the interpreter checks for IllegalMonitorStateException at
1392 // a higher level. Second, if the bias was revoked while we held the
1393 // lock, the object could not be rebiased toward another thread, so
1394 // the bias bit would be clear.
1395 movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1396 andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
1397 cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
1398 jcc(Assembler::equal, done);
1399 }
1400
1401 void MacroAssembler::c2bool(Register x) {
1402 // implements x == 0 ? 0 : 1
1403 // note: must only look at least-significant byte of x
1404 // since C-style booleans are stored in one byte
1405 // only! (was bug)
1406 andl(x, 0xFF);
1407 setb(Assembler::notZero, x);
1408 }
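// Worked example (illustrative): for x = 0x0100 the low byte is 0, so
// andl leaves 0 and setb stores 0; for x = 0x0101 the result is 1.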
1409
1410 // Wouldn't need if AddressLiteral version had new name
1411 void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
1412 Assembler::call(L, rtype);
1413 }
1414
1415 void MacroAssembler::call(Register entry) {
1416 Assembler::call(entry);
1417 }
1418
1419 void MacroAssembler::call(AddressLiteral entry) {
1420 if (reachable(entry)) {
1421 Assembler::call_literal(entry.target(), entry.rspec());
1422 } else {
1423 lea(rscratch1, entry);
1424 Assembler::call(rscratch1);
1425 }
1426 }
1427
1428 void MacroAssembler::ic_call(address entry) {
1429 RelocationHolder rh = virtual_call_Relocation::spec(pc());
1430 movptr(rax, (intptr_t)Universe::non_oop_word());
1431 call(AddressLiteral(entry, rh));
1432 }
1433
1434 // Implementation of call_VM versions
1435
1436 void MacroAssembler::call_VM(Register oop_result,
1437 address entry_point,
1438 bool check_exceptions) {
1439 Label C, E;
1440 call(C, relocInfo::none);
1441 jmp(E);
1442
1443 bind(C);
1444 call_VM_helper(oop_result, entry_point, 0, check_exceptions);
1445 ret(0);
1446
1447 bind(E);
1448 }
1449
1450 void MacroAssembler::call_VM(Register oop_result,
1451 address entry_point,
1452 Register arg_1,
1453 bool check_exceptions) {
1454 Label C, E;
1455 call(C, relocInfo::none);
1456 jmp(E);
1457
1458 bind(C);
1459 pass_arg1(this, arg_1);
1460 call_VM_helper(oop_result, entry_point, 1, check_exceptions);
1461 ret(0);
1462
1463 bind(E);
1464 }
1465
1466 void MacroAssembler::call_VM(Register oop_result,
1467 address entry_point,
1468 Register arg_1,
1469 Register arg_2,
1470 bool check_exceptions) {
1471 Label C, E;
1472 call(C, relocInfo::none);
1473 jmp(E);
1474
1475 bind(C);
1476
1477 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
1478
1479 pass_arg2(this, arg_2);
1480 pass_arg1(this, arg_1);
1481 call_VM_helper(oop_result, entry_point, 2, check_exceptions);
1482 ret(0);
1483
1484 bind(E);
1485 }
1486
1487 void MacroAssembler::call_VM(Register oop_result,
1488 address entry_point,
1489 Register arg_1,
1490 Register arg_2,
1491 Register arg_3,
1492 bool check_exceptions) {
1493 Label C, E;
1494 call(C, relocInfo::none);
1495 jmp(E);
1496
1497 bind(C);
1498
1499 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
1500 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
1501 pass_arg3(this, arg_3);
1502
1503 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
1504 pass_arg2(this, arg_2);
1505
1506 pass_arg1(this, arg_1);
1507 call_VM_helper(oop_result, entry_point, 3, check_exceptions);
1508 ret(0);
1509
1510 bind(E);
1511 }
1512
1513 void MacroAssembler::call_VM(Register oop_result,
1514 Register last_java_sp,
1515 address entry_point,
1516 int number_of_arguments,
1517 bool check_exceptions) {
1518 Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
1519 call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
1520 }
1521
1522 void MacroAssembler::call_VM(Register oop_result,
1523 Register last_java_sp,
1524 address entry_point,
1525 Register arg_1,
1526 bool check_exceptions) {
1527 pass_arg1(this, arg_1);
1528 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
1529 }
1530
1531 void MacroAssembler::call_VM(Register oop_result,
1532 Register last_java_sp,
1533 address entry_point,
1534 Register arg_1,
1535 Register arg_2,
1536 bool check_exceptions) {
1537
1538 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
1539 pass_arg2(this, arg_2);
1540 pass_arg1(this, arg_1);
1541 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
1542 }
1543
1544 void MacroAssembler::call_VM(Register oop_result,
1545 Register last_java_sp,
1546 address entry_point,
1547 Register arg_1,
1548 Register arg_2,
1549 Register arg_3,
1550 bool check_exceptions) {
1551 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
1552 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
1553 pass_arg3(this, arg_3);
1554 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
1555 pass_arg2(this, arg_2);
1556 pass_arg1(this, arg_1);
1557 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
1558 }
1559
1560 void MacroAssembler::super_call_VM(Register oop_result,
1561 Register last_java_sp,
1562 address entry_point,
1563 int number_of_arguments,
1564 bool check_exceptions) {
1565 Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
1566 MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
1567 }
1568
1569 void MacroAssembler::super_call_VM(Register oop_result,
1570 Register last_java_sp,
1571 address entry_point,
1572 Register arg_1,
1573 bool check_exceptions) {
1574 pass_arg1(this, arg_1);
1575 super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
1576 }
1577
1578 void MacroAssembler::super_call_VM(Register oop_result,
1579 Register last_java_sp,
1580 address entry_point,
1581 Register arg_1,
1582 Register arg_2,
1583 bool check_exceptions) {
1584
1585 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
1586 pass_arg2(this, arg_2);
1587 pass_arg1(this, arg_1);
1588 super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
1589 }
1590
1591 void MacroAssembler::super_call_VM(Register oop_result,
1592 Register last_java_sp,
1593 address entry_point,
1594 Register arg_1,
1595 Register arg_2,
1596 Register arg_3,
1597 bool check_exceptions) {
1598 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
1599 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
1600 pass_arg3(this, arg_3);
1601 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
1602 pass_arg2(this, arg_2);
1603 pass_arg1(this, arg_1);
1604 super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
1605 }
1606
1607 void MacroAssembler::call_VM_base(Register oop_result,
1608 Register java_thread,
1609 Register last_java_sp,
1610 address entry_point,
1611 int number_of_arguments,
1612 bool check_exceptions) {
1613 // determine java_thread register
1614 if (!java_thread->is_valid()) {
1615 #ifdef _LP64
1616 java_thread = r15_thread;
1617 #else
1618 java_thread = rdi;
1619 get_thread(java_thread);
1620 #endif // LP64
1621 }
1622 // determine last_java_sp register
1623 if (!last_java_sp->is_valid()) {
1624 last_java_sp = rsp;
1625 }
1626 // debugging support
1627 assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
1628 LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
1629 #ifdef ASSERT
1630 // TraceBytecodes does not use r12 but saves it over the call, so don't verify
1631 // r12 is the heapbase.
1632 LP64_ONLY(if ((UseCompressedOops || UseCompressedKlassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
1633 #endif // ASSERT
1634
1635 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
1636 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
1637
1638 // push java thread (becomes first argument of C function)
1639
1640 NOT_LP64(push(java_thread); number_of_arguments++);
1641 LP64_ONLY(mov(c_rarg0, r15_thread));
1642
1643 // set last Java frame before call
1644 assert(last_java_sp != rbp, "can't use ebp/rbp");
1645
1646 // Only interpreter should have to set fp
1647 set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);
1648
1649 // do the call, remove parameters
1650 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
1651
1652 // restore the thread (cannot use the pushed argument since arguments
1653 // may be overwritten by C code generated by an optimizing compiler);
1654 // however, we can use the register value directly if it is callee saved.
1655 if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
1656 // rdi & rsi (also r15) are callee saved -> nothing to do
1657 #ifdef ASSERT
1658 guarantee(java_thread != rax, "change this code");
1659 push(rax);
1660 { Label L;
1661 get_thread(rax);
1662 cmpptr(java_thread, rax);
1663 jcc(Assembler::equal, L);
1664 STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
1665 bind(L);
1666 }
1667 pop(rax);
1668 #endif
1669 } else {
1670 get_thread(java_thread);
1671 }
1672 // reset last Java frame
1673 // Only interpreter should have to clear fp
1674 reset_last_Java_frame(java_thread, true, false);
1675
1676 #ifndef CC_INTERP
1677 // C++ interp handles this in the interpreter
1678 check_and_handle_popframe(java_thread);
1679 check_and_handle_earlyret(java_thread);
1680 #endif /* CC_INTERP */
1681
1682 if (check_exceptions) {
1683 // check for pending exceptions (java_thread is set upon return)
1684 cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
1685 #ifndef _LP64
1686 jump_cc(Assembler::notEqual,
1687 RuntimeAddress(StubRoutines::forward_exception_entry()));
1688 #else
1689 // This used to jump conditionally to forward_exception; however, it is
1690 // possible after relocation that the branch will not reach. So we must jump
1691 // around an unconditional jump that can always reach.
1692
1693 Label ok;
1694 jcc(Assembler::equal, ok);
1695 jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1696 bind(ok);
1697 #endif // LP64
1698 }
1699
1700 // get oop result if there is one and reset the value in the thread
1701 if (oop_result->is_valid()) {
1702 get_vm_result(oop_result, java_thread);
1703 }
1704 }
1705
1706 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
1707
1708 // Calculate the value for last_Java_sp
1709 // somewhat subtle. call_VM does an intermediate call
1710 // which places a return address on the stack just under the
1711 // stack pointer as if the user had finished with it. This allows
1712 // us to retrieve last_Java_pc from last_Java_sp[-1].
1713 // On 32bit we then have to push additional args on the stack to accomplish
1714 // the actual requested call. On 64bit call_VM only can use register args
1715 // so the only extra space is the return address that call_VM created.
1716 // This hopefully explains the calculations here.
1717
1718 #ifdef _LP64
1719 // We've pushed one address, correct last_Java_sp
1720 lea(rax, Address(rsp, wordSize));
1721 #else
1722 lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
1723 #endif // LP64
1724
1725 call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
1726
1727 }
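The last_Java_sp arithmetic above is easiest to see with concrete numbers. A minimal standalone C++ sketch of the two cases (values are illustrative, not taken from a live VM):

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t wordSize = sizeof(void*);
  intptr_t rsp = 0x7000;  // stack pointer after the intermediate call

  // 64bit: only the return address pushed by call_VM sits below the
  // original sp, so last_Java_sp is one word above rsp.
  intptr_t last_java_sp_64 = rsp + wordSize;

  // 32bit: the arguments were pushed as well, so skip over them too.
  int number_of_arguments = 2;
  intptr_t last_java_sp_32 = rsp + (1 + number_of_arguments) * wordSize;

  // The two differ by exactly the pushed argument words.
  assert(last_java_sp_32 - last_java_sp_64 == number_of_arguments * wordSize);
  return 0;
}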
1728
1729 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
1730 call_VM_leaf_base(entry_point, number_of_arguments);
1731 }
1732
1733 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
1734 pass_arg0(this, arg_0);
1735 call_VM_leaf(entry_point, 1);
1736 }
1737
1738 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1739
1740 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
1741 pass_arg1(this, arg_1);
1742 pass_arg0(this, arg_0);
1743 call_VM_leaf(entry_point, 2);
1744 }
1745
1746 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1747 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
1748 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
1749 pass_arg2(this, arg_2);
1750 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
1751 pass_arg1(this, arg_1);
1752 pass_arg0(this, arg_0);
1753 call_VM_leaf(entry_point, 3);
1754 }
1755
1756 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
1757 pass_arg0(this, arg_0);
1758 MacroAssembler::call_VM_leaf_base(entry_point, 1);
1759 }
1760
1761 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1762
1763 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
1764 pass_arg1(this, arg_1);
1765 pass_arg0(this, arg_0);
1766 MacroAssembler::call_VM_leaf_base(entry_point, 2);
1767 }
1768
1769 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1770 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
1771 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
1772 pass_arg2(this, arg_2);
1773 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
1774 pass_arg1(this, arg_1);
1775 pass_arg0(this, arg_0);
1776 MacroAssembler::call_VM_leaf_base(entry_point, 3);
1777 }
1778
1779 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
1780 LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg"));
1781 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
1782 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
1783 pass_arg3(this, arg_3);
1784 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
1785 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
1786 pass_arg2(this, arg_2);
1787 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
1788 pass_arg1(this, arg_1);
1789 pass_arg0(this, arg_0);
1790 MacroAssembler::call_VM_leaf_base(entry_point, 4);
1791 }
1792
1793 void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
1794 movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
1795 movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
1796 verify_oop(oop_result, "broken oop in call_VM_base");
1797 }
1798
1799 void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
1800 movptr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
1801 movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD);
1802 }
1803
1804 void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
1805 }
1806
1807 void MacroAssembler::check_and_handle_popframe(Register java_thread) {
1808 }
1809
1810 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) {
1811 if (reachable(src1)) {
1812 cmpl(as_Address(src1), imm);
1813 } else {
1814 lea(rscratch1, src1);
1815 cmpl(Address(rscratch1, 0), imm);
1816 }
1817 }
1818
1819 void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
1820 assert(!src2.is_lval(), "use cmpptr");
1821 if (reachable(src2)) {
1822 cmpl(src1, as_Address(src2));
1823 } else {
1824 lea(rscratch1, src2);
1825 cmpl(src1, Address(rscratch1, 0));
1826 }
1827 }
1828
1829 void MacroAssembler::cmp32(Register src1, int32_t imm) {
1830 Assembler::cmpl(src1, imm);
1831 }
1832
1833 void MacroAssembler::cmp32(Register src1, Address src2) {
1834 Assembler::cmpl(src1, src2);
1835 }
1836
1837 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
1838 ucomisd(opr1, opr2);
1839
1840 Label L;
1841 if (unordered_is_less) {
1842 movl(dst, -1);
1843 jcc(Assembler::parity, L);
1844 jcc(Assembler::below , L);
1845 movl(dst, 0);
1846 jcc(Assembler::equal , L);
1847 increment(dst);
1848 } else { // unordered is greater
1849 movl(dst, 1);
1850 jcc(Assembler::parity, L);
1851 jcc(Assembler::above , L);
1852 movl(dst, 0);
1853 jcc(Assembler::equal , L);
1854 decrementl(dst);
1855 }
1856 bind(L);
1857 }
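The -1/0/+1 mapping above is a three-way floating-point compare in which an unordered result (a NaN operand, signalled by the parity flag) collapses to one end. A hedged C++ restatement of the same semantics (the function name is illustrative):

#include <cassert>
#include <cmath>

// Three-way double compare matching cmpsd2int: NaN operands compare as
// "less" or "greater" depending on the unordered_is_less flag.
int cmp2int(double a, double b, bool unordered_is_less) {
  if (std::isnan(a) || std::isnan(b)) return unordered_is_less ? -1 : 1;
  if (a < b)  return -1;
  if (a == b) return  0;
  return 1;
}

int main() {
  assert(cmp2int(1.0, 2.0, true)  == -1);
  assert(cmp2int(2.0, 2.0, true)  ==  0);
  assert(cmp2int(NAN, 2.0, true)  == -1);
  assert(cmp2int(NAN, 2.0, false) ==  1);
  return 0;
}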
1858
1859 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
1860 ucomiss(opr1, opr2);
1861
1862 Label L;
1863 if (unordered_is_less) {
1864 movl(dst, -1);
1865 jcc(Assembler::parity, L);
1866 jcc(Assembler::below , L);
1867 movl(dst, 0);
1868 jcc(Assembler::equal , L);
1869 increment(dst);
1870 } else { // unordered is greater
1871 movl(dst, 1);
1872 jcc(Assembler::parity, L);
1873 jcc(Assembler::above , L);
1874 movl(dst, 0);
1875 jcc(Assembler::equal , L);
1876 decrementl(dst);
1877 }
1878 bind(L);
1879 }
1880
1881
1882 void MacroAssembler::cmp8(AddressLiteral src1, int imm) {
1883 if (reachable(src1)) {
1884 cmpb(as_Address(src1), imm);
1885 } else {
1886 lea(rscratch1, src1);
1887 cmpb(Address(rscratch1, 0), imm);
1888 }
1889 }
1890
1891 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
1892 #ifdef _LP64
1893 if (src2.is_lval()) {
1894 movptr(rscratch1, src2);
1895 Assembler::cmpq(src1, rscratch1);
1896 } else if (reachable(src2)) {
1897 cmpq(src1, as_Address(src2));
1898 } else {
1899 lea(rscratch1, src2);
1900 Assembler::cmpq(src1, Address(rscratch1, 0));
1901 }
1902 #else
1903 if (src2.is_lval()) {
1904 cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
1905 } else {
1906 cmpl(src1, as_Address(src2));
1907 }
1908 #endif // _LP64
1909 }
1910
1911 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
1912 assert(src2.is_lval(), "not a mem-mem compare");
1913 #ifdef _LP64
1914 // moves src2's literal address
1915 movptr(rscratch1, src2);
1916 Assembler::cmpq(src1, rscratch1);
1917 #else
1918 cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
1919 #endif // _LP64
1920 }
1921
1922 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
1923 if (reachable(adr)) {
1924 if (os::is_MP())
1925 lock();
1926 cmpxchgptr(reg, as_Address(adr));
1927 } else {
1928 lea(rscratch1, adr);
1929 if (os::is_MP())
1930 lock();
1931 cmpxchgptr(reg, Address(rscratch1, 0));
1932 }
1933 }
1934
1935 void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
1936 LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr));
1937 }
1938
1939 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
1940 if (reachable(src)) {
1941 Assembler::comisd(dst, as_Address(src));
1942 } else {
1943 lea(rscratch1, src);
1944 Assembler::comisd(dst, Address(rscratch1, 0));
1945 }
1946 }
1947
1948 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
1949 if (reachable(src)) {
1950 Assembler::comiss(dst, as_Address(src));
1951 } else {
1952 lea(rscratch1, src);
1953 Assembler::comiss(dst, Address(rscratch1, 0));
1954 }
1955 }
1956
1957
1958 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
1959 Condition negated_cond = negate_condition(cond);
1960 Label L;
1961 jcc(negated_cond, L);
1962 atomic_incl(counter_addr);
1963 bind(L);
1964 }
1965
1966 int MacroAssembler::corrected_idivl(Register reg) {
1967 // Full implementation of Java idiv and irem; checks for
1968 // special case as described in JVM spec., p.243 & p.271.
1969 // The function returns the (pc) offset of the idivl
1970 // instruction - may be needed for implicit exceptions.
1971 //
1972 // normal case special case
1973 //
1974 // input : rax,: dividend min_int
1975 // reg: divisor (may not be rax,/rdx) -1
1976 //
1977 // output: rax,: quotient (= rax, idiv reg) min_int
1978 // rdx: remainder (= rax, irem reg) 0
1979 assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register");
1980 const int min_int = 0x80000000;
1981 Label normal_case, special_case;
1982
1983 // check for special case
1984 cmpl(rax, min_int);
1985 jcc(Assembler::notEqual, normal_case);
1986 xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
1987 cmpl(reg, -1);
1988 jcc(Assembler::equal, special_case);
1989
1990 // handle normal case
1991 bind(normal_case);
1992 cdql();
1993 int idivl_offset = offset();
1994 idivl(reg);
1995
1996 // normal and special case exit
1997 bind(special_case);
1998
1999 return idivl_offset;
2000 }
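The special case exists because the hardware idiv instruction faults on min_int / -1 (the true quotient, 2^31, is unrepresentable), while the JVM spec requires the quotient min_int with remainder 0. A small C++ model of the required semantics:

#include <cassert>
#include <climits>

// Java idiv/irem semantics for the overflow case handled above. Plain
// "INT_MIN / -1" is undefined behavior in C++ (and #DE on x86), which is
// exactly why the generated code checks for it before executing idivl.
int java_idiv(int dividend, int divisor) {
  if (dividend == INT_MIN && divisor == -1) return INT_MIN;
  return dividend / divisor;
}
int java_irem(int dividend, int divisor) {
  if (dividend == INT_MIN && divisor == -1) return 0;
  return dividend % divisor;
}

int main() {
  assert(java_idiv(INT_MIN, -1) == INT_MIN && java_irem(INT_MIN, -1) == 0);
  assert(java_idiv(7, 2) == 3 && java_irem(7, 2) == 1);
  return 0;
}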
2001
2002
2003
2004 void MacroAssembler::decrementl(Register reg, int value) {
2005 if (value == min_jint) {subl(reg, value) ; return; }
2006 if (value < 0) { incrementl(reg, -value); return; }
2007 if (value == 0) { ; return; }
2008 if (value == 1 && UseIncDec) { decl(reg) ; return; }
2009 /* else */ { subl(reg, value) ; return; }
2010 }
2011
2012 void MacroAssembler::decrementl(Address dst, int value) {
2013 if (value == min_jint) {subl(dst, value) ; return; }
2014 if (value < 0) { incrementl(dst, -value); return; }
2015 if (value == 0) { ; return; }
2016 if (value == 1 && UseIncDec) { decl(dst) ; return; }
2017 /* else */ { subl(dst, value) ; return; }
2018 }
2019
2020 void MacroAssembler::division_with_shift (Register reg, int shift_value) {
2021 assert (shift_value > 0, "illegal shift value");
2022 Label _is_positive;
2023 testl (reg, reg);
2024 jcc (Assembler::positive, _is_positive);
2025 int offset = (1 << shift_value) - 1 ;
2026
2027 if (offset == 1) {
2028 incrementl(reg);
2029 } else {
2030 addl(reg, offset);
2031 }
2032
2033 bind (_is_positive);
2034 sarl(reg, shift_value);
2035 }
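The bias added on the negative path makes the arithmetic shift round toward zero (Java semantics) instead of toward negative infinity. A sketch, assuming the compiler's >> on a negative int is an arithmetic shift (true on mainstream compilers):

#include <cassert>

// Signed division by 2^shift with round-toward-zero, mirroring
// division_with_shift: add (2^shift - 1) to negative inputs before shifting.
int div_pow2(int x, int shift) {
  if (x < 0) x += (1 << shift) - 1;  // bias so the shift truncates toward 0
  return x >> shift;                 // arithmetic shift (sarl)
}

int main() {
  assert(div_pow2(-7, 1) == -3);     // a plain -7 >> 1 would yield -4
  assert(div_pow2( 7, 1) ==  3);
  assert(div_pow2(-8, 2) == -2);
  return 0;
}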
2036
2037 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src) {
2038 if (reachable(src)) {
2039 Assembler::divsd(dst, as_Address(src));
2040 } else {
2041 lea(rscratch1, src);
2042 Assembler::divsd(dst, Address(rscratch1, 0));
2043 }
2044 }
2045
2046 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src) {
2047 if (reachable(src)) {
2048 Assembler::divss(dst, as_Address(src));
2049 } else {
2050 lea(rscratch1, src);
2051 Assembler::divss(dst, Address(rscratch1, 0));
2052 }
2053 }
2054
2055 // !defined(COMPILER2) is because of stupid core builds
2056 #if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
2057 void MacroAssembler::empty_FPU_stack() {
2058 if (VM_Version::supports_mmx()) {
2059 emms();
2060 } else {
2061 for (int i = 8; i-- > 0; ) ffree(i);
2062 }
2063 }
2064 #endif // !LP64 || C1 || !C2
2065
2066
2067 // Defines obj, preserves var_size_in_bytes
2068 void MacroAssembler::eden_allocate(Register obj,
2069 Register var_size_in_bytes,
2070 int con_size_in_bytes,
2071 Register t1,
2072 Label& slow_case) {
2073 assert(obj == rax, "obj must be in rax, for cmpxchg");
2074 assert_different_registers(obj, var_size_in_bytes, t1);
2075 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
2076 jmp(slow_case);
2077 } else {
2078 Register end = t1;
2079 Label retry;
2080 bind(retry);
2081 ExternalAddress heap_top((address) Universe::heap()->top_addr());
2082 movptr(obj, heap_top);
2083 if (var_size_in_bytes == noreg) {
2084 lea(end, Address(obj, con_size_in_bytes));
2085 } else {
2086 lea(end, Address(obj, var_size_in_bytes, Address::times_1));
2087 }
2088 // if end < obj then we wrapped around => object too long => slow case
2089 cmpptr(end, obj);
2090 jcc(Assembler::below, slow_case);
2091 cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
2092 jcc(Assembler::above, slow_case);
2093 // Compare obj with the top addr, and if still equal, store the new top addr in
2094 // end at the address of the top addr pointer. Sets ZF if was equal, and clears
2095 // it otherwise. Use lock prefix for atomicity on MPs.
2096 locked_cmpxchgptr(end, heap_top);
2097 jcc(Assembler::notEqual, retry);
2098 }
2099 }
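The retry loop above is lock-free bump-pointer allocation: read the shared top, compute the new end, then cmpxchg the top and retry if another thread won the race. A hedged C++ sketch with std::atomic (heap_top/heap_end are stand-ins for the addresses returned by Universe::heap()'s top_addr()/end_addr(), not the VM's real state):

#include <atomic>
#include <cstddef>
#include <cstdint>

std::atomic<uintptr_t> heap_top;  // stand-in for *Universe::heap()->top_addr()
uintptr_t heap_end;               // stand-in for *Universe::heap()->end_addr()

void* eden_allocate(size_t size_in_bytes) {
  for (;;) {
    uintptr_t obj = heap_top.load();
    uintptr_t end = obj + size_in_bytes;
    if (end < obj || end > heap_end) return nullptr;  // wrap or full: slow case
    // The locked cmpxchgptr: install end only if top is still obj.
    if (heap_top.compare_exchange_weak(obj, end)) return (void*)obj;
    // Lost the race; loop with the freshly observed top.
  }
}

int main() {
  static unsigned char arena[1024];
  heap_top = (uintptr_t)arena;
  heap_end = (uintptr_t)arena + sizeof(arena);
  return eden_allocate(64) != nullptr ? 0 : 1;
}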
2100
2101 void MacroAssembler::enter() {
2102 push(rbp);
2103 mov(rbp, rsp);
2104 }
2105
2106 // A 5 byte nop that is safe for patching (see patch_verified_entry)
2107 void MacroAssembler::fat_nop() {
2108 if (UseAddressNop) {
2109 addr_nop_5();
2110 } else {
2111 emit_byte(0x26); // es:
2112 emit_byte(0x2e); // cs:
2113 emit_byte(0x64); // fs:
2114 emit_byte(0x65); // gs:
2115 emit_byte(0x90);
2116 }
2117 }
2118
2119 void MacroAssembler::fcmp(Register tmp) {
2120 fcmp(tmp, 1, true, true);
2121 }
2122
2123 void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
2124 assert(!pop_right || pop_left, "usage error");
2125 if (VM_Version::supports_cmov()) {
2126 assert(tmp == noreg, "unneeded temp");
2127 if (pop_left) {
2128 fucomip(index);
2129 } else {
2130 fucomi(index);
2131 }
2132 if (pop_right) {
2133 fpop();
2134 }
2135 } else {
2136 assert(tmp != noreg, "need temp");
2137 if (pop_left) {
2138 if (pop_right) {
2139 fcompp();
2140 } else {
2141 fcomp(index);
2142 }
2143 } else {
2144 fcom(index);
2145 }
2146 // convert FPU condition into eflags condition via rax,
2147 save_rax(tmp);
2148 fwait(); fnstsw_ax();
2149 sahf();
2150 restore_rax(tmp);
2151 }
2152 // condition codes set as follows:
2153 //
2154 // CF (corresponds to C0) if x < y
2155 // PF (corresponds to C2) if unordered
2156 // ZF (corresponds to C3) if x = y
2157 }
2158
2159 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) {
2160 fcmp2int(dst, unordered_is_less, 1, true, true);
2161 }
2162
2163 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
2164 fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right);
2165 Label L;
2166 if (unordered_is_less) {
2167 movl(dst, -1);
2168 jcc(Assembler::parity, L);
2169 jcc(Assembler::below , L);
2170 movl(dst, 0);
2171 jcc(Assembler::equal , L);
2172 increment(dst);
2173 } else { // unordered is greater
2174 movl(dst, 1);
2175 jcc(Assembler::parity, L);
2176 jcc(Assembler::above , L);
2177 movl(dst, 0);
2178 jcc(Assembler::equal , L);
2179 decrementl(dst);
2180 }
2181 bind(L);
2182 }
2183
2184 void MacroAssembler::fld_d(AddressLiteral src) {
2185 fld_d(as_Address(src));
2186 }
2187
2188 void MacroAssembler::fld_s(AddressLiteral src) {
2189 fld_s(as_Address(src));
2190 }
2191
2192 void MacroAssembler::fld_x(AddressLiteral src) {
2193 Assembler::fld_x(as_Address(src));
2194 }
2195
2196 void MacroAssembler::fldcw(AddressLiteral src) {
2197 Assembler::fldcw(as_Address(src));
2198 }
2199
2200 void MacroAssembler::pow_exp_core_encoding() {
2201 // kills rax, rcx, rdx
2202 subptr(rsp,sizeof(jdouble));
2203 // computes 2^X. Stack: X ...
2204 // f2xm1 computes 2^X-1 but only operates on -1<=X<=1. Get int(X) and
2205 // keep it on the thread's stack to compute 2^int(X) later
2206 // then compute 2^(X-int(X)) as ((2^(X-int(X))-1)+1)
2207 // final result is obtained with: 2^X = 2^int(X) * 2^(X-int(X))
2208 fld_s(0); // Stack: X X ...
2209 frndint(); // Stack: int(X) X ...
2210 fsuba(1); // Stack: int(X) X-int(X) ...
2211 fistp_s(Address(rsp,0)); // move int(X) as integer to thread's stack. Stack: X-int(X) ...
2212 f2xm1(); // Stack: 2^(X-int(X))-1 ...
2213 fld1(); // Stack: 1 2^(X-int(X))-1 ...
2214 faddp(1); // Stack: 2^(X-int(X))
2215 // computes 2^(int(X)): add exponent bias (1023) to int(X), then
2216 // shift int(X)+1023 to exponent position.
2217 // The exponent is limited to 11 bits: if int(X)+1023 does not fit in 11
2218 // bits, set the result to NaN. 0x000 and 0x7FF are reserved exponent
2219 // values, so detect them and set the result to NaN.
2220 movl(rax,Address(rsp,0));
2221 movl(rcx, -2048); // 11 bit mask and valid NaN binary encoding
2222 addl(rax, 1023);
2223 movl(rdx,rax);
2224 shll(rax,20);
2225 // Check that 0 < int(X)+1023 < 2047. Otherwise set rax to NaN.
2226 addl(rdx,1);
2227 // Check that 1 < int(X)+1023+1 < 2048
2228 // in 3 steps:
2229 // 1- (int(X)+1023+1)&-2048 == 0 => 0 <= int(X)+1023+1 < 2048
2230 // 2- (int(X)+1023+1)&-2048 != 0
2231 // 3- (int(X)+1023+1)&-2048 != 1
2232 // Do 2- first because addl just updated the flags.
2233 cmov32(Assembler::equal,rax,rcx);
2234 cmpl(rdx,1);
2235 cmov32(Assembler::equal,rax,rcx);
2236 testl(rdx,rcx);
2237 cmov32(Assembler::notEqual,rax,rcx);
2238 movl(Address(rsp,4),rax);
2239 movl(Address(rsp,0),0);
2240 fmul_d(Address(rsp,0)); // Stack: 2^X ...
2241 addptr(rsp,sizeof(jdouble));
2242 }
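The exponent trick above builds 2^int(X) directly as an IEEE-754 double: biased exponent int(X)+1023 shifted into bits 52..62 with a zero mantissa, which is what the shll(rax,20) into the high word at rsp+4 accomplishes. A C++ check of that encoding:

#include <cassert>
#include <cstdint>
#include <cstring>

// Build 2^n by placing n+1023 in the double's exponent field, the same
// bit pattern pow_exp_core_encoding stores to the stack and multiplies by.
double two_to_the(int n) {
  assert(n + 1023 > 0 && n + 1023 < 2047);     // outside: the code yields NaN
  uint64_t bits = (uint64_t)(n + 1023) << 52;  // exponent bits, zero mantissa
  double d;
  std::memcpy(&d, &bits, sizeof(d));
  return d;
}

int main() {
  assert(two_to_the(0)  == 1.0);
  assert(two_to_the(10) == 1024.0);
  assert(two_to_the(-1) == 0.5);
  return 0;
}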
2243
2244 void MacroAssembler::increase_precision() {
2245 subptr(rsp, BytesPerWord);
2246 fnstcw(Address(rsp, 0));
2247 movl(rax, Address(rsp, 0));
2248 orl(rax, 0x300);
2249 push(rax);
2250 fldcw(Address(rsp, 0));
2251 pop(rax);
2252 }
2253
2254 void MacroAssembler::restore_precision() {
2255 fldcw(Address(rsp, 0));
2256 addptr(rsp, BytesPerWord);
2257 }
2258
2259 void MacroAssembler::fast_pow() {
2260 // computes X^Y = 2^(Y * log2(X))
2261 // if fast computation is not possible, result is NaN. Requires
2262 // fallback from user of this macro.
2263 // increase precision for intermediate steps of the computation
2264 increase_precision();
2265 fyl2x(); // Stack: (Y*log2(X)) ...
2266 pow_exp_core_encoding(); // Stack: X^Y ...
2267 restore_precision();
2268 }
2269
2270 void MacroAssembler::fast_exp() {
2271 // computes exp(X) = 2^(X * log2(e))
2272 // if fast computation is not possible, result is NaN. Requires
2273 // fallback from user of this macro.
2274 // increase precision for intermediate steps of the computation
2275 increase_precision();
2276 fldl2e(); // Stack: log2(e) X ...
2277 fmulp(1); // Stack: (X*log2(e)) ...
2278 pow_exp_core_encoding(); // Stack: exp(X) ...
2279 restore_precision();
2280 }
2281
2282 void MacroAssembler::pow_or_exp(bool is_exp, int num_fpu_regs_in_use) {
2283 // kills rax, rcx, rdx
2284 // pow and exp needs 2 extra registers on the fpu stack.
2285 Label slow_case, done;
2286 Register tmp = noreg;
2287 if (!VM_Version::supports_cmov()) {
2288 // fcmp needs a temporary so preserve rdx,
2289 tmp = rdx;
2290 }
2291 Register tmp2 = rax;
2292 Register tmp3 = rcx;
2293
2294 if (is_exp) {
2295 // Stack: X
2296 fld_s(0); // duplicate argument for runtime call. Stack: X X
2297 fast_exp(); // Stack: exp(X) X
2298 fcmp(tmp, 0, false, false); // Stack: exp(X) X
2299 // exp(X) not equal to itself: exp(X) is NaN go to slow case.
2300 jcc(Assembler::parity, slow_case);
2301 // get rid of duplicate argument. Stack: exp(X)
2302 if (num_fpu_regs_in_use > 0) {
2303 fxch();
2304 fpop();
2305 } else {
2306 ffree(1);
2307 }
2308 jmp(done);
2309 } else {
2310 // Stack: X Y
2311 Label x_negative, y_odd;
2312
2313 fldz(); // Stack: 0 X Y
2314 fcmp(tmp, 1, true, false); // Stack: X Y
2315 jcc(Assembler::above, x_negative);
2316
2317 // X >= 0
2318
2319 fld_s(1); // duplicate arguments for runtime call. Stack: Y X Y
2320 fld_s(1); // Stack: X Y X Y
2321 fast_pow(); // Stack: X^Y X Y
2322 fcmp(tmp, 0, false, false); // Stack: X^Y X Y
2323 // X^Y not equal to itself: X^Y is NaN go to slow case.
2324 jcc(Assembler::parity, slow_case);
2325 // get rid of duplicate arguments. Stack: X^Y
2326 if (num_fpu_regs_in_use > 0) {
2327 fxch(); fpop();
2328 fxch(); fpop();
2329 } else {
2330 ffree(2);
2331 ffree(1);
2332 }
2333 jmp(done);
2334
2335 // X <= 0
2336 bind(x_negative);
2337
2338 fld_s(1); // Stack: Y X Y
2339 frndint(); // Stack: int(Y) X Y
2340 fcmp(tmp, 2, false, false); // Stack: int(Y) X Y
2341 jcc(Assembler::notEqual, slow_case);
2342
2343 subptr(rsp, 8);
2344
2345 // For X^Y, when X < 0, Y has to be an integer and the final
2346 // result depends on whether it's odd or even. We just checked
2347 // that int(Y) == Y. We move int(Y) to gp registers as a 64 bit
2348 // integer to test its parity. If int(Y) is huge and doesn't fit
2349 // in the 64 bit integer range, the integer indefinite value will
2350 // end up in the gp registers. Huge numbers are all even, the
2351 // integer indefinite number is even so it's fine.
2352
2353 #ifdef ASSERT
2354 // Let's check we don't end up with an integer indefinite number
2355 // when not expected. First test for huge numbers: check whether
2356 // int(Y)+1 == int(Y) which is true for very large numbers and
2357 // those are all even. A 64 bit integer is guaranteed to not
2358 // overflow for numbers where y+1 != y (when precision is set to
2359 // double precision).
2360 Label y_not_huge;
2361
2362 fld1(); // Stack: 1 int(Y) X Y
2363 fadd(1); // Stack: 1+int(Y) int(Y) X Y
2364
2365 #ifdef _LP64
2366 // trip to memory to force the precision down from double extended
2367 // precision
2368 fstp_d(Address(rsp, 0));
2369 fld_d(Address(rsp, 0));
2370 #endif
2371
2372 fcmp(tmp, 1, true, false); // Stack: int(Y) X Y
2373 #endif
2374
2375 // move int(Y) as 64 bit integer to thread's stack
2376 fistp_d(Address(rsp,0)); // Stack: X Y
2377
2378 #ifdef ASSERT
2379 jcc(Assembler::notEqual, y_not_huge);
2380
2381 // Y is huge so we know it's even. It may not fit in a 64 bit
2382 // integer and we don't want the debug code below to see the
2383 // integer indefinite value so overwrite int(Y) on the thread's
2384 // stack with 0.
2385 movl(Address(rsp, 0), 0);
2386 movl(Address(rsp, 4), 0);
2387
2388 bind(y_not_huge);
2389 #endif
2390
2391 fld_s(1); // duplicate arguments for runtime call. Stack: Y X Y
2392 fld_s(1); // Stack: X Y X Y
2393 fabs(); // Stack: abs(X) Y X Y
2394 fast_pow(); // Stack: abs(X)^Y X Y
2395 fcmp(tmp, 0, false, false); // Stack: abs(X)^Y X Y
2396 // abs(X)^Y not equal to itself: abs(X)^Y is NaN go to slow case.
2397
2398 pop(tmp2);
2399 NOT_LP64(pop(tmp3));
2400 jcc(Assembler::parity, slow_case);
2401
2402 #ifdef ASSERT
2403 // Check that int(Y) is not integer indefinite value (int
2404 // overflow). Shouldn't happen because for values that would
2405 // overflow, 1+int(Y)==Y which was tested earlier.
2406 #ifndef _LP64
2407 {
2408 Label integer;
2409 testl(tmp2, tmp2);
2410 jcc(Assembler::notZero, integer);
2411 cmpl(tmp3, 0x80000000);
2412 jcc(Assembler::notZero, integer);
2413 STOP("integer indefinite value shouldn't be seen here");
2414 bind(integer);
2415 }
2416 #else
2417 {
2418 Label integer;
2419 mov(tmp3, tmp2); // preserve tmp2 for parity check below
2420 shlq(tmp3, 1);
2421 jcc(Assembler::carryClear, integer);
2422 jcc(Assembler::notZero, integer);
2423 STOP("integer indefinite value shouldn't be seen here");
2424 bind(integer);
2425 }
2426 #endif
2427 #endif
2428
2429 // get rid of duplicate arguments. Stack: X^Y
2430 if (num_fpu_regs_in_use > 0) {
2431 fxch(); fpop();
2432 fxch(); fpop();
2433 } else {
2434 ffree(2);
2435 ffree(1);
2436 }
2437
2438 testl(tmp2, 1);
2439 jcc(Assembler::zero, done); // X <= 0, Y even: X^Y = abs(X)^Y
2440 // X <= 0, Y odd: X^Y = -abs(X)^Y
2441
2442 fchs(); // Stack: -abs(X)^Y Y
2443 jmp(done);
2444 }
2445
2446 // slow case: runtime call
2447 bind(slow_case);
2448
2449 fpop(); // pop incorrect result or int(Y)
2450
2451 fp_runtime_fallback(is_exp ? CAST_FROM_FN_PTR(address, SharedRuntime::dexp) : CAST_FROM_FN_PTR(address, SharedRuntime::dpow),
2452 is_exp ? 1 : 2, num_fpu_regs_in_use);
2453
2454 // Come here with result in F-TOS
2455 bind(done);
2456 }
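For a negative base the code computes abs(X)^Y and then flips the sign when int(Y) is odd, which is the Java/IEEE rule for integral exponents (non-integral Y with X < 0 goes to the slow case). A hedged restatement of that parity rule:

#include <cassert>
#include <cmath>

// Negative-base pow with the parity rule used above; the asserts encode
// the preconditions the assembly checks (Y integral, X negative).
double neg_base_pow(double x, double y) {
  assert(x < 0 && std::rint(y) == y);
  double r = std::pow(-x, y);          // abs(X)^Y
  long long iy = (long long)y;         // parity of int(Y) decides the sign
  return (iy & 1) ? -r : r;
}

int main() {
  assert(neg_base_pow(-2.0, 3.0) == -8.0);
  assert(neg_base_pow(-2.0, 4.0) == 16.0);
  return 0;
}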
2457
2458 void MacroAssembler::fpop() {
2459 ffree();
2460 fincstp();
2461 }
2462
2463 void MacroAssembler::fremr(Register tmp) {
2464 save_rax(tmp);
2465 { Label L;
2466 bind(L);
2467 fprem();
2468 fwait(); fnstsw_ax();
2469 #ifdef _LP64
2470 testl(rax, 0x400);
2471 jcc(Assembler::notEqual, L);
2472 #else
2473 sahf();
2474 jcc(Assembler::parity, L);
2475 #endif // _LP64
2476 }
2477 restore_rax(tmp);
2478 // Result is in ST0.
2479 // Note: fxch & fpop to get rid of ST1
2480 // (otherwise FPU stack could overflow eventually)
2481 fxch(1);
2482 fpop();
2483 }
2484
2485
2486 void MacroAssembler::incrementl(AddressLiteral dst) {
2487 if (reachable(dst)) {
2488 incrementl(as_Address(dst));
2489 } else {
2490 lea(rscratch1, dst);
2491 incrementl(Address(rscratch1, 0));
2492 }
2493 }
2494
2495 void MacroAssembler::incrementl(ArrayAddress dst) {
2496 incrementl(as_Address(dst));
2497 }
2498
2499 void MacroAssembler::incrementl(Register reg, int value) {
2500 if (value == min_jint) {addl(reg, value) ; return; }
2501 if (value < 0) { decrementl(reg, -value); return; }
2502 if (value == 0) { ; return; }
2503 if (value == 1 && UseIncDec) { incl(reg) ; return; }
2504 /* else */ { addl(reg, value) ; return; }
2505 }
2506
2507 void MacroAssembler::incrementl(Address dst, int value) {
2508 if (value == min_jint) {addl(dst, value) ; return; }
2509 if (value < 0) { decrementl(dst, -value); return; }
2510 if (value == 0) { ; return; }
2511 if (value == 1 && UseIncDec) { incl(dst) ; return; }
2512 /* else */ { addl(dst, value) ; return; }
2513 }
2514
2515 void MacroAssembler::jump(AddressLiteral dst) {
2516 if (reachable(dst)) {
2517 jmp_literal(dst.target(), dst.rspec());
2518 } else {
2519 lea(rscratch1, dst);
2520 jmp(rscratch1);
2521 }
2522 }
2523
2524 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
2525 if (reachable(dst)) {
2526 InstructionMark im(this);
2527 relocate(dst.reloc());
2528 const int short_size = 2;
2529 const int long_size = 6;
2530 int offs = (intptr_t)dst.target() - ((intptr_t)pc());
2531 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
2532 // 0111 tttn #8-bit disp
2533 emit_byte(0x70 | cc);
2534 emit_byte((offs - short_size) & 0xFF);
2535 } else {
2536 // 0000 1111 1000 tttn #32-bit disp
2537 emit_byte(0x0F);
2538 emit_byte(0x80 | cc);
2539 emit_long(offs - long_size);
2540 }
2541 } else {
2542 #ifdef ASSERT
2543 warning("reversing conditional branch");
2544 #endif /* ASSERT */
2545 Label skip;
2546 jccb(reverse[cc], skip);
2547 lea(rscratch1, dst);
2548 Assembler::jmp(rscratch1);
2549 bind(skip);
2550 }
2551 }
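The two encodings emitted on the reachable path are the standard short and near conditional jumps. A toy emitter (not the Assembler API) producing the same byte patterns:

#include <cstdint>
#include <vector>

// Short/long jcc selection as in jump_cc: 2-byte "0x70|cc disp8" when the
// displacement fits in a signed byte, else 6-byte "0x0F 0x80|cc disp32".
void emit_jcc(std::vector<uint8_t>& code, int cc, int32_t offs) {
  const int short_size = 2, long_size = 6;
  int32_t d8 = offs - short_size;          // disp is relative to the next insn
  if (d8 >= -128 && d8 <= 127) {
    code.push_back(0x70 | cc);
    code.push_back((uint8_t)d8);
  } else {
    int32_t d32 = offs - long_size;
    code.push_back(0x0F);
    code.push_back(0x80 | cc);
    for (int i = 0; i < 4; i++) code.push_back((uint8_t)(d32 >> (8 * i)));
  }
}

int main() {
  std::vector<uint8_t> code;
  emit_jcc(code, 0x4 /* equal */, 16);     // short form: 0x74 0x0E
  emit_jcc(code, 0x4 /* equal */, 4096);   // long form
  return code.size() == 2 + 6 ? 0 : 1;
}

The unreachable path uses the same idea in reverse: since a 64-bit target may be out of jcc range, it branches over an indirect jmp through rscratch1, using the negated condition from the reverse[] table.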
2552
2553 void MacroAssembler::ldmxcsr(AddressLiteral src) {
2554 if (reachable(src)) {
2555 Assembler::ldmxcsr(as_Address(src));
2556 } else {
2557 lea(rscratch1, src);
2558 Assembler::ldmxcsr(Address(rscratch1, 0));
2559 }
2560 }
2561
2562 int MacroAssembler::load_signed_byte(Register dst, Address src) {
2563 int off;
2564 if (LP64_ONLY(true ||) VM_Version::is_P6()) {
2565 off = offset();
2566 movsbl(dst, src); // movsxb
2567 } else {
2568 off = load_unsigned_byte(dst, src);
2569 shll(dst, 24);
2570 sarl(dst, 24);
2571 }
2572 return off;
2573 }
2574
2575 // Note: load_signed_short used to be called load_signed_word.
2576 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler
2577 // manual, which means 16 bits, that usage is found nowhere in HotSpot code.
2578 // The term "word" in HotSpot means a 32- or 64-bit machine word.
2579 int MacroAssembler::load_signed_short(Register dst, Address src) {
2580 int off;
2581 if (LP64_ONLY(true ||) VM_Version::is_P6()) {
2582 // This is dubious to me since it seems safe to do a signed 16 => 64 bit
2583 // version but this is what 64bit has always done. This seems to imply
2584 // that users are only using 32bits worth.
2585 off = offset();
2586 movswl(dst, src); // movsxw
2587 } else {
2588 off = load_unsigned_short(dst, src);
2589 shll(dst, 16);
2590 sarl(dst, 16);
2591 }
2592 return off;
2593 }
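The pre-P6 fallbacks sign-extend with a shift pair instead of movsx: shifting the value to the top of the register and arithmetic-shifting it back replicates the sign bit. A quick check of the identity (assumes two's complement and an arithmetic >> on signed values, as on mainstream compilers):

#include <cassert>
#include <cstdint>

// Sign extension via shl/sar pairs, as in the non-P6 paths above.
int32_t sx_byte (uint32_t x) { return (int32_t)(x << 24) >> 24; }
int32_t sx_short(uint32_t x) { return (int32_t)(x << 16) >> 16; }

int main() {
  assert(sx_byte (0xFF)   == -1);
  assert(sx_byte (0x7F)   == 127);
  assert(sx_short(0x8000) == -32768);
  return 0;
}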
2594
2595 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
2596 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
2597 // and "3.9 Partial Register Penalties", p. 22.
2598 int off;
2599 if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) {
2600 off = offset();
2601 movzbl(dst, src); // movzxb
2602 } else {
2603 xorl(dst, dst);
2604 off = offset();
2605 movb(dst, src);
2606 }
2607 return off;
2608 }
2609
2610 // Note: load_unsigned_short used to be called load_unsigned_word.
2611 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
2612 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
2613 // and "3.9 Partial Register Penalties", p. 22.
2614 int off;
2615 if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
2616 off = offset();
2617 movzwl(dst, src); // movzxw
2618 } else {
2619 xorl(dst, dst);
2620 off = offset();
2621 movw(dst, src);
2622 }
2623 return off;
2624 }
2625
2626 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
2627 switch (size_in_bytes) {
2628 #ifndef _LP64
2629 case 8:
2630 assert(dst2 != noreg, "second dest register required");
2631 movl(dst, src);
2632 movl(dst2, src.plus_disp(BytesPerInt));
2633 break;
2634 #else
2635 case 8: movq(dst, src); break;
2636 #endif
2637 case 4: movl(dst, src); break;
2638 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
2639 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
2640 default: ShouldNotReachHere();
2641 }
2642 }
2643
2644 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
2645 switch (size_in_bytes) {
2646 #ifndef _LP64
2647 case 8:
2648 assert(src2 != noreg, "second source register required");
2649 movl(dst, src);
2650 movl(dst.plus_disp(BytesPerInt), src2);
2651 break;
2652 #else
2653 case 8: movq(dst, src); break;
2654 #endif
2655 case 4: movl(dst, src); break;
2656 case 2: movw(dst, src); break;
2657 case 1: movb(dst, src); break;
2658 default: ShouldNotReachHere();
2659 }
2660 }
2661
2662 void MacroAssembler::mov32(AddressLiteral dst, Register src) {
2663 if (reachable(dst)) {
2664 movl(as_Address(dst), src);
2665 } else {
2666 lea(rscratch1, dst);
2667 movl(Address(rscratch1, 0), src);
2668 }
2669 }
2670
2671 void MacroAssembler::mov32(Register dst, AddressLiteral src) {
2672 if (reachable(src)) {
2673 movl(dst, as_Address(src));
2674 } else {
2675 lea(rscratch1, src);
2676 movl(dst, Address(rscratch1, 0));
2677 }
2678 }
2679
2680 // C++ bool manipulation
2681
2682 void MacroAssembler::movbool(Register dst, Address src) {
2683 if(sizeof(bool) == 1)
2684 movb(dst, src);
2685 else if(sizeof(bool) == 2)
2686 movw(dst, src);
2687 else if(sizeof(bool) == 4)
2688 movl(dst, src);
2689 else
2690 // unsupported
2691 ShouldNotReachHere();
2692 }
2693
2694 void MacroAssembler::movbool(Address dst, bool boolconst) {
2695 if(sizeof(bool) == 1)
2696 movb(dst, (int) boolconst);
2697 else if(sizeof(bool) == 2)
2698 movw(dst, (int) boolconst);
2699 else if(sizeof(bool) == 4)
2700 movl(dst, (int) boolconst);
2701 else
2702 // unsupported
2703 ShouldNotReachHere();
2704 }
2705
2706 void MacroAssembler::movbool(Address dst, Register src) {
2707 if(sizeof(bool) == 1)
2708 movb(dst, src);
2709 else if(sizeof(bool) == 2)
2710 movw(dst, src);
2711 else if(sizeof(bool) == 4)
2712 movl(dst, src);
2713 else
2714 // unsupported
2715 ShouldNotReachHere();
2716 }
2717
2718 void MacroAssembler::movbyte(ArrayAddress dst, int src) {
2719 movb(as_Address(dst), src);
2720 }
2721
2722 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src) {
2723 if (reachable(src)) {
2724 movdl(dst, as_Address(src));
2725 } else {
2726 lea(rscratch1, src);
2727 movdl(dst, Address(rscratch1, 0));
2728 }
2729 }
2730
2731 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src) {
2732 if (reachable(src)) {
2733 movq(dst, as_Address(src));
2734 } else {
2735 lea(rscratch1, src);
2736 movq(dst, Address(rscratch1, 0));
2737 }
2738 }
2739
2740 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
2741 if (reachable(src)) {
2742 if (UseXmmLoadAndClearUpper) {
2743 movsd (dst, as_Address(src));
2744 } else {
2745 movlpd(dst, as_Address(src));
2746 }
2747 } else {
2748 lea(rscratch1, src);
2749 if (UseXmmLoadAndClearUpper) {
2750 movsd (dst, Address(rscratch1, 0));
2751 } else {
2752 movlpd(dst, Address(rscratch1, 0));
2753 }
2754 }
2755 }
2756
2757 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
2758 if (reachable(src)) {
2759 movss(dst, as_Address(src));
2760 } else {
2761 lea(rscratch1, src);
2762 movss(dst, Address(rscratch1, 0));
2763 }
2764 }
2765
2766 void MacroAssembler::movptr(Register dst, Register src) {
2767 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
2768 }
2769
2770 void MacroAssembler::movptr(Register dst, Address src) {
2771 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
2772 }
2773
2774 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
2775 void MacroAssembler::movptr(Register dst, intptr_t src) {
2776 LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src));
2777 }
2778
2779 void MacroAssembler::movptr(Address dst, Register src) {
2780 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
2781 }
2782
2783 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src) {
2784 if (reachable(src)) {
2785 Assembler::movdqu(dst, as_Address(src));
2786 } else {
2787 lea(rscratch1, src);
2788 Assembler::movdqu(dst, Address(rscratch1, 0));
2789 }
2790 }
2791
2792 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
2793 if (reachable(src)) {
2794 Assembler::movsd(dst, as_Address(src));
2795 } else {
2796 lea(rscratch1, src);
2797 Assembler::movsd(dst, Address(rscratch1, 0));
2798 }
2799 }
2800
2801 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
2802 if (reachable(src)) {
2803 Assembler::movss(dst, as_Address(src));
2804 } else {
2805 lea(rscratch1, src);
2806 Assembler::movss(dst, Address(rscratch1, 0));
2807 }
2808 }
2809
2810 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src) {
2811 if (reachable(src)) {
2812 Assembler::mulsd(dst, as_Address(src));
2813 } else {
2814 lea(rscratch1, src);
2815 Assembler::mulsd(dst, Address(rscratch1, 0));
2816 }
2817 }
2818
2819 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src) {
2820 if (reachable(src)) {
2821 Assembler::mulss(dst, as_Address(src));
2822 } else {
2823 lea(rscratch1, src);
2824 Assembler::mulss(dst, Address(rscratch1, 0));
2825 }
2826 }
2827
2828 void MacroAssembler::null_check(Register reg, int offset) {
2829 if (needs_explicit_null_check(offset)) {
2830 // provoke OS NULL exception if reg = NULL by
2831 // accessing M[reg] w/o changing any (non-CC) registers
2832 // NOTE: cmpl is plenty here to provoke a segv
2833 cmpptr(rax, Address(reg, 0));
2834 // Note: should probably use testl(rax, Address(reg, 0));
2835 // may be shorter code (however, this version of
2836 // testl needs to be implemented first)
2837 } else {
2838 // nothing to do, (later) access of M[reg + offset]
2839 // will provoke OS NULL exception if reg = NULL
2840 }
2841 }
2842
2843 void MacroAssembler::os_breakpoint() {
2844 // instead of directly emitting a breakpoint, call os:breakpoint for better debugability
2845 // (e.g., MSVC can't call ps() otherwise)
2846 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
2847 }
2848
2849 void MacroAssembler::pop_CPU_state() {
2850 pop_FPU_state();
2851 pop_IU_state();
2852 }
2853
2854 void MacroAssembler::pop_FPU_state() {
2855 NOT_LP64(frstor(Address(rsp, 0));)
2856 LP64_ONLY(fxrstor(Address(rsp, 0));)
2857 addptr(rsp, FPUStateSizeInWords * wordSize);
2858 }
2859
2860 void MacroAssembler::pop_IU_state() {
2861 popa();
2862 LP64_ONLY(addq(rsp, 8));
2863 popf();
2864 }
2865
2866 // Save Integer and Float state
2867 // Warning: Stack must be 16 byte aligned (64bit)
2868 void MacroAssembler::push_CPU_state() {
2869 push_IU_state();
2870 push_FPU_state();
2871 }
2872
2873 void MacroAssembler::push_FPU_state() {
2874 subptr(rsp, FPUStateSizeInWords * wordSize);
2875 #ifndef _LP64
2876 fnsave(Address(rsp, 0));
2877 fwait();
2878 #else
2879 fxsave(Address(rsp, 0));
2880 #endif // LP64
2881 }
2882
2883 void MacroAssembler::push_IU_state() {
2884 // Push flags first because pusha kills them
2885 pushf();
2886 // Make sure rsp stays 16-byte aligned
2887 LP64_ONLY(subq(rsp, 8));
2888 pusha();
2889 }
2890
2891 void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) {
2892 // determine java_thread register
2893 if (!java_thread->is_valid()) {
2894 java_thread = rdi;
2895 get_thread(java_thread);
2896 }
2897 // we must set sp to zero to clear frame
2898 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
2899 if (clear_fp) {
2900 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
2901 }
2902
2903 if (clear_pc)
2904 movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
2905
2906 }
2907
2908 void MacroAssembler::restore_rax(Register tmp) {
2909 if (tmp == noreg) pop(rax);
2910 else if (tmp != rax) mov(rax, tmp);
2911 }
2912
2913 void MacroAssembler::round_to(Register reg, int modulus) {
2914 addptr(reg, modulus - 1);
2915 andptr(reg, -modulus);
2916 }
2917
2918 void MacroAssembler::save_rax(Register tmp) {
2919 if (tmp == noreg) push(rax);
2920 else if (tmp != rax) mov(tmp, rax);
2921 }
2922
2923 // Write serialization page so VM thread can do a pseudo remote membar.
2924 // We use the current thread pointer to calculate a thread specific
2925 // offset to write to within the page. This minimizes bus traffic
2926 // due to cache line collision.
2927 void MacroAssembler::serialize_memory(Register thread, Register tmp) {
2928 movl(tmp, thread);
2929 shrl(tmp, os::get_serialize_page_shift_count());
2930 andl(tmp, (os::vm_page_size() - sizeof(int)));
2931
2932 Address index(noreg, tmp, Address::times_1);
2933 ExternalAddress page(os::get_memory_serialize_page());
2934
2935 // Size of store must match masking code above
2936 movl(as_Address(ArrayAddress(page, index)), tmp);
2937 }
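Each thread ends up with its own int-aligned slot in the serialization page: the thread pointer is shifted down and masked to a page offset. A sketch of the offset math (the shift and page size are illustrative, not the values the os:: calls actually return):

#include <cassert>
#include <cstddef>
#include <cstdint>

uintptr_t serialize_offset(uintptr_t thread, int page_shift, size_t page_size) {
  // Mask to a page offset; masking with (page_size - sizeof(int)) keeps the
  // slot int-aligned and in bounds for the 4-byte store.
  return (thread >> page_shift) & (page_size - sizeof(int));
}

int main() {
  const size_t page = 4096;
  // Two hypothetical thread pointers map to distinct, aligned slots.
  assert(serialize_offset(0x1000, 4, page) != serialize_offset(0x2000, 4, page));
  assert(serialize_offset(0x1000, 4, page) % sizeof(int) == 0);
  return 0;
}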
2938
2939 // Calls to C land
2940 //
2941 // When entering C land, the rbp, & rsp of the last Java frame have to be recorded
2942 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp
2943 // has to be reset to 0. This is required to allow proper stack traversal.
2944 void MacroAssembler::set_last_Java_frame(Register java_thread,
2945 Register last_java_sp,
2946 Register last_java_fp,
2947 address last_java_pc) {
2948 // determine java_thread register
2949 if (!java_thread->is_valid()) {
2950 java_thread = rdi;
2951 get_thread(java_thread);
2952 }
2953 // determine last_java_sp register
2954 if (!last_java_sp->is_valid()) {
2955 last_java_sp = rsp;
2956 }
2957
2958 // last_java_fp is optional
2959
2960 if (last_java_fp->is_valid()) {
2961 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
2962 }
2963
2964 // last_java_pc is optional
2965
2966 if (last_java_pc != NULL) {
2967 lea(Address(java_thread,
2968 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
2969 InternalAddress(last_java_pc));
2970
2971 }
2972 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
2973 }
2974
2975 void MacroAssembler::shlptr(Register dst, int imm8) {
2976 LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
2977 }
2978
2979 void MacroAssembler::shrptr(Register dst, int imm8) {
2980 LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8));
2981 }
2982
2983 void MacroAssembler::sign_extend_byte(Register reg) {
2984 if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) {
2985 movsbl(reg, reg); // movsxb
2986 } else {
2987 shll(reg, 24);
2988 sarl(reg, 24);
2989 }
2990 }
2991
2992 void MacroAssembler::sign_extend_short(Register reg) {
2993 if (LP64_ONLY(true ||) VM_Version::is_P6()) {
2994 movswl(reg, reg); // movsxw
2995 } else {
2996 shll(reg, 16);
2997 sarl(reg, 16);
2998 }
2999 }
3000
3001 void MacroAssembler::testl(Register dst, AddressLiteral src) {
3002 assert(reachable(src), "Address should be reachable");
3003 testl(dst, as_Address(src));
3004 }
3005
3006 void MacroAssembler::sqrtsd(XMMRegister dst, AddressLiteral src) {
3007 if (reachable(src)) {
3008 Assembler::sqrtsd(dst, as_Address(src));
3009 } else {
3010 lea(rscratch1, src);
3011 Assembler::sqrtsd(dst, Address(rscratch1, 0));
3012 }
3013 }
3014
3015 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src) {
3016 if (reachable(src)) {
3017 Assembler::sqrtss(dst, as_Address(src));
3018 } else {
3019 lea(rscratch1, src);
3020 Assembler::sqrtss(dst, Address(rscratch1, 0));
3021 }
3022 }
3023
3024 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src) {
3025 if (reachable(src)) {
3026 Assembler::subsd(dst, as_Address(src));
3027 } else {
3028 lea(rscratch1, src);
3029 Assembler::subsd(dst, Address(rscratch1, 0));
3030 }
3031 }
3032
3033 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src) {
3034 if (reachable(src)) {
3035 Assembler::subss(dst, as_Address(src));
3036 } else {
3037 lea(rscratch1, src);
3038 Assembler::subss(dst, Address(rscratch1, 0));
3039 }
3040 }
3041
3042 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
3043 if (reachable(src)) {
3044 Assembler::ucomisd(dst, as_Address(src));
3045 } else {
3046 lea(rscratch1, src);
3047 Assembler::ucomisd(dst, Address(rscratch1, 0));
3048 }
3049 }
3050
3051 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
3052 if (reachable(src)) {
3053 Assembler::ucomiss(dst, as_Address(src));
3054 } else {
3055 lea(rscratch1, src);
3056 Assembler::ucomiss(dst, Address(rscratch1, 0));
3057 }
3058 }
3059
3060 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
3061 // Used in sign-bit flipping with aligned address.
3062 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
3063 if (reachable(src)) {
3064 Assembler::xorpd(dst, as_Address(src));
3065 } else {
3066 lea(rscratch1, src);
3067 Assembler::xorpd(dst, Address(rscratch1, 0));
3068 }
3069 }
3070
3071 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
3072 // Used in sign-bit flipping with aligned address.
3073 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
3074 if (reachable(src)) {
3075 Assembler::xorps(dst, as_Address(src));
3076 } else {
3077 lea(rscratch1, src);
3078 Assembler::xorps(dst, Address(rscratch1, 0));
3079 }
3080 }
3081
3082 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
3083 // Used in sign-bit flipping with aligned address.
3084 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
3085 if (reachable(src)) {
3086 Assembler::pshufb(dst, as_Address(src));
3087 } else {
3088 lea(rscratch1, src);
3089 Assembler::pshufb(dst, Address(rscratch1, 0));
3090 }
3091 }
3092
3093 // AVX 3-operands instructions
3094
3095 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
3096 if (reachable(src)) {
3097 vaddsd(dst, nds, as_Address(src));
3098 } else {
3099 lea(rscratch1, src);
3100 vaddsd(dst, nds, Address(rscratch1, 0));
3101 }
3102 }
3103
3104 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
3105 if (reachable(src)) {
3106 vaddss(dst, nds, as_Address(src));
3107 } else {
3108 lea(rscratch1, src);
3109 vaddss(dst, nds, Address(rscratch1, 0));
3110 }
3111 }
3112
3113 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
3114 if (reachable(src)) {
3115 vandpd(dst, nds, as_Address(src), vector256);
3116 } else {
3117 lea(rscratch1, src);
3118 vandpd(dst, nds, Address(rscratch1, 0), vector256);
3119 }
3120 }
3121
3122 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
3123 if (reachable(src)) {
3124 vandps(dst, nds, as_Address(src), vector256);
3125 } else {
3126 lea(rscratch1, src);
3127 vandps(dst, nds, Address(rscratch1, 0), vector256);
3128 }
3129 }
3130
3131 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
3132 if (reachable(src)) {
3133 vdivsd(dst, nds, as_Address(src));
3134 } else {
3135 lea(rscratch1, src);
3136 vdivsd(dst, nds, Address(rscratch1, 0));
3137 }
3138 }
3139
3140 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
3141 if (reachable(src)) {
3142 vdivss(dst, nds, as_Address(src));
3143 } else {
3144 lea(rscratch1, src);
3145 vdivss(dst, nds, Address(rscratch1, 0));
3146 }
3147 }
3148
3149 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
3150 if (reachable(src)) {
3151 vmulsd(dst, nds, as_Address(src));
3152 } else {
3153 lea(rscratch1, src);
3154 vmulsd(dst, nds, Address(rscratch1, 0));
3155 }
3156 }
3157
3158 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
3159 if (reachable(src)) {
3160 vmulss(dst, nds, as_Address(src));
3161 } else {
3162 lea(rscratch1, src);
3163 vmulss(dst, nds, Address(rscratch1, 0));
3164 }
3165 }
3166
3167 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
3168 if (reachable(src)) {
3169 vsubsd(dst, nds, as_Address(src));
3170 } else {
3171 lea(rscratch1, src);
3172 vsubsd(dst, nds, Address(rscratch1, 0));
3173 }
3174 }
3175
3176 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
3177 if (reachable(src)) {
3178 vsubss(dst, nds, as_Address(src));
3179 } else {
3180 lea(rscratch1, src);
3181 vsubss(dst, nds, Address(rscratch1, 0));
3182 }
3183 }
3184
3185 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
3186 if (reachable(src)) {
3187 vxorpd(dst, nds, as_Address(src), vector256);
3188 } else {
3189 lea(rscratch1, src);
3190 vxorpd(dst, nds, Address(rscratch1, 0), vector256);
3191 }
3192 }
3193
3194 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
3195 if (reachable(src)) {
3196 vxorps(dst, nds, as_Address(src), vector256);
3197 } else {
3198 lea(rscratch1, src);
3199 vxorps(dst, nds, Address(rscratch1, 0), vector256);
3200 }
3201 }
3202
3203
3204 //////////////////////////////////////////////////////////////////////////////////
3205 #ifndef SERIALGC
3206
3207 void MacroAssembler::g1_write_barrier_pre(Register obj,
3208 Register pre_val,
3209 Register thread,
3210 Register tmp,
3211 bool tosca_live,
3212 bool expand_call) {
3213
3214 // If expand_call is true then we expand the call_VM_leaf macro
3215 // directly to skip generating the check by
3216 // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
3217
3218 #ifdef _LP64
3219 assert(thread == r15_thread, "must be");
3220 #endif // _LP64
3221
3222 Label done;
3223 Label runtime;
3224
3225 assert(pre_val != noreg, "check this code");
3226
3227 if (obj != noreg) {
3228 assert_different_registers(obj, pre_val, tmp);
3229 assert(pre_val != rax, "check this code");
3230 }
3231
3232 Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
3233 PtrQueue::byte_offset_of_active()));
3234 Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
3235 PtrQueue::byte_offset_of_index()));
3236 Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
3237 PtrQueue::byte_offset_of_buf()));
3238
3239
3240 // Is marking active?
3241 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
3242 cmpl(in_progress, 0);
3243 } else {
3244 assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
3245 cmpb(in_progress, 0);
3246 }
3247 jcc(Assembler::equal, done);
3248
3249 // Do we need to load the previous value?
3250 if (obj != noreg) {
3251 load_heap_oop(pre_val, Address(obj, 0));
3252 }
3253
3254 // Is the previous value null?
3255 cmpptr(pre_val, (int32_t) NULL_WORD);
3256 jcc(Assembler::equal, done);
3257
3258 // Can we store original value in the thread's buffer?
3259 // Is index == 0?
3260 // (The index field is typed as size_t.)
3261
3262 movptr(tmp, index); // tmp := *index_adr
3263 cmpptr(tmp, 0); // tmp == 0?
3264 jcc(Assembler::equal, runtime); // If yes, goto runtime
3265
3266 subptr(tmp, wordSize); // tmp := tmp - wordSize
3267 movptr(index, tmp); // *index_adr := tmp
3268 addptr(tmp, buffer); // tmp := tmp + *buffer_adr
3269
3270 // Record the previous value
3271 movptr(Address(tmp, 0), pre_val);
3272 jmp(done);
3273
3274 bind(runtime);
3275 // save the live input values
3276 if(tosca_live) push(rax);
3277
3278 if (obj != noreg && obj != rax)
3279 push(obj);
3280
3281 if (pre_val != rax)
3282 push(pre_val);
3283
3284 // Calling the runtime using the regular call_VM_leaf mechanism generates
3285 // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
3286 // that checks that *(ebp+frame::interpreter_frame_last_sp) == NULL.
3287 //
3288 // If we are generating the pre-barrier without a frame (e.g. in the
3289 // intrinsified Reference.get() routine) then ebp might be pointing to
3290 // the caller frame and so this check will most likely fail at runtime.
3291 //
3292 // Expanding the call directly bypasses the generation of the check.
3293 // So when we do not have a full interpreter frame on the stack
3294 // expand_call should be passed true.
3295
3296 NOT_LP64( push(thread); )
3297
3298 if (expand_call) {
3299 LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
3300 pass_arg1(this, thread);
3301 pass_arg0(this, pre_val);
3302 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
3303 } else {
3304 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
3305 }
3306
3307 NOT_LP64( pop(thread); )
3308
3309 // save the live input values
3310 if (pre_val != rax)
3311 pop(pre_val);
3312
3313 if (obj != noreg && obj != rax)
3314 pop(obj);
3315
3316 if(tosca_live) pop(rax);
3317
3318 bind(done);
3319 }
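Stripped of the register shuffling, the pre-barrier above has a simple shape: skip if marking is off or the previous value is null, try to record the previous value in the thread's SATB buffer, and fall back to the runtime when the buffer is full. A hedged C++ sketch (the struct is an illustrative stand-in for the PtrQueue layout, not the VM's real types):

#include <cassert>
#include <cstddef>

struct SATBQueue { bool active; size_t index; char* buf; };

static int runtime_calls = 0;
void enqueue_in_runtime(void*) { ++runtime_calls; }  // stands in for g1_wb_pre

void satb_pre_barrier(SATBQueue* q, void* pre_val) {
  if (!q->active || pre_val == nullptr) return;      // marking off, or null
  if (q->index == 0) {                               // thread buffer full
    enqueue_in_runtime(pre_val);                     // slow path into the VM
    return;
  }
  q->index -= sizeof(void*);                         // index counts down, in bytes
  *(void**)(q->buf + q->index) = pre_val;            // record the previous value
}

int main() {
  char storage[4 * sizeof(void*)];
  SATBQueue q = { true, sizeof(storage), storage };
  int dummy;
  satb_pre_barrier(&q, &dummy);
  assert(q.index == 3 * sizeof(void*) && runtime_calls == 0);
  q.index = 0;
  satb_pre_barrier(&q, &dummy);                      // full buffer: runtime
  assert(runtime_calls == 1);
  return 0;
}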
3320
3321 void MacroAssembler::g1_write_barrier_post(Register store_addr,
3322 Register new_val,
3323 Register thread,
3324 Register tmp,
3325 Register tmp2) {
3326 #ifdef _LP64
3327 assert(thread == r15_thread, "must be");
3328 #endif // _LP64
3329
3330 Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
3331 PtrQueue::byte_offset_of_index()));
3332 Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
3333 PtrQueue::byte_offset_of_buf()));
3334
3335 BarrierSet* bs = Universe::heap()->barrier_set();
3336 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
3337 Label done;
3338 Label runtime;
3339
3340 // Does store cross heap regions?
3341
3342 movptr(tmp, store_addr);
3343 xorptr(tmp, new_val);
3344 shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
3345 jcc(Assembler::equal, done);
3346
3347 // crosses regions, storing NULL?
3348
3349 cmpptr(new_val, (int32_t) NULL_WORD);
3350 jcc(Assembler::equal, done);
3351
3352 // storing region crossing non-NULL, is card already dirty?
3353
3354 ExternalAddress cardtable((address) ct->byte_map_base);
3355 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
3356 #ifdef _LP64
3357 const Register card_addr = tmp;
3358
3359 movq(card_addr, store_addr);
3360 shrq(card_addr, CardTableModRefBS::card_shift);
3361
3362 lea(tmp2, cardtable);
3363
3364 // get the address of the card
3365 addq(card_addr, tmp2);
3366 #else
3367 const Register card_index = tmp;
3368
3369 movl(card_index, store_addr);
3370 shrl(card_index, CardTableModRefBS::card_shift);
3371
3372 Address index(noreg, card_index, Address::times_1);
3373 const Register card_addr = tmp;
3374 lea(card_addr, as_Address(ArrayAddress(cardtable, index)));
3375 #endif
3376 cmpb(Address(card_addr, 0), 0);
3377 jcc(Assembler::equal, done);
3378
3379 // storing a region crossing, non-NULL oop, card is clean.
3380 // dirty card and log.
3381
3382 movb(Address(card_addr, 0), 0);
3383
3384 cmpl(queue_index, 0);
3385 jcc(Assembler::equal, runtime);
3386 subl(queue_index, wordSize);
3387 movptr(tmp2, buffer);
3388 #ifdef _LP64
3389 movslq(rscratch1, queue_index);
3390 addq(tmp2, rscratch1);
3391 movq(Address(tmp2, 0), card_addr);
3392 #else
3393 addl(tmp2, queue_index);
3394 movl(Address(tmp2, 0), card_index);
3395 #endif
3396 jmp(done);
3397
3398 bind(runtime);
3399 // save the live input values
3400 push(store_addr);
3401 push(new_val);
3402 #ifdef _LP64
3403 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, r15_thread);
3404 #else
3405 push(thread);
3406 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
3407 pop(thread);
3408 #endif
3409 pop(new_val);
3410 pop(store_addr);
3411
3412 bind(done);
3413 }
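The xor-and-shift at the top of the post-barrier is a cheap same-region test: two addresses lie in the same heap region iff they agree in every bit above the region-size log. A minimal check (the region size is illustrative):

#include <cassert>
#include <cstdint>

const int kLogRegionBytes = 20;  // hypothetical 1 MB regions

// Mirrors "xorptr(tmp, new_val); shrptr(tmp, LogOfHRGrainBytes)".
bool crosses_regions(uintptr_t store_addr, uintptr_t new_val) {
  return ((store_addr ^ new_val) >> kLogRegionBytes) != 0;
}

int main() {
  assert(!crosses_regions(0x100000, 0x1FFFFF));  // same 1 MB region
  assert( crosses_regions(0x100000, 0x200000));  // adjacent regions
  return 0;
}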
3414
3415 #endif // SERIALGC
3416 //////////////////////////////////////////////////////////////////////////////////
3417
3418
3419 void MacroAssembler::store_check(Register obj) {
3420 // Does a store check for the oop in register obj. The content of
3421 // register obj is destroyed afterwards.
3422 store_check_part_1(obj);
3423 store_check_part_2(obj);
3424 }
3425
3426 void MacroAssembler::store_check(Register obj, Address dst) {
3427 store_check(obj);
3428 }
3429
3430
3431 // Split the store check operation so that other instructions can be scheduled in between.
3432 void MacroAssembler::store_check_part_1(Register obj) {
3433 BarrierSet* bs = Universe::heap()->barrier_set();
3434 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
3435 shrptr(obj, CardTableModRefBS::card_shift);
3436 }
3437
3438 void MacroAssembler::store_check_part_2(Register obj) {
3439 BarrierSet* bs = Universe::heap()->barrier_set();
3440 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
3441 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
3442 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
3443
3444 // The calculation for byte_map_base is as follows:
3445 // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
3446 // So this essentially converts an address to a displacement and
3447 // it will never need to be relocated. On 64bit, however, the value may be
3448 // too large for a 32bit displacement.
3449
3450 intptr_t disp = (intptr_t) ct->byte_map_base;
3451 if (is_simm32(disp)) {
3452 Address cardtable(noreg, obj, Address::times_1, disp);
3453 movb(cardtable, 0);
3454 } else {
3455 // By doing it as an ExternalAddress disp could be converted to a rip-relative
3456 // displacement and done in a single instruction given favorable mapping and
3457 // a smarter version of as_Address. Worst case it is two instructions, which
3458 // is no worse than loading disp into a register and doing a simple
3459 // Address() as above.
3460 // We can't do as ExternalAddress as the only style since if disp == 0 we'll
3461 // assert since NULL isn't acceptable in a reloci (see 6644928). In any case
3462 // in some cases we'll get a single instruction version.
3463
3464 ExternalAddress cardtable((address)disp);
3465 Address index(noreg, obj, Address::times_1);
3466 movb(as_Address(ArrayAddress(cardtable, index)), 0);
3467 }
3468 }
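// Illustrative sketch (not generated code): together the two halves implement
// the classic card-table write barrier. In C-like pseudocode, assuming the
// jbyte* byte_map_base described above:
//
//   size_t card = (uintptr_t)obj >> CardTableModRefBS::card_shift;
//   byte_map_base[card] = 0;   // 0 is the dirty-card value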
3469
3470 void MacroAssembler::subptr(Register dst, int32_t imm32) {
3471 LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
3472 }
3473
3474 // Force generation of a 4-byte immediate value even if it fits into 8 bits.
3475 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
3476 LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32));
3477 }
3478
3479 void MacroAssembler::subptr(Register dst, Register src) {
3480 LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
3481 }
3482
3483 // C++ bool manipulation
3484 void MacroAssembler::testbool(Register dst) {
3485 if (sizeof(bool) == 1)
3486 testb(dst, 0xff);
3487 else if (sizeof(bool) == 2) {
3488 // testw implementation needed for two-byte bools
3489 ShouldNotReachHere();
3490 } else if (sizeof(bool) == 4)
3491 testl(dst, dst);
3492 else
3493 // unsupported
3494 ShouldNotReachHere();
3495 }
3496
3497 void MacroAssembler::testptr(Register dst, Register src) {
3498 LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
3499 }
3500
3501 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
3502 void MacroAssembler::tlab_allocate(Register obj,
3503 Register var_size_in_bytes,
3504 int con_size_in_bytes,
3505 Register t1,
3506 Register t2,
3507 Label& slow_case) {
3508 assert_different_registers(obj, t1, t2);
3509 assert_different_registers(obj, var_size_in_bytes, t1);
3510 Register end = t2;
3511 Register thread = NOT_LP64(t1) LP64_ONLY(r15_thread);
3512
3513 verify_tlab();
3514
3515 NOT_LP64(get_thread(thread));
3516
3517 movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
3518 if (var_size_in_bytes == noreg) {
3519 lea(end, Address(obj, con_size_in_bytes));
3520 } else {
3521 lea(end, Address(obj, var_size_in_bytes, Address::times_1));
3522 }
3523 cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
3524 jcc(Assembler::above, slow_case);
3525
3526 // update the tlab top pointer
3527 movptr(Address(thread, JavaThread::tlab_top_offset()), end);
3528
3529 // recover var_size_in_bytes if necessary
3530 if (var_size_in_bytes == end) {
3531 subptr(var_size_in_bytes, obj);
3532 }
3533 verify_tlab();
3534 }
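// Illustrative sketch (not generated code) of the bump-pointer fast path
// above, in C-like pseudocode:
//
//   obj = thread->tlab_top;
//   end = obj + size_in_bytes;                   // constant or register size
//   if (end > thread->tlab_end) goto slow_case;
//   thread->tlab_top = end;                      // obj points at the new object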
3535
3536 // Preserves rbx and rdx.
3537 Register MacroAssembler::tlab_refill(Label& retry,
3538 Label& try_eden,
3539 Label& slow_case) {
3540 Register top = rax;
3541 Register t1 = rcx;
3542 Register t2 = rsi;
3543 Register thread_reg = NOT_LP64(rdi) LP64_ONLY(r15_thread);
3544 assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
3545 Label do_refill, discard_tlab;
3546
3547 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
3548 // No allocation in the shared eden.
3549 jmp(slow_case);
3550 }
3551
3552 NOT_LP64(get_thread(thread_reg));
3553
3554 movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
3555 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
3556
3557 // calculate amount of free space
3558 subptr(t1, top);
3559 shrptr(t1, LogHeapWordSize);
3560
3561 // Retain tlab and allocate object in shared space if
3562 // the amount free in the tlab is too large to discard.
3563 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
3564 jcc(Assembler::lessEqual, discard_tlab);
3565
3566 // Retain
3567 // %%% yuck as movptr...
3568 movptr(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
3569 addptr(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2);
3570 if (TLABStats) {
3571 // increment number of slow_allocations
3572 addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1);
3573 }
3574 jmp(try_eden);
3575
3576 bind(discard_tlab);
3577 if (TLABStats) {
3578 // increment number of refills
3579 addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1);
3580 // accumulate wastage -- t1 is amount free in tlab
3581 addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1);
3582 }
3583
3584 // if tlab is currently allocated (top or end != null) then
3585 // fill [top, end + alignment_reserve) with array object
3586 testptr(top, top);
3587 jcc(Assembler::zero, do_refill);
3588
3589 // set up the mark word
3590 movptr(Address(top, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
3591 // set the length to the remaining space
3592 subptr(t1, typeArrayOopDesc::header_size(T_INT));
3593 addptr(t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
3594 shlptr(t1, log2_intptr(HeapWordSize/sizeof(jint)));
3595 movl(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
3596 // set klass to intArrayKlass
3597 // dubious reloc: why not an oop reloc?
3598 movptr(t1, ExternalAddress((address)Universe::intArrayKlassObj_addr()));
3599 // Store klass last: concurrent GCs assume the length is valid if the
3600 // klass field is not null.
3601 store_klass(top, t1);
3602
3603 movptr(t1, top);
3604 subptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
3605 incr_allocated_bytes(thread_reg, t1, 0);
3606
3607 // refill the tlab with an eden allocation
3608 bind(do_refill);
3609 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
3610 shlptr(t1, LogHeapWordSize);
3611 // allocate new tlab, address returned in top
3612 eden_allocate(top, t1, 0, t2, slow_case);
3613
3614 // Check that t1 was preserved in eden_allocate.
3615 #ifdef ASSERT
3616 if (UseTLAB) {
3617 Label ok;
3618 Register tsize = rsi;
3619 assert_different_registers(tsize, thread_reg, t1);
3620 push(tsize);
3621 movptr(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
3622 shlptr(tsize, LogHeapWordSize);
3623 cmpptr(t1, tsize);
3624 jcc(Assembler::equal, ok);
3625 STOP("assert(t1 != tlab size)");
3626 should_not_reach_here();
3627
3628 bind(ok);
3629 pop(tsize);
3630 }
3631 #endif
3632 movptr(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
3633 movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
3634 addptr(top, t1);
3635 subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
3636 movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
3637 verify_tlab();
3638 jmp(retry);
3639
3640 return thread_reg; // for use by caller
3641 }
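// Illustrative summary (not generated code) of the refill policy implemented
// above:
//
//   free_words = (tlab_end - tlab_top) >> LogHeapWordSize;
//   if (free_words > refill_waste_limit)
//     retain the TLAB: bump the waste limit and allocate this object in eden;
//   else
//     discard it: fill [top, end + alignment_reserve) with an int[] filler
//     object so the heap stays parseable, then refill the TLAB from eden.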
3642
3643 void MacroAssembler::incr_allocated_bytes(Register thread,
3644 Register var_size_in_bytes,
3645 int con_size_in_bytes,
3646 Register t1) {
3647 if (!thread->is_valid()) {
3648 #ifdef _LP64
3649 thread = r15_thread;
3650 #else
3651 assert(t1->is_valid(), "need temp reg");
3652 thread = t1;
3653 get_thread(thread);
3654 #endif
3655 }
3656
3657 #ifdef _LP64
3658 if (var_size_in_bytes->is_valid()) {
3659 addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
3660 } else {
3661 addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
3662 }
3663 #else
3664 if (var_size_in_bytes->is_valid()) {
3665 addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
3666 } else {
3667 addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
3668 }
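// allocated_bytes is a 64-bit counter even on 32-bit: add the low word,
// then propagate the carry into the high word.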
3669 adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0);
3670 #endif
3671 }
3672
3673 void MacroAssembler::fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use) {
3674 pusha();
3675
3676 // if we are coming from c1, xmm registers may be live
3677 int off = 0;
3678 if (UseSSE == 1) {
3679 subptr(rsp, sizeof(jdouble)*8);
3680 movflt(Address(rsp,off++*sizeof(jdouble)),xmm0);
3681 movflt(Address(rsp,off++*sizeof(jdouble)),xmm1);
3682 movflt(Address(rsp,off++*sizeof(jdouble)),xmm2);
3683 movflt(Address(rsp,off++*sizeof(jdouble)),xmm3);
3684 movflt(Address(rsp,off++*sizeof(jdouble)),xmm4);
3685 movflt(Address(rsp,off++*sizeof(jdouble)),xmm5);
3686 movflt(Address(rsp,off++*sizeof(jdouble)),xmm6);
3687 movflt(Address(rsp,off++*sizeof(jdouble)),xmm7);
3688 } else if (UseSSE >= 2) {
3689 #ifdef COMPILER2
3690 if (MaxVectorSize > 16) {
3691 assert(UseAVX > 0, "256bit vectors are supported only with AVX");
3692 // Save upper half of the YMM registers
3693 subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
3694 vextractf128h(Address(rsp, 0),xmm0);
3695 vextractf128h(Address(rsp, 16),xmm1);
3696 vextractf128h(Address(rsp, 32),xmm2);
3697 vextractf128h(Address(rsp, 48),xmm3);
3698 vextractf128h(Address(rsp, 64),xmm4);
3699 vextractf128h(Address(rsp, 80),xmm5);
3700 vextractf128h(Address(rsp, 96),xmm6);
3701 vextractf128h(Address(rsp,112),xmm7);
3702 #ifdef _LP64
3703 vextractf128h(Address(rsp,128),xmm8);
3704 vextractf128h(Address(rsp,144),xmm9);
3705 vextractf128h(Address(rsp,160),xmm10);
3706 vextractf128h(Address(rsp,176),xmm11);
3707 vextractf128h(Address(rsp,192),xmm12);
3708 vextractf128h(Address(rsp,208),xmm13);
3709 vextractf128h(Address(rsp,224),xmm14);
3710 vextractf128h(Address(rsp,240),xmm15);
3711 #endif
3712 }
3713 #endif
3714 // Save whole 128-bit (16-byte) XMM registers
3715 subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
3716 movdqu(Address(rsp,off++*16),xmm0);
3717 movdqu(Address(rsp,off++*16),xmm1);
3718 movdqu(Address(rsp,off++*16),xmm2);
3719 movdqu(Address(rsp,off++*16),xmm3);
3720 movdqu(Address(rsp,off++*16),xmm4);
3721 movdqu(Address(rsp,off++*16),xmm5);
3722 movdqu(Address(rsp,off++*16),xmm6);
3723 movdqu(Address(rsp,off++*16),xmm7);
3724 #ifdef _LP64
3725 movdqu(Address(rsp,off++*16),xmm8);
3726 movdqu(Address(rsp,off++*16),xmm9);
3727 movdqu(Address(rsp,off++*16),xmm10);
3728 movdqu(Address(rsp,off++*16),xmm11);
3729 movdqu(Address(rsp,off++*16),xmm12);
3730 movdqu(Address(rsp,off++*16),xmm13);
3731 movdqu(Address(rsp,off++*16),xmm14);
3732 movdqu(Address(rsp,off++*16),xmm15);
3733 #endif
3734 }
3735
3736 // Preserve registers across runtime call
3737 int incoming_argument_and_return_value_offset = -1;
3738 if (num_fpu_regs_in_use > 1) {
3739 // Must preserve all other FPU regs (we could alternatively convert
3740 // SharedRuntime::dsin, dcos etc. into assembly routines known not to trash
3741 // FPU state, but we cannot trust the C compiler)
3742 NEEDS_CLEANUP;
3743 // NOTE that in this case we also push the incoming argument(s) to
3744 // the stack and restore them later; we also use this stack slot to
3745 // hold the return value from dsin, dcos etc.
3746 for (int i = 0; i < num_fpu_regs_in_use; i++) {
3747 subptr(rsp, sizeof(jdouble));
3748 fstp_d(Address(rsp, 0));
3749 }
3750 incoming_argument_and_return_value_offset = sizeof(jdouble)*(num_fpu_regs_in_use-1);
3751 for (int i = nb_args-1; i >= 0; i--) {
3752 fld_d(Address(rsp, incoming_argument_and_return_value_offset-i*sizeof(jdouble)));
3753 }
3754 }
3755
3756 subptr(rsp, nb_args*sizeof(jdouble));
3757 for (int i = 0; i < nb_args; i++) {
3758 fstp_d(Address(rsp, i*sizeof(jdouble)));
3759 }
3760
3761 #ifdef _LP64
3762 if (nb_args > 0) {
3763 movdbl(xmm0, Address(rsp, 0));
3764 }
3765 if (nb_args > 1) {
3766 movdbl(xmm1, Address(rsp, sizeof(jdouble)));
3767 }
3768 assert(nb_args <= 2, "unsupported number of args");
3769 #endif // _LP64
3770
3771 // NOTE: we must not use call_VM_leaf here because that requires a
3772 // complete interpreter frame in debug mode -- same bug as 4387334.
3773 // MacroAssembler::call_VM_leaf_base is perfectly safe and will
3774 // follow the proper 64-bit ABI.
3775
3776 NEEDS_CLEANUP;
3777 // Need to add stack banging before this runtime call if it needs to
3778 // be taken; however, there is no generic stack banging routine at
3779 // the MacroAssembler level
3780
3781 MacroAssembler::call_VM_leaf_base(runtime_entry, 0);
3782
3783 #ifdef _LP64
3784 movsd(Address(rsp, 0), xmm0);
3785 fld_d(Address(rsp, 0));
3786 #endif // _LP64
3787 addptr(rsp, sizeof(jdouble) * nb_args);
3788 if (num_fpu_regs_in_use > 1) {
3789 // Must save return value to stack and then restore entire FPU
3790 // stack except incoming arguments
3791 fstp_d(Address(rsp, incoming_argument_and_return_value_offset));
3792 for (int i = 0; i < num_fpu_regs_in_use - nb_args; i++) {
3793 fld_d(Address(rsp, 0));
3794 addptr(rsp, sizeof(jdouble));
3795 }
3796 fld_d(Address(rsp, (nb_args-1)*sizeof(jdouble)));
3797 addptr(rsp, sizeof(jdouble) * nb_args);
3798 }
3799
3800 off = 0;
3801 if (UseSSE == 1) {
3802 movflt(xmm0, Address(rsp,off++*sizeof(jdouble)));
3803 movflt(xmm1, Address(rsp,off++*sizeof(jdouble)));
3804 movflt(xmm2, Address(rsp,off++*sizeof(jdouble)));
3805 movflt(xmm3, Address(rsp,off++*sizeof(jdouble)));
3806 movflt(xmm4, Address(rsp,off++*sizeof(jdouble)));
3807 movflt(xmm5, Address(rsp,off++*sizeof(jdouble)));
3808 movflt(xmm6, Address(rsp,off++*sizeof(jdouble)));
3809 movflt(xmm7, Address(rsp,off++*sizeof(jdouble)));
3810 addptr(rsp, sizeof(jdouble)*8);
3811 } else if (UseSSE >= 2) {
3812 // Restore whole 128-bit (16-byte) XMM registers
3813 movdqu(xmm0, Address(rsp,off++*16));
3814 movdqu(xmm1, Address(rsp,off++*16));
3815 movdqu(xmm2, Address(rsp,off++*16));
3816 movdqu(xmm3, Address(rsp,off++*16));
3817 movdqu(xmm4, Address(rsp,off++*16));
3818 movdqu(xmm5, Address(rsp,off++*16));
3819 movdqu(xmm6, Address(rsp,off++*16));
3820 movdqu(xmm7, Address(rsp,off++*16));
3821 #ifdef _LP64
3822 movdqu(xmm8, Address(rsp,off++*16));
3823 movdqu(xmm9, Address(rsp,off++*16));
3824 movdqu(xmm10, Address(rsp,off++*16));
3825 movdqu(xmm11, Address(rsp,off++*16));
3826 movdqu(xmm12, Address(rsp,off++*16));
3827 movdqu(xmm13, Address(rsp,off++*16));
3828 movdqu(xmm14, Address(rsp,off++*16));
3829 movdqu(xmm15, Address(rsp,off++*16));
3830 #endif
3831 addptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
3832 #ifdef COMPILER2
3833 if (MaxVectorSize > 16) {
3834 // Restore upper half of the YMM registers.
3835 vinsertf128h(xmm0, Address(rsp, 0));
3836 vinsertf128h(xmm1, Address(rsp, 16));
3837 vinsertf128h(xmm2, Address(rsp, 32));
3838 vinsertf128h(xmm3, Address(rsp, 48));
3839 vinsertf128h(xmm4, Address(rsp, 64));
3840 vinsertf128h(xmm5, Address(rsp, 80));
3841 vinsertf128h(xmm6, Address(rsp, 96));
3842 vinsertf128h(xmm7, Address(rsp,112));
3843 #ifdef _LP64
3844 vinsertf128h(xmm8, Address(rsp,128));
3845 vinsertf128h(xmm9, Address(rsp,144));
3846 vinsertf128h(xmm10, Address(rsp,160));
3847 vinsertf128h(xmm11, Address(rsp,176));
3848 vinsertf128h(xmm12, Address(rsp,192));
3849 vinsertf128h(xmm13, Address(rsp,208));
3850 vinsertf128h(xmm14, Address(rsp,224));
3851 vinsertf128h(xmm15, Address(rsp,240));
3852 #endif
3853 addptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
3854 }
3855 #endif
3856 }
3857 popa();
3858 }
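// Illustrative summary (not generated code) of the convention used above:
// the nb_args x87 arguments are spilled to the stack; on LP64 they are
// reloaded into xmm0/xmm1 for the C ABI; the runtime routine is invoked via
// call_VM_leaf_base; and the result ends up back on the x87 stack (F-TOS).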
3859
3860 static const double pi_4 = 0.7853981633974483;
3861
3862 void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) {
3863 // A hand-coded argument reduction for values with |x| in (pi/4, pi/2)
3864 // was attempted in this code; unfortunately it appears that the
3865 // switch to 80-bit precision and back causes this to be
3866 // unprofitable compared with simply performing a runtime call if
3867 // the argument is out of the (-pi/4, pi/4) range.
3868
3869 Register tmp = noreg;
3870 if (!VM_Version::supports_cmov()) {
3871 // fcmp needs a temporary, so preserve rbx.
3872 tmp = rbx;
3873 push(tmp);
3874 }
3875
3876 Label slow_case, done;
3877
3878 ExternalAddress pi4_adr = (address)&pi_4;
3879 if (reachable(pi4_adr)) {
3880 // x ?<= pi/4
3881 fld_d(pi4_adr);
3882 fld_s(1); // Stack: X PI/4 X
3883 fabs(); // Stack: |X| PI/4 X
3884 fcmp(tmp);
3885 jcc(Assembler::above, slow_case);
3886
3887 // fastest case: -pi/4 <= x <= pi/4
3888 switch(trig) {
3889 case 's':
3890 fsin();
3891 break;
3892 case 'c':
3893 fcos();
3894 break;
3895 case 't':
3896 ftan();
3897 break;
3898 default:
3899 assert(false, "bad intrinsic");
3900 break;
3901 }
3902 jmp(done);
3903 }
3904
3905 // slow case: runtime call
3906 bind(slow_case);
3907
3908 switch(trig) {
3909 case 's':
3910 {
3911 fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), 1, num_fpu_regs_in_use);
3912 }
3913 break;
3914 case 'c':
3915 {
3916 fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), 1, num_fpu_regs_in_use);
3917 }
3918 break;
3919 case 't':
3920 {
3921 fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), 1, num_fpu_regs_in_use);
3922 }
3923 break;
3924 default:
3925 assert(false, "bad intrinsic");
3926 break;
3927 }
3928
3929 // Come here with result in F-TOS
3930 bind(done);
3931
3932 if (tmp != noreg) {
3933 pop(tmp);
3934 }
3935 }
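// Illustrative dispatch (not generated code) for the routine above:
//
//   if (|x| <= pi/4)  compute with the x87 instruction (fsin/fcos/ftan);
//   else              call SharedRuntime::dsin/dcos/dtan through
//                     fp_runtime_fallback, preserving live FPU/XMM state.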
3936
3937
3938 // Look up the method for a megamorphic invokeinterface call.
3939 // The target method is determined by <intf_klass, itable_index>.
3940 // The receiver klass is in recv_klass.
3941 // On success, the result will be in method_result, and execution falls through.
3942 // On failure, execution transfers to the given label.
3943 void MacroAssembler::lookup_interface_method(Register recv_klass,
3944 Register intf_klass,
3945 RegisterOrConstant itable_index,
3946 Register method_result,
3947 Register scan_temp,
3948 Label& L_no_such_interface) {
3949 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
3950 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
3951 "caller must use same register for non-constant itable index as for method");
3952
3953 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
3954 int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
3955 int itentry_off = itableMethodEntry::method_offset_in_bytes();
3956 int scan_step = itableOffsetEntry::size() * wordSize;
3957 int vte_size = vtableEntry::size() * wordSize;
3958 Address::ScaleFactor times_vte_scale = Address::times_ptr;
3959 assert(vte_size == wordSize, "else adjust times_vte_scale");
3960
3961 movl(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize));
3962
3963 // %%% Could store the aligned, prescaled offset in the klassoop.
3964 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
3965 if (HeapWordsPerLong > 1) {
3966 // Round up to align_object_offset boundary
3967 // see code for InstanceKlass::start_of_itable!
3968 round_to(scan_temp, BytesPerLong);
3969 }
3970
3971 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
3972 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
3973 lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
3974
3975 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
3976 // if (scan->interface() == intf) {
3977 // result = (klass + scan->offset() + itable_index);
3978 // }
3979 // }
3980 Label search, found_method;
3981
3982 for (int peel = 1; peel >= 0; peel--) {
3983 movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
3984 cmpptr(intf_klass, method_result);
3985
3986 if (peel) {
3987 jccb(Assembler::equal, found_method);
3988 } else {
3989 jccb(Assembler::notEqual, search);
3990 // (invert the test to fall through to found_method...)
3991 }
3992
3993 if (!peel) break;
3994
3995 bind(search);
3996
3997 // Check that the previous entry is non-null. A null entry means that
3998 // the receiver class doesn't implement the interface, and wasn't the
3999 // same as when the caller was compiled.
4000 testptr(method_result, method_result);
4001 jcc(Assembler::zero, L_no_such_interface);
4002 addptr(scan_temp, scan_step);
4003 }
4004
4005 bind(found_method);
4006
4007 // Got a hit.
4008 movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
4009 movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
4010 }
4011
4012
4013 // virtual method calling
4014 void MacroAssembler::lookup_virtual_method(Register recv_klass,
4015 RegisterOrConstant vtable_index,
4016 Register method_result) {
4017 const int base = InstanceKlass::vtable_start_offset() * wordSize;
4018 assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
4019 Address vtable_entry_addr(recv_klass,
4020 vtable_index, Address::times_ptr,
4021 base + vtableEntry::method_offset_in_bytes());
4022 movptr(method_result, vtable_entry_addr);
4023 }
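// Illustrative sketch (not generated code): since each vtableEntry is one
// word, the load above amounts to
//
//   method_result = ((Method**)((char*)recv_klass + vtable_base))[vtable_index];
//
// where vtable_base covers vtable_start_offset plus the method field offset.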
4024
4025
4026 void MacroAssembler::check_klass_subtype(Register sub_klass,
4027 Register super_klass,
4028 Register temp_reg,
4029 Label& L_success) {
4030 Label L_failure;
4031 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL);
4032 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
4033 bind(L_failure);
4034 }
4035
4036
4037 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
4038 Register super_klass,
4039 Register temp_reg,
4040 Label* L_success,
4041 Label* L_failure,
4042 Label* L_slow_path,
4043 RegisterOrConstant super_check_offset) {
4044 assert_different_registers(sub_klass, super_klass, temp_reg);
4045 bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
4046 if (super_check_offset.is_register()) {
4047 assert_different_registers(sub_klass, super_klass,
4048 super_check_offset.as_register());
4049 } else if (must_load_sco) {
4050 assert(temp_reg != noreg, "supply either a temp or a register offset");
4051 }
4052
4053 Label L_fallthrough;
4054 int label_nulls = 0;
4055 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
4056 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
4057 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
4058 assert(label_nulls <= 1, "at most one NULL in the batch");
4059
4060 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
4061 int sco_offset = in_bytes(Klass::super_check_offset_offset());
4062 Address super_check_offset_addr(super_klass, sco_offset);
4063
4064 // Hacked jcc, which "knows" that L_fallthrough, at least, is in
4065 // range of a jccb. If this routine grows larger, reconsider at
4066 // least some of these.
4067 #define local_jcc(assembler_cond, label) \
4068 if (&(label) == &L_fallthrough) jccb(assembler_cond, label); \
4069 else jcc( assembler_cond, label) /*omit semi*/
4070
4071 // Hacked jmp, which may only be used just before L_fallthrough.
4072 #define final_jmp(label) \
4073 if (&(label) == &L_fallthrough) { /*do nothing*/ } \
4074 else jmp(label) /*omit semi*/
4075
4076 // If the pointers are equal, we are done (e.g., String[] elements).
4077 // This self-check enables sharing of secondary supertype arrays among
4078 // non-primary types such as array-of-interface. Otherwise, each such
4079 // type would need its own customized SSA.
4080 // We move this check to the front of the fast path because many
4081 // type checks are in fact trivially successful in this manner,
4082 // so we get a nicely predicted branch right at the start of the check.
4083 cmpptr(sub_klass, super_klass);
4084 local_jcc(Assembler::equal, *L_success);
4085
4086 // Check the supertype display:
4087 if (must_load_sco) {
4088 // Positive movl does the right thing on LP64.
4089 movl(temp_reg, super_check_offset_addr);
4090 super_check_offset = RegisterOrConstant(temp_reg);
4091 }
4092 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
4093 cmpptr(super_klass, super_check_addr); // load displayed supertype
4094
4095 // This check has worked decisively for primary supers.
4096 // Secondary supers are sought in the super_cache ('super_cache_addr').
4097 // (Secondary supers are interfaces and very deeply nested subtypes.)
4098 // This works in the same check above because of a tricky aliasing
4099 // between the super_cache and the primary super display elements.
4100 // (The 'super_check_addr' can address either, as the case requires.)
4101 // Note that the cache is updated below if it does not help us find
4102 // what we need immediately.
4103 // So if it was a primary super, we can just fail immediately.
4104 // Otherwise, it's the slow path for us (no success at this point).
4105
4106 if (super_check_offset.is_register()) {
4107 local_jcc(Assembler::equal, *L_success);
4108 cmpl(super_check_offset.as_register(), sc_offset);
4109 if (L_failure == &L_fallthrough) {
4110 local_jcc(Assembler::equal, *L_slow_path);
4111 } else {
4112 local_jcc(Assembler::notEqual, *L_failure);
4113 final_jmp(*L_slow_path);
4114 }
4115 } else if (super_check_offset.as_constant() == sc_offset) {
4116 // Need a slow path; fast failure is impossible.
4117 if (L_slow_path == &L_fallthrough) {
4118 local_jcc(Assembler::equal, *L_success);
4119 } else {
4120 local_jcc(Assembler::notEqual, *L_slow_path);
4121 final_jmp(*L_success);
4122 }
4123 } else {
4124 // No slow path; it's a fast decision.
4125 if (L_failure == &L_fallthrough) {
4126 local_jcc(Assembler::equal, *L_success);
4127 } else {
4128 local_jcc(Assembler::notEqual, *L_failure);
4129 final_jmp(*L_success);
4130 }
4131 }
4132
4133 bind(L_fallthrough);
4134
4135 #undef local_jcc
4136 #undef final_jmp
4137 }
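// Illustrative decision logic (not generated code) for the fast path above:
//
//   if (sub_klass == super_klass)                         -> success
//   if (*(sub_klass + super_check_offset) == super_klass) -> success (display hit)
//   else if (super_check_offset == sc_offset)             -> slow path
//   else                                                  -> failure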
4138
4139
4140 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
4141 Register super_klass,
4142 Register temp_reg,
4143 Register temp2_reg,
4144 Label* L_success,
4145 Label* L_failure,
4146 bool set_cond_codes) {
4147 assert_different_registers(sub_klass, super_klass, temp_reg);
4148 if (temp2_reg != noreg)
4149 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
4150 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
4151
4152 Label L_fallthrough;
4153 int label_nulls = 0;
4154 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
4155 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
4156 assert(label_nulls <= 1, "at most one NULL in the batch");
4157
4158 // a couple of useful fields in sub_klass:
4159 int ss_offset = in_bytes(Klass::secondary_supers_offset());
4160 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
4161 Address secondary_supers_addr(sub_klass, ss_offset);
4162 Address super_cache_addr( sub_klass, sc_offset);
4163
4164 // Do a linear scan of the secondary super-klass chain.
4165 // This code is rarely used, so simplicity is a virtue here.
4166 // The repne_scan instruction uses fixed registers, which we must spill.
4167 // Don't worry too much about pre-existing connections with the input regs.
4168
4169 assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
4170 assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)
4171
4172 // Get super_klass value into rax (even if it was in rdi or rcx).
4173 bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
4174 if (super_klass != rax || UseCompressedOops) {
4175 if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
4176 mov(rax, super_klass);
4177 }
4178 if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
4179 if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }
4180
4181 #ifndef PRODUCT
4182 int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
4183 ExternalAddress pst_counter_addr((address) pst_counter);
4184 NOT_LP64( incrementl(pst_counter_addr) );
4185 LP64_ONLY( lea(rcx, pst_counter_addr) );
4186 LP64_ONLY( incrementl(Address(rcx, 0)) );
4187 #endif //PRODUCT
4188
4189 // We will consult the secondary-super array.
4190 movptr(rdi, secondary_supers_addr);
4191 // Load the array length. (Positive movl does the right thing on LP64.)
4192 movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
4193 // Skip to start of data.
4194 addptr(rdi, Array<Klass*>::base_offset_in_bytes());
4195
4196 // Scan RCX words at [RDI] for an occurrence of RAX.
4197 // Set NZ/Z based on last compare.
4198 // The Z flag will not be set by 'repne' if RCX == 0, since 'repne' itself
4199 // does not change flags; only the repeated scas instruction sets them.
4200 // Set Z = 0 (not equal) before 'repne' to indicate that class was not found.
4201
4202 testptr(rax,rax); // Set Z = 0
4203 repne_scan();
4204
4205 // Unspill the temp. registers:
4206 if (pushed_rdi) pop(rdi);
4207 if (pushed_rcx) pop(rcx);
4208 if (pushed_rax) pop(rax);
4209
4210 if (set_cond_codes) {
4211 // Special hack for the AD files: rdi is guaranteed non-zero.
4212 assert(!pushed_rdi, "rdi must be left non-NULL");
4213 // Also, the condition codes are properly set Z/NZ on succeed/failure.
4214 }
4215
4216 if (L_failure == &L_fallthrough)
4217 jccb(Assembler::notEqual, *L_failure);
4218 else jcc(Assembler::notEqual, *L_failure);
4219
4220 // Success. Cache the super we found and proceed in triumph.
4221 movptr(super_cache_addr, super_klass);
4222
4223 if (L_success != &L_fallthrough) {
4224 jmp(*L_success);
4225 }
4226
4227 #undef IS_A_TEMP
4228
4229 bind(L_fallthrough);
4230 }
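// Illustrative summary (not generated code) of the slow path above: a linear
// repne-scan of sub_klass's secondary-supers array looking for super_klass;
// on a hit the result is cached in sub_klass->secondary_super_cache so the
// fast path can find it next time.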
4231
4232
4233 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
4234 if (VM_Version::supports_cmov()) {
4235 cmovl(cc, dst, src);
4236 } else {
4237 Label L;
4238 jccb(negate_condition(cc), L);
4239 movl(dst, src);
4240 bind(L);
4241 }
4242 }
4243
4244 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
4245 if (VM_Version::supports_cmov()) {
4246 cmovl(cc, dst, src);
4247 } else {
4248 Label L;
4249 jccb(negate_condition(cc), L);
4250 movl(dst, src);
4251 bind(L);
4252 }
4253 }
4254
4255 void MacroAssembler::verify_oop(Register reg, const char* s) {
4256 if (!VerifyOops) return;
4257
4258 // Build the message, including the register name, for verify_oop_subroutine
4259 char* b = new char[strlen(s) + 50];
4260 sprintf(b, "verify_oop: %s: %s", reg->name(), s);
4261 BLOCK_COMMENT("verify_oop {");
4262 #ifdef _LP64
4263 push(rscratch1); // save r10, trashed by movptr()
4264 #endif
4265 push(rax); // save rax
4266 push(reg); // pass register argument
4267 ExternalAddress buffer((address) b);
4268 // avoid using pushptr, as it modifies scratch registers
4269 // and our contract is not to modify anything
4270 movptr(rax, buffer.addr());
4271 push(rax);
4272 // call indirectly to solve generation ordering problem
4273 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
4274 call(rax);
4275 // Caller pops the arguments (oop, message) and restores rax, r10
4276 BLOCK_COMMENT("} verify_oop");
4277 }
4278
4279
4280 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
4281 Register tmp,
4282 int offset) {
4283 intptr_t value = *delayed_value_addr;
4284 if (value != 0)
4285 return RegisterOrConstant(value + offset);
4286
4287 // load indirectly to solve generation ordering problem
4288 movptr(tmp, ExternalAddress((address) delayed_value_addr));
4289
4290 #ifdef ASSERT
4291 { Label L;
4292 testptr(tmp, tmp);
4293 if (WizardMode) {
4294 jcc(Assembler::notZero, L);
4295 char* buf = new char[40];
4296 sprintf(buf, "DelayedValue="INTPTR_FORMAT, delayed_value_addr[1]);
4297 STOP(buf);
4298 } else {
4299 jccb(Assembler::notZero, L);
4300 hlt();
4301 }
4302 bind(L);
4303 }
4304 #endif
4305
4306 if (offset != 0)
4307 addptr(tmp, offset);
4308
4309 return RegisterOrConstant(tmp);
4310 }
4311
4312
4313 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
4314 int extra_slot_offset) {
4315 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
4316 int stackElementSize = Interpreter::stackElementSize;
4317 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
4318 #ifdef ASSERT
4319 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
4320 assert(offset1 - offset == stackElementSize, "correct arithmetic");
4321 #endif
4322 Register scale_reg = noreg;
4323 Address::ScaleFactor scale_factor = Address::no_scale;
4324 if (arg_slot.is_constant()) {
4325 offset += arg_slot.as_constant() * stackElementSize;
4326 } else {
4327 scale_reg = arg_slot.as_register();
4328 scale_factor = Address::times(stackElementSize);
4329 }
4330 offset += wordSize; // return PC is on stack
4331 return Address(rsp, scale_reg, scale_factor, offset);
4332 }
4333
4334
4335 void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
4336 if (!VerifyOops) return;
4337
4338 // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
4339 // Build the message for verify_oop_subroutine
4340 char* b = new char[strlen(s) + 50];
4341 sprintf(b, "verify_oop_addr: %s", s);
4342
4343 #ifdef _LP64
4344 push(rscratch1); // save r10, trashed by movptr()
4345 #endif
4346 push(rax); // save rax
4347 // addr may contain rsp so we will have to adjust it based on the push
4348 // we just did (and on 64 bit we do two pushes)
4349 // NOTE: the 64-bit code once had a bug here: it did movq(addr, rax), which
4350 // stores rax into addr, the reverse of what was intended.
4351 if (addr.uses(rsp)) {
4352 lea(rax, addr);
4353 pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
4354 } else {
4355 pushptr(addr);
4356 }
4357
4358 ExternalAddress buffer((address) b);
4359 // pass msg argument
4360 // avoid using pushptr, as it modifies scratch registers
4361 // and our contract is not to modify anything
4362 movptr(rax, buffer.addr());
4363 push(rax);
4364
4365 // call indirectly to solve generation ordering problem
4366 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
4367 call(rax);
4368 // Caller pops the arguments (addr, message) and restores rax, r10.
4369 }
4370
4371 void MacroAssembler::verify_tlab() {
4372 #ifdef ASSERT
4373 if (UseTLAB && VerifyOops) {
4374 Label next, ok;
4375 Register t1 = rsi;
4376 Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread);
4377
4378 push(t1);
4379 NOT_LP64(push(thread_reg));
4380 NOT_LP64(get_thread(thread_reg));
4381
4382 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
4383 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
4384 jcc(Assembler::aboveEqual, next);
4385 STOP("assert(top >= start)");
4386 should_not_reach_here();
4387
4388 bind(next);
4389 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
4390 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
4391 jcc(Assembler::aboveEqual, ok);
4392 STOP("assert(top <= end)");
4393 should_not_reach_here();
4394
4395 bind(ok);
4396 NOT_LP64(pop(thread_reg));
4397 pop(t1);
4398 }
4399 #endif
4400 }
4401
4402 class ControlWord {
4403 public:
4404 int32_t _value;
4405
4406 int rounding_control() const { return (_value >> 10) & 3 ; }
4407 int precision_control() const { return (_value >> 8) & 3 ; }
4408 bool precision() const { return ((_value >> 5) & 1) != 0; }
4409 bool underflow() const { return ((_value >> 4) & 1) != 0; }
4410 bool overflow() const { return ((_value >> 3) & 1) != 0; }
4411 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
4412 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
4413 bool invalid() const { return ((_value >> 0) & 1) != 0; }
4414
4415 void print() const {
4416 // rounding control
4417 const char* rc;
4418 switch (rounding_control()) {
4419 case 0: rc = "round near"; break;
4420 case 1: rc = "round down"; break;
4421 case 2: rc = "round up "; break;
4422 case 3: rc = "chop "; break;
4423 };
4424 // precision control
4425 const char* pc;
4426 switch (precision_control()) {
4427 case 0: pc = "24 bits "; break;
4428 case 1: pc = "reserved"; break;
4429 case 2: pc = "53 bits "; break;
4430 case 3: pc = "64 bits "; break;
4431 };
4432 // flags
4433 char f[9];
4434 f[0] = ' ';
4435 f[1] = ' ';
4436 f[2] = (precision ()) ? 'P' : 'p';
4437 f[3] = (underflow ()) ? 'U' : 'u';
4438 f[4] = (overflow ()) ? 'O' : 'o';
4439 f[5] = (zero_divide ()) ? 'Z' : 'z';
4440 f[6] = (denormalized()) ? 'D' : 'd';
4441 f[7] = (invalid ()) ? 'I' : 'i';
4442 f[8] = '\x0';
4443 // output
4444 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
4445 }
4446
4447 };
4448
4449 class StatusWord {
4450 public:
4451 int32_t _value;
4452
4453 bool busy() const { return ((_value >> 15) & 1) != 0; }
4454 bool C3() const { return ((_value >> 14) & 1) != 0; }
4455 bool C2() const { return ((_value >> 10) & 1) != 0; }
4456 bool C1() const { return ((_value >> 9) & 1) != 0; }
4457 bool C0() const { return ((_value >> 8) & 1) != 0; }
4458 int top() const { return (_value >> 11) & 7 ; }
4459 bool error_status() const { return ((_value >> 7) & 1) != 0; }
4460 bool stack_fault() const { return ((_value >> 6) & 1) != 0; }
4461 bool precision() const { return ((_value >> 5) & 1) != 0; }
4462 bool underflow() const { return ((_value >> 4) & 1) != 0; }
4463 bool overflow() const { return ((_value >> 3) & 1) != 0; }
4464 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
4465 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
4466 bool invalid() const { return ((_value >> 0) & 1) != 0; }
4467
4468 void print() const {
4469 // condition codes
4470 char c[5];
4471 c[0] = (C3()) ? '3' : '-';
4472 c[1] = (C2()) ? '2' : '-';
4473 c[2] = (C1()) ? '1' : '-';
4474 c[3] = (C0()) ? '0' : '-';
4475 c[4] = '\x0';
4476 // flags
4477 char f[9];
4478 f[0] = (error_status()) ? 'E' : '-';
4479 f[1] = (stack_fault ()) ? 'S' : '-';
4480 f[2] = (precision ()) ? 'P' : '-';
4481 f[3] = (underflow ()) ? 'U' : '-';
4482 f[4] = (overflow ()) ? 'O' : '-';
4483 f[5] = (zero_divide ()) ? 'Z' : '-';
4484 f[6] = (denormalized()) ? 'D' : '-';
4485 f[7] = (invalid ()) ? 'I' : '-';
4486 f[8] = '\x0';
4487 // output
4488 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top());
4489 }
4490
4491 };
4492
4493 class TagWord {
4494 public:
4495 int32_t _value;
4496
4497 int tag_at(int i) const { return (_value >> (i*2)) & 3; }
4498
4499 void print() const {
4500 printf("%04x", _value & 0xFFFF);
4501 }
4502
4503 };
4504
4505 class FPU_Register {
4506 public:
4507 int32_t _m0;
4508 int32_t _m1;
4509 int16_t _ex;
4510
4511 bool is_indefinite() const {
4512 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
4513 }
4514
4515 void print() const {
4516 char sign = (_ex < 0) ? '-' : '+';
4517 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " ";
4518 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind);
4519 };
4520
4521 };
4522
4523 class FPU_State {
4524 public:
4525 enum {
4526 register_size = 10,
4527 number_of_registers = 8,
4528 register_mask = 7
4529 };
4530
4531 ControlWord _control_word;
4532 StatusWord _status_word;
4533 TagWord _tag_word;
4534 int32_t _error_offset;
4535 int32_t _error_selector;
4536 int32_t _data_offset;
4537 int32_t _data_selector;
4538 int8_t _register[register_size * number_of_registers];
4539
4540 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
4541 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; }
4542
4543 const char* tag_as_string(int tag) const {
4544 switch (tag) {
4545 case 0: return "valid";
4546 case 1: return "zero";
4547 case 2: return "special";
4548 case 3: return "empty";
4549 }
4550 ShouldNotReachHere();
4551 return NULL;
4552 }
4553
4554 void print() const {
4555 // print computation registers
4556 { int t = _status_word.top();
4557 for (int i = 0; i < number_of_registers; i++) {
4558 int j = (i - t) & register_mask;
4559 printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
4560 st(j)->print();
4561 printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
4562 }
4563 }
4564 printf("\n");
4565 // print control registers
4566 printf("ctrl = "); _control_word.print(); printf("\n");
4567 printf("stat = "); _status_word .print(); printf("\n");
4568 printf("tags = "); _tag_word .print(); printf("\n");
4569 }
4570
4571 };
4572
4573 class Flag_Register {
4574 public:
4575 int32_t _value;
4576
4577 bool overflow() const { return ((_value >> 11) & 1) != 0; }
4578 bool direction() const { return ((_value >> 10) & 1) != 0; }
4579 bool sign() const { return ((_value >> 7) & 1) != 0; }
4580 bool zero() const { return ((_value >> 6) & 1) != 0; }
4581 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; }
4582 bool parity() const { return ((_value >> 2) & 1) != 0; }
4583 bool carry() const { return ((_value >> 0) & 1) != 0; }
4584
4585 void print() const {
4586 // flags
4587 char f[8];
4588 f[0] = (overflow ()) ? 'O' : '-';
4589 f[1] = (direction ()) ? 'D' : '-';
4590 f[2] = (sign ()) ? 'S' : '-';
4591 f[3] = (zero ()) ? 'Z' : '-';
4592 f[4] = (auxiliary_carry()) ? 'A' : '-';
4593 f[5] = (parity ()) ? 'P' : '-';
4594 f[6] = (carry ()) ? 'C' : '-';
4595 f[7] = '\x0';
4596 // output
4597 printf("%08x flags = %s", _value, f);
4598 }
4599
4600 };
4601
4602 class IU_Register {
4603 public:
4604 int32_t _value;
4605
4606 void print() const {
4607 printf("%08x %11d", _value, _value);
4608 }
4609
4610 };
4611
4612 class IU_State {
4613 public:
4614 Flag_Register _eflags;
4615 IU_Register _rdi;
4616 IU_Register _rsi;
4617 IU_Register _rbp;
4618 IU_Register _rsp;
4619 IU_Register _rbx;
4620 IU_Register _rdx;
4621 IU_Register _rcx;
4622 IU_Register _rax;
4623
4624 void print() const {
4625 // computation registers
4626 printf("rax, = "); _rax.print(); printf("\n");
4627 printf("rbx, = "); _rbx.print(); printf("\n");
4628 printf("rcx = "); _rcx.print(); printf("\n");
4629 printf("rdx = "); _rdx.print(); printf("\n");
4630 printf("rdi = "); _rdi.print(); printf("\n");
4631 printf("rsi = "); _rsi.print(); printf("\n");
4632 printf("rbp, = "); _rbp.print(); printf("\n");
4633 printf("rsp = "); _rsp.print(); printf("\n");
4634 printf("\n");
4635 // control registers
4636 printf("flgs = "); _eflags.print(); printf("\n");
4637 }
4638 };
4639
4640
4641 class CPU_State {
4642 public:
4643 FPU_State _fpu_state;
4644 IU_State _iu_state;
4645
4646 void print() const {
4647 printf("--------------------------------------------------\n");
4648 _iu_state .print();
4649 printf("\n");
4650 _fpu_state.print();
4651 printf("--------------------------------------------------\n");
4652 }
4653
4654 };
4655
4656
4657 static void _print_CPU_state(CPU_State* state) {
4658 state->print();
4659 };
4660
4661
4662 void MacroAssembler::print_CPU_state() {
4663 push_CPU_state();
4664 push(rsp); // pass CPU state
4665 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
4666 addptr(rsp, wordSize); // discard argument
4667 pop_CPU_state();
4668 }
4669
4670
4671 static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
4672 static int counter = 0;
4673 FPU_State* fs = &state->_fpu_state;
4674 counter++;
4675 // For leaf calls, only verify that the top few elements remain empty.
4676 // We only need 1 empty at the top for C2 code.
4677 if( stack_depth < 0 ) {
4678 if( fs->tag_for_st(7) != 3 ) {
4679 printf("FPR7 not empty\n");
4680 state->print();
4681 assert(false, "error");
4682 return false;
4683 }
4684 return true; // All other stack states do not matter
4685 }
4686
4687 assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std,
4688 "bad FPU control word");
4689
4690 // compute stack depth
4691 int i = 0;
4692 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++;
4693 int d = i;
4694 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
4695 // verify findings
4696 if (i != FPU_State::number_of_registers) {
4697 // stack not contiguous
4698 printf("%s: stack not contiguous at ST%d\n", s, i);
4699 state->print();
4700 assert(false, "error");
4701 return false;
4702 }
4703 // check if computed stack depth corresponds to expected stack depth
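// (Note: the stack_depth < 0 case already returned above, so this branch
// is effectively dead and only the else arm runs here.)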
4704 if (stack_depth < 0) {
4705 // expected stack depth is -stack_depth or less
4706 if (d > -stack_depth) {
4707 // too many elements on the stack
4708 printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
4709 state->print();
4710 assert(false, "error");
4711 return false;
4712 }
4713 } else {
4714 // expected stack depth is stack_depth
4715 if (d != stack_depth) {
4716 // wrong stack depth
4717 printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
4718 state->print();
4719 assert(false, "error");
4720 return false;
4721 }
4722 }
4723 // everything is cool
4724 return true;
4725 }
4726
4727
4728 void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
4729 if (!VerifyFPU) return;
4730 push_CPU_state();
4731 push(rsp); // pass CPU state
4732 ExternalAddress msg((address) s);
4733 // pass message string s
4734 pushptr(msg.addr());
4735 push(stack_depth); // pass stack depth
4736 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
4737 addptr(rsp, 3 * wordSize); // discard arguments
4738 // check for error
4739 { Label L;
4740 testl(rax, rax);
4741 jcc(Assembler::notZero, L);
4742 int3(); // break if error condition
4743 bind(L);
4744 }
4745 pop_CPU_state();
4746 }
4747
4748 void MacroAssembler::load_klass(Register dst, Register src) {
4749 #ifdef _LP64
4750 if (UseCompressedKlassPointers) {
4751 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
4752 decode_klass_not_null(dst);
4753 } else
4754 #endif
4755 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
4756 }
4757
4758 void MacroAssembler::load_prototype_header(Register dst, Register src) {
4759 #ifdef _LP64
4760 if (UseCompressedKlassPointers) {
4761 assert (Universe::heap() != NULL, "java heap should be initialized");
4762 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
4763 if (Universe::narrow_klass_shift() != 0) {
4764 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
4765 assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
4766 movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset()));
4767 } else {
4768 movq(dst, Address(dst, Klass::prototype_header_offset()));
4769 }
4770 } else
4771 #endif
4772 {
4773 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
4774 movptr(dst, Address(dst, Klass::prototype_header_offset()));
4775 }
4776 }
4777
4778 void MacroAssembler::store_klass(Register dst, Register src) {
4779 #ifdef _LP64
4780 if (UseCompressedKlassPointers) {
4781 encode_klass_not_null(src);
4782 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
4783 } else
4784 #endif
4785 movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
4786 }
4787
4788 void MacroAssembler::load_heap_oop(Register dst, Address src) {
4789 #ifdef _LP64
4790 // FIXME: Must change all places where we try to load the klass.
4791 if (UseCompressedOops) {
4792 movl(dst, src);
4793 decode_heap_oop(dst);
4794 } else
4795 #endif
4796 movptr(dst, src);
4797 }
4798
4799 // Doesn't do verification; generates fixed-size code
4800 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) {
4801 #ifdef _LP64
4802 if (UseCompressedOops) {
4803 movl(dst, src);
4804 decode_heap_oop_not_null(dst);
4805 } else
4806 #endif
4807 movptr(dst, src);
4808 }
4809
4810 void MacroAssembler::store_heap_oop(Address dst, Register src) {
4811 #ifdef _LP64
4812 if (UseCompressedOops) {
4813 assert(!dst.uses(src), "not enough registers");
4814 encode_heap_oop(src);
4815 movl(dst, src);
4816 } else
4817 #endif
4818 movptr(dst, src);
4819 }
4820
4821 void MacroAssembler::cmp_heap_oop(Register src1, Address src2, Register tmp) {
4822 assert_different_registers(src1, tmp);
4823 #ifdef _LP64
4824 if (UseCompressedOops) {
4825 bool did_push = false;
4826 if (tmp == noreg) {
4827 tmp = rax;
4828 push(tmp);
4829 did_push = true;
4830 assert(!src2.uses(rsp), "can't push");
4831 }
4832 load_heap_oop(tmp, src2);
4833 cmpptr(src1, tmp);
4834 if (did_push) pop(tmp);
4835 } else
4836 #endif
4837 cmpptr(src1, src2);
4838 }
4839
4840 // Used for storing NULLs.
4841 void MacroAssembler::store_heap_oop_null(Address dst) {
4842 #ifdef _LP64
4843 if (UseCompressedOops) {
4844 movl(dst, (int32_t)NULL_WORD);
4845 } else {
4846 movslq(dst, (int32_t)NULL_WORD);
4847 }
4848 #else
4849 movl(dst, (int32_t)NULL_WORD);
4850 #endif
4851 }
4852
4853 #ifdef _LP64
4854 void MacroAssembler::store_klass_gap(Register dst, Register src) {
4855 if (UseCompressedKlassPointers) {
4856 // Store to klass gap in destination
4857 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
4858 }
4859 }
4860
4861 #ifdef ASSERT
4862 void MacroAssembler::verify_heapbase(const char* msg) {
4863 assert (UseCompressedOops || UseCompressedKlassPointers, "should be compressed");
4864 assert (Universe::heap() != NULL, "java heap should be initialized");
4865 if (CheckCompressedOops) {
4866 Label ok;
4867 push(rscratch1); // cmpptr trashes rscratch1
4868 cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
4869 jcc(Assembler::equal, ok);
4870 STOP(msg);
4871 bind(ok);
4872 pop(rscratch1);
4873 }
4874 }
4875 #endif
4876
4877 // Algorithm must match oop.inline.hpp encode_heap_oop.
4878 void MacroAssembler::encode_heap_oop(Register r) {
4879 #ifdef ASSERT
4880 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
4881 #endif
4882 verify_oop(r, "broken oop in encode_heap_oop");
4883 if (Universe::narrow_oop_base() == NULL) {
4884 if (Universe::narrow_oop_shift() != 0) {
4885 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4886 shrq(r, LogMinObjAlignmentInBytes);
4887 }
4888 return;
4889 }
4890 testq(r, r);
4891 cmovq(Assembler::equal, r, r12_heapbase);
4892 subq(r, r12_heapbase);
4893 shrq(r, LogMinObjAlignmentInBytes);
4894 }
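// Illustrative math (not generated code) for the encoding above:
//
//   narrow = (oop == NULL) ? 0 : (uint32_t)((oop - heap_base) >> shift);
//
// The cmovq substitutes heap_base for a NULL oop so the subtraction yields 0.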
4895
4896 void MacroAssembler::encode_heap_oop_not_null(Register r) {
4897 #ifdef ASSERT
4898 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
4899 if (CheckCompressedOops) {
4900 Label ok;
4901 testq(r, r);
4902 jcc(Assembler::notEqual, ok);
4903 STOP("null oop passed to encode_heap_oop_not_null");
4904 bind(ok);
4905 }
4906 #endif
4907 verify_oop(r, "broken oop in encode_heap_oop_not_null");
4908 if (Universe::narrow_oop_base() != NULL) {
4909 subq(r, r12_heapbase);
4910 }
4911 if (Universe::narrow_oop_shift() != 0) {
4912 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4913 shrq(r, LogMinObjAlignmentInBytes);
4914 }
4915 }
4916
4917 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
4918 #ifdef ASSERT
4919 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
4920 if (CheckCompressedOops) {
4921 Label ok;
4922 testq(src, src);
4923 jcc(Assembler::notEqual, ok);
4924 STOP("null oop passed to encode_heap_oop_not_null2");
4925 bind(ok);
4926 }
4927 #endif
4928 verify_oop(src, "broken oop in encode_heap_oop_not_null2");
4929 if (dst != src) {
4930 movq(dst, src);
4931 }
4932 if (Universe::narrow_oop_base() != NULL) {
4933 subq(dst, r12_heapbase);
4934 }
4935 if (Universe::narrow_oop_shift() != 0) {
4936 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4937 shrq(dst, LogMinObjAlignmentInBytes);
4938 }
4939 }
4940
4941 void MacroAssembler::decode_heap_oop(Register r) {
4942 #ifdef ASSERT
4943 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
4944 #endif
4945 if (Universe::narrow_oop_base() == NULL) {
4946 if (Universe::narrow_oop_shift() != 0) {
4947 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4948 shlq(r, LogMinObjAlignmentInBytes);
4949 }
4950 } else {
4951 Label done;
4952 shlq(r, LogMinObjAlignmentInBytes);
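// shlq sets ZF here (the shift count is non-zero), so a NULL (zero)
// narrow oop skips the base add and stays NULL.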
4953 jccb(Assembler::equal, done);
4954 addq(r, r12_heapbase);
4955 bind(done);
4956 }
4957 verify_oop(r, "broken oop in decode_heap_oop");
4958 }
4959
4960 void MacroAssembler::decode_heap_oop_not_null(Register r) {
4961 // Note: it will change flags
4962 assert (UseCompressedOops, "should only be used for compressed headers");
4963 assert (Universe::heap() != NULL, "java heap should be initialized");
4964 // Cannot assert, unverified entry point counts instructions (see .ad file)
4965 // vtableStubs also counts instructions in pd_code_size_limit.
4966 // Also do not verify_oop as this is called by verify_oop.
4967 if (Universe::narrow_oop_shift() != 0) {
4968 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4969 shlq(r, LogMinObjAlignmentInBytes);
4970 if (Universe::narrow_oop_base() != NULL) {
4971 addq(r, r12_heapbase);
4972 }
4973 } else {
4974 assert (Universe::narrow_oop_base() == NULL, "sanity");
4975 }
4976 }
4977
4978 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
4979 // Note: it will change flags
4980 assert (UseCompressedOops, "should only be used for compressed headers");
4981 assert (Universe::heap() != NULL, "java heap should be initialized");
4982 // Cannot assert, unverified entry point counts instructions (see .ad file)
4983 // vtableStubs also counts instructions in pd_code_size_limit.
4984 // Also do not verify_oop as this is called by verify_oop.
4985 if (Universe::narrow_oop_shift() != 0) {
4986 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4987 if (LogMinObjAlignmentInBytes == Address::times_8) {
4988 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
4989 } else {
4990 if (dst != src) {
4991 movq(dst, src);
4992 }
4993 shlq(dst, LogMinObjAlignmentInBytes);
4994 if (Universe::narrow_oop_base() != NULL) {
4995 addq(dst, r12_heapbase);
4996 }
4997 }
4998 } else {
4999 assert (Universe::narrow_oop_base() == NULL, "sanity");
5000 if (dst != src) {
5001 movq(dst, src);
5002 }
5003 }
5004 }
5005
5006 void MacroAssembler::encode_klass_not_null(Register r) {
5007 assert(Metaspace::is_initialized(), "metaspace should be initialized");
5008 #ifdef ASSERT
5009 verify_heapbase("MacroAssembler::encode_klass_not_null: heap base corrupted?");
5010 #endif
5011 if (Universe::narrow_klass_base() != NULL) {
5012 subq(r, r12_heapbase);
5013 }
5014 if (Universe::narrow_klass_shift() != 0) {
5015 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
5016 shrq(r, LogKlassAlignmentInBytes);
5017 }
5018 }
5019
5020 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
5021 assert(Metaspace::is_initialized(), "metaspace should be initialized");
5022 #ifdef ASSERT
5023 verify_heapbase("MacroAssembler::encode_klass_not_null2: heap base corrupted?");
5024 #endif
5025 if (dst != src) {
5026 movq(dst, src);
5027 }
5028 if (Universe::narrow_klass_base() != NULL) {
5029 subq(dst, r12_heapbase);
5030 }
5031 if (Universe::narrow_klass_shift() != 0) {
5032 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
5033 shrq(dst, LogKlassAlignmentInBytes);
5034 }
5035 }
5036
5037 void MacroAssembler::decode_klass_not_null(Register r) {
5038 assert(Metaspace::is_initialized(), "metaspace should be initialized");
5039 // Note: it will change flags
5040 assert (UseCompressedKlassPointers, "should only be used for compressed headers");
5041 // Cannot assert, unverified entry point counts instructions (see .ad file)
5042 // vtableStubs also counts instructions in pd_code_size_limit.
5043 // Also do not verify_oop as this is called by verify_oop.
5044 if (Universe::narrow_klass_shift() != 0) {
5045 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
5046 shlq(r, LogKlassAlignmentInBytes);
5047 if (Universe::narrow_klass_base() != NULL) {
5048 addq(r, r12_heapbase);
5049 }
5050 } else {
5051 assert (Universe::narrow_klass_base() == NULL, "sanity");
5052 }
5053 }
5054
5055 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
5056 assert(Metaspace::is_initialized(), "metaspace should be initialized");
5057 // Note: it will change flags
5058 assert (UseCompressedKlassPointers, "should only be used for compressed headers");
5059 // Cannot assert, unverified entry point counts instructions (see .ad file)
5060 // vtableStubs also counts instructions in pd_code_size_limit.
5061 // Also do not verify_oop as this is called by verify_oop.
5062 if (Universe::narrow_klass_shift() != 0) {
5063 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
5064 assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
5065 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
5066 } else {
5067 assert (Universe::narrow_klass_base() == NULL, "sanity");
5068 if (dst != src) {
5069 movq(dst, src);
5070 }
5071 }
5072 }
5073
5074 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
5075 assert (UseCompressedOops, "should only be used for compressed headers");
5076 assert (Universe::heap() != NULL, "java heap should be initialized");
5077 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
5078 int oop_index = oop_recorder()->find_index(obj);
5079 RelocationHolder rspec = oop_Relocation::spec(oop_index);
5080 mov_narrow_oop(dst, oop_index, rspec);
5081 }
5082
5083 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
5084 assert (UseCompressedOops, "should only be used for compressed headers");
5085 assert (Universe::heap() != NULL, "java heap should be initialized");
5086 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
5087 int oop_index = oop_recorder()->find_index(obj);
5088 RelocationHolder rspec = oop_Relocation::spec(oop_index);
5089 mov_narrow_oop(dst, oop_index, rspec);
5090 }
5091
5092 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
5093 assert (UseCompressedKlassPointers, "should only be used for compressed headers");
5094 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
5095 int klass_index = oop_recorder()->find_index(k);
5096 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
5097 mov_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
5098 }
5099
5100 void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
5101 assert (UseCompressedKlassPointers, "should only be used for compressed headers");
5102 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
5103 int klass_index = oop_recorder()->find_index(k);
5104 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
5105 mov_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
5106 }
5107
5108 void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
5109 assert (UseCompressedOops, "should only be used for compressed headers");
5110 assert (Universe::heap() != NULL, "java heap should be initialized");
5111 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
5112 int oop_index = oop_recorder()->find_index(obj);
5113 RelocationHolder rspec = oop_Relocation::spec(oop_index);
5114 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
5115 }
5116
5117 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
5118 assert (UseCompressedOops, "should only be used for compressed headers");
5119 assert (Universe::heap() != NULL, "java heap should be initialized");
5120 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
5121 int oop_index = oop_recorder()->find_index(obj);
5122 RelocationHolder rspec = oop_Relocation::spec(oop_index);
5123 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
5124 }
5125
5126 void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
5127 assert (UseCompressedKlassPointers, "should only be used for compressed headers");
5128 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
5129 int klass_index = oop_recorder()->find_index(k);
5130 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
5131 Assembler::cmp_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
5132 }
5133
5134 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
5135 assert (UseCompressedKlassPointers, "should only be used for compressed headers");
5136 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
5137 int klass_index = oop_recorder()->find_index(k);
5138 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
5139 Assembler::cmp_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
5140 }
5141
5142 void MacroAssembler::reinit_heapbase() {
5143 if (UseCompressedOops || UseCompressedKlassPointers) {
5144 movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
5145 }
5146 }
5147 #endif // _LP64
5148
5149
5150 // C2 compiled method's prolog code.
5151 void MacroAssembler::verified_entry(int framesize, bool stack_bang, bool fp_mode_24b) {
5152
5153 // WARNING: Initial instruction MUST be 5 bytes or longer so that
5154 // NativeJump::patch_verified_entry will be able to patch out the entry
5155 // code safely. The push to verify stack depth is ok at 5 bytes,
5156 // but the frame allocation can be either 3 or 6 bytes. So if we don't do a
5157 // stack bang then we must use the 6-byte frame allocation even if
5158 // we have no frame. :-(
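// For reference, typical 32-bit encodings behind those sizes (a sketch;
// exact sizes depend on the operands, and on x86_64 a REX prefix adds a byte):
//   push imm32     -> 68 id     (5 bytes)
//   sub esp, imm8  -> 83 EC ib  (3 bytes)
//   sub esp, imm32 -> 81 EC id  (6 bytes)
// which is why subptr_imm32() is forced below when there is no stack bang.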
5159
5160 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
5161 // Remove word for return addr
5162 framesize -= wordSize;
5163
5164 // Calls to C2R adapters often do not accept exceptional returns.
5165 // We require that their callers bang for them. But be careful, because
5166 // some VM calls (such as call site linkage) can use several kilobytes of
5167 // stack; the stack safety zone should account for that.
5168 // See bugs 4446381, 4468289, 4497237.
5169 if (stack_bang) {
5170 generate_stack_overflow_check(framesize);
5171
5172 // We always push rbp, so that on return to interpreter rbp, will be
5173 // restored correctly and we can correct the stack.
5174 push(rbp);
5175 // Remove word for ebp
5176 framesize -= wordSize;
5177
5178 // Create frame
5179 if (framesize) {
5180 subptr(rsp, framesize);
5181 }
5182 } else {
5183 // Create frame (force generation of a 4 byte immediate value)
5184 subptr_imm32(rsp, framesize);
5185
5186 // Save RBP register now.
5187 framesize -= wordSize;
5188 movptr(Address(rsp, framesize), rbp);
5189 }
5190
5191 if (VerifyStackAtCalls) { // Majik cookie to verify stack depth
5192 framesize -= wordSize;
5193 movptr(Address(rsp, framesize), (int32_t)0xbadb100d);
5194 }
5195
5196 #ifndef _LP64
5197 // If method sets FPU control word do it now
5198 if (fp_mode_24b) {
5199 fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
5200 }
5201 if (UseSSE >= 2 && VerifyFPU) {
5202 verify_FPU(0, "FPU stack must be clean on entry");
5203 }
5204 #endif
5205
5206 #ifdef ASSERT
5207 if (VerifyStackAtCalls) {
5208 Label L;
5209 push(rax);
5210 mov(rax, rsp);
5211 andptr(rax, StackAlignmentInBytes-1);
5212 cmpptr(rax, StackAlignmentInBytes-wordSize);
5213 pop(rax);
5214 jcc(Assembler::equal, L);
5215 STOP("Stack is not properly aligned!");
5216 bind(L);
5217 }
5218 #endif
5219
5220 }
5221
5222
5223 // IndexOf for constant substrings with size >= 8 chars
5224 // which don't need to be loaded through stack.
5225 void MacroAssembler::string_indexofC8(Register str1, Register str2,
5226 Register cnt1, Register cnt2,
5227 int int_cnt2, Register result,
5228 XMMRegister vec, Register tmp) {
5229 ShortBranchVerifier sbv(this);
5230 assert(UseSSE42Intrinsics, "SSE4.2 is required");
5231
5232 // This method uses the pcmpestri instruction with bound registers
5233 // inputs:
5234 // xmm - substring
5235 // rax - substring length (elements count)
5236 // mem - scanned string
5237 // rdx - string length (elements count)
5238 // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
5239 // outputs:
5240 // rcx - matched index in string
5241 assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
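// A sketch of the relevant pcmpestri semantics (see the Intel SDM):
// imm8 0xd = 1101b selects unsigned words (bits 1:0 = 01) and the
// "equal ordered" aggregation (bits 3:2 = 11), i.e. substring search.
// Afterwards CF=1 if any (possibly partial) match was found, OF=1 if a
// match starts at element 0, and rcx indexes the first match candidate.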
5242
5243 Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR,
5244 RET_FOUND, RET_NOT_FOUND, EXIT, FOUND_SUBSTR,
5245 MATCH_SUBSTR_HEAD, RELOAD_STR, FOUND_CANDIDATE;
5246
5247 // Note, inline_string_indexOf() generates checks:
5248 // if (substr.count > string.count) return -1;
5249 // if (substr.count == 0) return 0;
5250 assert(int_cnt2 >= 8, "this code is used only for cnt2 >= 8 chars");
5251
5252 // Load substring.
5253 movdqu(vec, Address(str2, 0));
5254 movl(cnt2, int_cnt2);
5255 movptr(result, str1); // string addr
5256
5257 if (int_cnt2 > 8) {
5258 jmpb(SCAN_TO_SUBSTR);
5259
5260 // Reload substr for rescan; this code
5261 // is executed only for large substrings (> 8 chars)
5262 bind(RELOAD_SUBSTR);
5263 movdqu(vec, Address(str2, 0));
5264 negptr(cnt2); // Jumped here with negative cnt2, convert to positive
5265
5266 bind(RELOAD_STR);
5267 // We came here after the beginning of the substring was
5268 // matched but the rest of it was not, so we need to search
5269 // again. Start from the next element after the previous match.
5270
5271 // cnt2 is the number of remaining substring elements and
5272 // cnt1 is the number of remaining string elements when the cmp failed.
5273 // Restored cnt1 = cnt1 - cnt2 + int_cnt2
5274 subl(cnt1, cnt2);
5275 addl(cnt1, int_cnt2);
5276 movl(cnt2, int_cnt2); // Now restore cnt2
5277
5278 decrementl(cnt1); // Shift to next element
5279 cmpl(cnt1, cnt2);
5280 jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
5281
5282 addptr(result, 2);
5283
5284 } // (int_cnt2 > 8)
5285
5286 // Scan string for start of substr in 16-byte vectors
5287 bind(SCAN_TO_SUBSTR);
5288 pcmpestri(vec, Address(result, 0), 0x0d);
5289 jccb(Assembler::below, FOUND_CANDIDATE); // CF == 1
5290 subl(cnt1, 8);
5291 jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
5292 cmpl(cnt1, cnt2);
5293 jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
5294 addptr(result, 16);
5295 jmpb(SCAN_TO_SUBSTR);
5296
5297 // Found a potential substr
5298 bind(FOUND_CANDIDATE);
5299 // Matched whole vector if first element matched (tmp(rcx) == 0).
5300 if (int_cnt2 == 8) {
5301 jccb(Assembler::overflow, RET_FOUND); // OF == 1
5302 } else { // int_cnt2 > 8
5303 jccb(Assembler::overflow, FOUND_SUBSTR);
5304 }
5305 // After pcmpestri tmp(rcx) contains matched element index
5306 // Compute start addr of substr
5307 lea(result, Address(result, tmp, Address::times_2));
5308
5309 // Make sure string is still long enough
5310 subl(cnt1, tmp);
5311 cmpl(cnt1, cnt2);
5312 if (int_cnt2 == 8) {
5313 jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
5314 } else { // int_cnt2 > 8
5315 jccb(Assembler::greaterEqual, MATCH_SUBSTR_HEAD);
5316 }
5317 // Left less than substring.
5318
5319 bind(RET_NOT_FOUND);
5320 movl(result, -1);
5321 jmpb(EXIT);
5322
5323 if (int_cnt2 > 8) {
5324 // This code is optimized for the case when whole substring
5325 // is matched if its head is matched.
5326 bind(MATCH_SUBSTR_HEAD);
5327 pcmpestri(vec, Address(result, 0), 0x0d);
5328 // Reload only the string if it does not match
5329 jccb(Assembler::noOverflow, RELOAD_STR); // OF == 0
5330
5331 Label CONT_SCAN_SUBSTR;
5332 // Compare the rest of substring (> 8 chars).
5333 bind(FOUND_SUBSTR);
5334 // First 8 chars are already matched.
5335 negptr(cnt2);
5336 addptr(cnt2, 8);
5337
5338 bind(SCAN_SUBSTR);
5339 subl(cnt1, 8);
5340 cmpl(cnt2, -8); // Do not read beyond substring
5341 jccb(Assembler::lessEqual, CONT_SCAN_SUBSTR);
5342 // Back-up strings to avoid reading beyond substring:
5343 // cnt1 = cnt1 - cnt2 + 8
5344 addl(cnt1, cnt2); // cnt2 is negative
5345 addl(cnt1, 8);
5346 movl(cnt2, 8); negptr(cnt2);
5347 bind(CONT_SCAN_SUBSTR);
5348 if (int_cnt2 < (int)G) {
5349 movdqu(vec, Address(str2, cnt2, Address::times_2, int_cnt2*2));
5350 pcmpestri(vec, Address(result, cnt2, Address::times_2, int_cnt2*2), 0x0d);
5351 } else {
5352 // calculate index in register to avoid integer overflow (int_cnt2*2)
5353 movl(tmp, int_cnt2);
5354 addptr(tmp, cnt2);
5355 movdqu(vec, Address(str2, tmp, Address::times_2, 0));
5356 pcmpestri(vec, Address(result, tmp, Address::times_2, 0), 0x0d);
5357 }
5358 // Need to reload string pointers if we did not match the whole vector
5359 jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
5360 addptr(cnt2, 8);
5361 jcc(Assembler::negative, SCAN_SUBSTR);
5362 // Fall through if found full substring
5363
5364 } // (int_cnt2 > 8)
5365
5366 bind(RET_FOUND);
5367 // Found result if we matched full small substring.
5368 // Compute substr offset
5369 subptr(result, str1);
5370 shrl(result, 1); // index
5371 bind(EXIT);
5372
5373 } // string_indexofC8
5374
5375 // Small strings are loaded through stack if they cross page boundary.
5376 void MacroAssembler::string_indexof(Register str1, Register str2,
5377 Register cnt1, Register cnt2,
5378 int int_cnt2, Register result,
5379 XMMRegister vec, Register tmp) {
5380 ShortBranchVerifier sbv(this);
5381 assert(UseSSE42Intrinsics, "SSE4.2 is required");
5382 //
5383 // int_cnt2 is length of small (< 8 chars) constant substring
5384 // or (-1) for non constant substring in which case its length
5385 // is in cnt2 register.
5386 //
5387 // Note, inline_string_indexOf() generates checks:
5388 // if (substr.count > string.count) return -1;
5389 // if (substr.count == 0) return 0;
5390 //
5391 assert(int_cnt2 == -1 || (0 < int_cnt2 && int_cnt2 < 8), "should be != 0");
5392
5393 // This method uses the pcmpestri instruction with bound registers
5394 // inputs:
5395 // xmm - substring
5396 // rax - substring length (elements count)
5397 // mem - scanned string
5398 // rdx - string length (elements count)
5399 // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
5400 // outputs:
5401 // rcx - matched index in string
5402 assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
5403
5404 Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR, ADJUST_STR,
5405 RET_FOUND, RET_NOT_FOUND, CLEANUP, FOUND_SUBSTR,
5406 FOUND_CANDIDATE;
5407
5408 { //========================================================
5409 // We don't know where these strings are located
5410 // and we can't read beyond them. Load them through the stack.
5411 Label BIG_STRINGS, CHECK_STR, COPY_SUBSTR, COPY_STR;
5412
5413 movptr(tmp, rsp); // save old SP
5414
5415 if (int_cnt2 > 0) { // small (< 8 chars) constant substring
5416 if (int_cnt2 == 1) { // One char
5417 load_unsigned_short(result, Address(str2, 0));
5418 movdl(vec, result); // move 32 bits
5419 } else if (int_cnt2 == 2) { // Two chars
5420 movdl(vec, Address(str2, 0)); // move 32 bits
5421 } else if (int_cnt2 == 4) { // Four chars
5422 movq(vec, Address(str2, 0)); // move 64 bits
5423 } else { // cnt2 = { 3, 5, 6, 7 }
5424 // Array header size is 12 bytes in 32-bit VM
5425 // + 6 bytes for 3 chars == 18 bytes,
5426 // enough space to load vec and shift.
5427 assert(HeapWordSize*TypeArrayKlass::header_size() >= 12,"sanity");
5428 movdqu(vec, Address(str2, (int_cnt2*2)-16));
5429 psrldq(vec, 16-(int_cnt2*2));
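// Worked example for int_cnt2 == 3: the movdqu above reads the 16 bytes
// ending right after the 3 chars (str2-10 .. str2+5); the 10 bytes below
// str2 are readable because they overlap the array header. psrldq then
// shifts those 10 junk bytes out, leaving the 3 chars in the low words.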
5430 }
5431 } else { // not constant substring
5432 cmpl(cnt2, 8);
5433 jccb(Assembler::aboveEqual, BIG_STRINGS); // Both strings are big enough
5434
5435 // We can read beyond the string if str+16 does not cross a page boundary
5436 // since heaps are aligned and mapped by pages.
5437 assert(os::vm_page_size() < (int)G, "default page should be small");
5438 movl(result, str2); // We need only low 32 bits
5439 andl(result, (os::vm_page_size()-1));
5440 cmpl(result, (os::vm_page_size()-16));
5441 jccb(Assembler::belowEqual, CHECK_STR);
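// Worked example with 4K pages: (str2 & 0xfff) <= 0xff0 means the 16-byte
// load at str2 ends within the same page; offsets 0xff1..0xfff would make
// it cross into the next page (which may not be readable) and force the
// stack copy below.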
5442
5443 // Move small strings to the stack to allow loading 16 bytes into vec.
5444 subptr(rsp, 16);
5445 int stk_offset = wordSize-2;
5446 push(cnt2);
5447
5448 bind(COPY_SUBSTR);
5449 load_unsigned_short(result, Address(str2, cnt2, Address::times_2, -2));
5450 movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
5451 decrement(cnt2);
5452 jccb(Assembler::notZero, COPY_SUBSTR);
5453
5454 pop(cnt2);
5455 movptr(str2, rsp); // New substring address
5456 } // non constant
5457
5458 bind(CHECK_STR);
5459 cmpl(cnt1, 8);
5460 jccb(Assembler::aboveEqual, BIG_STRINGS);
5461
5462 // Check cross page boundary.
5463 movl(result, str1); // We need only low 32 bits
5464 andl(result, (os::vm_page_size()-1));
5465 cmpl(result, (os::vm_page_size()-16));
5466 jccb(Assembler::belowEqual, BIG_STRINGS);
5467
5468 subptr(rsp, 16);
5469 int stk_offset = -2;
5470 if (int_cnt2 < 0) { // not constant
5471 push(cnt2);
5472 stk_offset += wordSize;
5473 }
5474 movl(cnt2, cnt1);
5475
5476 bind(COPY_STR);
5477 load_unsigned_short(result, Address(str1, cnt2, Address::times_2, -2));
5478 movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
5479 decrement(cnt2);
5480 jccb(Assembler::notZero, COPY_STR);
5481
5482 if (int_cnt2 < 0) { // not constant
5483 pop(cnt2);
5484 }
5485 movptr(str1, rsp); // New string address
5486
5487 bind(BIG_STRINGS);
5488 // Load substring.
5489 if (int_cnt2 < 0) { // -1
5490 movdqu(vec, Address(str2, 0));
5491 push(cnt2); // substr count
5492 push(str2); // substr addr
5493 push(str1); // string addr
5494 } else {
5495 // Small (< 8 chars) constant substrings are loaded already.
5496 movl(cnt2, int_cnt2);
5497 }
5498 push(tmp); // original SP
5499
5500 } // Finished loading
5501
5502 //========================================================
5503 // Start search
5504 //
5505
5506 movptr(result, str1); // string addr
5507
5508 if (int_cnt2 < 0) { // Only for non constant substring
5509 jmpb(SCAN_TO_SUBSTR);
5510
5511 // SP saved at sp+0
5512 // String saved at sp+1*wordSize
5513 // Substr saved at sp+2*wordSize
5514 // Substr count saved at sp+3*wordSize
5515
5516 // Reload substr for rescan; this code
5517 // is executed only for large substrings (> 8 chars)
5518 bind(RELOAD_SUBSTR);
5519 movptr(str2, Address(rsp, 2*wordSize));
5520 movl(cnt2, Address(rsp, 3*wordSize));
5521 movdqu(vec, Address(str2, 0));
5522 // We came here after the beginning of the substring was
5523 // matched but the rest of it was not, so we need to search
5524 // again. Start from the next element after the previous match.
5525 subptr(str1, result); // Restore counter
5526 shrl(str1, 1);
5527 addl(cnt1, str1);
5528 decrementl(cnt1); // Shift to next element
5529 cmpl(cnt1, cnt2);
5530 jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
5531
5532 addptr(result, 2);
5533 } // non constant
5534
5535 // Scan string for start of substr in 16-byte vectors
5536 bind(SCAN_TO_SUBSTR);
5537 assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
5538 pcmpestri(vec, Address(result, 0), 0x0d);
5539 jccb(Assembler::below, FOUND_CANDIDATE); // CF == 1
5540 subl(cnt1, 8);
5541 jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
5542 cmpl(cnt1, cnt2);
5543 jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
5544 addptr(result, 16);
5545
5546 bind(ADJUST_STR);
5547 cmpl(cnt1, 8); // Do not read beyond string
5548 jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
5549 // Back-up string to avoid reading beyond string.
5550 lea(result, Address(result, cnt1, Address::times_2, -16));
5551 movl(cnt1, 8);
5552 jmpb(SCAN_TO_SUBSTR);
5553
5554 // Found a potential substr
5555 bind(FOUND_CANDIDATE);
5556 // After pcmpestri tmp(rcx) contains matched element index
5557
5558 // Make sure string is still long enough
5559 subl(cnt1, tmp);
5560 cmpl(cnt1, cnt2);
5561 jccb(Assembler::greaterEqual, FOUND_SUBSTR);
5562 // Left less than substring.
5563
5564 bind(RET_NOT_FOUND);
5565 movl(result, -1);
5566 jmpb(CLEANUP);
5567
5568 bind(FOUND_SUBSTR);
5569 // Compute start addr of substr
5570 lea(result, Address(result, tmp, Address::times_2));
5571
5572 if (int_cnt2 > 0) { // Constant substring
5573 // Repeat search for small substring (< 8 chars)
5574 // from new point without reloading substring.
5575 // Have to check that we don't read beyond string.
5576 cmpl(tmp, 8-int_cnt2);
5577 jccb(Assembler::greater, ADJUST_STR);
5578 // Fall through if matched whole substring.
5579 } else { // non constant
5580 assert(int_cnt2 == -1, "should be != 0");
5581
5582 addl(tmp, cnt2);
5583 // Found result if we matched whole substring.
5584 cmpl(tmp, 8);
5585 jccb(Assembler::lessEqual, RET_FOUND);
5586
5587 // Repeat search for small substring (<= 8 chars)
5588 // from new point 'str1' without reloading substring.
5589 cmpl(cnt2, 8);
5590 // Have to check that we don't read beyond string.
5591 jccb(Assembler::lessEqual, ADJUST_STR);
5592
5593 Label CHECK_NEXT, CONT_SCAN_SUBSTR, RET_FOUND_LONG;
5594 // Compare the rest of substring (> 8 chars).
5595 movptr(str1, result);
5596
5597 cmpl(tmp, cnt2);
5598 // First 8 chars are already matched.
5599 jccb(Assembler::equal, CHECK_NEXT);
5600
5601 bind(SCAN_SUBSTR);
5602 pcmpestri(vec, Address(str1, 0), 0x0d);
5603 // Need to reload string pointers if we did not match the whole vector
5604 jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
5605
5606 bind(CHECK_NEXT);
5607 subl(cnt2, 8);
5608 jccb(Assembler::lessEqual, RET_FOUND_LONG); // Found full substring
5609 addptr(str1, 16);
5610 addptr(str2, 16);
5611 subl(cnt1, 8);
5612 cmpl(cnt2, 8); // Do not read beyond substring
5613 jccb(Assembler::greaterEqual, CONT_SCAN_SUBSTR);
5614 // Back-up strings to avoid reading beyond substring.
5615 lea(str2, Address(str2, cnt2, Address::times_2, -16));
5616 lea(str1, Address(str1, cnt2, Address::times_2, -16));
5617 subl(cnt1, cnt2);
5618 movl(cnt2, 8);
5619 addl(cnt1, 8);
5620 bind(CONT_SCAN_SUBSTR);
5621 movdqu(vec, Address(str2, 0));
5622 jmpb(SCAN_SUBSTR);
5623
5624 bind(RET_FOUND_LONG);
5625 movptr(str1, Address(rsp, wordSize));
5626 } // non constant
5627
5628 bind(RET_FOUND);
5629 // Compute substr offset
5630 subptr(result, str1);
5631 shrl(result, 1); // index
5632
5633 bind(CLEANUP);
5634 pop(rsp); // restore SP
5635
5636 } // string_indexof
5637
5638 // Compare strings.
5639 void MacroAssembler::string_compare(Register str1, Register str2,
5640 Register cnt1, Register cnt2, Register result,
5641 XMMRegister vec1) {
5642 ShortBranchVerifier sbv(this);
5643 Label LENGTH_DIFF_LABEL, POP_LABEL, DONE_LABEL, WHILE_HEAD_LABEL;
5644
5645 // Compute the minimum of the string lengths and the
5646 // difference of the string lengths (stack).
5647 // Do the conditional move stuff
5648 movl(result, cnt1);
5649 subl(cnt1, cnt2);
5650 push(cnt1);
5651 cmov32(Assembler::lessEqual, cnt2, result);
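// cnt2 now holds min(cnt1, cnt2) and the signed length difference is on
// the stack; LENGTH_DIFF_LABEL pops it as the result when one string is
// a prefix of the other.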
5652
5653 // Is the minimum length zero?
5654 testl(cnt2, cnt2);
5655 jcc(Assembler::zero, LENGTH_DIFF_LABEL);
5656
5657 // Load first characters
5658 load_unsigned_short(result, Address(str1, 0));
5659 load_unsigned_short(cnt1, Address(str2, 0));
5660
5661 // Compare first characters
5662 subl(result, cnt1);
5663 jcc(Assembler::notZero, POP_LABEL);
5664 decrementl(cnt2);
5665 jcc(Assembler::zero, LENGTH_DIFF_LABEL);
5666
5667 {
5668 // Check after comparing first character to see if strings are equivalent
5669 Label LSkip2;
5670 // Check if the strings start at same location
5671 cmpptr(str1, str2);
5672 jccb(Assembler::notEqual, LSkip2);
5673
5674 // Check if the length difference is zero (from stack)
5675 cmpl(Address(rsp, 0), 0x0);
5676 jcc(Assembler::equal, LENGTH_DIFF_LABEL);
5677
5678 // Strings might not be equivalent
5679 bind(LSkip2);
5680 }
5681
5682 Address::ScaleFactor scale = Address::times_2;
5683 int stride = 8;
5684
5685 // Advance to next element
5686 addptr(str1, 16/stride);
5687 addptr(str2, 16/stride);
5688
5689 if (UseSSE42Intrinsics) {
5690 Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_TAIL;
5691 int pcmpmask = 0x19;
5692 // Setup to compare 16-byte vectors
5693 movl(result, cnt2);
5694 andl(cnt2, ~(stride - 1)); // cnt2 holds the vector count
5695 jccb(Assembler::zero, COMPARE_TAIL);
5696
5697 lea(str1, Address(str1, result, scale));
5698 lea(str2, Address(str2, result, scale));
5699 negptr(result);
5700
5701 // pcmpestri
5702 // inputs:
5703 // vec1- substring
5704 // rax - negative string length (elements count)
5705 // mem - scanned string
5706 // rdx - string length (elements count)
5707 // pcmpmask - cmp mode: 11000 (string compare with negated result)
5708 // + 00 (unsigned bytes) or + 01 (unsigned shorts)
5709 // outputs:
5710 // rcx - first mismatched element index
5711 assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");
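// A sketch of the 0x19 mask: 11001b = unsigned words (bits 1:0 = 01),
// "equal each" element-wise compare (bits 3:2 = 10), negated polarity
// (bit 4 = 1), so set bits in the result mark *mismatched* elements and
// rcx receives the index of the first mismatch; CF=1 iff one exists.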
5712
5713 bind(COMPARE_WIDE_VECTORS);
5714 movdqu(vec1, Address(str1, result, scale));
5715 pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
5716 // After pcmpestri cnt1(rcx) contains mismatched element index
5717
5718 jccb(Assembler::below, VECTOR_NOT_EQUAL); // CF==1
5719 addptr(result, stride);
5720 subptr(cnt2, stride);
5721 jccb(Assembler::notZero, COMPARE_WIDE_VECTORS);
5722
5723 // compare wide vectors tail
5724 testl(result, result);
5725 jccb(Assembler::zero, LENGTH_DIFF_LABEL);
5726
5727 movl(cnt2, stride);
5728 movl(result, stride);
5729 negptr(result);
5730 movdqu(vec1, Address(str1, result, scale));
5731 pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
5732 jccb(Assembler::aboveEqual, LENGTH_DIFF_LABEL);
5733
5734 // Mismatched characters in the vectors
5735 bind(VECTOR_NOT_EQUAL);
5736 addptr(result, cnt1);
5737 movptr(cnt2, result);
5738 load_unsigned_short(result, Address(str1, cnt2, scale));
5739 load_unsigned_short(cnt1, Address(str2, cnt2, scale));
5740 subl(result, cnt1);
5741 jmpb(POP_LABEL);
5742
5743 bind(COMPARE_TAIL); // limit is zero
5744 movl(cnt2, result);
5745 // Fallthru to tail compare
5746 }
5747
5748 // Shift str2 and str1 to the end of the arrays, negate min
5749 lea(str1, Address(str1, cnt2, scale, 0));
5750 lea(str2, Address(str2, cnt2, scale, 0));
5751 negptr(cnt2);
5752
5753 // Compare the rest of the elements
5754 bind(WHILE_HEAD_LABEL);
5755 load_unsigned_short(result, Address(str1, cnt2, scale, 0));
5756 load_unsigned_short(cnt1, Address(str2, cnt2, scale, 0));
5757 subl(result, cnt1);
5758 jccb(Assembler::notZero, POP_LABEL);
5759 increment(cnt2);
5760 jccb(Assembler::notZero, WHILE_HEAD_LABEL);
5761
5762 // Strings are equal up to min length. Return the length difference.
5763 bind(LENGTH_DIFF_LABEL);
5764 pop(result);
5765 jmpb(DONE_LABEL);
5766
5767 // Discard the stored length difference
5768 bind(POP_LABEL);
5769 pop(cnt1);
5770
5771 // That's it
5772 bind(DONE_LABEL);
5773 }
5774
5775 // Compare char[] arrays aligned to 4 bytes or substrings.
5776 void MacroAssembler::char_arrays_equals(bool is_array_equ, Register ary1, Register ary2,
5777 Register limit, Register result, Register chr,
5778 XMMRegister vec1, XMMRegister vec2) {
5779 ShortBranchVerifier sbv(this);
5780 Label TRUE_LABEL, FALSE_LABEL, DONE, COMPARE_VECTORS, COMPARE_CHAR;
5781
5782 int length_offset = arrayOopDesc::length_offset_in_bytes();
5783 int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR);
5784
5785 // Check the input args
5786 cmpptr(ary1, ary2);
5787 jcc(Assembler::equal, TRUE_LABEL);
5788
5789 if (is_array_equ) {
5790 // Need additional checks for arrays_equals.
5791 testptr(ary1, ary1);
5792 jcc(Assembler::zero, FALSE_LABEL);
5793 testptr(ary2, ary2);
5794 jcc(Assembler::zero, FALSE_LABEL);
5795
5796 // Check the lengths
5797 movl(limit, Address(ary1, length_offset));
5798 cmpl(limit, Address(ary2, length_offset));
5799 jcc(Assembler::notEqual, FALSE_LABEL);
5800 }
5801
5802 // count == 0
5803 testl(limit, limit);
5804 jcc(Assembler::zero, TRUE_LABEL);
5805
5806 if (is_array_equ) {
5807 // Load array address
5808 lea(ary1, Address(ary1, base_offset));
5809 lea(ary2, Address(ary2, base_offset));
5810 }
5811
5812 shll(limit, 1); // byte count != 0
5813 movl(result, limit); // copy
5814
5815 if (UseSSE42Intrinsics) {
5816 // With SSE4.2, use double quad vector compare
5817 Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
5818
5819 // Compare 16-byte vectors
5820 andl(result, 0x0000000e); // tail count (in bytes)
5821 andl(limit, 0xfffffff0); // vector count (in bytes)
5822 jccb(Assembler::zero, COMPARE_TAIL);
5823
5824 lea(ary1, Address(ary1, limit, Address::times_1));
5825 lea(ary2, Address(ary2, limit, Address::times_1));
5826 negptr(limit);
5827
5828 bind(COMPARE_WIDE_VECTORS);
5829 movdqu(vec1, Address(ary1, limit, Address::times_1));
5830 movdqu(vec2, Address(ary2, limit, Address::times_1));
5831 pxor(vec1, vec2);
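// vec1 is now all-zero iff the two 16-byte chunks were identical;
// ptest(vec1, vec1) sets ZF exactly in that case, so notZero == mismatch.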
5832
5833 ptest(vec1, vec1);
5834 jccb(Assembler::notZero, FALSE_LABEL);
5835 addptr(limit, 16);
5836 jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
5837
5838 testl(result, result);
5839 jccb(Assembler::zero, TRUE_LABEL);
5840
5841 movdqu(vec1, Address(ary1, result, Address::times_1, -16));
5842 movdqu(vec2, Address(ary2, result, Address::times_1, -16));
5843 pxor(vec1, vec2);
5844
5845 ptest(vec1, vec1);
5846 jccb(Assembler::notZero, FALSE_LABEL);
5847 jmpb(TRUE_LABEL);
5848
5849 bind(COMPARE_TAIL); // limit is zero
5850 movl(limit, result);
5851 // Fallthru to tail compare
5852 }
5853
5854 // Compare 4-byte vectors
5855 andl(limit, 0xfffffffc); // vector count (in bytes)
5856 jccb(Assembler::zero, COMPARE_CHAR);
5857
5858 lea(ary1, Address(ary1, limit, Address::times_1));
5859 lea(ary2, Address(ary2, limit, Address::times_1));
5860 negptr(limit);
5861
5862 bind(COMPARE_VECTORS);
5863 movl(chr, Address(ary1, limit, Address::times_1));
5864 cmpl(chr, Address(ary2, limit, Address::times_1));
5865 jccb(Assembler::notEqual, FALSE_LABEL);
5866 addptr(limit, 4);
5867 jcc(Assembler::notZero, COMPARE_VECTORS);
5868
5869 // Compare trailing char (final 2 bytes), if any
5870 bind(COMPARE_CHAR);
5871 testl(result, 0x2); // tail char
5872 jccb(Assembler::zero, TRUE_LABEL);
5873 load_unsigned_short(chr, Address(ary1, 0));
5874 load_unsigned_short(limit, Address(ary2, 0));
5875 cmpl(chr, limit);
5876 jccb(Assembler::notEqual, FALSE_LABEL);
5877
5878 bind(TRUE_LABEL);
5879 movl(result, 1); // return true
5880 jmpb(DONE);
5881
5882 bind(FALSE_LABEL);
5883 xorl(result, result); // return false
5884
5885 // That's it
5886 bind(DONE);
5887 }
5888
5889 void MacroAssembler::generate_fill(BasicType t, bool aligned,
5890 Register to, Register value, Register count,
5891 Register rtmp, XMMRegister xtmp) {
5892 ShortBranchVerifier sbv(this);
5893 assert_different_registers(to, value, count, rtmp);
5894 Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
5895 Label L_fill_2_bytes, L_fill_4_bytes;
5896
5897 int shift = -1;
5898 switch (t) {
5899 case T_BYTE:
5900 shift = 2;
5901 break;
5902 case T_SHORT:
5903 shift = 1;
5904 break;
5905 case T_INT:
5906 shift = 0;
5907 break;
5908 default: ShouldNotReachHere();
5909 }
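// shift == log2(elements per 32-bit word): (n << shift) converts a count
// of 32-bit words into an element count, e.g. 8 << shift is the number of
// elements in a 32-byte chunk for each type.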
5910
5911 if (t == T_BYTE) {
5912 andl(value, 0xff);
5913 movl(rtmp, value);
5914 shll(rtmp, 8);
5915 orl(value, rtmp);
5916 }
5917 if (t == T_SHORT) {
5918 andl(value, 0xffff);
5919 }
5920 if (t == T_BYTE || t == T_SHORT) {
5921 movl(rtmp, value);
5922 shll(rtmp, 16);
5923 orl(value, rtmp);
5924 }
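// e.g. a T_BYTE fill with value 0xAB is widened above via
// 0xAB -> 0xABAB -> 0xABABABAB, so every 32-bit word stored (or broadcast
// into xtmp) below carries four copies of the byte.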
5925
5926 cmpl(count, 2<<shift); // Short arrays (< 8 bytes) are filled by element
5927 jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
5928 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
5929 // align the destination address to a 4-byte boundary
5930 if (t == T_BYTE) {
5931 // One-byte misalignment happens only for byte arrays
5932 testptr(to, 1);
5933 jccb(Assembler::zero, L_skip_align1);
5934 movb(Address(to, 0), value);
5935 increment(to);
5936 decrement(count);
5937 BIND(L_skip_align1);
5938 }
5939 // Two-byte misalignment happens only for byte and short (char) arrays
5940 testptr(to, 2);
5941 jccb(Assembler::zero, L_skip_align2);
5942 movw(Address(to, 0), value);
5943 addptr(to, 2);
5944 subl(count, 1<<(shift-1));
5945 BIND(L_skip_align2);
5946 }
5947 if (UseSSE < 2) {
5948 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
5949 // Fill 32-byte chunks
5950 subl(count, 8 << shift);
5951 jcc(Assembler::less, L_check_fill_8_bytes);
5952 align(16);
5953
5954 BIND(L_fill_32_bytes_loop);
5955
5956 for (int i = 0; i < 32; i += 4) {
5957 movl(Address(to, i), value);
5958 }
5959
5960 addptr(to, 32);
5961 subl(count, 8 << shift);
5962 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
5963 BIND(L_check_fill_8_bytes);
5964 addl(count, 8 << shift);
5965 jccb(Assembler::zero, L_exit);
5966 jmpb(L_fill_8_bytes);
5967
5968 //
5969 // length is too short, just fill qwords
5970 //
5971 BIND(L_fill_8_bytes_loop);
5972 movl(Address(to, 0), value);
5973 movl(Address(to, 4), value);
5974 addptr(to, 8);
5975 BIND(L_fill_8_bytes);
5976 subl(count, 1 << (shift + 1));
5977 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
5978 // fall through to fill 4 bytes
5979 } else {
5980 Label L_fill_32_bytes;
5981 if (!UseUnalignedLoadStores) {
5982 // align to 8 bytes, we know we are 4 byte aligned to start
5983 testptr(to, 4);
5984 jccb(Assembler::zero, L_fill_32_bytes);
5985 movl(Address(to, 0), value);
5986 addptr(to, 4);
5987 subl(count, 1<<shift);
5988 }
5989 BIND(L_fill_32_bytes);
5990 {
5991 assert( UseSSE >= 2, "supported cpu only" );
5992 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
5993 // Fill 32-byte chunks
5994 movdl(xtmp, value);
5995 pshufd(xtmp, xtmp, 0);
5996
5997 subl(count, 8 << shift);
5998 jcc(Assembler::less, L_check_fill_8_bytes);
5999 align(16);
6000
6001 BIND(L_fill_32_bytes_loop);
6002
6003 if (UseUnalignedLoadStores) {
6004 movdqu(Address(to, 0), xtmp);
6005 movdqu(Address(to, 16), xtmp);
6006 } else {
6007 movq(Address(to, 0), xtmp);
6008 movq(Address(to, 8), xtmp);
6009 movq(Address(to, 16), xtmp);
6010 movq(Address(to, 24), xtmp);
6011 }
6012
6013 addptr(to, 32);
6014 subl(count, 8 << shift);
6015 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
6016 BIND(L_check_fill_8_bytes);
6017 addl(count, 8 << shift);
6018 jccb(Assembler::zero, L_exit);
6019 jmpb(L_fill_8_bytes);
6020
6021 //
6022 // length is too short, just fill qwords
6023 //
6024 BIND(L_fill_8_bytes_loop);
6025 movq(Address(to, 0), xtmp);
6026 addptr(to, 8);
6027 BIND(L_fill_8_bytes);
6028 subl(count, 1 << (shift + 1));
6029 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
6030 }
6031 }
6032 // fill trailing 4 bytes
6033 BIND(L_fill_4_bytes);
6034 testl(count, 1<<shift);
6035 jccb(Assembler::zero, L_fill_2_bytes);
6036 movl(Address(to, 0), value);
6037 if (t == T_BYTE || t == T_SHORT) {
6038 addptr(to, 4);
6039 BIND(L_fill_2_bytes);
6040 // fill trailing 2 bytes
6041 testl(count, 1<<(shift-1));
6042 jccb(Assembler::zero, L_fill_byte);
6043 movw(Address(to, 0), value);
6044 if (t == T_BYTE) {
6045 addptr(to, 2);
6046 BIND(L_fill_byte);
6047 // fill trailing byte
6048 testl(count, 1);
6049 jccb(Assembler::zero, L_exit);
6050 movb(Address(to, 0), value);
6051 } else {
6052 BIND(L_fill_byte);
6053 }
6054 } else {
6055 BIND(L_fill_2_bytes);
6056 }
6057 BIND(L_exit);
6058 }
6059 #undef BIND
6060 #undef BLOCK_COMMENT
6061
6062
6063 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
6064 switch (cond) {
6065 // Note some conditions are synonyms for others
6066 case Assembler::zero: return Assembler::notZero;
6067 case Assembler::notZero: return Assembler::zero;
6068 case Assembler::less: return Assembler::greaterEqual;
6069 case Assembler::lessEqual: return Assembler::greater;
6070 case Assembler::greater: return Assembler::lessEqual;
6071 case Assembler::greaterEqual: return Assembler::less;
6072 case Assembler::below: return Assembler::aboveEqual;
6073 case Assembler::belowEqual: return Assembler::above;
6074 case Assembler::above: return Assembler::belowEqual;
6075 case Assembler::aboveEqual: return Assembler::below;
6076 case Assembler::overflow: return Assembler::noOverflow;
6077 case Assembler::noOverflow: return Assembler::overflow;
6078 case Assembler::negative: return Assembler::positive;
6079 case Assembler::positive: return Assembler::negative;
6080 case Assembler::parity: return Assembler::noParity;
6081 case Assembler::noParity: return Assembler::parity;
6082 }
6083 ShouldNotReachHere(); return Assembler::overflow;
6084 }
6085
6086 SkipIfEqual::SkipIfEqual(
6087 MacroAssembler* masm, const bool* flag_addr, bool value) {
6088 _masm = masm;
6089 _masm->cmp8(ExternalAddress((address)flag_addr), value);
6090 _masm->jcc(Assembler::equal, _label);
6091 }
6092
6093 SkipIfEqual::~SkipIfEqual() {
6094 _masm->bind(_label);
6095 }
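// Typical use is as a scoped guard around conditionally-emitted code
// (a sketch; DTraceMethodProbes stands in for any bool flag whose address
// is fixed at code-generation time):
//
//   {
//     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
//     // code emitted here executes only when the flag reads true
//   } // destructor binds the label, completing the skip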