comparison src/cpu/x86/vm/assembler_x86.cpp @ 304:dc7f315e41f7

5108146: Merge i486 and amd64 cpu directories 6459804: Want client (c1) compiler for x86_64 (amd64) for faster start-up Reviewed-by: kvn
author never
date Wed, 27 Aug 2008 00:21:55 -0700
parents src/cpu/x86/vm/assembler_x86_32.cpp@d1605aabd0a1
children f8199438385b
comparison
equal deleted inserted replaced
303:fa4d1d240383 304:dc7f315e41f7
1 /*
2 * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
24
25 #include "incls/_precompiled.incl"
26 #include "incls/_assembler_x86.cpp.incl"
27
28 // Implementation of AddressLiteral
29
// Records a literal code-stream address together with the relocation spec
// that matches the requested relocation type, so the literal can later be
// found and patched by the relocation machinery.
AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
  _is_lval = false;
  _target = target;
  switch (rtype) {
  case relocInfo::oop_type:
    // Oops are a special case. Normally they would be their own section
    // but in cases like icBuffer they are literals in the code stream that
    // we don't have a section for. We use none so that we get a literal address
    // which is always patchable.
    break;
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(target);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(target);
    break;
  case relocInfo::opt_virtual_call_type:
    _rspec = opt_virtual_call_Relocation::spec();
    break;
  case relocInfo::static_call_type:
    _rspec = static_call_Relocation::spec();
    break;
  case relocInfo::runtime_call_type:
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    // No relocation info: a plain literal address.
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}
66
67 // Implementation of Address
68
69 #ifdef _LP64
70
// 64-bit: array addresses cannot be expressed as a single x86 address,
// so this form must never reach the assembler.
Address Address::make_array(ArrayAddress adr) {
  // Not implementable on 64bit machines
  // Should have been handled higher up the call chain.
  ShouldNotReachHere();
  return Address(); // unreachable; keeps the compiler happy
}
77
// exceedingly dangerous constructor
// 64-bit only: builds a bare [disp32] address (no base, no index) whose
// displacement will be relocated against 'loc' according to 'rtype'.
Address::Address(int disp, address loc, relocInfo::relocType rtype) {
  _base = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp = disp;
  switch (rtype) {
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(loc);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(loc);
    break;
  case relocInfo::runtime_call_type:
    // HMM
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    break;
  default:
    ShouldNotReachHere();
  }
}
105 #else // LP64
106
// 32-bit: materialize an ArrayAddress as [index._base + index._index*scale + literal],
// folding the literal base address into the displacement and carrying over
// its relocation spec.
Address Address::make_array(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(index._base, index._index, index._scale, (intptr_t) base.target());
  array._rspec = base._rspec;
  return array;
}
115
// exceedingly dangerous constructor
// 32-bit only: treats 'loc' itself as an absolute 32-bit displacement and
// attaches the caller-supplied relocation spec unchanged.
Address::Address(address loc, RelocationHolder spec) {
  _base = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp = (intptr_t) loc;
  _rspec = spec;
}
124
125 #endif // _LP64
126
127
128
129 // Convert the raw encoding form into the form expected by the constructor for
130 // Address. An index of 4 (rsp) corresponds to having no index, so convert
131 // that to noreg for the Address constructor.
132 Address Address::make_raw(int base, int index, int scale, int disp) {
133 bool valid_index = index != rsp->encoding();
134 if (valid_index) {
135 Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
136 return madr;
137 } else {
138 Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
139 return madr;
140 }
141 }
142
143 // Implementation of Assembler
144
145 int AbstractAssembler::code_fill_byte() {
146 return (u_char)'\xF4'; // hlt
147 }
148
149 // make this go away someday
150 void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
151 if (rtype == relocInfo::none)
152 emit_long(data);
153 else emit_data(data, Relocation::spec_simple(rtype), format);
154 }
155
// Emits a 32-bit data word at the current position, first registering the
// relocation (anchored at the enclosing instruction mark, not at the word
// itself) so relocs can find the operand inside the instruction.
void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  if (rspec.type() != relocInfo::none) {
#ifdef ASSERT
    check_relocation(rspec, format);
#endif
    // Do not use AbstractAssembler::relocate, which is not intended for
    // embedded words. Instead, relocate to the enclosing instruction.

    // hack. call32 is too wide for mask so use disp32
    if (format == call32_operand)
      code_section()->relocate(inst_mark(), rspec, disp32_operand);
    else
      code_section()->relocate(inst_mark(), rspec, format);
  }
  emit_long(data);
}
174
175 static int encode(Register r) {
176 int enc = r->encoding();
177 if (enc >= 8) {
178 enc -= 8;
179 }
180 return enc;
181 }
182
183 static int encode(XMMRegister r) {
184 int enc = r->encoding();
185 if (enc >= 8) {
186 enc -= 8;
187 }
188 return enc;
189 }
190
// Emits a byte-sized arithmetic instruction: opcode, ModRM with dst in the
// r/m field, then the 8-bit immediate.
void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
  assert(dst->has_byte_register(), "must have byte register");
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert(isByte(imm8), "not a byte");
  assert((op1 & 0x01) == 0, "should be 8bit operation"); // size bit clear = byte op
  emit_byte(op1);
  emit_byte(op2 | encode(dst));
  emit_byte(imm8);
}
200
201
// Emits a 32-bit arithmetic instruction with an immediate operand, choosing
// the short sign-extended imm8 form when the immediate fits in a byte.
void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_byte(op1 | 0x02); // set sign bit
    emit_byte(op2 | encode(dst));
    emit_byte(imm32 & 0xFF);
  } else {
    emit_byte(op1);
    emit_byte(op2 | encode(dst));
    emit_long(imm32);
  }
}
216
// immediate-to-memory forms
// Same imm8/imm32 selection as emit_arith above, but the destination is a
// memory operand; the size argument to emit_operand accounts for the
// trailing immediate when computing rip-relative displacements.
void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_byte(op1 | 0x02); // set sign bit
    emit_operand(rm, adr, 1);
    emit_byte(imm32 & 0xFF);
  } else {
    emit_byte(op1);
    emit_operand(rm, adr, 4);
    emit_long(imm32);
  }
}
231
// 32-bit only: arithmetic with an embedded oop immediate, emitted with
// oop relocation info so the GC can patch it.
void Assembler::emit_arith(int op1, int op2, Register dst, jobject obj) {
  LP64_ONLY(ShouldNotReachHere()); // oops are never 32-bit immediates on 64-bit
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  InstructionMark im(this);
  emit_byte(op1);
  emit_byte(op2 | encode(dst));
  emit_data((intptr_t)obj, relocInfo::oop_type, 0);
}
242
243
244 void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
245 assert(isByte(op1) && isByte(op2), "wrong opcode");
246 emit_byte(op1);
247 emit_byte(op2 | encode(dst) << 3 | encode(src));
248 }
249
250
// Emits the ModRM (and, when needed, SIB) bytes plus displacement for a
// memory operand, with 'reg' in the ModRM reg field.  Any displacement that
// carries relocation info is forced into the disp32 form so it stays
// patchable.  'rip_relative_correction' adjusts for immediate bytes that
// follow the displacement (they shift the next-instruction pointer the
// RIP-relative form is computed against).
void Assembler::emit_operand(Register reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int rip_relative_correction) {
  relocInfo::relocType rtype = (relocInfo::relocType) rspec.type();

  // Encode the registers as needed in the fields they are used in

  int regenc = encode(reg) << 3;
  int indexenc = index->is_valid() ? encode(index) << 3 : 0;
  int baseenc = base->is_valid() ? encode(base) : 0;

  if (base->is_valid()) {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [base + index*scale + disp]
      // rbp/r13 as base cannot use the no-disp form (that encoding means
      // [disp32] / rip-relative), so they fall through to the disp8 form.
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base + index*scale]
        // [00 reg 100][ss index base]
        assert(index != rsp, "illegal addressing mode");
        emit_byte(0x04 | regenc);
        emit_byte(scale << 6 | indexenc | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [base + index*scale + imm8]
        // [01 reg 100][ss index base] imm8
        assert(index != rsp, "illegal addressing mode");
        emit_byte(0x44 | regenc);
        emit_byte(scale << 6 | indexenc | baseenc);
        emit_byte(disp & 0xFF);
      } else {
        // [base + index*scale + disp32]
        // [10 reg 100][ss index base] disp32
        assert(index != rsp, "illegal addressing mode");
        emit_byte(0x84 | regenc);
        emit_byte(scale << 6 | indexenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    } else if (base == rsp LP64_ONLY(|| base == r12)) {
      // [rsp + disp]
      // rsp/r12 as base always require a SIB byte (their r/m encoding is
      // the SIB escape).
      if (disp == 0 && rtype == relocInfo::none) {
        // [rsp]
        // [00 reg 100][00 100 100]
        emit_byte(0x04 | regenc);
        emit_byte(0x24);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [rsp + imm8]
        // [01 reg 100][00 100 100] disp8
        emit_byte(0x44 | regenc);
        emit_byte(0x24);
        emit_byte(disp & 0xFF);
      } else {
        // [rsp + imm32]
        // [10 reg 100][00 100 100] disp32
        emit_byte(0x84 | regenc);
        emit_byte(0x24);
        emit_data(disp, rspec, disp32_operand);
      }
    } else {
      // [base + disp]
      assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode");
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base]
        // [00 reg base]
        emit_byte(0x00 | regenc | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [base + disp8]
        // [01 reg base] disp8
        emit_byte(0x40 | regenc | baseenc);
        emit_byte(disp & 0xFF);
      } else {
        // [base + disp32]
        // [10 reg base] disp32
        emit_byte(0x80 | regenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    }
  } else {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [index*scale + disp]
      // [00 reg 100][ss index 101] disp32
      assert(index != rsp, "illegal addressing mode");
      emit_byte(0x04 | regenc);
      emit_byte(scale << 6 | indexenc | 0x05);
      emit_data(disp, rspec, disp32_operand);
    } else if (rtype != relocInfo::none ) {
      // [disp] (64bit) RIP-RELATIVE (32bit) abs
      // [00 000 101] disp32

      emit_byte(0x05 | regenc);
      // Note that the RIP-rel. correction applies to the generated
      // disp field, but _not_ to the target address in the rspec.

      // disp was created by converting the target address minus the pc
      // at the start of the instruction. That needs more correction here.
      // intptr_t disp = target - next_ip;
      assert(inst_mark() != NULL, "must be inside InstructionMark");
      address next_ip = pc() + sizeof(int32_t) + rip_relative_correction;
      int64_t adjusted = disp;
      // Do rip-rel adjustment for 64bit
      LP64_ONLY(adjusted -= (next_ip - inst_mark()));
      assert(is_simm32(adjusted),
             "must be 32bit offset (RIP relative address)");
      emit_data((int32_t) adjusted, rspec, disp32_operand);

    } else {
      // 32bit never did this, did everything as the rip-rel/disp code above
      // [disp] ABSOLUTE
      // [00 reg 100][00 100 101] disp32
      emit_byte(0x04 | regenc);
      emit_byte(0x25);
      emit_data(disp, rspec, disp32_operand);
    }
  }
}
368
// XMM flavor: register field encodings are identical to the GP case, so
// delegate to the general-purpose emitter via a cast.
void Assembler::emit_operand(XMMRegister reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec) {
  emit_operand((Register)reg, base, index, scale, disp, rspec);
}
374
375 // Secret local extension to Assembler::WhichOperand:
376 #define end_pc_operand (_WhichOperand_limit)
377
// Decodes the instruction starting at 'inst' just far enough to find the
// 32-bit operand selected by 'which' (or, for the secret end_pc_operand,
// the end of the whole instruction).  First the opcode/prefix switch
// classifies the instruction and records whether a ModRM-encoded address
// (and hence a possible disp32) follows and how many trailing immediate
// bytes it has; then the ModRM/SIB bytes are parsed to locate the
// displacement.
address Assembler::locate_operand(address inst, WhichOperand which) {
  // Decode the given instruction, and return the address of
  // an embedded 32-bit operand word.

  // If "which" is disp32_operand, selects the displacement portion
  // of an effective address specifier.
  // If "which" is imm64_operand, selects the trailing immediate constant.
  // If "which" is call32_operand, selects the displacement of a call or jump.
  // Caller is responsible for ensuring that there is such an operand,
  // and that it is 32/64 bits wide.

  // If "which" is end_pc_operand, find the end of the instruction.

  address ip = inst;
  bool is_64bit = false;

  debug_only(bool has_disp32 = false);
  int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn

  again_after_prefix:
  switch (0xFF & *ip++) {

  // These convenience macros generate groups of "case" labels for the switch.
#define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
#define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
             case (x)+4: case (x)+5: case (x)+6: case (x)+7
#define REP16(x) REP8((x)+0): \
              case REP8((x)+8)

  case CS_segment:
  case SS_segment:
  case DS_segment:
  case ES_segment:
  case FS_segment:
  case GS_segment:
    // Seems dubious
    LP64_ONLY(assert(false, "shouldn't have that prefix"));
    assert(ip == inst+1, "only one prefix allowed");
    goto again_after_prefix;

  case 0x67:
  case REX:
  case REX_B:
  case REX_X:
  case REX_XB:
  case REX_R:
  case REX_RB:
  case REX_RX:
  case REX_RXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    goto again_after_prefix;

  case REX_W:
  case REX_WB:
  case REX_WX:
  case REX_WXB:
  case REX_WR:
  case REX_WRB:
  case REX_WRX:
  case REX_WRXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    is_64bit = true; // REX.W widens any trailing immediate to 64 bits
    goto again_after_prefix;

  case 0xFF: // pushq a; decl a; incl a; call a; jmp a
  case 0x88: // movb a, r
  case 0x89: // movl a, r
  case 0x8A: // movb r, a
  case 0x8B: // movl r, a
  case 0x8F: // popl a
    debug_only(has_disp32 = true);
    break;

  case 0x68: // pushq #32
    if (which == end_pc_operand) {
      return ip + 4;
    }
    assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
    return ip; // not produced by emit_operand

  case 0x66: // movw ... (size prefix)
    again_after_size_prefix2:
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "64bit prefix found"));
      goto again_after_size_prefix2;
    case 0x8B: // movw r, a
    case 0x89: // movw a, r
      debug_only(has_disp32 = true);
      break;
    case 0xC7: // movw a, #16
      debug_only(has_disp32 = true);
      tail_size = 2; // the imm16
      break;
    case 0x0F: // several SSE/SSE2 variants
      ip--;    // reparse the 0x0F
      goto again_after_prefix;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP8(0xB8): // movl/q r, #32/#64(oop?)
    if (which == end_pc_operand) return ip + (is_64bit ? 8 : 4);
    // these asserts are somewhat nonsensical
#ifndef _LP64
    assert(which == imm_operand || which == disp32_operand, "");
#else
    assert((which == call32_operand || which == imm_operand) && is_64bit ||
           which == narrow_oop_operand && !is_64bit, "");
#endif // _LP64
    return ip;

  case 0x69: // imul r, a, #32
  case 0xC7: // movl a, #32(oop?)
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x0F: // movx..., etc.
    switch (0xFF & *ip++) {
    case 0x12: // movlps
    case 0x28: // movaps
    case 0x2E: // ucomiss
    case 0x2F: // comiss
    case 0x54: // andps
    case 0x55: // andnps
    case 0x56: // orps
    case 0x57: // xorps
    case 0x6E: // movd
    case 0x7E: // movd
    case 0xAE: // ldmxcsr a
      // 64bit side says it these have both operands but that doesn't
      // appear to be true
      debug_only(has_disp32 = true);
      break;

    case 0xAD: // shrd r, a, %cl
    case 0xAF: // imul r, a
    case 0xBE: // movsbl r, a (movsxb)
    case 0xBF: // movswl r, a (movsxw)
    case 0xB6: // movzbl r, a (movzxb)
    case 0xB7: // movzwl r, a (movzxw)
    case REP16(0x40): // cmovl cc, r, a
    case 0xB0: // cmpxchgb
    case 0xB1: // cmpxchg
    case 0xC1: // xaddl
    case 0xC7: // cmpxchg8
    case REP16(0x90): // setcc a
      debug_only(has_disp32 = true);
      // fall out of the switch to decode the address
      break;

    case 0xAC: // shrd r, a, #8
      debug_only(has_disp32 = true);
      tail_size = 1; // the imm8
      break;

    case REP16(0x80): // jcc rdisp32
      if (which == end_pc_operand) return ip + 4;
      assert(which == call32_operand, "jcc has no disp32 or imm");
      return ip;
    default:
      ShouldNotReachHere();
    }
    break;

  case 0x81: // addl a, #32; addl r, #32
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    // on 32bit in the case of cmpl, the imm might be an oop
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x83: // addl a, #8; addl r, #8
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1;
    break;

  case 0x9B:
    switch (0xFF & *ip++) {
    case 0xD9: // fnstcw a
      debug_only(has_disp32 = true);
      break;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
  case REP4(0x10): // adc...
  case REP4(0x20): // and...
  case REP4(0x30): // xor...
  case REP4(0x08): // or...
  case REP4(0x18): // sbb...
  case REP4(0x28): // sub...
  case 0xF7: // mull a
  case 0x8D: // lea r, a
  case 0x87: // xchg r, a
  case REP4(0x38): // cmp...
  case 0x85: // test r, a
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
  case 0xC6: // movb a, #8
  case 0x80: // cmpb a, #8
  case 0x6B: // imul r, a, #8
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1; // the imm8
    break;

  case 0xE8: // call rdisp32
  case 0xE9: // jmp  rdisp32
    if (which == end_pc_operand) return ip + 4;
    assert(which == call32_operand, "call has no disp32 or imm");
    return ip;

  case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
  case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
  case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
  case 0xDD: // fld_d a; fst_d a; fstp_d a
  case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
  case 0xDF: // fild_d a; fistp_d a
  case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
  case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
  case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
    debug_only(has_disp32 = true);
    break;

  case 0xF3: // For SSE
  case 0xF2: // For SSE2
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "found 64bit prefix"));
      ip++;
    default:
      ip++;
    }
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  default:
    ShouldNotReachHere();

#undef REP8
#undef REP16
    // NOTE(review): REP4 is not #undef'd here — presumably harmless since
    // this is a .cpp file; confirm no later redefinition clashes.
  }

  assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
#ifdef _LP64
  assert(which != imm_operand, "instruction is not a movq reg, imm64");
#else
  // assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
  assert(which != imm_operand || has_disp32, "instruction has no imm32 field");
#endif // LP64
  assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");

  // parse the output of emit_operand
  int op2 = 0xFF & *ip++;
  int base = op2 & 0x07;
  int op3 = -1;
  const int b100 = 4;
  const int b101 = 5;
  if (base == b100 && (op2 >> 6) != 3) {
    // r/m == 100 with a memory mode means a SIB byte follows
    op3 = 0xFF & *ip++;
    base = op3 & 0x07;   // refetch the base
  }
  // now ip points at the disp (if any)

  switch (op2 >> 6) {
  case 0:
    // [00 reg  100][ss index base]
    // [00 reg  100][00   100  esp]
    // [00 reg base]
    // [00 reg  100][ss index  101][disp32]
    // [00 reg  101]               [disp32]

    if (base == b101) {
      // base == 101 in mode 00 means no base: a bare disp32 follows
      if (which == disp32_operand)
        return ip; // caller wants the disp32
      ip += 4; // skip the disp32
    }
    break;

  case 1:
    // [01 reg  100][ss index base][disp8]
    // [01 reg  100][00   100  esp][disp8]
    // [01 reg base]               [disp8]
    ip += 1; // skip the disp8
    break;

  case 2:
    // [10 reg  100][ss index base][disp32]
    // [10 reg  100][00   100  esp][disp32]
    // [10 reg base]               [disp32]
    if (which == disp32_operand)
      return ip; // caller wants the disp32
    ip += 4; // skip the disp32
    break;

  case 3:
    // [11 reg base]  (not a memory addressing mode)
    break;
  }

  if (which == end_pc_operand) {
    return ip + tail_size;
  }

#ifdef _LP64
  assert(false, "fix locate_operand");
#else
  assert(which == imm_operand, "instruction has only an imm field");
#endif // LP64
  return ip;
}
726
// Returns the address of the instruction following 'inst'.
address Assembler::locate_next_instruction(address inst) {
  // Secretly share code with locate_operand:
  return locate_operand(inst, end_pc_operand);
}
731
732
#ifdef ASSERT
// Debug-only sanity check: verifies that the operand selected by 'format'
// inside the instruction at inst_mark() ends exactly at the current pc(),
// i.e. the relocation will point at the word just emitted.
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
  address inst = inst_mark();
  assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
  address opnd;

  Relocation* r = rspec.reloc();
  if (r->type() == relocInfo::none) {
    return;
  } else if (r->is_call() || format == call32_operand) {
    // assert(format == imm32_operand, "cannot specify a nonzero format");
    opnd = locate_operand(inst, call32_operand);
  } else if (r->is_data()) {
    assert(format == imm_operand || format == disp32_operand
           LP64_ONLY(|| format == narrow_oop_operand), "format ok");
    opnd = locate_operand(inst, (WhichOperand)format);
  } else {
    assert(format == imm_operand, "cannot specify a format");
    return;
  }
  assert(opnd == pc(), "must put operand where relocs can find it");
}
#endif // ASSERT
756
// Emits a memory operand restricted to the legacy (non-REX) register set;
// used where no REX prefix has been emitted.
void Assembler::emit_operand32(Register reg, Address adr) {
  assert(reg->encoding() < 8, "no extended registers");
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}
763
// Convenience form: unpacks an Address into the field-level emitter.
// 'rip_relative_correction' counts immediate bytes that follow the operand.
void Assembler::emit_operand(Register reg, Address adr,
                             int rip_relative_correction) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               rip_relative_correction);
}
770
// XMM convenience form: unpacks an Address into the field-level emitter.
void Assembler::emit_operand(XMMRegister reg, Address adr) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}
775
// MMX operations
// MMX registers never carry REX extension bits, hence the assert.
void Assembler::emit_operand(MMXRegister reg, Address adr) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}
781
// work around gcc (3.2.1-7a) bug
// Argument-swapped alias of the MMX emitter above; behavior is identical.
void Assembler::emit_operand(Address adr, MMXRegister reg) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}
787
// Emits a two-byte x87 instruction whose second byte encodes an FPU
// stack slot offset (ST(i)).
void Assembler::emit_farith(int b1, int b2, int i) {
  assert(isByte(b1) && isByte(b2), "wrong opcode");
  assert(0 <= i && i < 8, "illegal stack offset");
  emit_byte(b1);
  emit_byte(b2 + i);
}
795
796
797 // Now the Assembler instruction (identical for 32/64 bits)
798
// adcl dst, imm32 — add with carry, immediate form.
void Assembler::adcl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD0, dst, imm32); // 0xD0: ADC opcode extension in ModRM reg field
}
803
// adcl dst, src — add with carry, memory source form (opcode 0x13).
void Assembler::adcl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x13);
  emit_operand(dst, src);
}
810
// adcl dst, src — add with carry, register-register form.
void Assembler::adcl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}
815
// addl [dst], imm32 — immediate-to-memory add (rax encodes /0 = ADD).
void Assembler::addl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}
821
// addl [dst], src — register-to-memory add (opcode 0x01).
void Assembler::addl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_byte(0x01);
  emit_operand(src, dst);
}
828
// addl dst, imm32 — immediate-to-register add.
void Assembler::addl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC0, dst, imm32);
}
833
// addl dst, [src] — memory-to-register add (opcode 0x03).
void Assembler::addl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x03);
  emit_operand(dst, src);
}
840
// addl dst, src — register-register add.
void Assembler::addl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}
845
// Emits a 4-byte multi-byte NOP (0F 1F /0 with a disp8 of zero).
void Assembler::addr_nop_4() {
  // 4 bytes: NOP DWORD PTR [EAX+0]
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
  emit_byte(0);    // 8-bits offset (1 byte)
}
853
// Emits a 5-byte multi-byte NOP (adds a SIB byte to the 4-byte form).
void Assembler::addr_nop_5() {
  // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
  emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_byte(0);    // 8-bits offset (1 byte)
}
862
// Emits a 7-byte multi-byte NOP (disp32 form, no SIB).
void Assembler::addr_nop_7() {
  // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x80); // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
  emit_long(0);    // 32-bits offset (4 bytes)
}
870
// Emits an 8-byte multi-byte NOP (disp32 form with SIB byte).
void Assembler::addr_nop_8() {
  // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x84); // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
  emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_long(0);    // 32-bits offset (4 bytes)
}
879
// addsd dst, src — scalar double add (F2 0F 58), register form.
// The F2 prefix must precede any REX prefix emitted by prefix_and_encode.
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x58);
  emit_byte(0xC0 | encode);
}
888
// addsd dst, [src] — scalar double add, memory form.
void Assembler::addsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x58);
  emit_operand(dst, src);
}
898
// addss dst, src — scalar float add (F3 0F 58), register form.
void Assembler::addss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x58);
  emit_byte(0xC0 | encode);
}
907
// addss dst, [src] — scalar float add, memory form.
void Assembler::addss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x58);
  emit_operand(dst, src);
}
917
// andl dst, imm32 — bitwise AND, immediate form.
void Assembler::andl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE0, dst, imm32); // 0xE0: AND opcode extension in ModRM reg field
}
922
// andl dst, [src] — bitwise AND, memory source form (opcode 0x23).
void Assembler::andl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x23);
  emit_operand(dst, src);
}
929
// andl dst, src — bitwise AND, register-register form.
void Assembler::andl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}
934
// andpd dst, [src] — packed-double bitwise AND (66 0F 54), memory form.
void Assembler::andpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x54);
  emit_operand(dst, src);
}
944
// bswapl reg — byte-swap a 32-bit register (0F C8+r).
void Assembler::bswapl(Register reg) { // bswap
  int encode = prefix_and_encode(reg->encoding());
  emit_byte(0x0F);
  emit_byte(0xC8 | encode);
}
950
// call to a Label: emits E8 rel32.  If the label is already bound the
// backward displacement is computed now; otherwise a patch record is added
// and a zero displacement emitted as a placeholder.
void Assembler::call(Label& L, relocInfo::relocType rtype) {
  // suspect disp32 is always good
  int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);

  if (L.is_bound()) {
    const int long_size = 5; // total length of E8 + rel32
    int offs = (int)( target(L) - pc() );
    assert(offs <= 0, "assembler error"); // bound labels are always backward
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    emit_byte(0xE8);
    emit_data(offs - long_size, rtype, operand);
  } else {
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    L.add_patch_at(code(), locator());

    emit_byte(0xE8);
    emit_data(int(0), rtype, operand); // placeholder; fixed up when L binds
  }
}
972
// call dst — indirect call through a register (FF /2).
void Assembler::call(Register dst) {
  // This was originally using a 32bit register encoding
  // and surely we want 64bit!
  // this is a 32bit encoding but in 64bit mode the default
  // operand size is 64bit so there is no need for the
  // wide prefix. So prefix only happens if we use the
  // new registers. Much like push/pop.
  int x = offset();
  // this may be true but dbx disassembles it as if it
  // were 32bits...
  // int encode = prefix_and_encode(dst->encoding());
  // if (offset() != x) assert(dst->encoding() >= 8, "what?");
  int encode = prefixq_and_encode(dst->encoding());

  emit_byte(0xFF);
  emit_byte(0xD0 | encode);
}
990
991
// call [adr] — indirect call through memory (FF /2; rdx encodes the /2
// opcode extension, it is not an operand).
void Assembler::call(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_byte(0xFF);
  emit_operand(rdx, adr);
}
998
// Direct call to an absolute address: E8 with a rel32 displacement computed
// from the position just past the displacement word.
void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
  assert(entry != NULL, "call most probably wrong");
  InstructionMark im(this);
  emit_byte(0xE8);
  intptr_t disp = entry - (_code_pos + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (call2)");
  // Technically, should use call32_operand, but this format is
  // implied by the fact that we're emitting a call instruction.

  int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
  emit_data((int) disp, rspec, operand);
}
1011
// cdq — sign-extend eax into edx:eax (opcode 0x99).
void Assembler::cdql() {
  emit_byte(0x99);
}
1015
// cmovcc dst, src — conditional move, register form (0F 40+cc).
void Assembler::cmovl(Condition cc, Register dst, Register src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x40 | cc); // condition code selects the exact opcode
  emit_byte(0xC0 | encode);
}
1023
1024
1025 void Assembler::cmovl(Condition cc, Register dst, Address src) {
1026 NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
1027 prefix(src, dst);
1028 emit_byte(0x0F);
1029 emit_byte(0x40 | cc);
1030 emit_operand(dst, src);
1031 }
1032
// CMP m8, imm8: 80 /7 ib.  The trailing '1' tells emit_operand an imm8
// follows the operand (keeps any relocation displacement correct).
void Assembler::cmpb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x80);
  emit_operand(rdi, dst, 1);  // rdi encodes the /7 opcode extension
  emit_byte(imm8);
}

// CMP m32, imm32: 81 /7 id.
void Assembler::cmpl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x81);
  emit_operand(rdi, dst, 4);  // /7; imm32 follows
  emit_long(imm32);
}

// CMP r32, imm32 via the shared arithmetic emitter (/7 = ModRM base 0xF8).
void Assembler::cmpl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF8, dst, imm32);
}

// CMP r32, r32: 3B /r.
void Assembler::cmpl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}


// CMP r32, m32: 3B /r.
void Assembler::cmpl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x3B);
  emit_operand(dst, src);
}

// CMP m16, imm16: 66 81 /7 iw.  No REX support here — asserts the address
// uses no extended registers since the 0x66 is emitted before any prefix.
void Assembler::cmpw(Address dst, int imm16) {
  InstructionMark im(this);
  assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers");
  emit_byte(0x66);  // operand-size override: 16-bit
  emit_byte(0x81);
  emit_operand(rdi, dst, 2);  // /7; imm16 follows
  emit_word(imm16);
}
1075
// The 32-bit cmpxchg compares the value at adr with the contents of rax,
// and stores reg into adr if so; otherwise, the value at adr is loaded into rax,.
// The ZF is set if the compared values were equal, and cleared otherwise.
// NOTE: real encoding is 0F B1 /r; when the Atomics flag has bit 1 set a
// non-atomic equivalent sequence is emitted instead (testing/diagnostic
// mode — presumably only safe uniprocessor or under external locking).
void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
  if (Atomics & 2) {
    // caveat: no instructionmark, so this isn't relocatable.
    // Emit a synthetic, non-atomic, CAS equivalent.
    // Beware. The synthetic form sets all ICCs, not just ZF.
    // cmpxchg r,[m] is equivalent to rax, = CAS (m, rax, r)
    cmpl(rax, adr);
    movl(rax, adr);
    if (reg != rax) {
      Label L ;
      jcc(Assembler::notEqual, L);
      movl(adr, reg);
      bind(L);
    }
  } else {
    InstructionMark im(this);
    prefix(adr, reg);
    emit_byte(0x0F);
    emit_byte(0xB1);
    emit_operand(reg, adr);
  }
}
1101
// COMISD xmm, m64: 66 0F 2F /r — implemented as a 0x66 prefix on top of
// the comiss() encoding below.
void Assembler::comisd(XMMRegister dst, Address src) {
  // NOTE: dbx seems to decode this as comiss even though the
  // 0x66 is there. Strangly ucomisd comes out correct
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  comiss(dst, src);
}

// COMISS xmm, m32: 0F 2F /r.
void Assembler::comiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));

  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x2F);
  emit_operand(dst, src);
}
1119
// CVTDQ2PD xmm, xmm: F3 0F E6 /r (packed int32 -> packed double).
void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xE6);
  emit_byte(0xC0 | encode);
}

// CVTDQ2PS xmm, xmm: 0F 5B /r (packed int32 -> packed single).
void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5B);
  emit_byte(0xC0 | encode);
}

// CVTSD2SS xmm, xmm: F2 0F 5A /r (scalar double -> scalar single).
void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5A);
  emit_byte(0xC0 | encode);
}

// CVTSI2SD xmm, r32: F2 0F 2A /r (the 'l' suffix = 32-bit source).
void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}

// CVTSI2SS xmm, r32: F3 0F 2A /r.
void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}

// CVTSS2SD xmm, xmm: F3 0F 5A /r (scalar single -> scalar double).
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5A);
  emit_byte(0xC0 | encode);
}

// CVTTSD2SI r32, xmm: F2 0F 2C /r (truncating double -> int32).
void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}

// CVTTSS2SI r32, xmm: F3 0F 2C /r (truncating single -> int32).
void Assembler::cvttss2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}
1190
// DEC m32: FF /1 (rcx encodes the /1 extension).
void Assembler::decl(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrement() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xFF);
  emit_operand(rcx, dst);
}
1198
// DIVSD xmm, m64: F2 0F 5E /r.
void Assembler::divsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x5E);
  emit_operand(dst, src);
}

// DIVSD xmm, xmm: F2 0F 5E /r.
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5E);
  emit_byte(0xC0 | encode);
}

// DIVSS xmm, m32: F3 0F 5E /r.
void Assembler::divss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x5E);
  emit_operand(dst, src);
}

// DIVSS xmm, xmm: F3 0F 5E /r.
void Assembler::divss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5E);
  emit_byte(0xC0 | encode);
}
1236
// EMMS: 0F 77 — exit MMX state (frees the x87 register stack).
void Assembler::emms() {
  NOT_LP64(assert(VM_Version::supports_mmx(), ""));
  emit_byte(0x0F);
  emit_byte(0x77);
}

// HLT: 0xF4 — used as a trap/padding byte, not for actual halting.
void Assembler::hlt() {
  emit_byte(0xF4);
}

// IDIV r32: F7 /7 — signed divide of edx:eax by src.
void Assembler::idivl(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_byte(0xF7);
  emit_byte(0xF8 | encode);
}
1252
// IMUL r32, r32: 0F AF /r.
void Assembler::imull(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xAF);
  emit_byte(0xC0 | encode);
}


// IMUL r32, r32, imm: picks the short form 6B /r ib when the constant
// fits in a signed byte, else 69 /r id.
void Assembler::imull(Register dst, Register src, int value) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_byte(0x6B);
    emit_byte(0xC0 | encode);
    emit_byte(value);
  } else {
    emit_byte(0x69);
    emit_byte(0xC0 | encode);
    emit_long(value);
  }
}
1273
// INC m32: FF /0 (rax encodes the /0 extension).
void Assembler::incl(Address dst) {
  // Don't use it directly. Use MacroAssembler::increment() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xFF);
  emit_operand(rax, dst);
}
1281
// Conditional jump to a label.  Bound labels get the short form
// 70+cc rel8 when the (unrelocated) displacement fits, otherwise the
// long form 0F 80+cc rel32.  Unbound labels always reserve the long
// form and are patched when the label binds.
void Assembler::jcc(Condition cc, Label& L, relocInfo::relocType rtype) {
  InstructionMark im(this);
  relocate(rtype);
  assert((0 <= cc) && (cc < 16), "illegal cc");
  if (L.is_bound()) {
    address dst = target(L);
    assert(dst != NULL, "jcc most probably wrong");

    const int short_size = 2;
    const int long_size = 6;
    // Displacements are relative to the end of the instruction, hence
    // the '- short_size' / '- long_size' adjustments below.
    intptr_t offs = (intptr_t)dst - (intptr_t)_code_pos;
    if (rtype == relocInfo::none && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_byte(0x70 | cc);
      emit_byte((offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      assert(is_simm32(offs - long_size),
             "must be 32bit offset (call4)");
      emit_byte(0x0F);
      emit_byte(0x80 | cc);
      emit_long(offs - long_size);
    }
  } else {
    // Note: could eliminate cond. jumps to this jump if condition
    // is the same however, seems to be rather unlikely case.
    // Note: use jccb() if label to be bound is very close to get
    // an 8-bit displacement
    L.add_patch_at(code(), locator());
    emit_byte(0x0F);
    emit_byte(0x80 | cc);
    emit_long(0);  // placeholder rel32, patched at bind time
  }
}
1316
// Short-form conditional jump: 70+cc rel8, always two bytes.  Caller
// guarantees the target is within rel8 range (asserted for bound labels;
// unbound labels are patched later and must bind close by).
void Assembler::jccb(Condition cc, Label& L) {
  if (L.is_bound()) {
    const int short_size = 2;
    address entry = target(L);
    assert(is8bit((intptr_t)entry - ((intptr_t)_code_pos + short_size)),
           "Dispacement too large for a short jmp");
    intptr_t offs = (intptr_t)entry - (intptr_t)_code_pos;
    // 0111 tttn #8-bit disp
    emit_byte(0x70 | cc);
    emit_byte((offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_byte(0x70 | cc);
    emit_byte(0);  // placeholder rel8, patched at bind time
  }
}
1334
// Indirect jump through memory: FF /4 (rsp encodes the /4 extension).
void Assembler::jmp(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_byte(0xFF);
  emit_operand(rsp, adr);
}
1341
// Unconditional jump to a label: EB rel8 when a bound, unrelocated target
// is in byte range, else E9 rel32.  Unbound labels reserve the rel32 form.
void Assembler::jmp(Label& L, relocInfo::relocType rtype) {
  if (L.is_bound()) {
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
    InstructionMark im(this);
    const int short_size = 2;
    const int long_size = 5;
    // Displacement is relative to the end of the chosen encoding.
    intptr_t offs = entry - _code_pos;
    if (rtype == relocInfo::none && is8bit(offs - short_size)) {
      emit_byte(0xEB);
      emit_byte((offs - short_size) & 0xFF);
    } else {
      emit_byte(0xE9);
      emit_long(offs - long_size);
    }
  } else {
    // By default, forward jumps are always 32-bit displacements, since
    // we can't yet know where the label will be bound. If you're sure that
    // the forward jump will not run beyond 256 bytes, use jmpb to
    // force an 8-bit displacement.
    InstructionMark im(this);
    relocate(rtype);
    L.add_patch_at(code(), locator());
    emit_byte(0xE9);
    emit_long(0);  // placeholder rel32, patched at bind time
  }
}
1369
// Indirect jump through a register: FF /4 (ModRM 0xE0 | reg).
void Assembler::jmp(Register entry) {
  int encode = prefix_and_encode(entry->encoding());
  emit_byte(0xFF);
  emit_byte(0xE0 | encode);
}
1375
// Direct jump to an absolute address: E9 rel32.  Target must be within
// a signed 32-bit displacement of the end of the instruction.
void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_byte(0xE9);
  assert(dest != NULL, "must have a target");
  intptr_t disp = dest - (_code_pos + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (jmp)");
  emit_data(disp, rspec.reloc(), call32_operand);
}
1384
1385 void Assembler::jmpb(Label& L) {
1386 if (L.is_bound()) {
1387 const int short_size = 2;
1388 address entry = target(L);
1389 assert(is8bit((entry - _code_pos) + short_size),
1390 "Dispacement too large for a short jmp");
1391 assert(entry != NULL, "jmp most probably wrong");
1392 intptr_t offs = entry - _code_pos;
1393 emit_byte(0xEB);
1394 emit_byte((offs - short_size) & 0xFF);
1395 } else {
1396 InstructionMark im(this);
1397 L.add_patch_at(code(), locator());
1398 emit_byte(0xEB);
1399 emit_byte(0);
1400 }
1401 }
1402
// LDMXCSR m32: 0F AE /2 — load the MXCSR control/status register.
void Assembler::ldmxcsr( Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(src);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(as_Register(2), src);  // /2 opcode extension
}
1411
// LEA r32, m: 8D /r.  On 64-bit an address-size override (0x67) is
// emitted so the effective-address arithmetic is done in 32 bits,
// matching the 32-bit "leal" semantics.
void Assembler::leal(Register dst, Address src) {
  InstructionMark im(this);
#ifdef _LP64
  emit_byte(0x67); // addr32
  prefix(src, dst);
#endif // LP64
  emit_byte(0x8D);
  emit_operand(dst, src);
}
1421
// LOCK prefix (0xF0).  When bit 0 of the Atomics flag is set a NOP is
// emitted instead — a diagnostic mode that drops atomicity.
void Assembler::lock() {
  if (Atomics & 1) {
    // Emit either nothing, a NOP, or a NOP: prefix
    emit_byte(0x90) ;
  } else {
    emit_byte(0xF0);
  }
}
1430
// Serializes memory.
// Emits MFENCE (0F AE F0) where SSE2 is available (always on 64-bit);
// otherwise falls back to a locked add to the stack, which is an
// equally strong barrier on all supported chips.
void Assembler::mfence() {
  // Memory barriers are only needed on multiprocessors
  if (os::is_MP()) {
    if( LP64_ONLY(true ||) VM_Version::supports_sse2() ) {
      emit_byte( 0x0F );                // MFENCE; faster blows no regs
      emit_byte( 0xAE );
      emit_byte( 0xF0 );
    } else {
      // All usable chips support "locked" instructions which suffice
      // as barriers, and are much faster than the alternative of
      // using cpuid instruction. We use here a locked add [esp],0.
      // This is conveniently otherwise a no-op except for blowing
      // flags (which we save and restore.)
      pushf();                // Save eflags register
      lock();
      addl(Address(rsp, 0), 0);// Assert the lock# signal here
      popf();                 // Restore eflags register
    }
  }
}
1452
// Pointer-sized register move: movq on 64-bit, movl on 32-bit.
void Assembler::mov(Register dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}
1456
1457 void Assembler::movapd(XMMRegister dst, XMMRegister src) {
1458 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1459 int dstenc = dst->encoding();
1460 int srcenc = src->encoding();
1461 emit_byte(0x66);
1462 if (dstenc < 8) {
1463 if (srcenc >= 8) {
1464 prefix(REX_B);
1465 srcenc -= 8;
1466 }
1467 } else {
1468 if (srcenc < 8) {
1469 prefix(REX_R);
1470 } else {
1471 prefix(REX_RB);
1472 srcenc -= 8;
1473 }
1474 dstenc -= 8;
1475 }
1476 emit_byte(0x0F);
1477 emit_byte(0x28);
1478 emit_byte(0xC0 | dstenc << 3 | srcenc);
1479 }
1480
1481 void Assembler::movaps(XMMRegister dst, XMMRegister src) {
1482 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1483 int dstenc = dst->encoding();
1484 int srcenc = src->encoding();
1485 if (dstenc < 8) {
1486 if (srcenc >= 8) {
1487 prefix(REX_B);
1488 srcenc -= 8;
1489 }
1490 } else {
1491 if (srcenc < 8) {
1492 prefix(REX_R);
1493 } else {
1494 prefix(REX_RB);
1495 srcenc -= 8;
1496 }
1497 dstenc -= 8;
1498 }
1499 emit_byte(0x0F);
1500 emit_byte(0x28);
1501 emit_byte(0xC0 | dstenc << 3 | srcenc);
1502 }
1503
// MOV r8, m8: 8A /r.  The 'true' asks prefix() for a byte-register-safe
// REX so spl/bpl/sil/dil are addressable on 64-bit.
void Assembler::movb(Register dst, Address src) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  InstructionMark im(this);
  prefix(src, dst, true);
  emit_byte(0x8A);
  emit_operand(dst, src);
}


// MOV m8, imm8: C6 /0 ib.
void Assembler::movb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xC6);
  emit_operand(rax, dst, 1);  // /0; imm8 follows
  emit_byte(imm8);
}


// MOV m8, r8: 88 /r.
void Assembler::movb(Address dst, Register src) {
  assert(src->has_byte_register(), "must have byte register");
  InstructionMark im(this);
  prefix(dst, src, true);  // byte-register-safe REX
  emit_byte(0x88);
  emit_operand(src, dst);
}
1529
// MOVD xmm, r32: 66 0F 6E /r.
void Assembler::movdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x6E);
  emit_byte(0xC0 | encode);
}

// MOVD r32, xmm: 66 0F 7E /r.  ModRM reg field holds the xmm here, so
// the operands are swapped when computing the prefix/encoding.
void Assembler::movdl(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  // swap src/dst to get correct prefix
  int encode = prefix_and_encode(src->encoding(), dst->encoding());
  emit_byte(0x0F);
  emit_byte(0x7E);
  emit_byte(0xC0 | encode);
}
1548
// MOVDQA xmm, m128: 66 0F 6F /r (aligned 128-bit load).
void Assembler::movdqa(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x6F);
  emit_operand(dst, src);
}

// MOVDQA xmm, xmm: 66 0F 6F /r.
// NOTE(review): this reg-reg form uses prefixq_and_encode (adds REX.W),
// unlike the other reg-reg emitters; REX.W appears to be irrelevant to
// this opcode's operation — kept as-is to preserve emitted bytes.
void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x6F);
  emit_byte(0xC0 | encode);
}

// MOVDQA m128, xmm: 66 0F 7F /r (aligned 128-bit store).
void Assembler::movdqa(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0x7F);
  emit_operand(src, dst);
}
1577
// Uses zero extension on 64bit

// MOV r32, imm32: B8+rd id.
void Assembler::movl(Register dst, int32_t imm32) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_long(imm32);
}

// MOV r32, r32: 8B /r.
void Assembler::movl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x8B);
  emit_byte(0xC0 | encode);
}

// MOV r32, m32: 8B /r.
void Assembler::movl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x8B);
  emit_operand(dst, src);
}

// MOV m32, imm32: C7 /0 id.
void Assembler::movl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xC7);
  emit_operand(rax, dst, 4);  // /0; imm32 follows
  emit_long(imm32);
}

// MOV m32, r32: 89 /r.
void Assembler::movl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_byte(0x89);
  emit_operand(src, dst);
}
1613
// New cpus require to use movsd and movss to avoid partial register stall
// when loading from memory. But for old Opteron use movlpd instead of movsd.
// The selection is done in MacroAssembler::movdbl() and movflt().

// MOVLPD xmm, m64: 66 0F 12 /r — loads the low qword, high qword untouched.
void Assembler::movlpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x12);
  emit_operand(dst, src);
}
1626
// MOVQ mm, m64: 0F 6F /r (MMX form; note: no InstructionMark, so not
// relocatable as emitted).
void Assembler::movq( MMXRegister dst, Address src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_byte(0x0F);
  emit_byte(0x6F);
  emit_operand(dst, src);
}

// MOVQ m64, mm: 0F 7F /r (MMX store).
void Assembler::movq( Address dst, MMXRegister src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_byte(0x0F);
  emit_byte(0x7F);
  // workaround gcc (3.2.1-7a) bug
  // In that version of gcc with only an emit_operand(MMX, Address)
  // gcc will tail jump and try and reverse the parameters completely
  // obliterating dst in the process. By having a version available
  // that doesn't need to swap the args at the tail jump the bug is
  // avoided.
  emit_operand(dst, src);
}
1646
// MOVQ xmm, m64: F3 0F 7E /r (loads qword, zeroes the high half).
void Assembler::movq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x7E);
  emit_operand(dst, src);
}

// MOVQ m64, xmm: 66 0F D6 /r (stores the low qword).
void Assembler::movq(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0xD6);
  emit_operand(src, dst);
}
1666
// MOVSX r32, m8: 0F BE /r (sign-extending byte load).
void Assembler::movsbl(Register dst, Address src) { // movsxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xBE);
  emit_operand(dst, src);
}

// MOVSX r32, r8: 0F BE /r; 'true' requests a byte-register-safe REX.
void Assembler::movsbl(Register dst, Register src) { // movsxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
  emit_byte(0x0F);
  emit_byte(0xBE);
  emit_byte(0xC0 | encode);
}
1682
// MOVSD xmm, xmm: F2 0F 10 /r.
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x10);
  emit_byte(0xC0 | encode);
}

// MOVSD xmm, m64: F2 0F 10 /r.
void Assembler::movsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x10);
  emit_operand(dst, src);
}

// MOVSD m64, xmm: F2 0F 11 /r.
void Assembler::movsd(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0x11);
  emit_operand(src, dst);
}
1711
// MOVSS xmm, xmm: F3 0F 10 /r.
void Assembler::movss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x10);
  emit_byte(0xC0 | encode);
}

// MOVSS xmm, m32: F3 0F 10 /r.
void Assembler::movss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x10);
  emit_operand(dst, src);
}

// MOVSS m32, xmm: F3 0F 11 /r.
void Assembler::movss(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0x11);
  emit_operand(src, dst);
}
1740
// MOVSX r32, m16: 0F BF /r (sign-extending word load).
void Assembler::movswl(Register dst, Address src) { // movsxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xBF);
  emit_operand(dst, src);
}

// MOVSX r32, r16: 0F BF /r.
void Assembler::movswl(Register dst, Register src) { // movsxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xBF);
  emit_byte(0xC0 | encode);
}
1755
// MOV m16, imm16: 66 C7 /0 iw.
void Assembler::movw(Address dst, int imm16) {
  InstructionMark im(this);

  emit_byte(0x66); // switch to 16-bit mode
  prefix(dst);
  emit_byte(0xC7);
  emit_operand(rax, dst, 2);  // /0; imm16 follows
  emit_word(imm16);
}

// MOV r16, m16: 66 8B /r.
void Assembler::movw(Register dst, Address src) {
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x8B);
  emit_operand(dst, src);
}

// MOV m16, r16: 66 89 /r.
void Assembler::movw(Address dst, Register src) {
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(dst, src);
  emit_byte(0x89);
  emit_operand(src, dst);
}
1781
// MOVZX r32, m8: 0F B6 /r (zero-extending byte load).
void Assembler::movzbl(Register dst, Address src) { // movzxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xB6);
  emit_operand(dst, src);
}

// MOVZX r32, r8: 0F B6 /r; 'true' requests a byte-register-safe REX.
void Assembler::movzbl(Register dst, Register src) { // movzxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
  emit_byte(0x0F);
  emit_byte(0xB6);
  emit_byte(0xC0 | encode);
}

// MOVZX r32, m16: 0F B7 /r (zero-extending word load).
void Assembler::movzwl(Register dst, Address src) { // movzxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xB7);
  emit_operand(dst, src);
}

// MOVZX r32, r16: 0F B7 /r.
void Assembler::movzwl(Register dst, Register src) { // movzxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xB7);
  emit_byte(0xC0 | encode);
}
1812
// MUL m32: F7 /4 — unsigned multiply of eax by the memory operand
// (rsp encodes the /4 extension).
void Assembler::mull(Address src) {
  InstructionMark im(this);
  prefix(src);
  emit_byte(0xF7);
  emit_operand(rsp, src);
}

// MUL r32: F7 /4 (ModRM 0xE0 | reg).
void Assembler::mull(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_byte(0xF7);
  emit_byte(0xE0 | encode);
}
1825
// MULSD xmm, m64: F2 0F 59 /r.
void Assembler::mulsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x59);
  emit_operand(dst, src);
}

// MULSD xmm, xmm: F2 0F 59 /r.
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x59);
  emit_byte(0xC0 | encode);
}

// MULSS xmm, m32: F3 0F 59 /r.
void Assembler::mulss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x59);
  emit_operand(dst, src);
}

// MULSS xmm, xmm: F3 0F 59 /r.
void Assembler::mulss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x59);
  emit_byte(0xC0 | encode);
}
1863
// NEG r32: F7 /3 (ModRM 0xD8 | reg) — two's-complement negate.
void Assembler::negl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xF7);
  emit_byte(0xD8 | encode);
}
1869
// Emit i bytes of padding.  Under ASSERT only plain 0x90 NOPs are used
// (debugger-friendly); otherwise multi-byte "address NOPs" (0F 1F /0)
// are chosen per CPU vendor, falling back to 0x66-prefixed NOPs.
// The byte tables below are the contract — do not reorder emissions.
void Assembler::nop(int i) {
#ifdef ASSERT
  assert(i > 0, " ");
  // The fancy nops aren't currently recognized by debuggers making it a
  // pain to disassemble code while debugging. If asserts are on clearly
  // speed is not an issue so simply use the single byte traditional nop
  // to do alignment.

  for (; i > 0 ; i--) emit_byte(0x90);
  return;

#endif // ASSERT

  if (UseAddressNop && VM_Version::is_intel()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for Intel
    //  1: 0x90
    //  2: 0x66 0x90
    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    //  4: 0x0F 0x1F 0x40 0x00
    //  5: 0x0F 0x1F 0x44 0x00 0x00
    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The rest coding is Intel specific - don't use consecutive address nops

    // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90

    while(i >= 15) {
      // For Intel don't generate consecutive addess nops (mix with regular nops)
      i -= 15;
      emit_byte(0x66);   // size prefix
      emit_byte(0x66);   // size prefix
      emit_byte(0x66);   // size prefix
      addr_nop_8();
      emit_byte(0x66);   // size prefix
      emit_byte(0x66);   // size prefix
      emit_byte(0x66);   // size prefix
      emit_byte(0x90);   // nop
    }
    // Deliberate fall-through between the size cases below: each larger
    // size adds one 0x66 prefix on top of the next smaller emission.
    switch (i) {
      case 14:
        emit_byte(0x66); // size prefix
      case 13:
        emit_byte(0x66); // size prefix
      case 12:
        addr_nop_8();
        emit_byte(0x66); // size prefix
        emit_byte(0x66); // size prefix
        emit_byte(0x66); // size prefix
        emit_byte(0x90); // nop
        break;
      case 11:
        emit_byte(0x66); // size prefix
      case 10:
        emit_byte(0x66); // size prefix
      case 9:
        emit_byte(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_byte(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_byte(0x66); // size prefix
      case 2:
        emit_byte(0x66); // size prefix
      case 1:
        emit_byte(0x90); // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }
  if (UseAddressNop && VM_Version::is_amd()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for AMD.
    //  1: 0x90
    //  2: 0x66 0x90
    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    //  4: 0x0F 0x1F 0x40 0x00
    //  5: 0x0F 0x1F 0x44 0x00 0x00
    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The rest coding is AMD specific - use consecutive address nops

    // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //     Size prefixes (0x66) are added for larger sizes

    while(i >= 22) {
      i -= 11;
      emit_byte(0x66); // size prefix
      emit_byte(0x66); // size prefix
      emit_byte(0x66); // size prefix
      addr_nop_8();
    }
    // Generate first nop for size between 21-12
    // (fall-through cases each prepend a 0x66 prefix and decrement i)
    switch (i) {
      case 21:
        i -= 1;
        emit_byte(0x66); // size prefix
      case 20:
      case 19:
        i -= 1;
        emit_byte(0x66); // size prefix
      case 18:
      case 17:
        i -= 1;
        emit_byte(0x66); // size prefix
      case 16:
      case 15:
        i -= 8;
        addr_nop_8();
        break;
      case 14:
      case 13:
        i -= 7;
        addr_nop_7();
        break;
      case 12:
        i -= 6;
        emit_byte(0x66); // size prefix
        addr_nop_5();
        break;
      default:
        assert(i < 12, " ");
    }

    // Generate second nop for size between 11-1
    switch (i) {
      case 11:
        emit_byte(0x66); // size prefix
      case 10:
        emit_byte(0x66); // size prefix
      case 9:
        emit_byte(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_byte(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_byte(0x66); // size prefix
      case 2:
        emit_byte(0x66); // size prefix
      case 1:
        emit_byte(0x90); // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }

  // Using nops with size prefixes "0x66 0x90".
  // From AMD Optimization Guide:
  //  1: 0x90
  //  2: 0x66 0x90
  //  3: 0x66 0x66 0x90
  //  4: 0x66 0x66 0x66 0x90
  //  5: 0x66 0x66 0x90 0x66 0x90
  //  6: 0x66 0x66 0x90 0x66 0x66 0x90
  //  7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
  //  8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
  //  9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  //
  while(i > 12) {
    i -= 4;
    emit_byte(0x66); // size prefix
    emit_byte(0x66);
    emit_byte(0x66);
    emit_byte(0x90); // nop
  }
  // 1 - 12 nops
  if(i > 8) {
    if(i > 9) {
      i -= 1;
      emit_byte(0x66);
    }
    i -= 3;
    emit_byte(0x66);
    emit_byte(0x66);
    emit_byte(0x90);
  }
  // 1 - 8 nops
  if(i > 4) {
    if(i > 6) {
      i -= 1;
      emit_byte(0x66);
    }
    i -= 3;
    emit_byte(0x66);
    emit_byte(0x66);
    emit_byte(0x90);
  }
  switch (i) {
    case 4:
      emit_byte(0x66);
    case 3:
      emit_byte(0x66);
    case 2:
      emit_byte(0x66);
    case 1:
      emit_byte(0x90);
      break;
    default:
      assert(i == 0, " ");
  }
}
2117
// NOT r32: F7 /2 (ModRM 0xD0 | reg) — one's-complement negate.
void Assembler::notl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xF7);
  emit_byte(0xD0 | encode );
}
2123
// or [dst], imm32 : 81 /1 id
void Assembler::orl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x81);
  emit_operand(rcx, dst, 4); // rcx encodes the /1 extension; 4 bytes of immediate follow
  emit_long(imm32);
}

// or dst, imm32
void Assembler::orl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC8, dst, imm32);
}


// or dst, [src] : 0B /r
void Assembler::orl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0B);
  emit_operand(dst, src);
}


// or dst, src : 0B /r
void Assembler::orl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}
2150
// generic
// pop r: one-byte 58+rd form (pops 8 bytes on 64-bit, 4 on 32-bit).
void Assembler::pop(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0x58 | encode);
}

// Pop flags register off the stack (popf/popfq).
void Assembler::popf() {
  emit_byte(0x9D);
}

// pop [dst] : 8F /0
void Assembler::popl(Address dst) {
  // NOTE: this will adjust stack by 8byte on 64bits
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x8F);
  emit_operand(rax, dst); // rax encodes the /0 extension
}
2168
// Shared encoding prologue for the prefetch family: any address prefixes
// followed by the 0x0F escape byte.
void Assembler::prefetch_prefix(Address src) {
  prefix(src);
  emit_byte(0x0F);
}

// prefetchnta [src] : 0F 18 /0 -- non-temporal hint.
void Assembler::prefetchnta(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_byte(0x18);
  emit_operand(rax, src); // 0, src
}

// prefetchr [src] : 0F 0D /0 -- 3DNow! prefetch into all cache levels.
void Assembler::prefetchr(Address src) {
  NOT_LP64(assert(VM_Version::supports_3dnow(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_byte(0x0D);
  emit_operand(rax, src); // 0, src
}

// prefetcht0 [src] : 0F 18 /1 -- temporal data, all cache levels.
void Assembler::prefetcht0(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_byte(0x18);
  emit_operand(rcx, src); // 1, src
}

// prefetcht1 [src] : 0F 18 /2 -- L2 and higher.
void Assembler::prefetcht1(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_byte(0x18);
  emit_operand(rdx, src); // 2, src
}

// prefetcht2 [src] : 0F 18 /3 -- L3 and higher.
void Assembler::prefetcht2(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_byte(0x18);
  emit_operand(rbx, src); // 3, src
}

// prefetchw [src] : 0F 0D /1 -- 3DNow! prefetch with intent to write.
void Assembler::prefetchw(Address src) {
  NOT_LP64(assert(VM_Version::supports_3dnow(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_byte(0x0D);
  emit_operand(rcx, src); // 1, src
}
2221
// Emit a raw prefix byte (REX, operand-size, etc.) into the code stream.
void Assembler::prefix(Prefix p) {
  a_byte(p);
}
2225
// pshufd dst, src, mode : 66 [REX] 0F 70 /r ib -- shuffle packed doublewords.
// Note: 0x66 is a mandatory prefix and is deliberately emitted before any
// REX prefix produced by prefix_and_encode.
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));

  emit_byte(0x66);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x70);
  emit_byte(0xC0 | encode);
  emit_byte(mode & 0xFF);

}

// pshufd dst, [src], mode -- memory-operand form of the above.
void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));

  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x70);
  emit_operand(dst, src);
  emit_byte(mode & 0xFF);
}

// pshuflw dst, src, mode : F2 [REX] 0F 70 /r ib -- shuffle packed low words.
void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));

  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x70);
  emit_byte(0xC0 | encode);
  emit_byte(mode & 0xFF);
}

// pshuflw dst, [src], mode -- memory-operand form of the above.
void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));

  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst); // QQ new
  emit_byte(0x0F);
  emit_byte(0x70);
  emit_operand(dst, src);
  emit_byte(mode & 0xFF);
}
2276
2277 void Assembler::psrlq(XMMRegister dst, int shift) {
2278 // HMM Table D-1 says sse2 or mmx
2279 NOT_LP64(assert(VM_Version::supports_sse(), ""));
2280
2281 int encode = prefixq_and_encode(xmm2->encoding(), dst->encoding());
2282 emit_byte(0x66);
2283 emit_byte(0x0F);
2284 emit_byte(0x73);
2285 emit_byte(0xC0 | encode);
2286 emit_byte(shift);
2287 }
2288
// punpcklbw dst, src : 66 [REX] 0F 60 /r -- interleave low-order bytes.
void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66); // mandatory prefix, emitted before any REX
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x60);
  emit_byte(0xC0 | encode);
}
2297
// push imm32 : 68 id
void Assembler::push(int32_t imm32) {
  // in 64bits we push 64bits onto the stack but only
  // take a 32bit immediate
  emit_byte(0x68);
  emit_long(imm32);
}

// push r: one-byte 50+rd form.
void Assembler::push(Register src) {
  int encode = prefix_and_encode(src->encoding());

  emit_byte(0x50 | encode);
}

// Push flags register onto the stack (pushf/pushfq).
void Assembler::pushf() {
  emit_byte(0x9C);
}

// push [src] : FF /6
void Assembler::pushl(Address src) {
  // Note this will push 64bit on 64bit
  InstructionMark im(this);
  prefix(src);
  emit_byte(0xFF);
  emit_operand(rsi, src); // rsi encodes the /6 extension
}
2322
// pxor dst, [src] : 66 [REX] 0F EF /r -- bitwise xor of 128-bit operands.
void Assembler::pxor(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66); // mandatory prefix, emitted before any REX
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xEF);
  emit_operand(dst, src);
}
2332
2333 void Assembler::pxor(XMMRegister dst, XMMRegister src) {
2334 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2335 InstructionMark im(this);
2336 emit_byte(0x66);
2337 int encode = prefix_and_encode(dst->encoding(), src->encoding());
2338 emit_byte(0x0F);
2339 emit_byte(0xEF);
2340 emit_byte(0xC0 | encode);
2341 }
2342
// rcl dst, imm8 -- rotate through carry left. Uses the shorter D1 /2 form
// when the count is 1, otherwise C1 /2 ib.
void Assembler::rcll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_byte(0xD1);
    emit_byte(0xD0 | encode);
  } else {
    emit_byte(0xC1);
    emit_byte(0xD0 | encode);
    emit_byte(imm8);
  }
}
2355
// copies data from [esi] to [edi] using rcx pointer sized words
// generic
void Assembler::rep_mov() {
  emit_byte(0xF3); // REP prefix
  // MOVSQ
  LP64_ONLY(prefix(REX_W)); // REX.W turns MOVSD into MOVSQ on 64-bit
  emit_byte(0xA5);
}

// sets rcx pointer sized words with rax, value at [edi]
// generic
void Assembler::rep_set() { // rep_set
  emit_byte(0xF3); // REP prefix
  // STOSQ
  LP64_ONLY(prefix(REX_W));
  emit_byte(0xAB);
}

// scans rcx pointer sized words at [edi] for occurance of rax,
// generic
void Assembler::repne_scan() { // repne_scan
  emit_byte(0xF2); // REPNE prefix
  // SCASQ
  LP64_ONLY(prefix(REX_W));
  emit_byte(0xAF);
}

#ifdef _LP64
// scans rcx 4 byte words at [edi] for occurance of rax,
// generic
void Assembler::repne_scanl() { // repne_scan
  emit_byte(0xF2); // REPNE prefix
  // SCASL (no REX.W: 32-bit element scan even on 64-bit)
  emit_byte(0xAF);
}
#endif
2392
// Near return: plain C3 when no stack adjustment, else C2 iw which also
// pops imm16 bytes of arguments.
void Assembler::ret(int imm16) {
  if (imm16 == 0) {
    emit_byte(0xC3);
  } else {
    emit_byte(0xC2);
    emit_word(imm16);
  }
}

// sahf: store AH into flags. Guarded out on 64-bit, where this encoder is
// not supported by this assembler.
void Assembler::sahf() {
#ifdef _LP64
  // Not supported in 64bit mode
  ShouldNotReachHere();
#endif
  emit_byte(0x9E);
}
2409
// sar dst, imm8 -- arithmetic right shift. Shorter D1 /7 form for a count
// of 1, otherwise C1 /7 ib.
void Assembler::sarl(Register dst, int imm8) {
  int encode = prefix_and_encode(dst->encoding());
  assert(isShiftCount(imm8), "illegal shift count");
  if (imm8 == 1) {
    emit_byte(0xD1);
    emit_byte(0xF8 | encode);
  } else {
    emit_byte(0xC1);
    emit_byte(0xF8 | encode);
    emit_byte(imm8);
  }
}

// sar dst, cl : D3 /7 -- shift count taken from CL.
void Assembler::sarl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xF8 | encode);
}
2428
// sbb [dst], imm32 -- subtract with borrow; rbx encodes the /3 extension.
void Assembler::sbbl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

// sbb dst, imm32
void Assembler::sbbl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD8, dst, imm32);
}


// sbb dst, [src] : 1B /r
void Assembler::sbbl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x1B);
  emit_operand(dst, src);
}

// sbb dst, src : 1B /r
void Assembler::sbbl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}
2452
// setcc dst : 0F 90+cc /0 -- set byte register to 1 if condition cc holds,
// else 0. byteinst=true so a REX is forced where the byte form needs one.
void Assembler::setb(Condition cc, Register dst) {
  assert(0 <= cc && cc < 16, "illegal cc");
  int encode = prefix_and_encode(dst->encoding(), true);
  emit_byte(0x0F);
  emit_byte(0x90 | cc);
  emit_byte(0xC0 | encode);
}
2460
// shl dst, imm8 -- logical left shift. Shorter D1 /4 form for a count of 1,
// otherwise C1 /4 ib.
void Assembler::shll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1 ) {
    emit_byte(0xD1);
    emit_byte(0xE0 | encode);
  } else {
    emit_byte(0xC1);
    emit_byte(0xE0 | encode);
    emit_byte(imm8);
  }
}

// shl dst, cl : D3 /4 -- shift count taken from CL.
void Assembler::shll(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xE0 | encode);
}
2479
2480 void Assembler::shrl(Register dst, int imm8) {
2481 assert(isShiftCount(imm8), "illegal shift count");
2482 int encode = prefix_and_encode(dst->encoding());
2483 emit_byte(0xC1);
2484 emit_byte(0xE8 | encode);
2485 emit_byte(imm8);
2486 }
2487
// shr dst, cl : D3 /5 -- shift count taken from CL.
void Assembler::shrl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xE8 | encode);
}
2493
// copies a single word from [esi] to [edi]
// movs : A5 (no REP prefix).
void Assembler::smovl() {
  emit_byte(0xA5);
}
2498
// sqrtsd dst, src : F2 [REX] 0F 51 /r -- scalar double-precision sqrt.
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
  // HMM Table D-1 says sse2
  // NOT_LP64(assert(VM_Version::supports_sse(), ""));
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2); // mandatory prefix, emitted before any REX
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x51);
  emit_byte(0xC0 | encode);
}
2509
// stmxcsr [dst] : 0F AE /3 -- store the MXCSR control/status register.
void Assembler::stmxcsr( Address dst) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(as_Register(3), dst); // register 3 encodes the /3 extension
}
2518
// sub [dst], imm32 -- picks the sign-extended byte-immediate form (83 /5 ib)
// when imm32 fits in 8 bits, else the full 81 /5 id form.
void Assembler::subl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  if (is8bit(imm32)) {
    emit_byte(0x83);
    emit_operand(rbp, dst, 1); // rbp encodes /5; 1 byte of immediate follows
    emit_byte(imm32 & 0xFF);
  } else {
    emit_byte(0x81);
    emit_operand(rbp, dst, 4); // 4 bytes of immediate follow
    emit_long(imm32);
  }
}

// sub dst, imm32
void Assembler::subl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE8, dst, imm32);
}

// sub [dst], src : 29 /r
void Assembler::subl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_byte(0x29);
  emit_operand(src, dst);
}

// sub dst, [src] : 2B /r
void Assembler::subl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x2B);
  emit_operand(dst, src);
}

// sub dst, src : 2B /r
void Assembler::subl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}
2556
// subsd dst, src : F2 [REX] 0F 5C /r -- scalar double-precision subtract.
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2); // mandatory prefix, emitted before any REX
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5C);
  emit_byte(0xC0 | encode);
}

// subsd dst, [src] -- memory-operand form of the above.
void Assembler::subsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x5C);
  emit_operand(dst, src);
}

// subss dst, src : F3 [REX] 0F 5C /r -- scalar single-precision subtract.
void Assembler::subss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5C);
  emit_byte(0xC0 | encode);
}

// subss dst, [src] -- memory-operand form of the above.
void Assembler::subss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x5C);
  emit_operand(dst, src);
}
2594
// test dst8, imm8 -- byte-register form; byteinst=true forces a REX where
// the byte encoding requires one on 64-bit.
void Assembler::testb(Register dst, int imm8) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  (void) prefix_and_encode(dst->encoding(), true);
  emit_arith_b(0xF6, 0xC0, dst, imm8);
}

// test dst, imm32 -- uses the one-byte A9 short form when dst is rax/eax.
void Assembler::testl(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    emit_byte(0xA9);
  } else {
    encode = prefix_and_encode(encode);
    emit_byte(0xF7);
    emit_byte(0xC0 | encode);
  }
  emit_long(imm32);
}

// test dst, src : 85 /r
void Assembler::testl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

// test dst, [src] : 85 /r
void Assembler::testl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x85);
  emit_operand(dst, src);
}
2627
// ucomisd dst, [src] -- unordered double compare; same opcode as ucomiss
// with a 0x66 operand-size prefix in front.
void Assembler::ucomisd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  ucomiss(dst, src);
}

// ucomisd dst, src -- register form, delegates like the Address form.
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  ucomiss(dst, src);
}

// ucomiss dst, [src] : 0F 2E /r -- unordered single compare, sets EFLAGS.
void Assembler::ucomiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));

  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x2E);
  emit_operand(dst, src);
}

// ucomiss dst, src : 0F 2E /r
void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2E);
  emit_byte(0xC0 | encode);
}
2657
2658
// xadd [dst], src : 0F C1 /r -- exchange and add.
void Assembler::xaddl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0xC1);
  emit_operand(src, dst);
}

// xchg dst, [src] : 87 /r (implicitly locked when a memory operand is used).
void Assembler::xchgl(Register dst, Address src) { // xchg
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x87);
  emit_operand(dst, src);
}

// xchg dst, src : 87 /r
void Assembler::xchgl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x87);
  emit_byte(0xc0 | encode);
}
2679
// xor dst, imm32
void Assembler::xorl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF0, dst, imm32);
}

// xor dst, [src] : 33 /r
void Assembler::xorl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x33);
  emit_operand(dst, src);
}

// xor dst, src : 33 /r
void Assembler::xorl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}
2696
// xorpd dst, src -- packed-double xor; same opcode as xorps with a 0x66
// operand-size prefix in front.
void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  xorps(dst, src);
}

// xorpd dst, [src] : 66 [REX] 0F 57 /r
void Assembler::xorpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x57);
  emit_operand(dst, src);
}


// xorps dst, src : 0F 57 /r -- packed-single xor.
void Assembler::xorps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x57);
  emit_byte(0xC0 | encode);
}

// xorps dst, [src] : 0F 57 /r
void Assembler::xorps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x57);
  emit_operand(dst, src);
}
2730
#ifndef _LP64
// 32bit only pieces of the assembler

// cmp src1, imm32 with relocation info attached to the immediate (used for
// oop/address literals that GC or patching may need to update).
void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT
  InstructionMark im(this);
  emit_byte(0x81);
  emit_byte(0xF8 | src1->encoding()); // /7 extension on the register
  emit_data(imm32, rspec, 0);
}

// cmp [src1], imm32 with relocation info attached to the immediate.
void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs
  InstructionMark im(this);
  emit_byte(0x81);
  emit_operand(rdi, src1); // rdi encodes the /7 extension
  emit_data(imm32, rspec, 0);
}

// The 64-bit (32bit platform) cmpxchg compares the value at adr with the contents of rdx:rax,
// and stores rcx:rbx into adr if so; otherwise, the value at adr is loaded
// into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise.
void Assembler::cmpxchg8(Address adr) {
  InstructionMark im(this);
  emit_byte(0x0F);
  emit_byte(0xc7);
  emit_operand(rcx, adr); // rcx encodes the /1 extension (cmpxchg8b)
}

// dec r32: one-byte 48+rd form (only valid on 32-bit; 48-4F are REX on 64-bit).
void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  emit_byte(0x48 | dst->encoding());
}

#endif // _LP64
2766
// 64bit typically doesn't use the x87 but needs to for the trig funcs

// fabs: ST(0) <- |ST(0)|
void Assembler::fabs() {
  emit_byte(0xD9);
  emit_byte(0xE1);
}

// fadd ST(0), ST(i)
void Assembler::fadd(int i) {
  emit_farith(0xD8, 0xC0, i);
}

// fadd double from memory: DC /0
void Assembler::fadd_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rax, src);
}

// fadd float from memory: D8 /0
void Assembler::fadd_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rax, src);
}

// fadd ST(i), ST(0)  ("a" = accumulate into ST(i))
void Assembler::fadda(int i) {
  emit_farith(0xDC, 0xC0, i);
}

// faddp: ST(i) <- ST(i) + ST(0), then pop
void Assembler::faddp(int i) {
  emit_farith(0xDE, 0xC0, i);
}

// fchs: ST(0) <- -ST(0)
void Assembler::fchs() {
  emit_byte(0xD9);
  emit_byte(0xE0);
}

// fcom: compare ST(0) with ST(i)
void Assembler::fcom(int i) {
  emit_farith(0xD8, 0xD0, i);
}

// fcomp: compare ST(0) with ST(i), then pop
void Assembler::fcomp(int i) {
  emit_farith(0xD8, 0xD8, i);
}

// fcomp double from memory: DC /3, then pop
void Assembler::fcomp_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rbx, src);
}

// fcomp float from memory: D8 /3, then pop
void Assembler::fcomp_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rbx, src);
}

// fcompp: compare ST(0) with ST(1), then pop both
void Assembler::fcompp() {
  emit_byte(0xDE);
  emit_byte(0xD9);
}
2827
// fcos: ST(0) <- cos(ST(0))
void Assembler::fcos() {
  emit_byte(0xD9);
  emit_byte(0xFF);
}

// fdecstp: decrement the x87 stack-top pointer
void Assembler::fdecstp() {
  emit_byte(0xD9);
  emit_byte(0xF6);
}

// fdiv ST(0), ST(i)
void Assembler::fdiv(int i) {
  emit_farith(0xD8, 0xF0, i);
}

// fdiv double from memory: DC /6
void Assembler::fdiv_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rsi, src);
}

// fdiv float from memory: D8 /6
void Assembler::fdiv_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rsi, src);
}

// fdiv ST(i), ST(0)  ("a" = accumulate into ST(i))
void Assembler::fdiva(int i) {
  emit_farith(0xDC, 0xF8, i);
}

// Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
// is erroneous for some of the floating-point instructions below.

void Assembler::fdivp(int i) {
  emit_farith(0xDE, 0xF8, i); // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
}

// fdivr: reversed divide, ST(0) as divisor
void Assembler::fdivr(int i) {
  emit_farith(0xD8, 0xF8, i);
}

// fdivr double from memory: DC /7
void Assembler::fdivr_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rdi, src);
}

// fdivr float from memory: D8 /7
void Assembler::fdivr_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rdi, src);
}

// fdivr ST(i), ST(0)
void Assembler::fdivra(int i) {
  emit_farith(0xDC, 0xF0, i);
}

void Assembler::fdivrp(int i) {
  emit_farith(0xDE, 0xF0, i); // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
}

// ffree: mark x87 register i as empty
void Assembler::ffree(int i) {
  emit_farith(0xDD, 0xC0, i);
}
2892
// fild 64-bit integer from memory: DF /5
void Assembler::fild_d(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDF);
  emit_operand32(rbp, adr);
}

// fild 32-bit integer from memory: DB /0
void Assembler::fild_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDB);
  emit_operand32(rax, adr);
}

// fincstp: increment the x87 stack-top pointer
void Assembler::fincstp() {
  emit_byte(0xD9);
  emit_byte(0xF7);
}

// finit: FWAIT (0x9B) + FNINIT -- the waiting initialize form
void Assembler::finit() {
  emit_byte(0x9B);
  emit_byte(0xDB);
  emit_byte(0xE3);
}

// fist: store ST(0) as 32-bit integer: DB /2
void Assembler::fist_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDB);
  emit_operand32(rdx, adr);
}

// fistp: store ST(0) as 64-bit integer and pop: DF /7
void Assembler::fistp_d(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDF);
  emit_operand32(rdi, adr);
}

// fistp: store ST(0) as 32-bit integer and pop: DB /3
void Assembler::fistp_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDB);
  emit_operand32(rbx, adr);
}

// fld1: push +1.0
void Assembler::fld1() {
  emit_byte(0xD9);
  emit_byte(0xE8);
}

// fld double from memory: DD /0
void Assembler::fld_d(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDD);
  emit_operand32(rax, adr);
}

// fld float from memory: D9 /0
void Assembler::fld_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xD9);
  emit_operand32(rax, adr);
}


// fld ST(index): push a copy of a stack register
void Assembler::fld_s(int index) {
  emit_farith(0xD9, 0xC0, index);
}

// fld 80-bit extended double from memory: DB /5
void Assembler::fld_x(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDB);
  emit_operand32(rbp, adr);
}

// fldcw: load x87 control word from memory: D9 /5
void Assembler::fldcw(Address src) {
  InstructionMark im(this);
  emit_byte(0xd9);
  emit_operand32(rbp, src);
}

// fldenv: load x87 environment from memory: D9 /4
void Assembler::fldenv(Address src) {
  InstructionMark im(this);
  emit_byte(0xD9);
  emit_operand32(rsp, src);
}

// fldlg2: push log10(2)
void Assembler::fldlg2() {
  emit_byte(0xD9);
  emit_byte(0xEC);
}

// fldln2: push ln(2)
void Assembler::fldln2() {
  emit_byte(0xD9);
  emit_byte(0xED);
}

// fldz: push +0.0
void Assembler::fldz() {
  emit_byte(0xD9);
  emit_byte(0xEE);
}
2988
// ln(ST(0)) via fyl2x: ln(x) = ln(2) * log2(x).
// Pushes ln(2), swaps so ST(0)=x, ST(1)=ln(2), then fyl2x computes
// ST(1) * log2(ST(0)) and pops.
void Assembler::flog() {
  fldln2();
  fxch();
  fyl2x();
}

// log10(ST(0)) via fyl2x, same scheme with log10(2) as the multiplier.
void Assembler::flog10() {
  fldlg2();
  fxch();
  fyl2x();
}

// fmul ST(0), ST(i)
void Assembler::fmul(int i) {
  emit_farith(0xD8, 0xC8, i);
}

// fmul double from memory: DC /1
void Assembler::fmul_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rcx, src);
}

// fmul float from memory: D8 /1
void Assembler::fmul_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rcx, src);
}

// fmul ST(i), ST(0)  ("a" = accumulate into ST(i))
void Assembler::fmula(int i) {
  emit_farith(0xDC, 0xC8, i);
}

// fmulp: ST(i) <- ST(i) * ST(0), then pop
void Assembler::fmulp(int i) {
  emit_farith(0xDE, 0xC8, i);
}
3024
// fnsave: store full FPU state to memory (DD /6) and re-initialize
void Assembler::fnsave(Address dst) {
  InstructionMark im(this);
  emit_byte(0xDD);
  emit_operand32(rsi, dst);
}

// NOTE(review): despite the "fnstcw" (no-wait) name this emits FWAIT (0x9B)
// before D9 /7, i.e. the waiting fstcw form -- appears intentional here.
void Assembler::fnstcw(Address src) {
  InstructionMark im(this);
  emit_byte(0x9B);
  emit_byte(0xD9);
  emit_operand32(rdi, src);
}

// fnstsw ax: store x87 status word into AX (DF E0)
void Assembler::fnstsw_ax() {
  emit_byte(0xdF);
  emit_byte(0xE0);
}

// fprem: partial remainder of ST(0) / ST(1) (truncating)
void Assembler::fprem() {
  emit_byte(0xD9);
  emit_byte(0xF8);
}

// fprem1: IEEE partial remainder of ST(0) / ST(1)
void Assembler::fprem1() {
  emit_byte(0xD9);
  emit_byte(0xF5);
}
3052
// frstor: restore FPU state saved by fnsave: DD /4
void Assembler::frstor(Address src) {
  InstructionMark im(this);
  emit_byte(0xDD);
  emit_operand32(rsp, src);
}

// fsin: ST(0) <- sin(ST(0))
void Assembler::fsin() {
  emit_byte(0xD9);
  emit_byte(0xFE);
}

// fsqrt: ST(0) <- sqrt(ST(0))
void Assembler::fsqrt() {
  emit_byte(0xD9);
  emit_byte(0xFA);
}

// fst: store ST(0) as double to memory: DD /2 (no pop)
void Assembler::fst_d(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDD);
  emit_operand32(rdx, adr);
}

// fst: store ST(0) as float to memory: D9 /2 (no pop)
void Assembler::fst_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xD9);
  emit_operand32(rdx, adr);
}

// fstp: store ST(0) as double to memory and pop: DD /3
void Assembler::fstp_d(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDD);
  emit_operand32(rbx, adr);
}

// fstp ST(index): copy ST(0) into a stack register and pop
void Assembler::fstp_d(int index) {
  emit_farith(0xDD, 0xD8, index);
}

// fstp: store ST(0) as float to memory and pop: D9 /3
void Assembler::fstp_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xD9);
  emit_operand32(rbx, adr);
}

// fstp: store ST(0) as 80-bit extended double and pop: DB /7
void Assembler::fstp_x(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDB);
  emit_operand32(rdi, adr);
}
3102
// fsub ST(0), ST(i)
void Assembler::fsub(int i) {
  emit_farith(0xD8, 0xE0, i);
}

// fsub double from memory: DC /4
void Assembler::fsub_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rsp, src);
}

// fsub float from memory: D8 /4
void Assembler::fsub_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rsp, src);
}

// fsub ST(i), ST(0)  ("a" = accumulate into ST(i))
void Assembler::fsuba(int i) {
  emit_farith(0xDC, 0xE8, i);
}

void Assembler::fsubp(int i) {
  emit_farith(0xDE, 0xE8, i); // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
}

// fsubr: reversed subtract, ST(0) as subtrahend
void Assembler::fsubr(int i) {
  emit_farith(0xD8, 0xE8, i);
}

// fsubr double from memory: DC /5
void Assembler::fsubr_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rbp, src);
}

// fsubr float from memory: D8 /5
void Assembler::fsubr_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rbp, src);
}

// fsubr ST(i), ST(0)
void Assembler::fsubra(int i) {
  emit_farith(0xDC, 0xE0, i);
}

void Assembler::fsubrp(int i) {
  emit_farith(0xDE, 0xE0, i); // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
}
3150
// tan(ST(0)): fptan (D9 F2) leaves tan on the stack with a 1.0 pushed on
// top; the fstp st(0) (DD D8) pops that 1.0 so only the result remains.
void Assembler::ftan() {
  emit_byte(0xD9);
  emit_byte(0xF2);
  emit_byte(0xDD);
  emit_byte(0xD8);
}

// ftst: compare ST(0) with 0.0
void Assembler::ftst() {
  emit_byte(0xD9);
  emit_byte(0xE4);
}

// fucomi: unordered compare ST(0) with ST(i), result in EFLAGS
void Assembler::fucomi(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDB, 0xE8, i);
}

// fucomip: as fucomi, then pop
void Assembler::fucomip(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDF, 0xE8, i);
}

// fwait: wait for pending x87 exceptions
void Assembler::fwait() {
  emit_byte(0x9B);
}

// fxch: exchange ST(0) with ST(i)
void Assembler::fxch(int i) {
  emit_farith(0xD9, 0xC8, i);
}

// fyl2x: ST(1) <- ST(1) * log2(ST(0)), then pop
void Assembler::fyl2x() {
  emit_byte(0xD9);
  emit_byte(0xF1);
}
3187
// mov dst, imm32 (B8+rd id) with relocation info attached to the immediate
// so the literal can be found and patched later.
void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec, int format) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_data((int)imm32, rspec, format);
}
3194
#ifndef _LP64

// inc r32: one-byte 40+rd form (only valid on 32-bit; 40-4F are REX on 64-bit).
void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  emit_byte(0x40 | dst->encoding());
}

// On 32-bit, lea is just the 32-bit leal.
void Assembler::lea(Register dst, Address src) {
  leal(dst, src);
}

// mov [dst], imm32 (C7 /0 id) with relocation info attached to the immediate.
void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_byte(0xC7);
  emit_operand(rax, dst);
  emit_data((int)imm32, rspec, 0);
}


void Assembler::popa() { // 32bit
  emit_byte(0x61);
}

// push imm32 (68 id) with relocation info attached to the immediate.
void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_byte(0x68);
  emit_data(imm32, rspec, 0);
}

void Assembler::pusha() { // 32bit
  emit_byte(0x60);
}

// setnz dst8 : 0F 95 -- set byte register to 1 if ZF clear.
void Assembler::set_byte_if_not_zero(Register dst) {
  emit_byte(0x0F);
  emit_byte(0x95);
  emit_byte(0xE0 | dst->encoding());
}

// shld dst, src, cl : 0F A5 /r -- double-precision shift left by CL.
void Assembler::shldl(Register dst, Register src) {
  emit_byte(0x0F);
  emit_byte(0xA5);
  emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
}

// shrd dst, src, cl : 0F AD /r -- double-precision shift right by CL.
void Assembler::shrdl(Register dst, Register src) {
  emit_byte(0x0F);
  emit_byte(0xAD);
  emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
}
3245
3246 #else // LP64
3247
3248 // 64bit only pieces of the assembler
3249 // This should only be used by 64bit instructions that can use rip-relative
3250 // it cannot be used by instructions that want an immediate value.
3251
// Decide whether the target of an AddressLiteral can always be addressed
// with a 32-bit (rip-relative) displacement, no matter where in the code
// cache the code being assembled finally lands. Returns false when the
// caller must materialize a full 64-bit literal instead.
bool Assembler::reachable(AddressLiteral adr) {
  int64_t disp;
  // None will force a 64bit literal to the code stream. Likely a placeholder
  // for something that will be patched later and we need to certain it will
  // always be reachable.
  if (adr.reloc() == relocInfo::none) {
    return false;
  }
  if (adr.reloc() == relocInfo::internal_word_type) {
    // This should be rip relative and easily reachable.
    return true;
  }
  if (adr.reloc() == relocInfo::virtual_call_type ||
      adr.reloc() == relocInfo::opt_virtual_call_type ||
      adr.reloc() == relocInfo::static_call_type ||
      adr.reloc() == relocInfo::static_stub_type ) {
    // This should be rip relative within the code cache and easily
    // reachable until we get huge code caches. (At which point
    // ic code is going to have issues).
    return true;
  }
  if (adr.reloc() != relocInfo::external_word_type &&
      adr.reloc() != relocInfo::poll_return_type &&  // these are really external_word but need special
      adr.reloc() != relocInfo::poll_type &&         // relocs to identify them
      adr.reloc() != relocInfo::runtime_call_type ) {
    return false;
  }

  // Stress the correction code
  if (ForceUnreachable) {
    // Must be runtimecall reloc, see if it is in the codecache
    // Flipping stuff in the codecache to be unreachable causes issues
    // with things like inline caches where the additional instructions
    // are not handled.
    if (CodeCache::find_blob(adr._target) == NULL) {
      return false;
    }
  }
  // For external_word_type/runtime_call_type if it is reachable from where we
  // are now (possibly a temp buffer) and where we might end up
  // anywhere in the codeCache then we are always reachable.
  // This would have to change if we ever save/restore shared code
  // to be more pessimistic.

  // Check reachability from both extremes of the code cache, since the final
  // placement of this code is not known yet.
  disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;
  disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;

  disp = (int64_t)adr._target - ((int64_t)_code_pos + sizeof(int));

  // Because rip relative is a disp + address_of_next_instruction and we
  // don't know the value of address_of_next_instruction we apply a fudge factor
  // to make sure we will be ok no matter the size of the instruction we get placed into.
  // We don't have to fudge the checks above here because they are already worst case.

  // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp , 4-byte literal
  // + 4 because better safe than sorry.
  const int fudge = 12 + 4;
  if (disp < 0) {
    disp -= fudge;
  } else {
    disp += fudge;
  }
  return is_simm32(disp);
}
3318
3319 void Assembler::emit_data64(jlong data,
3320 relocInfo::relocType rtype,
3321 int format) {
3322 if (rtype == relocInfo::none) {
3323 emit_long64(data);
3324 } else {
3325 emit_data64(data, Relocation::spec_simple(rtype), format);
3326 }
3327 }
3328
// Emit an eight-byte datum with relocation info.  The relocation is recorded
// at the enclosing instruction's start (inst_mark), so the caller must be
// inside an InstructionMark scope.
void Assembler::emit_data64(jlong data,
                            RelocationHolder const& rspec,
                            int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(imm_operand == format, "must be immediate");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  // Do not use AbstractAssembler::relocate, which is not intended for
  // embedded words. Instead, relocate to the enclosing instruction.
  code_section()->relocate(inst_mark(), rspec, format);
#ifdef ASSERT
  check_relocation(rspec, format);
#endif
  emit_long64(data);
}
3343
3344 int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
3345 if (reg_enc >= 8) {
3346 prefix(REX_B);
3347 reg_enc -= 8;
3348 } else if (byteinst && reg_enc >= 4) {
3349 prefix(REX);
3350 }
3351 return reg_enc;
3352 }
3353
3354 int Assembler::prefixq_and_encode(int reg_enc) {
3355 if (reg_enc < 8) {
3356 prefix(REX_W);
3357 } else {
3358 prefix(REX_WB);
3359 reg_enc -= 8;
3360 }
3361 return reg_enc;
3362 }
3363
3364 int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) {
3365 if (dst_enc < 8) {
3366 if (src_enc >= 8) {
3367 prefix(REX_B);
3368 src_enc -= 8;
3369 } else if (byteinst && src_enc >= 4) {
3370 prefix(REX);
3371 }
3372 } else {
3373 if (src_enc < 8) {
3374 prefix(REX_R);
3375 } else {
3376 prefix(REX_RB);
3377 src_enc -= 8;
3378 }
3379 dst_enc -= 8;
3380 }
3381 return dst_enc << 3 | src_enc;
3382 }
3383
3384 int Assembler::prefixq_and_encode(int dst_enc, int src_enc) {
3385 if (dst_enc < 8) {
3386 if (src_enc < 8) {
3387 prefix(REX_W);
3388 } else {
3389 prefix(REX_WB);
3390 src_enc -= 8;
3391 }
3392 } else {
3393 if (src_enc < 8) {
3394 prefix(REX_WR);
3395 } else {
3396 prefix(REX_WRB);
3397 src_enc -= 8;
3398 }
3399 dst_enc -= 8;
3400 }
3401 return dst_enc << 3 | src_enc;
3402 }
3403
3404 void Assembler::prefix(Register reg) {
3405 if (reg->encoding() >= 8) {
3406 prefix(REX_B);
3407 }
3408 }
3409
3410 void Assembler::prefix(Address adr) {
3411 if (adr.base_needs_rex()) {
3412 if (adr.index_needs_rex()) {
3413 prefix(REX_XB);
3414 } else {
3415 prefix(REX_B);
3416 }
3417 } else {
3418 if (adr.index_needs_rex()) {
3419 prefix(REX_X);
3420 }
3421 }
3422 }
3423
3424 void Assembler::prefixq(Address adr) {
3425 if (adr.base_needs_rex()) {
3426 if (adr.index_needs_rex()) {
3427 prefix(REX_WXB);
3428 } else {
3429 prefix(REX_WB);
3430 }
3431 } else {
3432 if (adr.index_needs_rex()) {
3433 prefix(REX_WX);
3434 } else {
3435 prefix(REX_W);
3436 }
3437 }
3438 }
3439
3440
3441 void Assembler::prefix(Address adr, Register reg, bool byteinst) {
3442 if (reg->encoding() < 8) {
3443 if (adr.base_needs_rex()) {
3444 if (adr.index_needs_rex()) {
3445 prefix(REX_XB);
3446 } else {
3447 prefix(REX_B);
3448 }
3449 } else {
3450 if (adr.index_needs_rex()) {
3451 prefix(REX_X);
3452 } else if (reg->encoding() >= 4 ) {
3453 prefix(REX);
3454 }
3455 }
3456 } else {
3457 if (adr.base_needs_rex()) {
3458 if (adr.index_needs_rex()) {
3459 prefix(REX_RXB);
3460 } else {
3461 prefix(REX_RB);
3462 }
3463 } else {
3464 if (adr.index_needs_rex()) {
3465 prefix(REX_RX);
3466 } else {
3467 prefix(REX_R);
3468 }
3469 }
3470 }
3471 }
3472
3473 void Assembler::prefixq(Address adr, Register src) {
3474 if (src->encoding() < 8) {
3475 if (adr.base_needs_rex()) {
3476 if (adr.index_needs_rex()) {
3477 prefix(REX_WXB);
3478 } else {
3479 prefix(REX_WB);
3480 }
3481 } else {
3482 if (adr.index_needs_rex()) {
3483 prefix(REX_WX);
3484 } else {
3485 prefix(REX_W);
3486 }
3487 }
3488 } else {
3489 if (adr.base_needs_rex()) {
3490 if (adr.index_needs_rex()) {
3491 prefix(REX_WRXB);
3492 } else {
3493 prefix(REX_WRB);
3494 }
3495 } else {
3496 if (adr.index_needs_rex()) {
3497 prefix(REX_WRX);
3498 } else {
3499 prefix(REX_WR);
3500 }
3501 }
3502 }
3503 }
3504
3505 void Assembler::prefix(Address adr, XMMRegister reg) {
3506 if (reg->encoding() < 8) {
3507 if (adr.base_needs_rex()) {
3508 if (adr.index_needs_rex()) {
3509 prefix(REX_XB);
3510 } else {
3511 prefix(REX_B);
3512 }
3513 } else {
3514 if (adr.index_needs_rex()) {
3515 prefix(REX_X);
3516 }
3517 }
3518 } else {
3519 if (adr.base_needs_rex()) {
3520 if (adr.index_needs_rex()) {
3521 prefix(REX_RXB);
3522 } else {
3523 prefix(REX_RB);
3524 }
3525 } else {
3526 if (adr.index_needs_rex()) {
3527 prefix(REX_RX);
3528 } else {
3529 prefix(REX_R);
3530 }
3531 }
3532 }
3533 }
3534
// ADC: dst <- dst + imm32 + CF (64-bit; imm32 is sign-extended).
void Assembler::adcq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD0, dst, imm32);
}
3539
// ADC: dst <- dst + [src] + CF (64-bit); opcode 0x13 is the RM form.
void Assembler::adcq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x13);
  emit_operand(dst, src);
}
3546
3547 void Assembler::adcq(Register dst, Register src) {
3548 (int) prefixq_and_encode(dst->encoding(), src->encoding());
3549 emit_arith(0x13, 0xC0, dst, src);
3550 }
3551
// ADD: [dst] <- [dst] + imm32 (64-bit); rax's encoding 0 selects the /0
// (ADD) sub-opcode of the immediate group.
void Assembler::addq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rax, dst,imm32);
}
3557
// ADD: [dst] <- [dst] + src (64-bit); opcode 0x01 is the MR form.
void Assembler::addq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_byte(0x01);
  emit_operand(src, dst);
}
3564
// ADD: dst <- dst + imm32 (64-bit; imm32 is sign-extended).
void Assembler::addq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC0, dst, imm32);
}
3569
// ADD: dst <- dst + [src] (64-bit); opcode 0x03 is the RM form.
void Assembler::addq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x03);
  emit_operand(dst, src);
}
3576
// ADD: dst <- dst + src (64-bit register-register form).
void Assembler::addq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}
3581
// AND: dst <- dst & imm32 (64-bit; imm32 is sign-extended).
void Assembler::andq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE0, dst, imm32);
}
3586
// AND: dst <- dst & [src] (64-bit); opcode 0x23 is the RM form.
void Assembler::andq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x23);
  emit_operand(dst, src);
}
3593
3594 void Assembler::andq(Register dst, Register src) {
3595 (int) prefixq_and_encode(dst->encoding(), src->encoding());
3596 emit_arith(0x23, 0xC0, dst, src);
3597 }
3598
// BSWAP: reverse the byte order of the full 64-bit register (0F C8+rd).
void Assembler::bswapq(Register reg) {
  int encode = prefixq_and_encode(reg->encoding());
  emit_byte(0x0F);
  emit_byte(0xC8 | encode);
}
3604
// CQO (REX.W 99): sign-extend rax into rdx:rax, e.g. before idivq.
void Assembler::cdqq() {
  prefix(REX_W);
  emit_byte(0x99);
}
3609
// CLFLUSH (0F AE /7): flush the cache line containing adr.  The /7
// sub-opcode is selected by passing rdi (encoding 7) to emit_operand;
// no REX.W is needed, hence the 32-bit prefix() form.
void Assembler::clflush(Address adr) {
  prefix(adr);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(rdi, adr);
}
3616
// CMOVcc: dst <- src when condition cc holds (64-bit).  The condition code
// is folded into the second opcode byte (0F 40+cc).
void Assembler::cmovq(Condition cc, Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x40 | cc);
  emit_byte(0xC0 | encode);
}
3623
// CMOVcc: dst <- [src] when condition cc holds (64-bit, memory form).
void Assembler::cmovq(Condition cc, Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x0F);
  emit_byte(0x40 | cc);
  emit_operand(dst, src);
}
3631
// CMP: set flags for [dst] - imm32 (64-bit); 0x81 with rdi (encoding 7)
// selects the /7 (CMP) sub-opcode of the immediate group.
void Assembler::cmpq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0x81);
  emit_operand(rdi, dst, 4);
  emit_long(imm32);
}
3639
// CMP: set flags for dst - imm32 (64-bit; imm32 is sign-extended).
void Assembler::cmpq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xF8, dst, imm32);
}
3644
3645 void Assembler::cmpq(Address dst, Register src) {
3646 InstructionMark im(this);
3647 prefixq(dst, src);
3648 emit_byte(0x3B);
3649 emit_operand(src, dst);
3650 }
3651
// CMP: set flags for dst - src (64-bit register-register form).
void Assembler::cmpq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}
3656
// CMP: set flags for dst - [src] (64-bit); opcode 0x3B is the RM form.
void Assembler::cmpq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x3B);
  emit_operand(dst, src);
}
3663
// CMPXCHG (0F B1): if rax == [adr] then [adr] <- reg else rax <- [adr].
// Note: no lock prefix is emitted here; callers that need atomicity emit
// lock() themselves.
void Assembler::cmpxchgq(Register reg, Address adr) {
  InstructionMark im(this);
  prefixq(adr, reg);
  emit_byte(0x0F);
  emit_byte(0xB1);
  emit_operand(reg, adr);
}
3671
// CVTSI2SD (F2 REX.W 0F 2A): convert the signed 64-bit integer in src to a
// double in dst.  The F2 mandatory prefix must precede the REX prefix.
void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}
3680
// CVTSI2SS (F3 REX.W 0F 2A): convert the signed 64-bit integer in src to a
// float in dst.
void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}
3689
// CVTTSD2SI (F2 REX.W 0F 2C): truncating convert of the double in src to a
// signed 64-bit integer in dst.
void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}
3698
// CVTTSS2SI (F3 REX.W 0F 2C): truncating convert of the float in src to a
// signed 64-bit integer in dst.
void Assembler::cvttss2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}
3707
// DEC (32-bit): dst <- dst - 1, via FF /1.
void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xC8 | encode);
}
3715
// DEC (64-bit): dst <- dst - 1, via REX.W FF /1.
void Assembler::decq(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xC8 | encode);
}
3723
// DEC (64-bit memory): [dst] <- [dst] - 1; rcx (encoding 1) selects /1.
void Assembler::decq(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0xFF);
  emit_operand(rcx, dst);
}
3731
// FXRSTOR (0F AE /1): restore x87/MMX/SSE state from the 512-byte area at
// src.  prefixq emits REX.W, presumably selecting the 64-bit FXRSTOR64
// form -- confirm against the ISA manual if this ever changes.
void Assembler::fxrstor(Address src) {
  prefixq(src);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(as_Register(1), src);
}
3738
// FXSAVE (0F AE /0): save x87/MMX/SSE state into the 512-byte area at dst.
// prefixq emits REX.W, presumably selecting the 64-bit FXSAVE64 form.
void Assembler::fxsave(Address dst) {
  prefixq(dst);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(as_Register(0), dst);
}
3745
// IDIV (REX.W F7 /7): signed divide rdx:rax by src; quotient in rax,
// remainder in rdx.
void Assembler::idivq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_byte(0xF7);
  emit_byte(0xF8 | encode);
}
3751
// IMUL (0F AF): dst <- dst * src (64-bit, two-operand form).
void Assembler::imulq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xAF);
  emit_byte(0xC0 | encode);
}
3758
// IMUL (three-operand form): dst <- src * value (64-bit).  Uses the
// one-byte sign-extended immediate encoding (0x6B) when value fits in
// eight bits, otherwise the four-byte immediate encoding (0x69).
void Assembler::imulq(Register dst, Register src, int value) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_byte(0x6B);
    emit_byte(0xC0 | encode);
    emit_byte(value);
  } else {
    emit_byte(0x69);
    emit_byte(0xC0 | encode);
    emit_long(value);
  }
}
3771
// INC (32-bit): dst <- dst + 1, via FF /0.
void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xC0 | encode);
}
3779
// INC (64-bit): dst <- dst + 1, via REX.W FF /0.
void Assembler::incq(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xC0 | encode);
}
3787
// INC (64-bit memory): [dst] <- [dst] + 1; rax (encoding 0) selects /0.
void Assembler::incq(Address dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0xFF);
  emit_operand(rax, dst);
}
3795
// Width-neutral lea: on LP64 this is simply the 64-bit leaq.
void Assembler::lea(Register dst, Address src) {
  leaq(dst, src);
}
3799
// LEA (REX.W 8D): dst <- effective address of src (no memory access).
void Assembler::leaq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x8D);
  emit_operand(dst, src);
}
3806
// MOV (REX.W B8+rd): load a full 64-bit immediate into dst, with no
// relocation info attached.
void Assembler::mov64(Register dst, int64_t imm64) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_long64(imm64);
}
3813
// Like mov64, but records relocation info (rspec) for the embedded 64-bit
// literal; emit_data64 requires the enclosing InstructionMark.
void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_data64(imm64, rspec);
}
3820
// MOVQ xmm, r64 (66 REX.W 0F 6E): copy a 64-bit GPR into the low quadword
// of an XMM register.
void Assembler::movdq(XMMRegister dst, Register src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2() || VM_Version::supports_mmx(), ""));
  emit_byte(0x66);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x6E);
  emit_byte(0xC0 | encode);
}
3830
// MOVQ r64, xmm (66 REX.W 0F 7E): copy the low quadword of an XMM register
// into a 64-bit GPR.
void Assembler::movdq(Register dst, XMMRegister src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2() || VM_Version::supports_mmx(), ""));
  emit_byte(0x66);
  // swap src/dst to get correct prefix
  int encode = prefixq_and_encode(src->encoding(), dst->encoding());
  emit_byte(0x0F);
  emit_byte(0x7E);
  emit_byte(0xC0 | encode);
}
3841
// MOV: dst <- src (64-bit register-register, RM form 0x8B).
void Assembler::movq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x8B);
  emit_byte(0xC0 | encode);
}
3847
// MOV: dst <- [src] (64-bit load, RM form 0x8B).
void Assembler::movq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x8B);
  emit_operand(dst, src);
}
3854
// MOV: [dst] <- src (64-bit store, MR form 0x89).
void Assembler::movq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_byte(0x89);
  emit_operand(src, dst);
}
3861
// Intended: dst <- sign-extended imm32.  Fenced off with ShouldNotReachHere
// because the observed encodings are wrong (see dbx output below).
// NOTE(review): the emission also looks structurally wrong -- 0xC7 takes a
// ModRM byte after the opcode, so this should be emit_byte(0xC7);
// emit_byte(0xC0 | encode); rather than OR-ing encode into the opcode.
// Confirm against the ISA manual before ever enabling this.
void Assembler::movslq(Register dst, int32_t imm32) {
  // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx)
  // and movslq(r8, 3); as movl $0x0000000048000000,(%rbx)
  // as a result we shouldn't use until tested at runtime...
  ShouldNotReachHere();
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xC7 | encode);
  emit_long(imm32);
}
3872
// Store imm32 sign-extended to 64 bits at [dst] (REX.W C7 /0, i.e.
// MOV r/m64, imm32); rax (encoding 0) selects the /0 sub-opcode.
void Assembler::movslq(Address dst, int32_t imm32) {
  assert(is_simm32(imm32), "lost bits");
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0xC7);
  emit_operand(rax, dst, 4);
  emit_long(imm32);
}
3881
// MOVSXD (REX.W 63): dst <- sign-extended 32-bit load from [src].
void Assembler::movslq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x63);
  emit_operand(dst, src);
}
3888
// MOVSXD (REX.W 63): dst <- sign-extended low 32 bits of src.
void Assembler::movslq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x63);
  emit_byte(0xC0 | encode);
}
3894
// NEG (REX.W F7 /3): dst <- -dst (two's complement negate).
void Assembler::negq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xF7);
  emit_byte(0xD8 | encode);
}
3900
// NOT (REX.W F7 /2): dst <- ~dst (bitwise complement).
void Assembler::notq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xF7);
  emit_byte(0xD0 | encode);
}
3906
// OR: [dst] <- [dst] | imm32 (64-bit); rcx (encoding 1) selects the /1
// (OR) sub-opcode of the immediate group.
void Assembler::orq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0x81);
  emit_operand(rcx, dst, 4);
  emit_long(imm32);
}
3914
// OR: dst <- dst | imm32 (64-bit; imm32 is sign-extended).
void Assembler::orq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC8, dst, imm32);
}
3919
// OR: dst <- dst | [src] (64-bit); opcode 0x0B is the RM form.
void Assembler::orq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x0B);
  emit_operand(dst, src);
}
3926
// OR: dst <- dst | src (64-bit register-register form).
void Assembler::orq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}
3931
// Restore all GPRs saved by pusha() below, loading them from the 16-slot
// frame in the same layout pusha() wrote (slot 11, the saved rsp, is
// deliberately skipped; rsp is restored by the final addq).
void Assembler::popa() { // 64bit
  movq(r15, Address(rsp, 0));
  movq(r14, Address(rsp, wordSize));
  movq(r13, Address(rsp, 2 * wordSize));
  movq(r12, Address(rsp, 3 * wordSize));
  movq(r11, Address(rsp, 4 * wordSize));
  movq(r10, Address(rsp, 5 * wordSize));
  movq(r9,  Address(rsp, 6 * wordSize));
  movq(r8,  Address(rsp, 7 * wordSize));
  movq(rdi, Address(rsp, 8 * wordSize));
  movq(rsi, Address(rsp, 9 * wordSize));
  movq(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  movq(rbx, Address(rsp, 12 * wordSize));
  movq(rdx, Address(rsp, 13 * wordSize));
  movq(rcx, Address(rsp, 14 * wordSize));
  movq(rax, Address(rsp, 15 * wordSize));

  addq(rsp, 16 * wordSize);
}
3952
// POP into memory (8F /0): [dst] <- top of stack; rsp += 8.
void Assembler::popq(Address dst) {
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0x8F);
  emit_operand(rax, dst);
}
3959
// Save all sixteen GPRs in a 16-slot frame below rsp (there is no PUSHA in
// 64-bit mode).  The original rsp is written first, into what becomes slot
// 11 after the subq (-5*wordSize before it, within the 128-byte red zone);
// popa() skips that slot when restoring.
void Assembler::pusha() { // 64bit
  // we have to store original rsp.  ABI says that 128 bytes
  // below rsp are local scratch.
  movq(Address(rsp, -5 * wordSize), rsp);

  subq(rsp, 16 * wordSize);

  movq(Address(rsp, 15 * wordSize), rax);
  movq(Address(rsp, 14 * wordSize), rcx);
  movq(Address(rsp, 13 * wordSize), rdx);
  movq(Address(rsp, 12 * wordSize), rbx);
  // skip rsp
  movq(Address(rsp, 10 * wordSize), rbp);
  movq(Address(rsp, 9 * wordSize), rsi);
  movq(Address(rsp, 8 * wordSize), rdi);
  movq(Address(rsp, 7 * wordSize), r8);
  movq(Address(rsp, 6 * wordSize), r9);
  movq(Address(rsp, 5 * wordSize), r10);
  movq(Address(rsp, 4 * wordSize), r11);
  movq(Address(rsp, 3 * wordSize), r12);
  movq(Address(rsp, 2 * wordSize), r13);
  movq(Address(rsp, wordSize), r14);
  movq(Address(rsp, 0), r15);
}
3984
// PUSH from memory (FF /6): push [src]; rsi (encoding 6) selects /6.
void Assembler::pushq(Address src) {
  InstructionMark im(this);
  prefixq(src);
  emit_byte(0xFF);
  emit_operand(rsi, src);
}
3991
// RCL: rotate dst left through CF by imm8 (64-bit).  Uses the one-byte
// D1 /2 short form when the count is 1, else C1 /2 with an imm8.
void Assembler::rclq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_byte(0xD1);
    emit_byte(0xD0 | encode);
  } else {
    emit_byte(0xC1);
    emit_byte(0xD0 | encode);
    emit_byte(imm8);
  }
}
// SAR: arithmetic (sign-preserving) right shift of dst by imm8 (64-bit).
// Uses the one-byte D1 /7 short form when the count is 1, else C1 /7.
void Assembler::sarq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_byte(0xD1);
    emit_byte(0xF8 | encode);
  } else {
    emit_byte(0xC1);
    emit_byte(0xF8 | encode);
    emit_byte(imm8);
  }
}
4016
// SAR (D3 /7): arithmetic right shift of dst by the count in cl (64-bit).
void Assembler::sarq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xF8 | encode);
}
// SBB: [dst] <- [dst] - imm32 - CF (64-bit); rbx (encoding 3) selects the
// /3 (SBB) sub-opcode of the immediate group.
void Assembler::sbbq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}
4027
// SBB: dst <- dst - imm32 - CF (64-bit; imm32 is sign-extended).
void Assembler::sbbq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD8, dst, imm32);
}
4032
// SBB: dst <- dst - [src] - CF (64-bit); opcode 0x1B is the RM form.
void Assembler::sbbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x1B);
  emit_operand(dst, src);
}
4039
// SBB: dst <- dst - src - CF (64-bit register-register form).
void Assembler::sbbq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}
4044
// SHL: logical left shift of dst by imm8 (64-bit).  Uses the one-byte
// D1 /4 short form when the count is 1, else C1 /4 with an imm8.
void Assembler::shlq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_byte(0xD1);
    emit_byte(0xE0 | encode);
  } else {
    emit_byte(0xC1);
    emit_byte(0xE0 | encode);
    emit_byte(imm8);
  }
}
4057
// SHL (D3 /4): logical left shift of dst by the count in cl (64-bit).
void Assembler::shlq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xE0 | encode);
}
4063
4064 void Assembler::shrq(Register dst, int imm8) {
4065 assert(isShiftCount(imm8 >> 1), "illegal shift count");
4066 int encode = prefixq_and_encode(dst->encoding());
4067 emit_byte(0xC1);
4068 emit_byte(0xE8 | encode);
4069 emit_byte(imm8);
4070 }
4071
// SHR (D3 /5): logical right shift of dst by the count in cl (64-bit).
void Assembler::shrq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xE8 | encode);
}
4077
// SQRTSD (F2 0F 51): dst <- sqrt of the double at [src].  The F2 mandatory
// prefix must precede any REX prefix emitted for the address.
void Assembler::sqrtsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x51);
  emit_operand(dst, src);
}
4087
// SUB: [dst] <- [dst] - imm32 (64-bit).  Uses the sign-extended 8-bit
// immediate form (0x83 /5) when the value fits, else the 32-bit form
// (0x81 /5); rbp (encoding 5) selects the /5 (SUB) sub-opcode.
void Assembler::subq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  if (is8bit(imm32)) {
    emit_byte(0x83);
    emit_operand(rbp, dst, 1);
    emit_byte(imm32 & 0xFF);
  } else {
    emit_byte(0x81);
    emit_operand(rbp, dst, 4);
    emit_long(imm32);
  }
}
4101
// SUB: dst <- dst - imm32 (64-bit; imm32 is sign-extended).
void Assembler::subq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE8, dst, imm32);
}
4106
// SUB: [dst] <- [dst] - src (64-bit); opcode 0x29 is the MR form.
void Assembler::subq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_byte(0x29);
  emit_operand(src, dst);
}
4113
// SUB: dst <- dst - [src] (64-bit); opcode 0x2B is the RM form.
void Assembler::subq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x2B);
  emit_operand(dst, src);
}
4120
// SUB: dst <- dst - src (64-bit register-register form).
void Assembler::subq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}
4125
// TEST: set flags for dst & imm32 (64-bit, non-destructive).  rax (encoding
// 0) gets the dedicated short form REX.W A9 id; other registers use
// F7 /0 with a ModRM byte.
void Assembler::testq(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    prefix(REX_W);
    emit_byte(0xA9);
  } else {
    encode = prefixq_and_encode(encode);
    emit_byte(0xF7);
    emit_byte(0xC0 | encode);
  }
  emit_long(imm32);
}
4141
// TEST: set flags for dst & src (64-bit, non-destructive).
void Assembler::testq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}
4146
// XADD (0F C1): exchange [dst] and src, then [dst] <- sum.  No lock prefix
// is emitted here; callers needing atomicity emit lock() first.
void Assembler::xaddq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_byte(0x0F);
  emit_byte(0xC1);
  emit_operand(src, dst);
}
4154
// XCHG (0x87): swap dst with [src] (implicitly locked by the hardware when
// a memory operand is involved).
void Assembler::xchgq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x87);
  emit_operand(dst, src);
}
4161
// XCHG (0x87): swap two 64-bit registers.  Always uses the ModRM form; the
// 90+rd rax short form is not used here.
void Assembler::xchgq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x87);
  emit_byte(0xc0 | encode);
}
4167
// XOR: dst <- dst ^ src (64-bit register-register form).
void Assembler::xorq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}
4172
// XOR: dst <- dst ^ [src] (64-bit); opcode 0x33 is the RM form.
void Assembler::xorq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x33);
  emit_operand(dst, src);
}
4179
4180 #endif // !LP64
4181
// Table mapping each condition code to its logical negation, indexed by the
// Assembler::Condition value (see the per-entry comments for the index).
static Assembler::Condition reverse[] = {
    Assembler::noOverflow     /* overflow      = 0x0 */ ,
    Assembler::overflow       /* noOverflow    = 0x1 */ ,
    Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
    Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,
    Assembler::notZero        /* zero          = 0x4, equal         = 0x4 */ ,
    Assembler::zero           /* notZero       = 0x5, notEqual      = 0x5 */ ,
    Assembler::above          /* belowEqual    = 0x6 */ ,
    Assembler::belowEqual     /* above         = 0x7 */ ,
    Assembler::positive       /* negative      = 0x8 */ ,
    Assembler::negative       /* positive      = 0x9 */ ,
    Assembler::noParity       /* parity        = 0xa */ ,
    Assembler::parity         /* noParity      = 0xb */ ,
    Assembler::greaterEqual   /* less          = 0xc */ ,
    Assembler::less           /* greaterEqual  = 0xd */ ,
    Assembler::greater        /* lessEqual     = 0xe */ ,
    Assembler::lessEqual      /* greater       = 0xf, */

};
4201
4202
4203 // Implementation of MacroAssembler
4204
4205 // First all the versions that have distinct versions depending on 32/64 bit
4206 // Unless the difference is trivial (1 line or so).
4207
4208 #ifndef _LP64
4209
4210 // 32bit versions
4211
// 32-bit: an AddressLiteral is always directly addressable, so just wrap
// its target and relocation spec in an Address.
Address MacroAssembler::as_Address(AddressLiteral adr) {
  return Address(adr.target(), adr.rspec());
}
4215
// 32-bit: build an array-element Address directly from the ArrayAddress.
Address MacroAssembler::as_Address(ArrayAddress adr) {
  return Address::make_array(adr);
}
4219
// 32-bit biased-locking entry fast path.  Tries to acquire (or re-acquire)
// the bias on obj_reg's mark word; on success jumps to 'done', on failure
// jumps to *slow_case (when given) or falls through to 'cas_label' so the
// caller can attempt conventional CAS locking.  swap_reg must be rax (it is
// the cmpxchg comparand).  If tmp_reg is noreg, lock_reg is borrowed as
// scratch via push/pop.  Returns the code offset whose mark-word load
// serves as the implicit null check of obj_reg.
int MacroAssembler::biased_locking_enter(Register lock_reg,
                                         Register obj_reg,
                                         Register swap_reg,
                                         Register tmp_reg,
                                         bool swap_reg_contains_mark,
                                         Label& done,
                                         Label* slow_case,
                                         BiasedLockingCounters* counters) {
  assert(UseBiasedLocking, "why call this otherwise?");
  assert(swap_reg == rax, "swap_reg must be rax, for cmpxchg");
  assert_different_registers(lock_reg, obj_reg, swap_reg);

  if (PrintBiasedLockingStatistics && counters == NULL)
    counters = BiasedLocking::counters();

  bool need_tmp_reg = false;
  if (tmp_reg == noreg) {
    need_tmp_reg = true;
    tmp_reg = lock_reg;
  } else {
    assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
  }
  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
  Address klass_addr     (obj_reg, oopDesc::klass_offset_in_bytes());
  Address saved_mark_addr(lock_reg, 0);

  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  // First check to see whether biasing is even enabled for this object
  Label cas_label;
  int null_check_offset = -1;
  if (!swap_reg_contains_mark) {
    null_check_offset = offset();
    movl(swap_reg, mark_addr);
  }
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  movl(tmp_reg, swap_reg);
  andl(tmp_reg, markOopDesc::biased_lock_mask_in_place);
  cmpl(tmp_reg, markOopDesc::biased_lock_pattern);
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  jcc(Assembler::notEqual, cas_label);
  // The bias pattern is present in the object's header. Need to check
  // whether the bias owner and the epoch are both still current.
  // Note that because there is no current thread register on x86 we
  // need to store off the mark word we read out of the object to
  // avoid reloading it and needing to recheck invariants below. This
  // store is unfortunate but it makes the overall code shorter and
  // simpler.
  movl(saved_mark_addr, swap_reg);
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  get_thread(tmp_reg);
  xorl(swap_reg, tmp_reg);
  if (swap_reg_contains_mark) {
    null_check_offset = offset();
  }
  movl(tmp_reg, klass_addr);
  xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
  andl(swap_reg, ~((int) markOopDesc::age_mask_in_place));
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address)counters->biased_lock_entry_count_addr()));
  }
  jcc(Assembler::equal, done);

  Label try_revoke_bias;
  Label try_rebias;

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  testl(swap_reg, markOopDesc::biased_lock_mask_in_place);
  jcc(Assembler::notZero, try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
  testl(swap_reg, markOopDesc::epoch_mask_in_place);
  jcc(Assembler::notZero, try_rebias);

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go in to the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  movl(swap_reg, saved_mark_addr);
  andl(swap_reg,
       markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  get_thread(tmp_reg);
  orl(tmp_reg, swap_reg);
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(tmp_reg, Address(obj_reg, 0));
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  // If the biasing toward our thread failed, this means that
  // another thread succeeded in biasing it toward itself and we
  // need to revoke that bias. The revocation will occur in the
  // interpreter runtime in the slow case.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address)counters->anonymously_biased_lock_entry_count_addr()));
  }
  if (slow_case != NULL) {
    jcc(Assembler::notZero, *slow_case);
  }
  jmp(done);

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  get_thread(tmp_reg);
  movl(swap_reg, klass_addr);
  orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
  movl(swap_reg, saved_mark_addr);
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(tmp_reg, Address(obj_reg, 0));
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  // If the biasing toward our thread failed, then another thread
  // succeeded in biasing it toward itself and we need to revoke that
  // bias. The revocation will occur in the runtime in the slow case.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address)counters->rebiased_lock_entry_count_addr()));
  }
  if (slow_case != NULL) {
    jcc(Assembler::notZero, *slow_case);
  }
  jmp(done);

  bind(try_revoke_bias);
  // The prototype mark in the klass doesn't have the bias bit set any
  // more, indicating that objects of this data type are not supposed
  // to be biased any more. We are going to try to reset the mark of
  // this object to the prototype value and fall through to the
  // CAS-based locking scheme. Note that if our CAS fails, it means
  // that another thread raced us for the privilege of revoking the
  // bias of this particular object, so it's okay to continue in the
  // normal locking code.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  movl(swap_reg, saved_mark_addr);
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  movl(tmp_reg, klass_addr);
  movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(tmp_reg, Address(obj_reg, 0));
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  // Fall through to the normal CAS-based lock, because no matter what
  // the result of the above CAS, some thread must have succeeded in
  // removing the bias bit from the object's header.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address)counters->revoked_lock_entry_count_addr()));
  }

  bind(cas_label);

  return null_check_offset;
}
// Call a C runtime "leaf" routine (no safepoint/last-Java-frame bookkeeping)
// and pop the stack-passed arguments afterwards (32-bit cdecl: caller cleans).
void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments) {
  call(RuntimeAddress(entry_point));
  // Discard the arguments that were pushed by the caller of this helper.
  increment(rsp, number_of_arguments * wordSize);
}
4436
// Compare memory operand against an oop constant embedded as an imm32
// (32-bit VM: oops fit in 32 bits); relocation lets GC find/patch it.
void MacroAssembler::cmpoop(Address src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}
4440
// Register flavor of the oop-immediate compare above; same relocation story.
void MacroAssembler::cmpoop(Register src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}
4444
4445 void MacroAssembler::extend_sign(Register hi, Register lo) {
4446 // According to Intel Doc. AP-526, "Integer Divide", p.18.
4447 if (VM_Version::is_P6() && hi == rdx && lo == rax) {
4448 cdql();
4449 } else {
4450 movl(hi, lo);
4451 sarl(hi, 31);
4452 }
4453 }
4454
void MacroAssembler::fat_nop() {
  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  // Each byte is itself a harmless one-byte instruction (segment-override
  // prefixes + nop), so the sequence stays valid even if execution lands
  // in the middle of it while another thread is patching.
  emit_byte(0x26); // es:
  emit_byte(0x2e); // cs:
  emit_byte(0x64); // fs:
  emit_byte(0x65); // gs:
  emit_byte(0x90);
}
4463
// Jump to L if FPU condition flag C2 is set. fnstsw_ax copies the FPU
// status word into ax; sahf moves ah into EFLAGS, where C2 appears as
// the parity flag. 'tmp' is used to preserve rax across the sequence.
void MacroAssembler::jC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::parity, L);
}
4473
// Mirror of jC2 above: jump to L if FPU condition flag C2 is CLEAR
// (only the final branch condition differs).
void MacroAssembler::jnC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::noParity, L);
}
4483
// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
// Indirect jump through a jump-table entry (used for switch dispatch).
void MacroAssembler::jump(ArrayAddress entry) {
  jmp(as_Address(entry));
}
4489
// Note: y_lo will be destroyed
// Three-way long compare: leaves -1/0/1 in x_hi for x < y / x == y / x > y.
void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  // Long compare for Java (semantics as described in JVM spec.)
  Label high, low, done;

  // Compare high words signed first; only on equality compare low words
  // (unsigned, since they are the low halves of a two's-complement value).
  cmpl(x_hi, y_hi);
  jcc(Assembler::less, low);
  jcc(Assembler::greater, high);
  // x_hi is the return register
  xorl(x_hi, x_hi);
  cmpl(x_lo, y_lo);
  jcc(Assembler::below, low);
  jcc(Assembler::equal, done);

  bind(high);
  xorl(x_hi, x_hi);
  increment(x_hi);       // result 1: x > y
  jmp(done);

  bind(low);
  xorl(x_hi, x_hi);
  decrementl(x_hi);      // result -1: x < y
  bind(done);
}
4515
// Load the literal's address (not its contents) into dst, with relocation.
void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal32(dst, (int32_t)src.target(), src.rspec());
}
4519
// Store the literal's address into a memory operand.
void MacroAssembler::lea(Address dst, AddressLiteral adr) {
  // leal(dst, as_Address(adr));
  // see note in movl as to why we must use a move
  mov_literal32(dst, (int32_t) adr.target(), adr.rspec());
}
4525
// Tear down the current frame: restore rsp from rbp, then pop the saved rbp.
void MacroAssembler::leave() {
  mov(rsp, rbp);
  pop(rbp);
}
4530
// Note: clobbers rax, rbx, rcx and rdx; result is delivered in rdx:rax.
void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
  // Multiplication of two Java long values stored on the stack
  // as illustrated below. Result is in rdx:rax.
  //
  // rsp ---> [  ??  ] \               \
  //            ....    | y_rsp_offset  |
  //          [ y_lo ] /  (in bytes)    | x_rsp_offset
  //          [ y_hi ]                  | (in bytes)
  //            ....                    |
  //          [ x_lo ]                 /
  //          [ x_hi ]
  //            ....
  //
  // Basic idea: lo(result) = lo(x_lo * y_lo)
  //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
  Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
  Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
  Label quick;
  // load x_hi, y_hi and check if quick
  // multiplication is possible
  movl(rbx, x_hi);
  movl(rcx, y_hi);
  movl(rax, rbx);
  orl(rbx, rcx);                                 // rbx, = 0 <=> x_hi = 0 and y_hi = 0
  jcc(Assembler::zero, quick);                   // if rbx, = 0 do quick multiply
  // do full multiplication
  // 1st step
  mull(y_lo);                                    // x_hi * y_lo
  movl(rbx, rax);                                // save lo(x_hi * y_lo) in rbx,
  // 2nd step
  movl(rax, x_lo);
  mull(rcx);                                     // x_lo * y_hi
  addl(rbx, rax);                                // add lo(x_lo * y_hi) to rbx,
  // 3rd step
  bind(quick);                                   // note: rbx, = 0 if quick multiply!
  movl(rax, x_lo);
  mull(y_lo);                                    // x_lo * y_lo
  addl(rdx, rbx);                                // correct hi(x_lo * y_lo)
}
4570
// Two's-complement negate the 64-bit value in hi:lo
// (neg lo; propagate borrow into hi via adc; then negate hi).
void MacroAssembler::lneg(Register hi, Register lo) {
  negl(lo);
  adcl(hi, 0);
  negl(hi);
}
4576
// 64-bit left shift of hi:lo by the count in rcx (Java semantics).
void MacroAssembler::lshl(Register hi, Register lo) {
  // Java shift left long support (semantics as described in JVM spec., p.305)
  // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
  // shift value is in rcx !
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;                        // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                                    // if (s < n)
  jcc(Assembler::less, L);                       // else (s >= n)
  movl(hi, lo);                                  // x := x << n
  xorl(lo, lo);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                                       // s (mod n) < n
  shldl(hi, lo);                                 // x := x << s
  shll(lo);
}
4596
4597
// 64-bit right shift of hi:lo by the count in rcx; arithmetic when
// sign_extension is true (Java >>), logical otherwise (Java >>>).
void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
  // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
  // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;                        // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                                    // if (s < n)
  jcc(Assembler::less, L);                       // else (s >= n)
  movl(lo, hi);                                  // x := x >> n
  if (sign_extension) sarl(hi, 31);
  else                xorl(hi, hi);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                                       // s (mod n) < n
  shrdl(lo, hi);                                 // x := x >> s
  if (sign_extension) sarl(hi);
  else                shrl(hi);
}
4618
// Load an oop constant into dst as an imm32 with oop relocation (32-bit VM).
void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}
4622
// Store an oop constant to memory as an imm32 with oop relocation (32-bit VM).
void MacroAssembler::movoop(Address dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}
4626
4627 void MacroAssembler::movptr(Register dst, AddressLiteral src) {
4628 if (src.is_lval()) {
4629 mov_literal32(dst, (intptr_t)src.target(), src.rspec());
4630 } else {
4631 movl(dst, as_Address(src));
4632 }
4633 }
4634
// Store a pointer-sized register into an array slot (32-bit: plain movl).
void MacroAssembler::movptr(ArrayAddress dst, Register src) {
  movl(as_Address(dst), src);
}
4638
// Load a pointer-sized value from an array slot (32-bit: plain movl).
void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movl(dst, as_Address(src));
}
4642
// src should NEVER be a real pointer. Use AddressLiteral for true pointers
// (a real pointer would need relocation; a bare immediate does not).
void MacroAssembler::movptr(Address dst, intptr_t src) {
  movl(dst, src);
}
4647
4648
// Load a double from an absolute address (always reachable on 32-bit).
void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
  movsd(dst, as_Address(src));
}
4652
// Restore the registers saved by push_callee_saved_registers (reverse order).
void MacroAssembler::pop_callee_saved_registers() {
  pop(rcx);
  pop(rdx);
  pop(rdi);
  pop(rsi);
}
4659
// Reload the FPU top-of-stack (a double, 2 words) from the Java expression
// stack and pop it off the machine stack.
void MacroAssembler::pop_fTOS() {
  fld_d(Address(rsp, 0));
  addl(rsp, 2 * wordSize);
}
4664
// Save rsi/rdi/rdx/rcx; popped in reverse by pop_callee_saved_registers.
void MacroAssembler::push_callee_saved_registers() {
  push(rsi);
  push(rdi);
  push(rdx);
  push(rcx);
}
4671
// Spill the FPU top-of-stack (a double, 2 words) onto the machine stack,
// popping the x87 stack. Inverse of pop_fTOS.
void MacroAssembler::push_fTOS() {
  subl(rsp, 2 * wordSize);
  fstp_d(Address(rsp, 0));
}
4676
4677
// Push an oop constant as an imm32 with oop relocation (32-bit VM).
void MacroAssembler::pushoop(jobject obj) {
  push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
}
4681
4682
// Push either the literal address itself (lval) or the word it points at.
void MacroAssembler::pushptr(AddressLiteral src) {
  if (src.is_lval()) {
    push_literal32((int32_t)src.target(), src.rspec());
  } else {
    pushl(as_Address(src));
  }
}
4690
// dst := (ZF clear) ? 1 : 0. Note: the xor clears dst but preserves the
// condition only because set_byte_if_not_zero consumes flags set earlier
// by the caller (xor does alter EFLAGS, but ZF is re-tested via setcc on
// the pre-existing flags? -- NOTE(review): xorl clears ZF state; confirm
// callers rely on setb semantics w.r.t. the xor. See callers before reuse.
void MacroAssembler::set_word_if_not_zero(Register dst) {
  xorl(dst, dst);
  set_byte_if_not_zero(dst);
}
4695
// 32-bit calling convention: outgoing VM-call arguments go on the stack.
static void pass_arg0(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}
4699
// See pass_arg0: 32-bit arguments are stack-passed.
static void pass_arg1(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}
4703
// See pass_arg0: 32-bit arguments are stack-passed.
static void pass_arg2(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}
4707
// See pass_arg0: 32-bit arguments are stack-passed.
static void pass_arg3(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}
4711
4712 #ifndef PRODUCT
4713 extern "C" void findpc(intptr_t x);
4714 #endif
4715
4716 void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
4717 // In order to get locks to work, we need to fake a in_VM state
4718 JavaThread* thread = JavaThread::current();
4719 JavaThreadState saved_state = thread->thread_state();
4720 thread->set_thread_state(_thread_in_vm);
4721 if (ShowMessageBoxOnError) {
4722 JavaThread* thread = JavaThread::current();
4723 JavaThreadState saved_state = thread->thread_state();
4724 thread->set_thread_state(_thread_in_vm);
4725 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
4726 ttyLocker ttyl;
4727 BytecodeCounter::print();
4728 }
4729 // To see where a verify_oop failed, get $ebx+40/X for this frame.
4730 // This is the value of eip which points to where verify_oop will return.
4731 if (os::message_box(msg, "Execution stopped, print registers?")) {
4732 ttyLocker ttyl;
4733 tty->print_cr("eip = 0x%08x", eip);
4734 #ifndef PRODUCT
4735 tty->cr();
4736 findpc(eip);
4737 tty->cr();
4738 #endif
4739 tty->print_cr("rax, = 0x%08x", rax);
4740 tty->print_cr("rbx, = 0x%08x", rbx);
4741 tty->print_cr("rcx = 0x%08x", rcx);
4742 tty->print_cr("rdx = 0x%08x", rdx);
4743 tty->print_cr("rdi = 0x%08x", rdi);
4744 tty->print_cr("rsi = 0x%08x", rsi);
4745 tty->print_cr("rbp, = 0x%08x", rbp);
4746 tty->print_cr("rsp = 0x%08x", rsp);
4747 BREAKPOINT;
4748 }
4749 } else {
4750 ttyLocker ttyl;
4751 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
4752 assert(false, "DEBUG MESSAGE");
4753 }
4754 ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
4755 }
4756
// Emit code that halts the VM with a message: pushes msg, fakes a return
// address via call-to-next-instruction, saves all registers with pusha,
// then calls debug32 (whose parameter list matches this stack layout).
void MacroAssembler::stop(const char* msg) {
  ExternalAddress message((address)msg);
  // push address of message
  pushptr(message.addr());
  { Label L; call(L, relocInfo::none); bind(L); }     // push eip
  pusha();                                            // push registers
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
  hlt();
}
4766
// Emit code that prints a warning at runtime and continues; all CPU state
// is preserved around the call to the C 'warning' routine.
void MacroAssembler::warn(const char* msg) {
  push_CPU_state();

  ExternalAddress message((address) msg);
  // push address of message
  pushptr(message.addr());

  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
  addl(rsp, wordSize);       // discard argument
  pop_CPU_state();
}
4778
4779 #else // _LP64
4780
4781 // 64 bit versions
4782
// Convert an AddressLiteral to a rip-relative Address (64-bit only).
// The literal must be an rvalue and within 32-bit displacement range of
// the current pc (asserted), since amd64 has no absolute 64-bit memory form.
Address MacroAssembler::as_Address(AddressLiteral adr) {
  // amd64 always does this as a pc-rel
  // we can be absolute or disp based on the instruction type
  // jmp/call are displacements others are absolute
  assert(!adr.is_lval(), "must be rval");
  assert(reachable(adr), "must be");
  return Address((int32_t)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc());

}
4792
// Convert an ArrayAddress to a concrete Address by materializing the array
// base in rscratch1 (64-bit: the base may be out of imm32 range).
// Note: clobbers rscratch1.
Address MacroAssembler::as_Address(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  lea(rscratch1, base);
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(rscratch1, index._index, index._scale, index._disp);
  return array;
}
4801
// Emit the biased-locking fast path for 'obj_reg' (64-bit version).
// swap_reg must be rax (cmpxchg requirement). On success control jumps to
// 'done'; when revocation is required and 'slow_case' is supplied control
// jumps there; otherwise falls through to 'cas_label' for the normal
// CAS-based lock. Returns the code offset of the mark-word load (for
// implicit null-check registration), or -1 if the caller already loaded
// the mark into swap_reg. Clobbers tmp_reg and swap_reg.
int MacroAssembler::biased_locking_enter(Register lock_reg,
                                         Register obj_reg,
                                         Register swap_reg,
                                         Register tmp_reg,
                                         bool swap_reg_contains_mark,
                                         Label& done,
                                         Label* slow_case,
                                         BiasedLockingCounters* counters) {
  assert(UseBiasedLocking, "why call this otherwise?");
  assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
  assert(tmp_reg != noreg, "tmp_reg must be supplied");
  assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
  Address saved_mark_addr(lock_reg, 0);

  if (PrintBiasedLockingStatistics && counters == NULL)
    counters = BiasedLocking::counters();

  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  // First check to see whether biasing is even enabled for this object
  Label cas_label;
  int null_check_offset = -1;
  if (!swap_reg_contains_mark) {
    null_check_offset = offset();
    movq(swap_reg, mark_addr);
  }
  movq(tmp_reg, swap_reg);
  andq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
  cmpq(tmp_reg, markOopDesc::biased_lock_pattern);
  jcc(Assembler::notEqual, cas_label);
  // The bias pattern is present in the object's header. Need to check
  // whether the bias owner and the epoch are both still current.
  load_prototype_header(tmp_reg, obj_reg);
  orq(tmp_reg, r15_thread);
  xorq(tmp_reg, swap_reg);
  // Ignore the age bits when comparing owner+epoch against the mark word.
  andq(tmp_reg, ~((int) markOopDesc::age_mask_in_place));
  if (counters != NULL) {
    // NOTE(review): 'zero' here means the object is already biased toward
    // this thread; the other counter sites below use path-specific
    // counters, so anonymously_biased_... at this first site looks like it
    // should be biased_lock_entry_count_addr() -- confirm against the
    // 32-bit version's corresponding site.
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
  }
  jcc(Assembler::equal, done);

  Label try_revoke_bias;
  Label try_rebias;

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  testq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
  jcc(Assembler::notZero, try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
  testq(tmp_reg, markOopDesc::epoch_mask_in_place);
  jcc(Assembler::notZero, try_rebias);

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go in to the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  andq(swap_reg,
       markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
  movq(tmp_reg, swap_reg);
  orq(tmp_reg, r15_thread);
  if (os::is_MP()) {
    lock();
  }
  // cmpxchg: compares rax (swap_reg, presumed unbiased mark) with the
  // object's mark; installs the thread-biased mark on match.
  cmpxchgq(tmp_reg, Address(obj_reg, 0));
  // If the biasing toward our thread failed, this means that
  // another thread succeeded in biasing it toward itself and we
  // need to revoke that bias. The revocation will occur in the
  // interpreter runtime in the slow case.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
  }
  if (slow_case != NULL) {
    jcc(Assembler::notZero, *slow_case);
  }
  jmp(done);

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  load_prototype_header(tmp_reg, obj_reg);
  orq(tmp_reg, r15_thread);
  if (os::is_MP()) {
    lock();
  }
  cmpxchgq(tmp_reg, Address(obj_reg, 0));
  // If the biasing toward our thread failed, then another thread
  // succeeded in biasing it toward itself and we need to revoke that
  // bias. The revocation will occur in the runtime in the slow case.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
  }
  if (slow_case != NULL) {
    jcc(Assembler::notZero, *slow_case);
  }
  jmp(done);

  bind(try_revoke_bias);
  // The prototype mark in the klass doesn't have the bias bit set any
  // more, indicating that objects of this data type are not supposed
  // to be biased any more. We are going to try to reset the mark of
  // this object to the prototype value and fall through to the
  // CAS-based locking scheme. Note that if our CAS fails, it means
  // that another thread raced us for the privilege of revoking the
  // bias of this particular object, so it's okay to continue in the
  // normal locking code.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  load_prototype_header(tmp_reg, obj_reg);
  if (os::is_MP()) {
    lock();
  }
  cmpxchgq(tmp_reg, Address(obj_reg, 0));
  // Fall through to the normal CAS-based lock, because no matter what
  // the result of the above CAS, some thread must have succeeded in
  // removing the bias bit from the object's header.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
  }

  bind(cas_label);

  return null_check_offset;
}
4960
// Call a C runtime "leaf" routine (64-bit). Arguments are already in the
// C registers; this helper only ensures the 16-byte stack alignment the
// ABI requires at the call site, plus the Win64 home space.
void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
  Label L, E;

#ifdef _WIN64
  // Windows always allocates space for its register args
  assert(num_args <= 4, "only register arguments supported");
  subq(rsp,  frame::arg_reg_save_area_bytes);
#endif

  // Align stack if necessary: test the low 4 bits of rsp.
  testl(rsp, 15);
  jcc(Assembler::zero, L);

  // Misaligned: push 8 filler bytes so the call sees a 16-byte boundary.
  subq(rsp, 8);
  {
    call(RuntimeAddress(entry_point));
  }
  addq(rsp, 8);
  jmp(E);

  bind(L);
  {
    call(RuntimeAddress(entry_point));
  }

  bind(E);

#ifdef _WIN64
  // restore stack pointer
  addq(rsp, frame::arg_reg_save_area_bytes);
#endif

}
4994
// Compare src1 against the 64-bit value stored at src2. Far addresses go
// through rscratch1 (clobbered in that case).
void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
  assert(!src2.is_lval(), "should use cmpptr");

  if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch1, src2);
    Assembler::cmpq(src1, Address(rscratch1, 0));
  }
}
5005
int MacroAssembler::corrected_idivq(Register reg) {
  // Full implementation of Java ldiv and lrem; checks for special
  // case as described in JVM spec., p.243 & p.271. The function
  // returns the (pc) offset of the idivl instruction - may be needed
  // for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax: dividend                         min_long
  //         reg: divisor   (may not be eax/edx)   -1
  //
  // output: rax: quotient  (= rax idiv reg)       min_long
  //         rdx: remainder (= rax irem reg)       0
  // Note: rdx is always clobbered (it receives the remainder).
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  static const int64_t min_long = 0x8000000000000000;
  Label normal_case, special_case;

  // check for special case: min_long / -1 would trap in hardware (#DE),
  // so it is dispatched to a quotient of min_long and remainder 0.
  cmp64(rax, ExternalAddress((address) &min_long));
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where
                  // remainder = 0)
  cmpq(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdqq();
  int idivq_offset = offset();
  idivq(reg);

  // normal and special case exit
  bind(special_case);

  return idivq_offset;
}
5042
5043 void MacroAssembler::decrementq(Register reg, int value) {
5044 if (value == min_jint) { subq(reg, value); return; }
5045 if (value < 0) { incrementq(reg, -value); return; }
5046 if (value == 0) { ; return; }
5047 if (value == 1 && UseIncDec) { decq(reg) ; return; }
5048 /* else */ { subq(reg, value) ; return; }
5049 }
5050
// Memory-operand form of decrementq; same encoding selection as above.
void MacroAssembler::decrementq(Address dst, int value) {
  if (value == min_jint) { subq(dst, value); return; }
  if (value <  0) { incrementq(dst, -value); return; }
  if (value ==  0) {                        ; return; }
  if (value ==  1 && UseIncDec) { decq(dst) ; return; }
  /* else */      { subq(dst, value)       ; return; }
}
5058
void MacroAssembler::fat_nop() {
  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  // Recommened sequence from 'Software Optimization Guide for the AMD
  // Hammer Processor'
  // (66 66 90 = 3-byte nop, 66 90 = 2-byte nop; 5 bytes total)
  emit_byte(0x66);
  emit_byte(0x66);
  emit_byte(0x90);
  emit_byte(0x66);
  emit_byte(0x90);
}
5069
// Add 'value' to 'reg', choosing the cheapest encoding
// (min_jint handled first since -min_jint overflows).
void MacroAssembler::incrementq(Register reg, int value) {
  if (value == min_jint) { addq(reg, value); return; }
  if (value <  0) { decrementq(reg, -value); return; }
  if (value ==  0) {                        ; return; }
  if (value ==  1 && UseIncDec) { incq(reg) ; return; }
  /* else */      { addq(reg, value)       ; return; }
}
5077
// Memory-operand form of incrementq; same encoding selection as above.
void MacroAssembler::incrementq(Address dst, int value) {
  if (value == min_jint) { addq(dst, value); return; }
  if (value <  0) { decrementq(dst, -value); return; }
  if (value ==  0) {                        ; return; }
  if (value ==  1 && UseIncDec) { incq(dst) ; return; }
  /* else */      { addq(dst, value)       ; return; }
}
5085
// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
// 64-bit: materialize the table base in rscratch1, then patch it into a
// local copy of the index Address and jump through it. Clobbers rscratch1.
void MacroAssembler::jump(ArrayAddress entry) {
  lea(rscratch1, entry.base());
  Address dispatch = entry.index();
  assert(dispatch._base == noreg, "must be");
  dispatch._base = rscratch1;
  jmp(dispatch);
}
5095
// Dead on 64-bit: longs live in single registers, so the two-register
// compare must never be reached.
void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  cmpq(x_lo, y_lo);
}
5100
// Load the literal's 64-bit address (not its contents) into dst.
void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal64(dst, (intptr_t)src.target(), src.rspec());
}
5104
// Store the literal's 64-bit address into memory, staged through
// rscratch1 (no mem64-imm64 form exists). Clobbers rscratch1.
void MacroAssembler::lea(Address dst, AddressLiteral adr) {
  mov_literal64(rscratch1, (intptr_t)adr.target(), adr.rspec());
  movptr(dst, rscratch1);
}
5109
// Tear down the frame with the hardware LEAVE instruction.
void MacroAssembler::leave() {
  // %%% is this really better? Why not on 32bit too?
  emit_byte(0xC9); // LEAVE
}
5114
// Dead on 64-bit: longs live in single registers.
void MacroAssembler::lneg(Register hi, Register lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  negq(lo);
}
5119
// Load an oop constant as a 64-bit immediate with oop relocation.
void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
}
5123
// Store an oop constant to memory, staged through rscratch1
// (no mem64-imm64 form). Clobbers rscratch1.
void MacroAssembler::movoop(Address dst, jobject obj) {
  mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  movq(dst, rscratch1);
}
5128
5129 void MacroAssembler::movptr(Register dst, AddressLiteral src) {
5130 if (src.is_lval()) {
5131 mov_literal64(dst, (intptr_t)src.target(), src.rspec());
5132 } else {
5133 if (reachable(src)) {
5134 movq(dst, as_Address(src));
5135 } else {
5136 lea(rscratch1, src);
5137 movq(dst, Address(rscratch1,0));
5138 }
5139 }
5140 }
5141
// Store a pointer-sized register into an array slot (clobbers rscratch1
// via as_Address).
void MacroAssembler::movptr(ArrayAddress dst, Register src) {
  movq(as_Address(dst), src);
}
5145
// Load a pointer-sized value from an array slot (clobbers rscratch1
// via as_Address).
void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movq(dst, as_Address(src));
}
5149
// src should NEVER be a real pointer. Use AddressLiteral for true pointers
// (a real pointer needs relocation). Staged through rscratch1 because
// there is no mem64-imm64 move. Clobbers rscratch1.
void MacroAssembler::movptr(Address dst, intptr_t src) {
  mov64(rscratch1, src);
  movq(dst, rscratch1);
}
5155
// These are mostly for initializing NULL
// Store a sign-extended imm32 into a 64-bit memory slot.
void MacroAssembler::movptr(Address dst, int32_t src) {
  movslq(dst, src);
}
5160
// Load a (sign-extended) imm32 into a 64-bit register; mostly for NULL.
void MacroAssembler::movptr(Register dst, int32_t src) {
  mov64(dst, (intptr_t)src);
}
5164
// Push an oop constant, staged through rscratch1 (no push-imm64).
// Clobbers rscratch1.
void MacroAssembler::pushoop(jobject obj) {
  movoop(rscratch1, obj);
  push(rscratch1);
}
5169
// Push either the literal address itself (lval) or the word it points at.
// Always materializes the address into rscratch1 first (clobbered).
void MacroAssembler::pushptr(AddressLiteral src) {
  lea(rscratch1, src);
  if (src.is_lval()) {
    push(rscratch1);
  } else {
    pushq(Address(rscratch1, 0));
  }
}
5178
// Clear the last-Java-frame anchor in the current thread (r15_thread),
// so stack walkers no longer see a valid Java frame.
void MacroAssembler::reset_last_Java_frame(bool clear_fp,
                                           bool clear_pc) {
  // we must set sp to zero to clear frame
  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), (int32_t)NULL_WORD);
  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), (int32_t)NULL_WORD);
  }

  if (clear_pc) {
    movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), (int32_t)NULL_WORD);
  }
}
5193
// Record the last Java frame (sp/fp/pc) in the current thread's anchor.
// Note the sp slot is written LAST, after fp and pc are in place --
// presumably so a concurrent observer never sees a half-built anchor
// (the sp slot is what marks the anchor valid; confirm against
// JavaFrameAnchor before relying on this).
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc) {
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
           last_java_fp);
  }

  // last_java_pc is optional
  if (last_java_pc != NULL) {
    Address java_pc(r15_thread,
                    JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
    lea(rscratch1, InternalAddress(last_java_pc));
    movptr(java_pc, rscratch1);
  }

  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
}
5218
// 64-bit calling convention: move into the C arg register unless already there.
static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg ) {
    masm->mov(c_rarg0, arg);
  }
}
5224
// See pass_arg0: register-passed, skip no-op moves.
static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg ) {
    masm->mov(c_rarg1, arg);
  }
}
5230
// See pass_arg0: register-passed, skip no-op moves.
static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg ) {
    masm->mov(c_rarg2, arg);
  }
}
5236
// See pass_arg0: register-passed, skip no-op moves.
static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg ) {
    masm->mov(c_rarg3, arg);
  }
}
5242
// Emit code that halts the VM with a message (64-bit): saves all registers
// with pusha, passes msg / faulting pc / pointer to the saved-register
// array to debug64. Note c_rarg2 captures rsp BEFORE the ABI alignment.
void MacroAssembler::stop(const char* msg) {
  address rip = pc();
  pusha(); // get regs on stack
  lea(c_rarg0, ExternalAddress((address) msg));
  lea(c_rarg1, InternalAddress(rip));
  movq(c_rarg2, rsp); // pass pointer to regs array
  andq(rsp, -16); // align stack as required by ABI
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  hlt();
}
5253
// Emit code that prints a warning at runtime and continues. rsp is saved
// in r12 (callee-saved) so it can be restored exactly after the aligned
// call; full CPU state is preserved around the C call.
void MacroAssembler::warn(const char* msg) {
  push(r12);
  movq(r12, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call

  push_CPU_state();   // keeps alignment at 16 bytes
  lea(c_rarg0, ExternalAddress((address) msg));
  call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
  pop_CPU_state();

  movq(rsp, r12);
  pop(r12);
}
5267
5268 #ifndef PRODUCT
5269 extern "C" void findpc(intptr_t x);
5270 #endif
5271
// Runtime target of the 64-bit MacroAssembler::stop(): msg, faulting pc,
// and a pointer to the pusha save area arrive in the C arg registers.
// regs[] is indexed from the stack top, so regs[0] is the last register
// pushed (r15) through regs[15] (rax) -- see the prints below.
void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake a in_VM state
  if (ShowMessageBoxOnError ) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // XXX correct this offset for amd64
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      ttyLocker ttyl;
      tty->print_cr("rip = 0x%016lx", pc);
#ifndef PRODUCT
      tty->cr();
      findpc(pc);
      tty->cr();
#endif
      tty->print_cr("rax = 0x%016lx", regs[15]);
      tty->print_cr("rbx = 0x%016lx", regs[12]);
      tty->print_cr("rcx = 0x%016lx", regs[14]);
      tty->print_cr("rdx = 0x%016lx", regs[13]);
      tty->print_cr("rdi = 0x%016lx", regs[8]);
      tty->print_cr("rsi = 0x%016lx", regs[9]);
      tty->print_cr("rbp = 0x%016lx", regs[10]);
      tty->print_cr("rsp = 0x%016lx", regs[11]);
      tty->print_cr("r8  = 0x%016lx", regs[7]);
      tty->print_cr("r9  = 0x%016lx", regs[6]);
      tty->print_cr("r10 = 0x%016lx", regs[5]);
      tty->print_cr("r11 = 0x%016lx", regs[4]);
      tty->print_cr("r12 = 0x%016lx", regs[3]);
      tty->print_cr("r13 = 0x%016lx", regs[2]);
      tty->print_cr("r14 = 0x%016lx", regs[1]);
      tty->print_cr("r15 = 0x%016lx", regs[0]);
      BREAKPOINT;
    }
    ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
  } else {
    ttyLocker ttyl;
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
                    msg);
  }
}
5320
5321 #endif // _LP64
5322
5323 // Now versions that are common to 32/64 bit
5324
5325 void MacroAssembler::addptr(Register dst, int32_t imm32) {
5326 LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
5327 }
5328
5329 void MacroAssembler::addptr(Register dst, Register src) {
5330 LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
5331 }
5332
5333 void MacroAssembler::addptr(Address dst, Register src) {
5334 LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
5335 }
5336
5337 void MacroAssembler::align(int modulus) {
5338 if (offset() % modulus != 0) {
5339 nop(modulus - (offset() % modulus));
5340 }
5341 }
5342
// Packed-double AND with a literal address; assumes the literal is
// directly addressable (no reachability check, unlike e.g. movdbl).
void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
  andpd(dst, as_Address(src));
}
5346
5347 void MacroAssembler::andptr(Register dst, int32_t imm32) {
5348 LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
5349 }
5350
5351 void MacroAssembler::atomic_incl(AddressLiteral counter_addr) {
5352 pushf();
5353 if (os::is_MP())
5354 lock();
5355 incrementl(counter_addr);
5356 popf();
5357 }
5358
// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages. This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  movptr(tmp, rsp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  bind(loop);
  // Store 'size' one page below tmp, then step both down a page;
  // loop while size remains positive.
  movl(Address(tmp, (-os::vm_page_size())), size );
  subptr(tmp, os::vm_page_size());
  subl(size, os::vm_page_size());
  jcc(Assembler::greater, loop);

  // Bang down shadow pages too.
  // The -1 because we already subtracted 1 page.
  for (int i = 0; i< StackShadowPages-1; i++) {
    // this could be any sized move but this is can be a debugging crumb
    // so the bigger the better.
    movptr(Address(tmp, (-i*os::vm_page_size())), size );
  }
}
5381
// Emit the biased-locking unlock fast path: if the mark word shows the
// biased pattern, unlock is a no-op and control jumps to 'done'.
// Clobbers temp_reg.
void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
  assert(UseBiasedLocking, "why call this otherwise?");

  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
  movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
  cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
  jcc(Assembler::equal, done);
}
5396
// Normalize a C-style boolean in x to exactly 0 or 1.
void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  andl(x, 0xFF);
  setb(Assembler::notZero, x);
}
5405
// Wouldn't need if AddressLiteral version had new name
// Plain forwarder to the label-relative call in Assembler.
void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
  Assembler::call(L, rtype);
}
5410
// Indirect call through a register; forwards to Assembler.
void MacroAssembler::call(Register entry) {
  Assembler::call(entry);
}
5414
// Call a literal address: direct call if reachable from the code cache,
// otherwise materialize the target in rscratch1 and call indirectly.
void MacroAssembler::call(AddressLiteral entry) {
  if (reachable(entry)) {
    Assembler::call_literal(entry.target(), entry.rspec());
  } else {
    lea(rscratch1, entry);
    Assembler::call(rscratch1);
  }
}
5423
5424 // Implementation of call_VM versions
5425
// call_VM with no Java arguments. The call(C)/jmp(E) dance pushes a
// return address on the stack which call_VM_helper later uses as
// last_Java_pc (retrieved via last_Java_sp[-1]).
void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
  ret(0);

  bind(E);
}
5439
// call_VM with one Java argument; see the no-arg overload for the
// call(C)/jmp(E) last_Java_pc trick.
void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
  ret(0);

  bind(E);
}
5455
// call_VM with two Java arguments. Arguments are passed last-first so an
// earlier arg cannot be clobbered by moving a later one into its
// c_rarg register (asserted below on 64-bit).
void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));

  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
  ret(0);

  bind(E);
}
5476
// call_VM with three Java arguments; arguments passed last-first to avoid
// smashing earlier args (asserted on 64-bit).
void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
  pass_arg3(this, arg_3);

  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
  ret(0);

  bind(E);
}
5502
// call_VM with an explicit last_java_sp and pre-passed arguments.
// On 64-bit the thread register is always r15_thread; on 32-bit
// call_VM_base discovers the thread itself (noreg).
void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
  call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}
5511
// One-argument variant of call_VM with explicit last_java_sp.
void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}
5520
// Two-argument variant of call_VM with explicit last_java_sp; args are
// passed last-first so arg_1 is not smashed by arg_2's register move.
void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}
5533
// Three-argument variant of call_VM with explicit last_java_sp; args are
// passed last-first to avoid smashing earlier args (asserted on 64-bit).
void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
  pass_arg3(this, arg_3);
  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}
5549
// Common bottleneck for all call_VM flavors: resolves the thread and
// last_Java_sp registers, records the last Java frame, performs the leaf
// call, restores the thread, optionally forwards pending exceptions, and
// fetches the oop result from the thread if requested.
void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address entry_point,
                                  int number_of_arguments,
                                  bool check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
#ifdef _LP64
    java_thread = r15_thread;
#else
    java_thread = rdi;
    get_thread(java_thread);
#endif // LP64
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }
  // debugging support
  assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
  LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
  assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  NOT_LP64(push(java_thread); number_of_arguments++);
  LP64_ONLY(mov(c_rarg0, r15_thread));

  // set last Java frame before call
  assert(last_java_sp != rbp, "can't use ebp/rbp");

  // Only interpreter should have to set fp
  set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);

  // restore the thread (cannot use the pushed argument since arguments
  // may be overwritten by C code generated by an optimizing compiler);
  // however can use the register value directly if it is callee saved.
  if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
    // rdi & rsi (also r15) are callee saved -> nothing to do
#ifdef ASSERT
    // Verify the callee-saved thread register still holds the thread.
    guarantee(java_thread != rax, "change this code");
    push(rax);
    { Label L;
      get_thread(rax);
      cmpptr(java_thread, rax);
      jcc(Assembler::equal, L);
      stop("MacroAssembler::call_VM_base: rdi not callee saved?");
      bind(L);
    }
    pop(rax);
#endif
  } else {
    get_thread(java_thread);
  }
  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(java_thread, true, false);

#ifndef CC_INTERP
   // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);
#endif /* CC_INTERP */

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
#ifndef _LP64
    jump_cc(Assembler::notEqual,
            RuntimeAddress(StubRoutines::forward_exception_entry()));
#else
    // This used to conditionally jump to forward_exception however it is
    // possible if we relocate that the branch will not reach. So we must jump
    // around so we can always reach

    Label ok;
    jcc(Assembler::equal, ok);
    jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    bind(ok);
#endif // LP64
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
    movptr(Address(java_thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
    verify_oop(oop_result, "broken oop in call_VM_base");
  }
}
5644
// Computes last_Java_sp for call_VM_base. Called from the stub created by
// the call_VM overloads, so one return address has already been pushed.
void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {

  // Calculate the value for last_Java_sp
  // somewhat subtle. call_VM does an intermediate call
  // which places a return address on the stack just under the
  // stack pointer as the user finsihed with it. This allows
  // use to retrieve last_Java_pc from last_Java_sp[-1].
  // On 32bit we then have to push additional args on the stack to accomplish
  // the actual requested call. On 64bit call_VM only can use register args
  // so the only extra space is the return address that call_VM created.
  // This hopefully explains the calculations here.

#ifdef _LP64
  // We've pushed one address, correct last_Java_sp
  lea(rax, Address(rsp, wordSize));
#else
  lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
#endif // LP64

  call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);

}
5667
// Leaf call into the VM: no last-Java-frame bookkeeping, no exception check.
void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}
5671
// One-argument leaf call into the VM.
void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 1);
}
5676
// Two-argument leaf call; args passed last-first so arg_0 is not smashed
// by moving arg_1 into its c_rarg register (asserted on 64-bit).
void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {

  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 2);
}
5684
// Three-argument leaf call; args passed last-first to avoid smashing
// earlier args (asserted on 64-bit).
void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);
  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 3);
}
5694
// Intentionally empty here; subclasses/interpreter-specific assemblers
// may override with early-return handling.
void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
}
5697
// Intentionally empty here; subclasses/interpreter-specific assemblers
// may override with popframe handling.
void MacroAssembler::check_and_handle_popframe(Register java_thread) {
}
5700
// Compare a 32-bit value in memory (literal address) with an immediate,
// going through rscratch1 when the literal is not directly reachable.
void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) {
  if (reachable(src1)) {
    cmpl(as_Address(src1), imm);
  } else {
    lea(rscratch1, src1);
    cmpl(Address(rscratch1, 0), imm);
  }
}
5709
// Compare a register with a 32-bit value at a literal address; lval
// literals (addresses-as-values) must use cmpptr instead.
void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
  assert(!src2.is_lval(), "use cmpptr");
  if (reachable(src2)) {
    cmpl(src1, as_Address(src2));
  } else {
    lea(rscratch1, src2);
    cmpl(src1, Address(rscratch1, 0));
  }
}
5719
// 32-bit register/immediate compare; plain forwarder.
void MacroAssembler::cmp32(Register src1, int32_t imm) {
  Assembler::cmpl(src1, imm);
}
5723
// 32-bit register/memory compare; plain forwarder.
void MacroAssembler::cmp32(Register src1, Address src2) {
  Assembler::cmpl(src1, src2);
}
5727
// Compare two doubles and materialize -1/0/+1 in dst (Java fcmpl/fcmpg
// style): unordered_is_less selects whether NaN compares as less (-1)
// or greater (+1).
void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
  ucomisd(opr1, opr2);

  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}
5749
// Compare two floats and materialize -1/0/+1 in dst; same NaN handling
// as cmpsd2int but using the single-precision ucomiss.
void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
  ucomiss(opr1, opr2);

  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}
5771
5772
// Compare the byte at a literal address with an immediate, using
// rscratch1 when the literal is not directly reachable.
void MacroAssembler::cmp8(AddressLiteral src1, int imm) {
  if (reachable(src1)) {
    cmpb(as_Address(src1), imm);
  } else {
    lea(rscratch1, src1);
    cmpb(Address(rscratch1, 0), imm);
  }
}
5781
// Pointer-width compare of a register against an AddressLiteral.
// An lval literal means "compare against the literal's address itself";
// otherwise compare against the value stored at that address.
void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
#ifdef _LP64
  if (src2.is_lval()) {
    movptr(rscratch1, src2);
    Assembler::cmpq(src1, rscratch1);
  } else if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch1, src2);
    Assembler::cmpq(src1, Address(rscratch1, 0));
  }
#else
  if (src2.is_lval()) {
    cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
  } else {
    cmpl(src1, as_Address(src2));
  }
#endif // _LP64
}
5801
// Compare the pointer stored at src1 with the literal address src2
// (must be an lval, i.e. the literal's address itself is the operand).
void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
  assert(src2.is_lval(), "not a mem-mem compare");
#ifdef _LP64
  // moves src2's literal address
  movptr(rscratch1, src2);
  Assembler::cmpq(src1, rscratch1);
#else
  cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
#endif // _LP64
}
5812
// Locked (on MP) pointer-width compare-and-exchange against a literal
// address, going through rscratch1 when not directly reachable.
void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
  if (reachable(adr)) {
    if (os::is_MP())
      lock();
    cmpxchgptr(reg, as_Address(adr));
  } else {
    lea(rscratch1, adr);
    if (os::is_MP())
      lock();
    cmpxchgptr(reg, Address(rscratch1, 0));
  }
}
5825
5826 void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
5827 LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr));
5828 }
5829
// Ordered double compare against a literal address; assumes the literal
// is directly addressable (no reachability check).
void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
  comisd(dst, as_Address(src));
}
5833
// Ordered float compare against a literal address; assumes the literal
// is directly addressable (no reachability check).
void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
  comiss(dst, as_Address(src));
}
5837
5838
// Atomically increment a 32-bit counter only when 'cond' holds, by
// jumping around the increment on the negated condition.
void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
  Condition negated_cond = negate_condition(cond);
  Label L;
  jcc(negated_cond, L);
  atomic_incl(counter_addr);
  bind(L);
}
5846
int MacroAssembler::corrected_idivl(Register reg) {
  // Full implementation of Java idiv and irem; checks for
  // special case as described in JVM spec., p.243 & p.271.
  // The function returns the (pc) offset of the idivl
  // instruction - may be needed for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax,: dividend                         min_int
  //         reg: divisor   (may not be rax,/rdx)   -1
  //
  // output: rax,: quotient  (= rax, idiv reg)       min_int
  //         rdx: remainder (= rax, irem reg)       0
  assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register");
  const int min_int = 0x80000000;
  Label normal_case, special_case;

  // check for special case: min_int / -1 would trap in idivl, so it is
  // handled with the precomputed result (rax unchanged, rdx = 0).
  cmpl(rax, min_int);
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
  cmpl(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case: sign-extend rax into rdx:rax, then divide.
  bind(normal_case);
  cdql();
  int idivl_offset = offset();
  idivl(reg);

  // normal and special case exit
  bind(special_case);

  return idivl_offset;
}
5882
5883
5884
// Decrement a register by 'value', picking the cheapest encoding:
// decl for 1 (when UseIncDec), incrementl for negatives, subl otherwise.
// min_jint must be handled first since -min_jint overflows.
void MacroAssembler::decrementl(Register reg, int value) {
  if (value == min_jint) {subl(reg, value) ; return; }
  if (value <  0) { incrementl(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decl(reg) ; return; }
  /* else */      { subl(reg, value)       ; return; }
}
5892
// Memory-operand form of decrementl; same encoding selection as the
// register form, with min_jint handled first to avoid overflow.
void MacroAssembler::decrementl(Address dst, int value) {
  if (value == min_jint) {subl(dst, value) ; return; }
  if (value <  0) { incrementl(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decl(dst) ; return; }
  /* else */      { subl(dst, value)       ; return; }
}
5900
// Signed division by a power of two via arithmetic shift: negative
// inputs are first biased by (2^shift - 1) so the shift rounds toward
// zero like idiv would.
void MacroAssembler::division_with_shift (Register reg, int shift_value) {
  assert (shift_value > 0, "illegal shift value");
  Label _is_positive;
  testl (reg, reg);
  jcc (Assembler::positive, _is_positive);
  int offset = (1 << shift_value) - 1 ;

  if (offset == 1) {
    incrementl(reg);
  } else {
    addl(reg, offset);
  }

  bind (_is_positive);
  sarl(reg, shift_value);
}
5917
// !defined(COMPILER2) is because of stupid core builds
#if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
// Clear the x87 FPU register stack: one emms if MMX is available,
// otherwise ffree every stack slot individually.
void MacroAssembler::empty_FPU_stack() {
  if (VM_Version::supports_mmx()) {
    emms();
  } else {
    for (int i = 8; i-- > 0; ) ffree(i);
  }
}
#endif // !LP64 || C1 || !C2
5928
5929
// Defines obj, preserves var_size_in_bytes
// Bump-pointer allocation in eden via CAS on the heap top: retries until
// the compare-and-exchange of the new top succeeds, jumping to slow_case
// on wrap-around or when the new end exceeds the heap end.
void MacroAssembler::eden_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, var_size_in_bytes, t1);
  Register end = t1;
  Label retry;
  bind(retry);
  ExternalAddress heap_top((address) Universe::heap()->top_addr());
  movptr(obj, heap_top);
  // end = obj + size (constant size when var_size_in_bytes is noreg)
  if (var_size_in_bytes == noreg) {
    lea(end, Address(obj, con_size_in_bytes));
  } else {
    lea(end, Address(obj, var_size_in_bytes, Address::times_1));
  }
  // if end < obj then we wrapped around => object too long => slow case
  cmpptr(end, obj);
  jcc(Assembler::below, slow_case);
  cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
  jcc(Assembler::above, slow_case);
  // Compare obj with the top addr, and if still equal, store the new top addr in
  // end at the address of the top addr pointer. Sets ZF if was equal, and clears
  // it otherwise. Use lock prefix for atomicity on MPs.
  locked_cmpxchgptr(end, heap_top);
  jcc(Assembler::notEqual, retry);
}
5959
// Standard frame prologue: save caller's frame pointer and establish ours.
void MacroAssembler::enter() {
  push(rbp);
  mov(rbp, rsp);
}
5964
// Convenience form: compare ST0 with ST1 and pop both operands.
void MacroAssembler::fcmp(Register tmp) {
  fcmp(tmp, 1, true, true);
}
5968
// Compare ST0 with ST(index) and set eflags. With cmov support the
// fucomi(p) forms set eflags directly (tmp unused); otherwise the FPU
// status word is transferred through rax via fnstsw/sahf, using tmp to
// preserve rax. pop_left/pop_right control which operands are popped.
void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
  assert(!pop_right || pop_left, "usage error");
  if (VM_Version::supports_cmov()) {
    assert(tmp == noreg, "unneeded temp");
    if (pop_left) {
      fucomip(index);
    } else {
      fucomi(index);
    }
    if (pop_right) {
      fpop();
    }
  } else {
    assert(tmp != noreg, "need temp");
    if (pop_left) {
      if (pop_right) {
        fcompp();
      } else {
        fcomp(index);
      }
    } else {
      fcom(index);
    }
    // convert FPU condition into eflags condition via rax,
    save_rax(tmp);
    fwait(); fnstsw_ax();
    sahf();
    restore_rax(tmp);
  }
  // condition codes set as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
}
6004
// Convenience form: compare ST0 with ST1, pop both, result -1/0/+1 in dst.
void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) {
  fcmp2int(dst, unordered_is_less, 1, true, true);
}
6008
// x87 compare producing -1/0/+1 in dst (Java fcmpl/fcmpg semantics);
// unordered_is_less selects whether NaN yields -1 or +1. dst doubles as
// the fnstsw temp when cmov/fucomi is unavailable.
void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
  fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right);
  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}
6029
// Load a double from a literal address onto the x87 stack.
void MacroAssembler::fld_d(AddressLiteral src) {
  fld_d(as_Address(src));
}
6033
// Load a float from a literal address onto the x87 stack.
void MacroAssembler::fld_s(AddressLiteral src) {
  fld_s(as_Address(src));
}
6037
// Load an 80-bit extended-precision value from a literal address.
void MacroAssembler::fld_x(AddressLiteral src) {
  Assembler::fld_x(as_Address(src));
}
6041
// Load the x87 control word from a literal address.
void MacroAssembler::fldcw(AddressLiteral src) {
  Assembler::fldcw(as_Address(src));
}
6045
// Pop ST0 off the x87 stack: mark it free, then bump the stack pointer.
void MacroAssembler::fpop() {
  ffree();
  fincstp();
}
6050
// IEEE remainder of ST0 by ST1 via fprem, looping until the FPU reports
// the partial remainder is complete (C2 clear). tmp preserves rax across
// the fnstsw status transfers.
void MacroAssembler::fremr(Register tmp) {
  save_rax(tmp);
  { Label L;
    bind(L);
    fprem();
    fwait(); fnstsw_ax();
#ifdef _LP64
    // Test C2 (bit 10 of the status word) directly.
    testl(rax, 0x400);
    jcc(Assembler::notEqual, L);
#else
    // C2 lands in PF after sahf.
    sahf();
    jcc(Assembler::parity, L);
#endif // _LP64
  }
  restore_rax(tmp);
  // Result is in ST0.
  // Note: fxch & fpop to get rid of ST1
  // (otherwise FPU stack could overflow eventually)
  fxch(1);
  fpop();
}
6072
6073
// Increment the 32-bit value at a literal address, going through
// rscratch1 when the literal is not directly reachable.
void MacroAssembler::incrementl(AddressLiteral dst) {
  if (reachable(dst)) {
    incrementl(as_Address(dst));
  } else {
    lea(rscratch1, dst);
    incrementl(Address(rscratch1, 0));
  }
}
6082
// Increment the 32-bit value at an array address (base + index form).
void MacroAssembler::incrementl(ArrayAddress dst) {
  incrementl(as_Address(dst));
}
6086
// Increment a register by 'value' with the cheapest encoding (incl for
// 1 when UseIncDec, decrementl for negatives, addl otherwise); min_jint
// is handled first since -min_jint overflows.
void MacroAssembler::incrementl(Register reg, int value) {
  if (value == min_jint) {addl(reg, value) ; return; }
  if (value <  0) { decrementl(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incl(reg) ; return; }
  /* else */      { addl(reg, value)       ; return; }
}
6094
// Memory-operand form of incrementl; same encoding selection as the
// register form, with min_jint handled first to avoid overflow.
void MacroAssembler::incrementl(Address dst, int value) {
  if (value == min_jint) {addl(dst, value) ; return; }
  if (value <  0) { decrementl(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incl(dst) ; return; }
  /* else */      { addl(dst, value)       ; return; }
}
6102
// Unconditional jump to a literal address: direct jmp when reachable,
// otherwise indirect through rscratch1.
void MacroAssembler::jump(AddressLiteral dst) {
  if (reachable(dst)) {
    jmp_literal(dst.target(), dst.rspec());
  } else {
    lea(rscratch1, dst);
    jmp(rscratch1);
  }
}
6111
// Conditional jump to a literal address. When reachable, emits the
// short (2-byte) or long (6-byte) Jcc encoding directly; otherwise
// reverses the condition to branch around an indirect jmp through
// rscratch1.
void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
  if (reachable(dst)) {
    InstructionMark im(this);
    relocate(dst.reloc());
    const int short_size = 2;
    const int long_size = 6;
    int offs = (intptr_t)dst.target() - ((intptr_t)_code_pos);
    if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_byte(0x70 | cc);
      emit_byte((offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      emit_byte(0x0F);
      emit_byte(0x80 | cc);
      emit_long(offs - long_size);
    }
  } else {
#ifdef ASSERT
    warning("reversing conditional branch");
#endif /* ASSERT */
    Label skip;
    jccb(reverse[cc], skip);
    lea(rscratch1, dst);
    Assembler::jmp(rscratch1);
    bind(skip);
  }
}
6140
// Load the MXCSR register from a literal address, going through
// rscratch1 when the literal is not directly reachable.
void MacroAssembler::ldmxcsr(AddressLiteral src) {
  if (reachable(src)) {
    Assembler::ldmxcsr(as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::ldmxcsr(Address(rscratch1, 0));
  }
}
6149
// Load a sign-extended byte into dst; returns the code offset of the
// load (for implicit-exception bookkeeping). Pre-P6 CPUs avoid movsx
// and sign-extend with a shift pair instead.
int MacroAssembler::load_signed_byte(Register dst, Address src) {
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    off = offset();
    movsbl(dst, src); // movsxb
  } else {
    off = load_unsigned_byte(dst, src);
    shll(dst, 24);
    sarl(dst, 24);
  }
  return off;
}
6162
// word => int32 which seems bad for 64bit
// Load a sign-extended 16-bit word into dst; returns the code offset of
// the load. Pre-P6 CPUs sign-extend with a shift pair instead of movsx.
int MacroAssembler::load_signed_word(Register dst, Address src) {
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    // This is dubious to me since it seems safe to do a signed 16 => 64 bit
    // version but this is what 64bit has always done. This seems to imply
    // that users are only using 32bits worth.
    off = offset();
    movswl(dst, src); // movsxw
  } else {
    off = load_unsigned_word(dst, src);
    shll(dst, 16);
    sarl(dst, 16);
  }
  return off;
}
6179
// Load a zero-extended byte into dst; returns the code offset of the
// load instruction. The xor+movb path avoids partial-register stalls
// on pre-P6 CPUs, except when src uses dst (which the xor would clobber).
int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
  // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
  // and "3.9 Partial Register Penalties", p. 22).
  int off;
  if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) {
    off = offset();
    movzbl(dst, src); // movzxb
  } else {
    xorl(dst, dst);
    off = offset();
    movb(dst, src);
  }
  return off;
}
6194
// Load a zero-extended 16-bit word into dst; returns the code offset of
// the load instruction. Same partial-register-stall reasoning as
// load_unsigned_byte.
int MacroAssembler::load_unsigned_word(Register dst, Address src) {
  // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
  // and "3.9 Partial Register Penalties", p. 22).
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
    off = offset();
    movzwl(dst, src); // movzxw
  } else {
    xorl(dst, dst);
    off = offset();
    movw(dst, src);
  }
  return off;
}
6209
// Store a 32-bit register to a literal address, going through rscratch1
// when the literal is not directly reachable.
void MacroAssembler::mov32(AddressLiteral dst, Register src) {
  if (reachable(dst)) {
    movl(as_Address(dst), src);
  } else {
    lea(rscratch1, dst);
    movl(Address(rscratch1, 0), src);
  }
}
6218
// Load a 32-bit value from a literal address, going through rscratch1
// when the literal is not directly reachable.
void MacroAssembler::mov32(Register dst, AddressLiteral src) {
  if (reachable(src)) {
    movl(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    movl(dst, Address(rscratch1, 0));
  }
}
6227
6228 // C++ bool manipulation
6229
6230 void MacroAssembler::movbool(Register dst, Address src) {
6231 if(sizeof(bool) == 1)
6232 movb(dst, src);
6233 else if(sizeof(bool) == 2)
6234 movw(dst, src);
6235 else if(sizeof(bool) == 4)
6236 movl(dst, src);
6237 else
6238 // unsupported
6239 ShouldNotReachHere();
6240 }
6241
6242 void MacroAssembler::movbool(Address dst, bool boolconst) {
6243 if(sizeof(bool) == 1)
6244 movb(dst, (int) boolconst);
6245 else if(sizeof(bool) == 2)
6246 movw(dst, (int) boolconst);
6247 else if(sizeof(bool) == 4)
6248 movl(dst, (int) boolconst);
6249 else
6250 // unsupported
6251 ShouldNotReachHere();
6252 }
6253
6254 void MacroAssembler::movbool(Address dst, Register src) {
6255 if(sizeof(bool) == 1)
6256 movb(dst, src);
6257 else if(sizeof(bool) == 2)
6258 movw(dst, src);
6259 else if(sizeof(bool) == 4)
6260 movl(dst, src);
6261 else
6262 // unsupported
6263 ShouldNotReachHere();
6264 }
6265
// Store an immediate byte to an array address.
void MacroAssembler::movbyte(ArrayAddress dst, int src) {
  movb(as_Address(dst), src);
}
6269
// Load a double from a literal address into an XMM register. movsd also
// clears the upper half of the register (preferred when the CPU handles
// it well, per UseXmmLoadAndClearUpper); movlpd leaves it untouched.
void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    if (UseXmmLoadAndClearUpper) {
      movsd (dst, as_Address(src));
    } else {
      movlpd(dst, as_Address(src));
    }
  } else {
    lea(rscratch1, src);
    if (UseXmmLoadAndClearUpper) {
      movsd (dst, Address(rscratch1, 0));
    } else {
      movlpd(dst, Address(rscratch1, 0));
    }
  }
}
6286
// Load a float from a literal address into an XMM register, going
// through rscratch1 when the literal is not directly reachable.
void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    movss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    movss(dst, Address(rscratch1, 0));
  }
}
6295
6296 void MacroAssembler::movptr(Register dst, Register src) {
6297 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
6298 }
6299
6300 void MacroAssembler::movptr(Register dst, Address src) {
6301 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
6302 }
6303
6304 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
6305 void MacroAssembler::movptr(Register dst, intptr_t src) {
6306 LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src));
6307 }
6308
6309 void MacroAssembler::movptr(Address dst, Register src) {
6310 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
6311 }
6312
// movss from a literal address, going through rscratch1 when the
// literal is not directly reachable.
void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    movss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    movss(dst, Address(rscratch1, 0));
  }
}
6321
// Null check of reg: when the eventual access offset is too large for
// the OS to fault reliably, emit an explicit dereference of M[reg] now;
// otherwise rely on the later implicit access to fault.
void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check(offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any (non-CC) registers
    // NOTE: cmpl is plenty here to provoke a segv
    cmpptr(rax, Address(reg, 0));
    // Note: should probably use testl(rax, Address(reg, 0));
    //       may be shorter code (however, this version of
    //       testl needs to be implemented first)
  } else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}
6336
void MacroAssembler::os_breakpoint() {
  // instead of directly emitting a breakpoint, call os:breakpoint for better debugability
  // (e.g., MSVC can't call ps() otherwise)
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
}
6342
// Restore full CPU state saved by push_CPU_state: FPU state was pushed
// last, so it is popped first.
void MacroAssembler::pop_CPU_state() {
  pop_FPU_state();
  pop_IU_state();
}
6347
6348 void MacroAssembler::pop_FPU_state() {
6349 NOT_LP64(frstor(Address(rsp, 0));)
6350 LP64_ONLY(fxrstor(Address(rsp, 0));)
6351 addptr(rsp, FPUStateSizeInWords * wordSize);
6352 }
6353
// Restore integer state saved by push_IU_state: registers first, then
// drop the 64-bit alignment slot, then flags (pushed first, popped last).
void MacroAssembler::pop_IU_state() {
  popa();
  LP64_ONLY(addq(rsp, 8));
  popf();
}
6359
// Save Integer and Float state
// Warning: Stack must be 16 byte aligned (64bit)
void MacroAssembler::push_CPU_state() {
  push_IU_state();
  push_FPU_state();
}
6366
// Save FPU/SSE state into freshly reserved stack space: fnsave on
// 32-bit (with fwait to complete it), fxsave on 64-bit.
void MacroAssembler::push_FPU_state() {
  subptr(rsp, FPUStateSizeInWords * wordSize);
#ifndef _LP64
  fnsave(Address(rsp, 0));
  fwait();
#else
  fxsave(Address(rsp, 0));
#endif // LP64
}
6376
// Save the flags and all general-purpose registers (restored by pop_IU_state).
void MacroAssembler::push_IU_state() {
  // Push flags first because pusha kills them
  pushf();
  // Make sure rsp stays 16-byte aligned (pushf is 8 bytes on 64-bit)
  LP64_ONLY(subq(rsp, 8));
  pusha();
}
6384
// Undo set_last_Java_frame: zero the anchor's last_Java_sp (always) and
// optionally last_Java_fp / last_Java_pc in the JavaThread. If java_thread
// is noreg, rdi is clobbered to load the current thread.
void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rdi;
    get_thread(java_thread);
  }
  // we must set sp to zero to clear frame
  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), (int32_t)NULL_WORD);
  if (clear_fp) {
    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), (int32_t)NULL_WORD);
  }

  if (clear_pc)
    movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), (int32_t)NULL_WORD);

}
6401
6402 void MacroAssembler::restore_rax(Register tmp) {
6403 if (tmp == noreg) pop(rax);
6404 else if (tmp != rax) mov(rax, tmp);
6405 }
6406
// Round reg up to the next multiple of modulus via add/mask.
// NOTE(review): the and-with-(-modulus) trick assumes modulus is a power
// of two -- confirm callers never pass anything else.
void MacroAssembler::round_to(Register reg, int modulus) {
  addptr(reg, modulus - 1);
  andptr(reg, -modulus);
}
6411
6412 void MacroAssembler::save_rax(Register tmp) {
6413 if (tmp == noreg) push(rax);
6414 else if (tmp != rax) mov(tmp, rax);
6415 }
6416
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp) {
  // hash the thread pointer down to an int-aligned offset inside the page
  // (the mask page_size - sizeof(int) clears the low two bits)
  movl(tmp, thread);
  shrl(tmp, os::get_serialize_page_shift_count());
  andl(tmp, (os::vm_page_size() - sizeof(int)));

  Address index(noreg, tmp, Address::times_1);
  ExternalAddress page(os::get_memory_serialize_page());

  // the store into the serialize page is the observable event
  movptr(ArrayAddress(page, index), tmp);
}
6431
// Calls to C land
//
// When entering C land, the rbp, & rsp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
void MacroAssembler::set_last_Java_frame(Register java_thread,
                                         Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc) {
  // determine java_thread register; defaults to rdi (clobbered) via get_thread
  if (!java_thread->is_valid()) {
    java_thread = rdi;
    get_thread(java_thread);
  }
  // determine last_java_sp register; defaults to the current rsp
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }

  // last_java_fp is optional

  if (last_java_fp->is_valid()) {
    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
  }

  // last_java_pc is optional

  if (last_java_pc != NULL) {
    lea(Address(java_thread,
                JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
        InternalAddress(last_java_pc));

  }
  // sp is written last -- NOTE(review): presumably so the anchor only looks
  // populated once fp/pc are already in place; confirm against JavaFrameAnchor.
  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
}
6467
6468 void MacroAssembler::shlptr(Register dst, int imm8) {
6469 LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
6470 }
6471
6472 void MacroAssembler::shrptr(Register dst, int imm8) {
6473 LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8));
6474 }
6475
// Sign-extend the low byte of reg in place. movsbl is used unconditionally
// on 64-bit (the LP64_ONLY(true ||) short-circuits the test); on 32-bit it
// is used only on P6+ and only when reg has a byte-addressable encoding,
// otherwise a shift-left/arithmetic-shift-right pair is emitted.
void MacroAssembler::sign_extend_byte(Register reg) {
  if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) {
    movsbl(reg, reg); // movsxb
  } else {
    shll(reg, 24);
    sarl(reg, 24);
  }
}
6484
// Sign-extend the low 16 bits of reg in place. movswl is used
// unconditionally on 64-bit and on P6+ 32-bit CPUs; older 32-bit CPUs
// get the shift-pair fallback.
void MacroAssembler::sign_extend_short(Register reg) {
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    movswl(reg, reg); // movsxw
  } else {
    shll(reg, 16);
    sarl(reg, 16);
  }
}
6493
// Card-table store check: delegates to part_1 (shift obj down to its card
// index) and part_2 (store the dirty byte).
void MacroAssembler::store_check(Register obj) {
  // Does a store check for the oop in register obj. The content of
  // register obj is destroyed afterwards.
  store_check_part_1(obj);
  store_check_part_2(obj);
}
6500
// Store check variant taking the destination address; dst is ignored here --
// the card is dirtied based solely on obj. Destroys obj.
void MacroAssembler::store_check(Register obj, Address dst) {
  store_check(obj);
}
6504
6505
// split the store check operation so that other instructions can be scheduled inbetween
// part 1: turn the object address in obj into its card index (obj is destroyed)
void MacroAssembler::store_check_part_1(Register obj) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  shrptr(obj, CardTableModRefBS::card_shift);
}
6512
// part 2: dirty the card by storing a zero byte at byte_map_base + card
// index; obj must already hold the card index computed by part_1.
void MacroAssembler::store_check_part_2(Register obj) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

  // The calculation for byte_map_base is as follows:
  // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  // So this essentially converts an address to a displacement and
  // it will never need to be relocated. On 64bit however the value may be too
  // large for a 32bit displacement

  intptr_t disp = (intptr_t) ct->byte_map_base;
  if (is_simm32(disp)) {
    // fits in a 32-bit displacement: single movb with obj as index
    Address cardtable(noreg, obj, Address::times_1, disp);
    movb(cardtable, 0);
  } else {
    // By doing it as an ExternalAddress disp could be converted to a rip-relative
    // displacement and done in a single instruction given favorable mapping and
    // a smarter version of as_Address. Worst case it is two instructions which
    // is no worse off then loading disp into a register and doing as a simple
    // Address() as above.
    // We can't do as ExternalAddress as the only style since if disp == 0 we'll
    // assert since NULL isn't acceptable in a reloci (see 6644928). In any case
    // in some cases we'll get a single instruction version.

    ExternalAddress cardtable((address)disp);
    Address index(noreg, obj, Address::times_1);
    movb(as_Address(ArrayAddress(cardtable, index)), 0);
  }
}
6544
6545 void MacroAssembler::subptr(Register dst, int32_t imm32) {
6546 LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
6547 }
6548
6549 void MacroAssembler::subptr(Register dst, Register src) {
6550 LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
6551 }
6552
6553 void MacroAssembler::test32(Register src1, AddressLiteral src2) {
6554 // src2 must be rval
6555
6556 if (reachable(src2)) {
6557 testl(src1, as_Address(src2));
6558 } else {
6559 lea(rscratch1, src2);
6560 testl(src1, Address(rscratch1, 0));
6561 }
6562 }
6563
6564 // C++ bool manipulation
6565 void MacroAssembler::testbool(Register dst) {
6566 if(sizeof(bool) == 1)
6567 testb(dst, 0xff);
6568 else if(sizeof(bool) == 2) {
6569 // testw implementation needed for two byte bools
6570 ShouldNotReachHere();
6571 } else if(sizeof(bool) == 4)
6572 testl(dst, dst);
6573 else
6574 // unsupported
6575 ShouldNotReachHere();
6576 }
6577
6578 void MacroAssembler::testptr(Register dst, Register src) {
6579 LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
6580 }
6581
// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
// Inline-allocates con_size_in_bytes (or var_size_in_bytes when that register
// is valid) from the current thread's TLAB; on success obj points at the new
// storage and tlab_top is bumped, on overflow control goes to slow_case.
void MacroAssembler::tlab_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Register t2,
                                   Label& slow_case) {
  assert_different_registers(obj, t1, t2);
  assert_different_registers(obj, var_size_in_bytes, t1);
  Register end = t2;
  // 64-bit keeps the thread in r15; 32-bit loads it into t1
  Register thread = NOT_LP64(t1) LP64_ONLY(r15_thread);

  verify_tlab();

  NOT_LP64(get_thread(thread));

  movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
  // end = obj + size (constant or variable form)
  if (var_size_in_bytes == noreg) {
    lea(end, Address(obj, con_size_in_bytes));
  } else {
    lea(end, Address(obj, var_size_in_bytes, Address::times_1));
  }
  // take the slow path if the new end would run past tlab_end
  cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
  jcc(Assembler::above, slow_case);

  // update the tlab top pointer
  movptr(Address(thread, JavaThread::tlab_top_offset()), end);

  // recover var_size_in_bytes if necessary (callers may pass t2 for it)
  if (var_size_in_bytes == end) {
    subptr(var_size_in_bytes, obj);
  }
  verify_tlab();
}
6616
// Preserves rbx, and rdx.
// Refills the current thread's TLAB from eden. First decides whether the old
// TLAB should be kept (too much free space to waste -> allocate this object
// directly in shared eden via try_eden) or discarded (fill the remainder with
// a dummy int[] so the heap stays parsable, then grab a fresh TLAB from eden).
// Exits through one of retry / try_eden / slow_case.
void MacroAssembler::tlab_refill(Label& retry,
                                 Label& try_eden,
                                 Label& slow_case) {
  Register top = rax;
  Register t1  = rcx;
  Register t2  = rsi;
  Register thread_reg = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
  Label do_refill, discard_tlab;

  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
    // No allocation in the shared eden.
    jmp(slow_case);
  }

  NOT_LP64(get_thread(thread_reg));

  movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
  movptr(t1,  Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));

  // calculate amount of free space (in heap words)
  subptr(t1, top);
  shrptr(t1, LogHeapWordSize);

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
  jcc(Assembler::lessEqual, discard_tlab);

  // Retain
  // %%% yuck as movptr...
  movptr(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
  addptr(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2);
  if (TLABStats) {
    // increment number of slow_allocations
    addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1);
  }
  jmp(try_eden);

  bind(discard_tlab);
  if (TLABStats) {
    // increment number of refills
    addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1);
    // accumulate wastage -- t1 is amount free in tlab
    addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1);
  }

  // if tlab is currently allocated (top or end != null) then
  // fill [top, end + alignment_reserve) with array object
  testptr (top, top);
  jcc(Assembler::zero, do_refill);

  // set up the mark word
  movptr(Address(top, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
  // set the length to the remaining space
  subptr(t1, typeArrayOopDesc::header_size(T_INT));
  addptr(t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
  shlptr(t1, log2_intptr(HeapWordSize/sizeof(jint)));  // heap words -> jint elements
  movptr(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
  // set klass to intArrayKlass
  // dubious reloc why not an oop reloc?
  movptr(t1, ExternalAddress((address) Universe::intArrayKlassObj_addr()));
  // store klass last. concurrent gcs assumes klass length is valid if
  // klass field is not null.
  store_klass(top, t1);

  // refill the tlab with an eden allocation
  bind(do_refill);
  movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
  shlptr(t1, LogHeapWordSize);
  // add object_size ??
  eden_allocate(top, t1, 0, t2, slow_case);

  // Check that t1 was preserved in eden_allocate.
#ifdef ASSERT
  if (UseTLAB) {
    Label ok;
    Register tsize = rsi;
    assert_different_registers(tsize, thread_reg, t1);
    push(tsize);
    movptr(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
    shlptr(tsize, LogHeapWordSize);
    cmpptr(t1, tsize);
    jcc(Assembler::equal, ok);
    stop("assert(t1 != tlab size)");
    should_not_reach_here();

    bind(ok);
    pop(tsize);
  }
#endif
  // install the new [start, top, end) TLAB bounds, keeping the alignment
  // reserve out of the usable range
  movptr(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
  movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
  addptr(top, t1);
  subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
  movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
  verify_tlab();
  jmp(retry);
}
6717
static const double pi_4 = 0.7853981633974483;

// Compute sin ('s'), cos ('c') or tan ('t') of the value in F-TOS, leaving
// the result in F-TOS. Arguments with |x| <= pi/4 use the hardware
// instruction directly; anything larger goes through a SharedRuntime call,
// preserving all integer registers and -- when num_fpu_regs_in_use > 1 --
// the rest of the FPU register stack as well.
void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) {
  // A hand-coded argument reduction for values in fabs(pi/4, pi/2)
  // was attempted in this code; unfortunately it appears that the
  // switch to 80-bit precision and back causes this to be
  // unprofitable compared with simply performing a runtime call if
  // the argument is out of the (-pi/4, pi/4) range.

  Register tmp = noreg;
  if (!VM_Version::supports_cmov()) {
    // fcmp needs a temporary so preserve rbx,
    tmp = rbx;
    push(tmp);
  }

  Label slow_case, done;

  // x ?<= pi/4
  fld_d(ExternalAddress((address)&pi_4));
  fld_s(1);  // Stack:  X  PI/4  X
  fabs();    // Stack: |X| PI/4  X
  fcmp(tmp);
  jcc(Assembler::above, slow_case);

  // fastest case: -pi/4 <= x <= pi/4
  switch(trig) {
  case 's':
    fsin();
    break;
  case 'c':
    fcos();
    break;
  case 't':
    ftan();
    break;
  default:
    assert(false, "bad intrinsic");
    break;
  }
  jmp(done);

  // slow case: runtime call
  bind(slow_case);
  // Preserve registers across runtime call
  pusha();
  int incoming_argument_and_return_value_offset = -1;
  if (num_fpu_regs_in_use > 1) {
    // Must preserve all other FPU regs (could alternatively convert
    // SharedRuntime::dsin and dcos into assembly routines known not to trash
    // FPU state, but can not trust C compiler)
    NEEDS_CLEANUP;
    // NOTE that in this case we also push the incoming argument to
    // the stack and restore it later; we also use this stack slot to
    // hold the return value from dsin or dcos.
    for (int i = 0; i < num_fpu_regs_in_use; i++) {
      subptr(rsp, sizeof(jdouble));
      fstp_d(Address(rsp, 0));
    }
    incoming_argument_and_return_value_offset = sizeof(jdouble)*(num_fpu_regs_in_use-1);
    fld_d(Address(rsp, incoming_argument_and_return_value_offset));
  }
  subptr(rsp, sizeof(jdouble));
  fstp_d(Address(rsp, 0));
#ifdef _LP64
  // 64-bit ABI passes the double argument in xmm0
  movdbl(xmm0, Address(rsp, 0));
#endif // _LP64

  // NOTE: we must not use call_VM_leaf here because that requires a
  // complete interpreter frame in debug mode -- same bug as 4387334
  // MacroAssembler::call_VM_leaf_base is perfectly safe and will
  // do proper 64bit abi

  NEEDS_CLEANUP;
  // Need to add stack banging before this runtime call if it needs to
  // be taken; however, there is no generic stack banging routine at
  // the MacroAssembler level
  switch(trig) {
  case 's':
    {
      MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), 0);
    }
    break;
  case 'c':
    {
      MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), 0);
    }
    break;
  case 't':
    {
      MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), 0);
    }
    break;
  default:
    assert(false, "bad intrinsic");
    break;
  }
#ifdef _LP64
  // 64-bit result comes back in xmm0; move it onto the FPU stack via memory
  movsd(Address(rsp, 0), xmm0);
  fld_d(Address(rsp, 0));
#endif // _LP64
  addptr(rsp, sizeof(jdouble));
  if (num_fpu_regs_in_use > 1) {
    // Must save return value to stack and then restore entire FPU stack
    fstp_d(Address(rsp, incoming_argument_and_return_value_offset));
    for (int i = 0; i < num_fpu_regs_in_use; i++) {
      fld_d(Address(rsp, 0));
      addptr(rsp, sizeof(jdouble));
    }
  }
  popa();

  // Come here with result in F-TOS
  bind(done);

  if (tmp != noreg) {
    pop(tmp);
  }
}
6837
6838
6839 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
6840 ucomisd(dst, as_Address(src));
6841 }
6842
6843 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
6844 ucomiss(dst, as_Address(src));
6845 }
6846
6847 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
6848 if (reachable(src)) {
6849 xorpd(dst, as_Address(src));
6850 } else {
6851 lea(rscratch1, src);
6852 xorpd(dst, Address(rscratch1, 0));
6853 }
6854 }
6855
6856 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
6857 if (reachable(src)) {
6858 xorps(dst, as_Address(src));
6859 } else {
6860 lea(rscratch1, src);
6861 xorps(dst, Address(rscratch1, 0));
6862 }
6863 }
6864
// Debug-only (VerifyOops) check that reg holds a valid oop: pushes rax, the
// register value and a message string, then calls the verify_oop stub.
// NOTE(review): the message buffer b is heap-allocated and never freed --
// presumably intentional, since the generated code keeps a pointer to it.
void MacroAssembler::verify_oop(Register reg, const char* s) {
  if (!VerifyOops) return;

  // Pass register number to verify_oop_subroutine
  char* b = new char[strlen(s) + 50];
  sprintf(b, "verify_oop: %s: %s", reg->name(), s);
  push(rax); // save rax,
  push(reg); // pass register argument
  ExternalAddress buffer((address) b);
  // avoid using pushptr, as it modifies scratch registers
  // and our contract is not to modify anything
  movptr(rax, buffer.addr());
  push(rax);
  // call indirectly to solve generation ordering problem
  // (the stub address cell is filled in after this code is generated)
  movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  call(rax);
}
6882
6883
// Debug-only (VerifyOops) check that the oop stored at addr is valid.
// Like verify_oop, but the value is loaded from memory; if addr is
// rsp-relative it must be re-biased by BytesPerWord because of the
// push(rax) done first.
void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
  if (!VerifyOops) return;

  // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
  // Pass register number to verify_oop_subroutine
  char* b = new char[strlen(s) + 50];
  sprintf(b, "verify_oop_addr: %s", s);

  push(rax); // save rax,
  // addr may contain rsp so we will have to adjust it based on the push
  // we just did
  // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which
  // stores rax into addr which is backwards of what was intended.
  if (addr.uses(rsp)) {
    // the push moved rsp down one word, so compensate with +BytesPerWord
    lea(rax, addr);
    pushptr(Address(rax, BytesPerWord));
  } else {
    pushptr(addr);
  }

  ExternalAddress buffer((address) b);
  // pass msg argument
  // avoid using pushptr, as it modifies scratch registers
  // and our contract is not to modify anything
  movptr(rax, buffer.addr());
  push(rax);

  // call indirectly to solve generation ordering problem
  movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  call(rax);
  // Caller pops the arguments and restores rax, from the stack
}
6916
// Debug-only sanity check of the current thread's TLAB invariants:
// tlab_start <= tlab_top <= tlab_end. Emits nothing unless both UseTLAB
// and VerifyOops are on.
void MacroAssembler::verify_tlab() {
#ifdef ASSERT
  if (UseTLAB && VerifyOops) {
    Label next, ok;
    Register t1 = rsi;
    // 64-bit keeps the thread in r15; 32-bit borrows rbx (saved/restored)
    Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread);

    push(t1);
    NOT_LP64(push(thread_reg));
    NOT_LP64(get_thread(thread_reg));

    movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
    cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
    jcc(Assembler::aboveEqual, next);
    stop("assert(top >= start)");
    should_not_reach_here();

    bind(next);
    movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
    cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
    jcc(Assembler::aboveEqual, ok);
    stop("assert(top <= end)");
    should_not_reach_here();

    bind(ok);
    NOT_LP64(pop(thread_reg));
    pop(t1);
  }
#endif
}
6947
// Pretty-printable image of the x87 FPU control word (low 16 bits used):
// rounding control in bits 11:10, precision control in bits 9:8, the six
// exception-mask bits in bits 5..0.
class ControlWord {
 public:
  int32_t _value;

  int  rounding_control()  const { return (_value >> 10) & 3; }
  int  precision_control() const { return (_value >>  8) & 3; }
  bool precision()         const { return (_value & (1 << 5)) != 0; }
  bool underflow()         const { return (_value & (1 << 4)) != 0; }
  bool overflow()          const { return (_value & (1 << 3)) != 0; }
  bool zero_divide()       const { return (_value & (1 << 2)) != 0; }
  bool denormalized()      const { return (_value & (1 << 1)) != 0; }
  bool invalid()           const { return (_value & (1 << 0)) != 0; }

  void print() const {
    // decode the 2-bit rounding-control and precision-control fields
    static const char* const rc_name[4] = { "round near", "round down", "round up ", "chop " };
    static const char* const pc_name[4] = { "24 bits ", "reserved", "53 bits ", "64 bits " };
    const char* rc = rc_name[rounding_control()];
    const char* pc = pc_name[precision_control()];
    // exception masks: upper-case letter when the mask bit is set
    char f[9];
    f[0] = ' ';
    f[1] = ' ';
    f[2] = precision()    ? 'P' : 'p';
    f[3] = underflow()    ? 'U' : 'u';
    f[4] = overflow()     ? 'O' : 'o';
    f[5] = zero_divide()  ? 'Z' : 'z';
    f[6] = denormalized() ? 'D' : 'd';
    f[7] = invalid()      ? 'I' : 'i';
    f[8] = '\0';
    // output
    printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
  }

};
6994
// Pretty-printable image of the x87 FPU status word (low 16 bits used):
// busy bit 15, condition codes C3/C2/C1/C0 at bits 14/10/9/8, the 3-bit
// top-of-stack index at bits 13:11, exception flags in bits 7..0.
class StatusWord {
 public:
  int32_t _value;

  bool busy()         const { return (_value & (1 << 15)) != 0; }
  bool C3()           const { return (_value & (1 << 14)) != 0; }
  bool C2()           const { return (_value & (1 << 10)) != 0; }
  bool C1()           const { return (_value & (1 <<  9)) != 0; }
  bool C0()           const { return (_value & (1 <<  8)) != 0; }
  int  top()          const { return (_value >> 11) & 7; }
  bool error_status() const { return (_value & (1 <<  7)) != 0; }
  bool stack_fault()  const { return (_value & (1 <<  6)) != 0; }
  bool precision()    const { return (_value & (1 <<  5)) != 0; }
  bool underflow()    const { return (_value & (1 <<  4)) != 0; }
  bool overflow()     const { return (_value & (1 <<  3)) != 0; }
  bool zero_divide()  const { return (_value & (1 <<  2)) != 0; }
  bool denormalized() const { return (_value & (1 <<  1)) != 0; }
  bool invalid()      const { return (_value & (1 <<  0)) != 0; }

  void print() const {
    // condition-code summary: digit when set, '-' when clear
    char cc[5];
    cc[0] = C3() ? '3' : '-';
    cc[1] = C2() ? '2' : '-';
    cc[2] = C1() ? '1' : '-';
    cc[3] = C0() ? '0' : '-';
    cc[4] = '\0';
    // exception flags: upper-case letter when raised
    char flags[9];
    flags[0] = error_status() ? 'E' : '-';
    flags[1] = stack_fault()  ? 'S' : '-';
    flags[2] = precision()    ? 'P' : '-';
    flags[3] = underflow()    ? 'U' : '-';
    flags[4] = overflow()     ? 'O' : '-';
    flags[5] = zero_divide()  ? 'Z' : '-';
    flags[6] = denormalized() ? 'D' : '-';
    flags[7] = invalid()      ? 'I' : '-';
    flags[8] = '\0';
    // output
    printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, flags, cc, top());
  }

};
7038
// Pretty-printable image of the x87 FPU tag word: two tag bits per data
// register (0 = valid, 1 = zero, 2 = special, 3 = empty -- see
// FPU_State::tag_as_string).
class TagWord {
 public:
  int32_t _value;

  // tag of physical register i (stack-relative lookup is done by
  // FPU_State::tag_for_st, which biases by the top-of-stack index)
  int tag_at(int i) const { return (_value >> (i*2)) & 3; }

  void print() const {
    printf("%04x", _value & 0xFFFF);
  }

};
7050
// One 80-bit x87 data register as laid out in the saved FPU image:
// 64-bit mantissa split across _m0 (low) and _m1 (high), followed by the
// 16-bit sign+exponent word _ex.
class FPU_Register {
 public:
  int32_t _m0;
  int32_t _m1;
  int16_t _ex;

  // The x87 "indefinite" QNaN: sign/exponent all ones, mantissa 1.100...0.
  bool is_indefinite() const {
    if (_ex != -1)                  return false;
    if (_m1 != (int32_t)0xC0000000) return false;
    return _m0 == 0;
  }

  void print() const {
    char sign_char = (_ex < 0) ? '-' : '+';
    const char* kind_tag = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " ";
    printf("%c%04hx.%08x%08x %s", sign_char, _ex, _m1, _m0, kind_tag);
  }

};
7068
// Image of the complete x87 state as saved to the stack by push_FPU_state:
// control word, status word, tag word, error/data pointers, then the eight
// 10-byte data registers.
class FPU_State {
 public:
  enum {
    register_size       = 10,
    number_of_registers = 8,
    register_mask       = 7
  };

  ControlWord _control_word;
  StatusWord  _status_word;
  TagWord     _tag_word;
  int32_t     _error_offset;
  int32_t     _error_selector;
  int32_t     _data_offset;
  int32_t     _data_selector;
  int8_t      _register[register_size * number_of_registers];

  // tag of stack slot ST(i): the tag word is indexed by physical register,
  // so bias by the top-of-stack index from the status word
  int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
  // i-th register slot of the save image
  // NOTE(review): presumably the save image stores registers in stack order
  // (ST0 first), so st(i) is ST(i) rather than physical register i -- confirm
  // against the fnsave/fxsave layout.
  FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; }

  const char* tag_as_string(int tag) const {
    switch (tag) {
      case 0: return "valid";
      case 1: return "zero";
      case 2: return "special";
      case 3: return "empty";
    }
    // NOTE(review): no ';' after the macro call -- relies on
    // ShouldNotReachHere() expanding to a complete statement.
    ShouldNotReachHere()
    return NULL;
  }

  void print() const {
    // print computation registers: physical index i, stack index j
    { int t = _status_word.top();
      for (int i = 0; i < number_of_registers; i++) {
        int j = (i - t) & register_mask;
        printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
        st(j)->print();
        printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
      }
    }
    printf("\n");
    // print control registers
    printf("ctrl = "); _control_word.print(); printf("\n");
    printf("stat = "); _status_word .print(); printf("\n");
    printf("tags = "); _tag_word    .print(); printf("\n");
  }

};
7118
// Pretty-printable image of the EFLAGS register as saved by pushf:
// OF bit 11, DF bit 10, SF bit 7, ZF bit 6, AF bit 4, PF bit 2, CF bit 0.
class Flag_Register {
 public:
  int32_t _value;

  bool overflow()        const { return (_value & (1 << 11)) != 0; }
  bool direction()       const { return (_value & (1 << 10)) != 0; }
  bool sign()            const { return (_value & (1 <<  7)) != 0; }
  bool zero()            const { return (_value & (1 <<  6)) != 0; }
  bool auxiliary_carry() const { return (_value & (1 <<  4)) != 0; }
  bool parity()          const { return (_value & (1 <<  2)) != 0; }
  bool carry()           const { return (_value & (1 <<  0)) != 0; }

  void print() const {
    // one upper-case letter per set flag, '-' per clear flag
    char f[8];
    f[0] = overflow()        ? 'O' : '-';
    f[1] = direction()       ? 'D' : '-';
    f[2] = sign()            ? 'S' : '-';
    f[3] = zero()            ? 'Z' : '-';
    f[4] = auxiliary_carry() ? 'A' : '-';
    f[5] = parity()          ? 'P' : '-';
    f[6] = carry()           ? 'C' : '-';
    f[7] = '\0';
    // output
    printf("%08x flags = %s", _value, f);
  }

};
7147
// One saved integer register, printed as hex and as a signed decimal.
class IU_Register {
 public:
  int32_t _value;

  void print() const {
    printf("%08x %11d", _value, _value);
  }

};
7157
// Integer-unit state image for print_CPU_state.
// NOTE(review): push_IU_state does pushf then pusha, which leaves eflags at
// the highest stack address and rdi at the lowest -- but _eflags is declared
// first (lowest address) here. The layout looks inconsistent with the save
// order; verify before trusting print_CPU_state register output.
class IU_State {
 public:
  Flag_Register _eflags;
  IU_Register   _rdi;
  IU_Register   _rsi;
  IU_Register   _rbp;
  IU_Register   _rsp;
  IU_Register   _rbx;
  IU_Register   _rdx;
  IU_Register   _rcx;
  IU_Register   _rax;

  void print() const {
    // computation registers
    printf("rax, = "); _rax.print(); printf("\n");
    printf("rbx, = "); _rbx.print(); printf("\n");
    printf("rcx = "); _rcx.print(); printf("\n");
    printf("rdx = "); _rdx.print(); printf("\n");
    printf("rdi = "); _rdi.print(); printf("\n");
    printf("rsi = "); _rsi.print(); printf("\n");
    printf("rbp, = "); _rbp.print(); printf("\n");
    printf("rsp = "); _rsp.print(); printf("\n");
    printf("\n");
    // control registers
    printf("flgs = "); _eflags.print(); printf("\n");
  }
};
7185
7186
// Full saved CPU state in push_CPU_state layout: the FPU state is pushed
// last and therefore sits at the lower addresses, before the integer state.
class CPU_State {
 public:
  FPU_State _fpu_state;
  IU_State  _iu_state;

  void print() const {
    printf("--------------------------------------------------\n");
    _iu_state .print();
    printf("\n");
    _fpu_state.print();
    printf("--------------------------------------------------\n");
  }

};
7201
7202
// Runtime helper invoked from generated code (print_CPU_state).
static void _print_CPU_state(CPU_State* state) {
  state->print();
};
7206
7207
// Debug aid: save all registers with push_CPU_state and dump them by
// calling _print_CPU_state with a pointer to the saved area.
void MacroAssembler::print_CPU_state() {
  push_CPU_state();
  push(rsp);             // pass CPU state
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
  addptr(rsp, wordSize); // discard argument
  pop_CPU_state();
}
7215
7216
7217 static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
7218 static int counter = 0;
7219 FPU_State* fs = &state->_fpu_state;
7220 counter++;
7221 // For leaf calls, only verify that the top few elements remain empty.
7222 // We only need 1 empty at the top for C2 code.
7223 if( stack_depth < 0 ) {
7224 if( fs->tag_for_st(7) != 3 ) {
7225 printf("FPR7 not empty\n");
7226 state->print();
7227 assert(false, "error");
7228 return false;
7229 }
7230 return true; // All other stack states do not matter
7231 }
7232
7233 assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std,
7234 "bad FPU control word");
7235
7236 // compute stack depth
7237 int i = 0;
7238 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++;
7239 int d = i;
7240 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
7241 // verify findings
7242 if (i != FPU_State::number_of_registers) {
7243 // stack not contiguous
7244 printf("%s: stack not contiguous at ST%d\n", s, i);
7245 state->print();
7246 assert(false, "error");
7247 return false;
7248 }
7249 // check if computed stack depth corresponds to expected stack depth
7250 if (stack_depth < 0) {
7251 // expected stack depth is -stack_depth or less
7252 if (d > -stack_depth) {
7253 // too many elements on the stack
7254 printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
7255 state->print();
7256 assert(false, "error");
7257 return false;
7258 }
7259 } else {
7260 // expected stack depth is stack_depth
7261 if (d != stack_depth) {
7262 // wrong stack depth
7263 printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
7264 state->print();
7265 assert(false, "error");
7266 return false;
7267 }
7268 }
7269 // everything is cool
7270 return true;
7271 }
7272
7273
// Debug-only (VerifyFPU) check of the x87 register stack: saves the CPU
// state, calls _verify_FPU(stack_depth, s, state), and emits an int3
// breakpoint if it reports failure, then restores the state.
void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
  if (!VerifyFPU) return;
  push_CPU_state();
  push(rsp);          // pass CPU state
  ExternalAddress msg((address) s);
  // pass message string s
  pushptr(msg.addr());
  push(stack_depth);  // pass stack depth
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
  addptr(rsp, 3 * wordSize); // discard arguments
  // check for error (result returned in rax)
  { Label L;
    testl(rax, rax);
    jcc(Assembler::notZero, L);
    int3(); // break if error condition
    bind(L);
  }
  pop_CPU_state();
}
7293
// Load the klass pointer of the object in src into dst. With compressed
// oops (64-bit) the klass field is a narrow 32-bit value that must be
// decoded; decode_heap_oop_not_null is safe because the klass of a live
// object is never NULL.
void MacroAssembler::load_klass(Register dst, Register src) {
#ifdef _LP64
  if (UseCompressedOops) {
    movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    decode_heap_oop_not_null(dst);
  } else
#endif
    movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
}
7303
// Load the prototype header word stored in src's klass into dst.
// Compressed-oops path: load the narrow klass and index off r12 (the heap
// base) scaled by 8, folding the decode into the second load.
void MacroAssembler::load_prototype_header(Register dst, Register src) {
#ifdef _LP64
  if (UseCompressedOops) {
    movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
  } else
#endif
  {
    movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    movptr(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
  }
}
7316
// Store the klass pointer src into the object dst. With compressed oops
// src is encoded in place first -- note this clobbers src.
void MacroAssembler::store_klass(Register dst, Register src) {
#ifdef _LP64
  if (UseCompressedOops) {
    encode_heap_oop_not_null(src);
    movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
  } else
#endif
    movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
}
7326
7327 #ifdef _LP64
// 64-bit only: with compressed oops, fill the 32-bit klass gap that follows
// the narrow klass field so the whole header word is initialized.
void MacroAssembler::store_klass_gap(Register dst, Register src) {
  if (UseCompressedOops) {
    // Store to klass gap in destination
    movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
  }
}
7334
// Load an oop from the heap into dst: narrow 32-bit load plus decode with
// compressed oops, plain 64-bit load otherwise.
void MacroAssembler::load_heap_oop(Register dst, Address src) {
  if (UseCompressedOops) {
    movl(dst, src);
    decode_heap_oop(dst);
  } else {
    movq(dst, src);
  }
}
7343
// Store the oop in src to the heap address dst: encode in place then 32-bit
// store with compressed oops (clobbers src), plain 64-bit store otherwise.
void MacroAssembler::store_heap_oop(Address dst, Register src) {
  if (UseCompressedOops) {
    // src is modified by the encode, so it must not feed into dst
    assert(!dst.uses(src), "not enough registers");
    encode_heap_oop(src);
    movl(dst, src);
  } else {
    movq(dst, src);
  }
}
7353
// Algorithm must match oop.inline.hpp encode_heap_oop.
// Compress the oop in r in place: (r - heap_base) >> LogMinObjAlignment,
// with NULL mapped to 0 via the cmov of the heap base.
void MacroAssembler::encode_heap_oop(Register r) {
  assert (UseCompressedOops, "should be compressed");
#ifdef ASSERT
  // cross-check that r12 still holds the heap base used for compression
  if (CheckCompressedOops) {
    Label ok;
    push(rscratch1); // cmpptr trashes rscratch1
    cmpptr(r12_heapbase, ExternalAddress((address)Universe::heap_base_addr()));
    jcc(Assembler::equal, ok);
    stop("MacroAssembler::encode_heap_oop: heap base corrupted?");
    bind(ok);
    pop(rscratch1);
  }
#endif
  verify_oop(r, "broken oop in encode_heap_oop");
  // NULL must encode to 0: substitute the heap base so the subtract yields 0
  testq(r, r);
  cmovq(Assembler::equal, r, r12_heapbase);
  subq(r, r12_heapbase);
  shrq(r, LogMinObjAlignmentInBytes);
}
7374
// Compress the oop in 'r' in place. Faster than encode_heap_oop because the
// caller guarantees 'r' is not NULL (checked only in debug builds).
void MacroAssembler::encode_heap_oop_not_null(Register r) {
  assert (UseCompressedOops, "should be compressed");
#ifdef ASSERT
  if (CheckCompressedOops) {
    Label ok;
    testq(r, r);
    jcc(Assembler::notEqual, ok);
    stop("null oop passed to encode_heap_oop_not_null");
    bind(ok);
  }
#endif
  verify_oop(r, "broken oop in encode_heap_oop_not_null");
  // No NULL check needed: subtract the base and scale down.
  subq(r, r12_heapbase);
  shrq(r, LogMinObjAlignmentInBytes);
}
7390
// Two-register variant: compress the non-NULL oop in 'src' into 'dst',
// leaving 'src' intact when the registers differ.
void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
  assert (UseCompressedOops, "should be compressed");
#ifdef ASSERT
  if (CheckCompressedOops) {
    Label ok;
    testq(src, src);
    jcc(Assembler::notEqual, ok);
    stop("null oop passed to encode_heap_oop_not_null2");
    bind(ok);
  }
#endif
  verify_oop(src, "broken oop in encode_heap_oop_not_null2");
  if (dst != src) {
    movq(dst, src);
  }
  subq(dst, r12_heapbase);
  shrq(dst, LogMinObjAlignmentInBytes);
}
7409
// Expand the narrow oop in 'r' back to a full oop in place, preserving
// NULL (narrow 0 must decode to NULL, not to the heap base).
void MacroAssembler::decode_heap_oop(Register r) {
  assert (UseCompressedOops, "should be compressed");
#ifdef ASSERT
  if (CheckCompressedOops) {
    // Sanity check: r12 must still hold the heap base.
    Label ok;
    push(rscratch1);
    cmpptr(r12_heapbase,
           ExternalAddress((address)Universe::heap_base_addr()));
    jcc(Assembler::equal, ok);
    stop("MacroAssembler::decode_heap_oop: heap base corrupted?");
    bind(ok);
    pop(rscratch1);
  }
#endif

  Label done;
  // The jccb consumes ZF as set by shlq (a non-zero shift count updates the
  // flags from the result), so the base addition is skipped exactly when
  // the narrow oop was 0, i.e. NULL.
  shlq(r, LogMinObjAlignmentInBytes);
  jccb(Assembler::equal, done);
  addq(r, r12_heapbase);
#if 0
  // alternate decoding probably a wash.
  testq(r, r);
  jccb(Assembler::equal, done);
  leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
#endif
  bind(done);
  verify_oop(r, "broken oop in decode_heap_oop");
}
7438
// Expand the non-NULL narrow oop in 'r' in place using a single lea:
// r = heap_base + r * 8. Valid only while object alignment is 8 bytes,
// which the assert below pins down.
void MacroAssembler::decode_heap_oop_not_null(Register r) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  assert(Address::times_8 == LogMinObjAlignmentInBytes, "decode alg wrong");
  leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
}
7447
// Two-register variant: expand the non-NULL narrow oop in 'src' into 'dst'
// with a single lea (dst = heap_base + src * 8), leaving 'src' unchanged.
void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  assert(Address::times_8 == LogMinObjAlignmentInBytes, "decode alg wrong");
  leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
}
7456
// Materialize the narrow-oop form of 'obj' in 'dst' as a 32-bit immediate.
// The oop is registered with the OopRecorder and the instruction carries a
// narrow_oop relocation so the GC can patch the embedded value.
void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  mov_literal32(dst, oop_index, rspec, narrow_oop_operand);
}
7463
7464 void MacroAssembler::reinit_heapbase() {
7465 if (UseCompressedOops) {
7466 movptr(r12_heapbase, ExternalAddress((address)Universe::heap_base_addr()));
7467 }
7468 }
7469 #endif // _LP64
7470
7471 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
7472 switch (cond) {
7473 // Note some conditions are synonyms for others
7474 case Assembler::zero: return Assembler::notZero;
7475 case Assembler::notZero: return Assembler::zero;
7476 case Assembler::less: return Assembler::greaterEqual;
7477 case Assembler::lessEqual: return Assembler::greater;
7478 case Assembler::greater: return Assembler::lessEqual;
7479 case Assembler::greaterEqual: return Assembler::less;
7480 case Assembler::below: return Assembler::aboveEqual;
7481 case Assembler::belowEqual: return Assembler::above;
7482 case Assembler::above: return Assembler::belowEqual;
7483 case Assembler::aboveEqual: return Assembler::below;
7484 case Assembler::overflow: return Assembler::noOverflow;
7485 case Assembler::noOverflow: return Assembler::overflow;
7486 case Assembler::negative: return Assembler::positive;
7487 case Assembler::positive: return Assembler::negative;
7488 case Assembler::parity: return Assembler::noParity;
7489 case Assembler::noParity: return Assembler::parity;
7490 }
7491 ShouldNotReachHere(); return Assembler::overflow;
7492 }
7493
7494 SkipIfEqual::SkipIfEqual(
7495 MacroAssembler* masm, const bool* flag_addr, bool value) {
7496 _masm = masm;
7497 _masm->cmp8(ExternalAddress((address)flag_addr), value);
7498 _masm->jcc(Assembler::equal, _label);
7499 }
7500
// Bind the skip target at the end of the guarded scope, completing the
// branch emitted by the constructor.
SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}