src/cpu/x86/vm/assembler_x86_32.cpp @ 0:a61af66fc99e (jdk7-b24)

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
1 /*
2 * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
24
25 #include "incls/_precompiled.incl"
26 #include "incls/_assembler_x86_32.cpp.incl"
27
28 // Implementation of AddressLiteral
29
30 AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
31 _is_lval = false;
32 _target = target;
33 switch (rtype) {
34 case relocInfo::oop_type:
35 // Oops are a special case. Normally they would be their own section
36 // but in cases like icBuffer they are literals in the code stream that
37 // we don't have a section for. We use none so that we get a literal address
38 // which is always patchable.
39 break;
40 case relocInfo::external_word_type:
41 _rspec = external_word_Relocation::spec(target);
42 break;
43 case relocInfo::internal_word_type:
44 _rspec = internal_word_Relocation::spec(target);
45 break;
46 case relocInfo::opt_virtual_call_type:
47 _rspec = opt_virtual_call_Relocation::spec();
48 break;
49 case relocInfo::static_call_type:
50 _rspec = static_call_Relocation::spec();
51 break;
52 case relocInfo::runtime_call_type:
53 _rspec = runtime_call_Relocation::spec();
54 break;
55 case relocInfo::poll_type:
56 case relocInfo::poll_return_type:
57 _rspec = Relocation::spec_simple(rtype);
58 break;
59 case relocInfo::none:
60 break;
61 default:
62 ShouldNotReachHere();
63 break;
64 }
65 }
66
67 // Implementation of Address
68
69 Address Address::make_array(ArrayAddress adr) {
70 #ifdef _LP64
71 // Not implementable on 64bit machines
72 // Should have been handled higher up the call chain.
73 ShouldNotReachHere();
74 #else
75 AddressLiteral base = adr.base();
76 Address index = adr.index();
77 assert(index._disp == 0, "must not have disp"); // maybe it can?
78 Address array(index._base, index._index, index._scale, (intptr_t) base.target());
79 array._rspec = base._rspec;
80 return array;
81 #endif // _LP64
82 }
83
84 #ifndef _LP64
85
86 // exceedingly dangerous constructor
87 Address::Address(address loc, RelocationHolder spec) {
88 _base = noreg;
89 _index = noreg;
90 _scale = no_scale;
91 _disp = (intptr_t) loc;
92 _rspec = spec;
93 }
94 #endif // _LP64
95
96 // Convert the raw encoding form into the form expected by the constructor for
97 // Address. An index of 4 (rsp) corresponds to having no index, so convert
98 // that to noreg for the Address constructor.
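// For example, a raw tuple whose index is 4 (rsp), such as (base=rbp, index=rsp, scale=0, disp=8), denotes plain [rbp + 8].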
99 Address Address::make_raw(int base, int index, int scale, int disp) {
100 bool valid_index = index != rsp->encoding();
101 if (valid_index) {
102 Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
103 return madr;
104 } else {
105 Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
106 return madr;
107 }
108 }
109
110 // Implementation of Assembler
111
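// Unused code space is filled with hlt (0xF4) so that a stray jump into it faults instead of executing garbage.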
112 int AbstractAssembler::code_fill_byte() {
113 return (u_char)'\xF4'; // hlt
114 }
115
116 // make this go away someday
117 void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
118 if (rtype == relocInfo::none)
119 emit_long(data);
120 else emit_data(data, Relocation::spec_simple(rtype), format);
121 }
122
123
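// Emit a 32-bit data word and record its relocation against the enclosing instruction (inst_mark).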
124 void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
125 assert(imm32_operand == 0, "default format must be imm32 in this file");
126 assert(inst_mark() != NULL, "must be inside InstructionMark");
127 if (rspec.type() != relocInfo::none) {
128 #ifdef ASSERT
129 check_relocation(rspec, format);
130 #endif
131 // Do not use AbstractAssembler::relocate, which is not intended for
132 // embedded words. Instead, relocate to the enclosing instruction.
133
134 // hack. call32 is too wide for mask so use disp32
135 if (format == call32_operand)
136 code_section()->relocate(inst_mark(), rspec, disp32_operand);
137 else
138 code_section()->relocate(inst_mark(), rspec, format);
139 }
140 emit_long(data);
141 }
142
143
144 void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
145 assert(dst->has_byte_register(), "must have byte register");
146 assert(isByte(op1) && isByte(op2), "wrong opcode");
147 assert(isByte(imm8), "not a byte");
148 assert((op1 & 0x01) == 0, "should be 8bit operation");
149 emit_byte(op1);
150 emit_byte(op2 | dst->encoding());
151 emit_byte(imm8);
152 }
153
154
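// Register-immediate arithmetic: immediates that fit in 8 bits use the sign-extended imm8 form (op1 | 0x02), saving three bytes.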
155 void Assembler::emit_arith(int op1, int op2, Register dst, int imm32) {
156 assert(isByte(op1) && isByte(op2), "wrong opcode");
157 assert((op1 & 0x01) == 1, "should be 32bit operation");
158 assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
159 if (is8bit(imm32)) {
160 emit_byte(op1 | 0x02); // set sign bit
161 emit_byte(op2 | dst->encoding());
162 emit_byte(imm32 & 0xFF);
163 } else {
164 emit_byte(op1);
165 emit_byte(op2 | dst->encoding());
166 emit_long(imm32);
167 }
168 }
169
170 // immediate-to-memory forms
171 void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int imm32) {
172 assert((op1 & 0x01) == 1, "should be 32bit operation");
173 assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
174 if (is8bit(imm32)) {
175 emit_byte(op1 | 0x02); // set sign bit
176 emit_operand(rm,adr);
177 emit_byte(imm32 & 0xFF);
178 } else {
179 emit_byte(op1);
180 emit_operand(rm,adr);
181 emit_long(imm32);
182 }
183 }
184
185 void Assembler::emit_arith(int op1, int op2, Register dst, jobject obj) {
186 assert(isByte(op1) && isByte(op2), "wrong opcode");
187 assert((op1 & 0x01) == 1, "should be 32bit operation");
188 assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
189 InstructionMark im(this);
190 emit_byte(op1);
191 emit_byte(op2 | dst->encoding());
192 emit_data((int)obj, relocInfo::oop_type, 0);
193 }
194
195
196 void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
197 assert(isByte(op1) && isByte(op2), "wrong opcode");
198 emit_byte(op1);
199 emit_byte(op2 | dst->encoding() << 3 | src->encoding());
200 }
201
202
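// Emit the ModR/M byte, optional SIB byte, and displacement for a memory operand.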
203 void Assembler::emit_operand(Register reg,
204 Register base,
205 Register index,
206 Address::ScaleFactor scale,
207 int disp,
208 RelocationHolder const& rspec) {
209
210 relocInfo::relocType rtype = (relocInfo::relocType) rspec.type();
211 if (base->is_valid()) {
212 if (index->is_valid()) {
213 assert(scale != Address::no_scale, "inconsistent address");
214 // [base + index*scale + disp]
215 if (disp == 0 && rtype == relocInfo::none && base != rbp) {
216 // [base + index*scale]
217 // [00 reg 100][ss index base]
218 assert(index != rsp, "illegal addressing mode");
219 emit_byte(0x04 | reg->encoding() << 3);
220 emit_byte(scale << 6 | index->encoding() << 3 | base->encoding());
221 } else if (is8bit(disp) && rtype == relocInfo::none) {
222 // [base + index*scale + imm8]
223 // [01 reg 100][ss index base] imm8
224 assert(index != rsp, "illegal addressing mode");
225 emit_byte(0x44 | reg->encoding() << 3);
226 emit_byte(scale << 6 | index->encoding() << 3 | base->encoding());
227 emit_byte(disp & 0xFF);
228 } else {
229 // [base + index*scale + imm32]
230 // [10 reg 100][ss index base] imm32
231 assert(index != rsp, "illegal addressing mode");
232 emit_byte(0x84 | reg->encoding() << 3);
233 emit_byte(scale << 6 | index->encoding() << 3 | base->encoding());
234 emit_data(disp, rspec, disp32_operand);
235 }
236 } else if (base == rsp) {
237 // [esp + disp]
238 if (disp == 0 && rtype == relocInfo::none) {
239 // [esp]
240 // [00 reg 100][00 100 100]
241 emit_byte(0x04 | reg->encoding() << 3);
242 emit_byte(0x24);
243 } else if (is8bit(disp) && rtype == relocInfo::none) {
244 // [esp + imm8]
245 // [01 reg 100][00 100 100] imm8
246 emit_byte(0x44 | reg->encoding() << 3);
247 emit_byte(0x24);
248 emit_byte(disp & 0xFF);
249 } else {
250 // [esp + imm32]
251 // [10 reg 100][00 100 100] imm32
252 emit_byte(0x84 | reg->encoding() << 3);
253 emit_byte(0x24);
254 emit_data(disp, rspec, disp32_operand);
255 }
256 } else {
257 // [base + disp]
258 assert(base != rsp, "illegal addressing mode");
259 if (disp == 0 && rtype == relocInfo::none && base != rbp) {
260 // [base]
261 // [00 reg base]
262 assert(base != rbp, "illegal addressing mode");
263 emit_byte(0x00 | reg->encoding() << 3 | base->encoding());
264 } else if (is8bit(disp) && rtype == relocInfo::none) {
265 // [base + imm8]
266 // [01 reg base] imm8
267 emit_byte(0x40 | reg->encoding() << 3 | base->encoding());
268 emit_byte(disp & 0xFF);
269 } else {
270 // [base + imm32]
271 // [10 reg base] imm32
272 emit_byte(0x80 | reg->encoding() << 3 | base->encoding());
273 emit_data(disp, rspec, disp32_operand);
274 }
275 }
276 } else {
277 if (index->is_valid()) {
278 assert(scale != Address::no_scale, "inconsistent address");
279 // [index*scale + disp]
280 // [00 reg 100][ss index 101] imm32
281 assert(index != rsp, "illegal addressing mode");
282 emit_byte(0x04 | reg->encoding() << 3);
283 emit_byte(scale << 6 | index->encoding() << 3 | 0x05);
284 emit_data(disp, rspec, disp32_operand);
285 } else {
286 // [disp]
287 // [00 reg 101] imm32
288 emit_byte(0x05 | reg->encoding() << 3);
289 emit_data(disp, rspec, disp32_operand);
290 }
291 }
292 }
293
294 // Secret local extension to Assembler::WhichOperand:
295 #define end_pc_operand (_WhichOperand_limit)
296
297 address Assembler::locate_operand(address inst, WhichOperand which) {
298 // Decode the given instruction, and return the address of
299 // an embedded 32-bit operand word.
300
301 // If "which" is disp32_operand, selects the displacement portion
302 // of an effective address specifier.
303 // If "which" is imm32_operand, selects the trailing immediate constant.
304 // If "which" is call32_operand, selects the displacement of a call or jump.
305 // Caller is responsible for ensuring that there is such an operand,
306 // and that it is 32 bits wide.
307
308 // If "which" is end_pc_operand, find the end of the instruction.
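// Relocation code also uses this to locate an embedded operand so it can be patched.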
309
310 address ip = inst;
311
312 debug_only(bool has_imm32 = false);
313 int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn
314
315 again_after_prefix:
316 switch (0xFF & *ip++) {
317
318 // These convenience macros generate groups of "case" labels for the switch.
319 #define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
320 #define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
321 case (x)+4: case (x)+5: case (x)+6: case (x)+7
322 #define REP16(x) REP8((x)+0): \
323 case REP8((x)+8)
324
325 case CS_segment:
326 case SS_segment:
327 case DS_segment:
328 case ES_segment:
329 case FS_segment:
330 case GS_segment:
331 assert(ip == inst+1, "only one prefix allowed");
332 goto again_after_prefix;
333
334 case 0xFF: // pushl a; decl a; incl a; call a; jmp a
335 case 0x88: // movb a, r
336 case 0x89: // movl a, r
337 case 0x8A: // movb r, a
338 case 0x8B: // movl r, a
339 case 0x8F: // popl a
340 break;
341
342 case 0x68: // pushl #32(oop?)
343 if (which == end_pc_operand) return ip + 4;
344 assert(which == imm32_operand, "pushl has no disp32");
345 return ip; // not produced by emit_operand
346
347 case 0x66: // movw ... (size prefix)
348 switch (0xFF & *ip++) {
349 case 0x8B: // movw r, a
350 case 0x89: // movw a, r
351 break;
352 case 0xC7: // movw a, #16
353 tail_size = 2; // the imm16
354 break;
355 case 0x0F: // several SSE/SSE2 variants
356 ip--; // reparse the 0x0F
357 goto again_after_prefix;
358 default:
359 ShouldNotReachHere();
360 }
361 break;
362
363 case REP8(0xB8): // movl r, #32(oop?)
364 if (which == end_pc_operand) return ip + 4;
365 assert(which == imm32_operand || which == disp32_operand, "");
366 return ip;
367
368 case 0x69: // imul r, a, #32
369 case 0xC7: // movl a, #32(oop?)
370 tail_size = 4;
371 debug_only(has_imm32 = true); // has both kinds of operands!
372 break;
373
374 case 0x0F: // movx..., etc.
375 switch (0xFF & *ip++) {
376 case 0x12: // movlps
377 case 0x28: // movaps
378 case 0x2E: // ucomiss
379 case 0x2F: // comiss
380 case 0x54: // andps
381 case 0x55: // andnps
382 case 0x56: // orps
383 case 0x57: // xorps
384 case 0x6E: // movd
385 case 0x7E: // movd
386 case 0xAE: // ldmxcsr a
387 // the amd side says these have both kinds of operands, but that doesn't
388 // appear to be true.
389 // debug_only(has_imm32 = true); // has both kinds of operands!
390 break;
391
392 case 0xAD: // shrd r, a, %cl
393 case 0xAF: // imul r, a
394 case 0xBE: // movsxb r, a
395 case 0xBF: // movsxw r, a
396 case 0xB6: // movzxb r, a
397 case 0xB7: // movzxw r, a
398 case REP16(0x40): // cmovl cc, r, a
399 case 0xB0: // cmpxchgb
400 case 0xB1: // cmpxchg
401 case 0xC1: // xaddl
402 case 0xC7: // cmpxchg8
403 case REP16(0x90): // setcc a
404 // fall out of the switch to decode the address
405 break;
406 case 0xAC: // shrd r, a, #8
407 tail_size = 1; // the imm8
408 break;
409 case REP16(0x80): // jcc rdisp32
410 if (which == end_pc_operand) return ip + 4;
411 assert(which == call32_operand, "jcc has no disp32 or imm32");
412 return ip;
413 default:
414 ShouldNotReachHere();
415 }
416 break;
417
418 case 0x81: // addl a, #32; addl r, #32
419 // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
420 // in the case of cmpl, the imm32 might be an oop
421 tail_size = 4;
422 debug_only(has_imm32 = true); // has both kinds of operands!
423 break;
424
425 case 0x85: // test r/m, r
426 break;
427
428 case 0x83: // addl a, #8; addl r, #8
429 // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
430 tail_size = 1;
431 break;
432
433 case 0x9B:
434 switch (0xFF & *ip++) {
435 case 0xD9: // fnstcw a
436 break;
437 default:
438 ShouldNotReachHere();
439 }
440 break;
441
442 case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
443 case REP4(0x10): // adc...
444 case REP4(0x20): // and...
445 case REP4(0x30): // xor...
446 case REP4(0x08): // or...
447 case REP4(0x18): // sbb...
448 case REP4(0x28): // sub...
449 case REP4(0x38): // cmp...
450 case 0xF7: // mull a
451 case 0x8D: // leal r, a
452 case 0x87: // xchg r, a
453 break;
454
455 case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
456 case 0xC6: // movb a, #8
457 case 0x80: // cmpb a, #8
458 case 0x6B: // imul r, a, #8
459 tail_size = 1; // the imm8
460 break;
461
462 case 0xE8: // call rdisp32
463 case 0xE9: // jmp rdisp32
464 if (which == end_pc_operand) return ip + 4;
465 assert(which == call32_operand, "call has no disp32 or imm32");
466 return ip;
467
468 case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
469 case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
470 case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
471 case 0xDD: // fld_d a; fst_d a; fstp_d a
472 case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
473 case 0xDF: // fild_d a; fistp_d a
474 case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
475 case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
476 case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
477 break;
478
479 case 0xF3: // For SSE
480 case 0xF2: // For SSE2
481 ip++; ip++;
482 break;
483
484 default:
485 ShouldNotReachHere();
486
487 #undef REP8
488 #undef REP16
489 }
490
491 assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
492 assert(which != imm32_operand || has_imm32, "instruction has no imm32 field");
493
494 // parse the output of emit_operand
495 int op2 = 0xFF & *ip++;
496 int base = op2 & 0x07;
497 int op3 = -1;
498 const int b100 = 4;
499 const int b101 = 5;
500 if (base == b100 && (op2 >> 6) != 3) {
501 op3 = 0xFF & *ip++;
502 base = op3 & 0x07; // refetch the base
503 }
504 // now ip points at the disp (if any)
505
506 switch (op2 >> 6) {
507 case 0:
508 // [00 reg 100][ss index base]
509 // [00 reg 100][00 100 rsp]
510 // [00 reg base]
511 // [00 reg 100][ss index 101][disp32]
512 // [00 reg 101] [disp32]
513
514 if (base == b101) {
515 if (which == disp32_operand)
516 return ip; // caller wants the disp32
517 ip += 4; // skip the disp32
518 }
519 break;
520
521 case 1:
522 // [01 reg 100][ss index base][disp8]
523 // [01 reg 100][00 100 rsp][disp8]
524 // [01 reg base] [disp8]
525 ip += 1; // skip the disp8
526 break;
527
528 case 2:
529 // [10 reg 100][ss index base][disp32]
530 // [10 reg 100][00 100 rsp][disp32]
531 // [10 reg base] [disp32]
532 if (which == disp32_operand)
533 return ip; // caller wants the disp32
534 ip += 4; // skip the disp32
535 break;
536
537 case 3:
538 // [11 reg base] (not a memory addressing mode)
539 break;
540 }
541
542 if (which == end_pc_operand) {
543 return ip + tail_size;
544 }
545
546 assert(which == imm32_operand, "instruction has only an imm32 field");
547 return ip;
548 }
549
550 address Assembler::locate_next_instruction(address inst) {
551 // Secretly share code with locate_operand:
552 return locate_operand(inst, end_pc_operand);
553 }
554
555
556 #ifdef ASSERT
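// Debug-only sanity check: the operand selected by the relocation must be exactly where the data is about to be emitted (the current pc).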
557 void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
558 address inst = inst_mark();
559 assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
560 address opnd;
561
562 Relocation* r = rspec.reloc();
563 if (r->type() == relocInfo::none) {
564 return;
565 } else if (r->is_call() || format == call32_operand) {
566 // assert(format == imm32_operand, "cannot specify a nonzero format");
567 opnd = locate_operand(inst, call32_operand);
568 } else if (r->is_data()) {
569 assert(format == imm32_operand || format == disp32_operand, "format ok");
570 opnd = locate_operand(inst, (WhichOperand)format);
571 } else {
572 assert(format == imm32_operand, "cannot specify a format");
573 return;
574 }
575 assert(opnd == pc(), "must put operand where relocs can find it");
576 }
577 #endif
578
579
580
581 void Assembler::emit_operand(Register reg, Address adr) {
582 emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
583 }
584
585
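// Emit a two-byte x87 instruction whose second byte encodes the stack register ST(i).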
586 void Assembler::emit_farith(int b1, int b2, int i) {
587 assert(isByte(b1) && isByte(b2), "wrong opcode");
588 assert(0 <= i && i < 8, "illegal stack offset");
589 emit_byte(b1);
590 emit_byte(b2 + i);
591 }
592
593
594 void Assembler::pushad() {
595 emit_byte(0x60);
596 }
597
598 void Assembler::popad() {
599 emit_byte(0x61);
600 }
601
602 void Assembler::pushfd() {
603 emit_byte(0x9C);
604 }
605
606 void Assembler::popfd() {
607 emit_byte(0x9D);
608 }
609
610 void Assembler::pushl(int imm32) {
611 emit_byte(0x68);
612 emit_long(imm32);
613 }
614
615 #ifndef _LP64
616 void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
617 InstructionMark im(this);
618 emit_byte(0x68);
619 emit_data(imm32, rspec, 0);
620 }
621 #endif // _LP64
622
623 void Assembler::pushl(Register src) {
624 emit_byte(0x50 | src->encoding());
625 }
626
627
628 void Assembler::pushl(Address src) {
629 InstructionMark im(this);
630 emit_byte(0xFF);
631 emit_operand(rsi, src);
632 }
633
634 void Assembler::popl(Register dst) {
635 emit_byte(0x58 | dst->encoding());
636 }
637
638
639 void Assembler::popl(Address dst) {
640 InstructionMark im(this);
641 emit_byte(0x8F);
642 emit_operand(rax, dst);
643 }
644
645
646 void Assembler::prefix(Prefix p) {
647 a_byte(p);
648 }
649
650
651 void Assembler::movb(Register dst, Address src) {
652 assert(dst->has_byte_register(), "must have byte register");
653 InstructionMark im(this);
654 emit_byte(0x8A);
655 emit_operand(dst, src);
656 }
657
658
659 void Assembler::movb(Address dst, int imm8) {
660 InstructionMark im(this);
661 emit_byte(0xC6);
662 emit_operand(rax, dst);
663 emit_byte(imm8);
664 }
665
666
667 void Assembler::movb(Address dst, Register src) {
668 assert(src->has_byte_register(), "must have byte register");
669 InstructionMark im(this);
670 emit_byte(0x88);
671 emit_operand(src, dst);
672 }
673
674
675 void Assembler::movw(Address dst, int imm16) {
676 InstructionMark im(this);
677
678 emit_byte(0x66); // switch to 16-bit mode
679 emit_byte(0xC7);
680 emit_operand(rax, dst);
681 emit_word(imm16);
682 }
683
684
685 void Assembler::movw(Register dst, Address src) {
686 InstructionMark im(this);
687 emit_byte(0x66);
688 emit_byte(0x8B);
689 emit_operand(dst, src);
690 }
691
692
693 void Assembler::movw(Address dst, Register src) {
694 InstructionMark im(this);
695 emit_byte(0x66);
696 emit_byte(0x89);
697 emit_operand(src, dst);
698 }
699
700
701 void Assembler::movl(Register dst, int imm32) {
702 emit_byte(0xB8 | dst->encoding());
703 emit_long(imm32);
704 }
705
706 #ifndef _LP64
707 void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
708
709 InstructionMark im(this);
710 emit_byte(0xB8 | dst->encoding());
711 emit_data((int)imm32, rspec, 0);
712 }
713 #endif // _LP64
714
715 void Assembler::movl(Register dst, Register src) {
716 emit_byte(0x8B);
717 emit_byte(0xC0 | (dst->encoding() << 3) | src->encoding());
718 }
719
720
721 void Assembler::movl(Register dst, Address src) {
722 InstructionMark im(this);
723 emit_byte(0x8B);
724 emit_operand(dst, src);
725 }
726
727
728 void Assembler::movl(Address dst, int imm32) {
729 InstructionMark im(this);
730 emit_byte(0xC7);
731 emit_operand(rax, dst);
732 emit_long(imm32);
733 }
734
735 #ifndef _LP64
736 void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
737 InstructionMark im(this);
738 emit_byte(0xC7);
739 emit_operand(rax, dst);
740 emit_data((int)imm32, rspec, 0);
741 }
742 #endif // _LP64
743
744 void Assembler::movl(Address dst, Register src) {
745 InstructionMark im(this);
746 emit_byte(0x89);
747 emit_operand(src, dst);
748 }
749
750 void Assembler::movsxb(Register dst, Address src) {
751 InstructionMark im(this);
752 emit_byte(0x0F);
753 emit_byte(0xBE);
754 emit_operand(dst, src);
755 }
756
757 void Assembler::movsxb(Register dst, Register src) {
758 assert(src->has_byte_register(), "must have byte register");
759 emit_byte(0x0F);
760 emit_byte(0xBE);
761 emit_byte(0xC0 | (dst->encoding() << 3) | src->encoding());
762 }
763
764
765 void Assembler::movsxw(Register dst, Address src) {
766 InstructionMark im(this);
767 emit_byte(0x0F);
768 emit_byte(0xBF);
769 emit_operand(dst, src);
770 }
771
772
773 void Assembler::movsxw(Register dst, Register src) {
774 emit_byte(0x0F);
775 emit_byte(0xBF);
776 emit_byte(0xC0 | (dst->encoding() << 3) | src->encoding());
777 }
778
779
780 void Assembler::movzxb(Register dst, Address src) {
781 InstructionMark im(this);
782 emit_byte(0x0F);
783 emit_byte(0xB6);
784 emit_operand(dst, src);
785 }
786
787
788 void Assembler::movzxb(Register dst, Register src) {
789 assert(src->has_byte_register(), "must have byte register");
790 emit_byte(0x0F);
791 emit_byte(0xB6);
792 emit_byte(0xC0 | (dst->encoding() << 3) | src->encoding());
793 }
794
795
796 void Assembler::movzxw(Register dst, Address src) {
797 InstructionMark im(this);
798 emit_byte(0x0F);
799 emit_byte(0xB7);
800 emit_operand(dst, src);
801 }
802
803
804 void Assembler::movzxw(Register dst, Register src) {
805 emit_byte(0x0F);
806 emit_byte(0xB7);
807 emit_byte(0xC0 | (dst->encoding() << 3) | src->encoding());
808 }
809
810
811 void Assembler::cmovl(Condition cc, Register dst, Register src) {
812 guarantee(VM_Version::supports_cmov(), "illegal instruction");
813 emit_byte(0x0F);
814 emit_byte(0x40 | cc);
815 emit_byte(0xC0 | (dst->encoding() << 3) | src->encoding());
816 }
817
818
819 void Assembler::cmovl(Condition cc, Register dst, Address src) {
820 guarantee(VM_Version::supports_cmov(), "illegal instruction");
821 // The code below seems to be wrong; however, the manual is inconclusive.
822 // Do not use for now (remember to enable all callers when fixing this).
823 Unimplemented();
824 // wrong bytes?
825 InstructionMark im(this);
826 emit_byte(0x0F);
827 emit_byte(0x40 | cc);
828 emit_operand(dst, src);
829 }
830
831
832 void Assembler::prefetcht0(Address src) {
833 assert(VM_Version::supports_sse(), "must support");
834 InstructionMark im(this);
835 emit_byte(0x0F);
836 emit_byte(0x18);
837 emit_operand(rcx, src); // 1, src
838 }
839
840
841 void Assembler::prefetcht1(Address src) {
842 assert(VM_Version::supports_sse(), "must support");
843 InstructionMark im(this);
844 emit_byte(0x0F);
845 emit_byte(0x18);
846 emit_operand(rdx, src); // 2, src
847 }
848
849
850 void Assembler::prefetcht2(Address src) {
851 assert(VM_Version::supports_sse(), "must support");
852 InstructionMark im(this);
853 emit_byte(0x0F);
854 emit_byte(0x18);
855 emit_operand(rbx, src); // 3, src
856 }
857
858
859 void Assembler::prefetchnta(Address src) {
860 assert(VM_Version::supports_sse2(), "must support");
861 InstructionMark im(this);
862 emit_byte(0x0F);
863 emit_byte(0x18);
864 emit_operand(rax, src); // 0, src
865 }
866
867
868 void Assembler::prefetchw(Address src) {
869 assert(VM_Version::supports_3dnow(), "must support");
870 InstructionMark im(this);
871 emit_byte(0x0F);
872 emit_byte(0x0D);
873 emit_operand(rcx, src); // 1, src
874 }
875
876
877 void Assembler::prefetchr(Address src) {
878 assert(VM_Version::supports_3dnow(), "must support");
879 InstructionMark im(this);
880 emit_byte(0x0F);
881 emit_byte(0x0D);
882 emit_operand(rax, src); // 0, src
883 }
884
885
886 void Assembler::adcl(Register dst, int imm32) {
887 emit_arith(0x81, 0xD0, dst, imm32);
888 }
889
890
891 void Assembler::adcl(Register dst, Address src) {
892 InstructionMark im(this);
893 emit_byte(0x13);
894 emit_operand(dst, src);
895 }
896
897
898 void Assembler::adcl(Register dst, Register src) {
899 emit_arith(0x13, 0xC0, dst, src);
900 }
901
902
903 void Assembler::addl(Address dst, int imm32) {
904 InstructionMark im(this);
905 emit_arith_operand(0x81,rax,dst,imm32);
906 }
907
908
909 void Assembler::addl(Address dst, Register src) {
910 InstructionMark im(this);
911 emit_byte(0x01);
912 emit_operand(src, dst);
913 }
914
915
916 void Assembler::addl(Register dst, int imm32) {
917 emit_arith(0x81, 0xC0, dst, imm32);
918 }
919
920
921 void Assembler::addl(Register dst, Address src) {
922 InstructionMark im(this);
923 emit_byte(0x03);
924 emit_operand(dst, src);
925 }
926
927
928 void Assembler::addl(Register dst, Register src) {
929 emit_arith(0x03, 0xC0, dst, src);
930 }
931
932
933 void Assembler::andl(Register dst, int imm32) {
934 emit_arith(0x81, 0xE0, dst, imm32);
935 }
936
937
938 void Assembler::andl(Register dst, Address src) {
939 InstructionMark im(this);
940 emit_byte(0x23);
941 emit_operand(dst, src);
942 }
943
944
945 void Assembler::andl(Register dst, Register src) {
946 emit_arith(0x23, 0xC0, dst, src);
947 }
948
949
950 void Assembler::cmpb(Address dst, int imm8) {
951 InstructionMark im(this);
952 emit_byte(0x80);
953 emit_operand(rdi, dst);
954 emit_byte(imm8);
955 }
956
957 void Assembler::cmpw(Address dst, int imm16) {
958 InstructionMark im(this);
959 emit_byte(0x66);
960 emit_byte(0x81);
961 emit_operand(rdi, dst);
962 emit_word(imm16);
963 }
964
965 void Assembler::cmpl(Address dst, int imm32) {
966 InstructionMark im(this);
967 emit_byte(0x81);
968 emit_operand(rdi, dst);
969 emit_long(imm32);
970 }
971
972 #ifndef _LP64
973 void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
974 InstructionMark im(this);
975 emit_byte(0x81);
976 emit_byte(0xF8 | src1->encoding());
977 emit_data(imm32, rspec, 0);
978 }
979
980 void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
981 InstructionMark im(this);
982 emit_byte(0x81);
983 emit_operand(rdi, src1);
984 emit_data(imm32, rspec, 0);
985 }
986 #endif // _LP64
987
988
989 void Assembler::cmpl(Register dst, int imm32) {
990 emit_arith(0x81, 0xF8, dst, imm32);
991 }
992
993
994 void Assembler::cmpl(Register dst, Register src) {
995 emit_arith(0x3B, 0xC0, dst, src);
996 }
997
998
999 void Assembler::cmpl(Register dst, Address src) {
1000 InstructionMark im(this);
1001 emit_byte(0x3B);
1002 emit_operand(dst, src);
1003 }
1004
1005
1006 void Assembler::decl(Register dst) {
1007 // Don't use it directly. Use MacroAssembler::decrement() instead.
1008 emit_byte(0x48 | dst->encoding());
1009 }
1010
1011
1012 void Assembler::decl(Address dst) {
1013 // Don't use it directly. Use MacroAssembler::decrement() instead.
1014 InstructionMark im(this);
1015 emit_byte(0xFF);
1016 emit_operand(rcx, dst);
1017 }
1018
1019
1020 void Assembler::idivl(Register src) {
1021 emit_byte(0xF7);
1022 emit_byte(0xF8 | src->encoding());
1023 }
1024
1025
1026 void Assembler::cdql() {
1027 emit_byte(0x99);
1028 }
1029
1030
1031 void Assembler::imull(Register dst, Register src) {
1032 emit_byte(0x0F);
1033 emit_byte(0xAF);
1034 emit_byte(0xC0 | dst->encoding() << 3 | src->encoding());
1035 }
1036
1037
1038 void Assembler::imull(Register dst, Register src, int value) {
1039 if (is8bit(value)) {
1040 emit_byte(0x6B);
1041 emit_byte(0xC0 | dst->encoding() << 3 | src->encoding());
1042 emit_byte(value);
1043 } else {
1044 emit_byte(0x69);
1045 emit_byte(0xC0 | dst->encoding() << 3 | src->encoding());
1046 emit_long(value);
1047 }
1048 }
1049
1050
1051 void Assembler::incl(Register dst) {
1052 // Don't use it directly. Use MacroAssembler::increment() instead.
1053 emit_byte(0x40 | dst->encoding());
1054 }
1055
1056
1057 void Assembler::incl(Address dst) {
1058 // Don't use it directly. Use MacroAssembler::increment() instead.
1059 InstructionMark im(this);
1060 emit_byte(0xFF);
1061 emit_operand(rax, dst);
1062 }
1063
1064
1065 void Assembler::leal(Register dst, Address src) {
1066 InstructionMark im(this);
1067 emit_byte(0x8D);
1068 emit_operand(dst, src);
1069 }
1070
1071 void Assembler::mull(Address src) {
1072 InstructionMark im(this);
1073 emit_byte(0xF7);
1074 emit_operand(rsp, src);
1075 }
1076
1077
1078 void Assembler::mull(Register src) {
1079 emit_byte(0xF7);
1080 emit_byte(0xE0 | src->encoding());
1081 }
1082
1083
1084 void Assembler::negl(Register dst) {
1085 emit_byte(0xF7);
1086 emit_byte(0xD8 | dst->encoding());
1087 }
1088
1089
1090 void Assembler::notl(Register dst) {
1091 emit_byte(0xF7);
1092 emit_byte(0xD0 | dst->encoding());
1093 }
1094
1095
1096 void Assembler::orl(Address dst, int imm32) {
1097 InstructionMark im(this);
1098 emit_byte(0x81);
1099 emit_operand(rcx, dst);
1100 emit_long(imm32);
1101 }
1102
1103 void Assembler::orl(Register dst, int imm32) {
1104 emit_arith(0x81, 0xC8, dst, imm32);
1105 }
1106
1107
1108 void Assembler::orl(Register dst, Address src) {
1109 InstructionMark im(this);
1110 emit_byte(0x0B);
1111 emit_operand(dst, src);
1112 }
1113
1114
1115 void Assembler::orl(Register dst, Register src) {
1116 emit_arith(0x0B, 0xC0, dst, src);
1117 }
1118
1119
1120 void Assembler::rcll(Register dst, int imm8) {
1121 assert(isShiftCount(imm8), "illegal shift count");
1122 if (imm8 == 1) {
1123 emit_byte(0xD1);
1124 emit_byte(0xD0 | dst->encoding());
1125 } else {
1126 emit_byte(0xC1);
1127 emit_byte(0xD0 | dst->encoding());
1128 emit_byte(imm8);
1129 }
1130 }
1131
1132
1133 void Assembler::sarl(Register dst, int imm8) {
1134 assert(isShiftCount(imm8), "illegal shift count");
1135 if (imm8 == 1) {
1136 emit_byte(0xD1);
1137 emit_byte(0xF8 | dst->encoding());
1138 } else {
1139 emit_byte(0xC1);
1140 emit_byte(0xF8 | dst->encoding());
1141 emit_byte(imm8);
1142 }
1143 }
1144
1145
1146 void Assembler::sarl(Register dst) {
1147 emit_byte(0xD3);
1148 emit_byte(0xF8 | dst->encoding());
1149 }
1150
1151
1152 void Assembler::sbbl(Address dst, int imm32) {
1153 InstructionMark im(this);
1154 emit_arith_operand(0x81,rbx,dst,imm32);
1155 }
1156
1157
1158 void Assembler::sbbl(Register dst, int imm32) {
1159 emit_arith(0x81, 0xD8, dst, imm32);
1160 }
1161
1162
1163 void Assembler::sbbl(Register dst, Address src) {
1164 InstructionMark im(this);
1165 emit_byte(0x1B);
1166 emit_operand(dst, src);
1167 }
1168
1169
1170 void Assembler::sbbl(Register dst, Register src) {
1171 emit_arith(0x1B, 0xC0, dst, src);
1172 }
1173
1174
1175 void Assembler::shldl(Register dst, Register src) {
1176 emit_byte(0x0F);
1177 emit_byte(0xA5);
1178 emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
1179 }
1180
1181
1182 void Assembler::shll(Register dst, int imm8) {
1183 assert(isShiftCount(imm8), "illegal shift count");
1184 if (imm8 == 1 ) {
1185 emit_byte(0xD1);
1186 emit_byte(0xE0 | dst->encoding());
1187 } else {
1188 emit_byte(0xC1);
1189 emit_byte(0xE0 | dst->encoding());
1190 emit_byte(imm8);
1191 }
1192 }
1193
1194
1195 void Assembler::shll(Register dst) {
1196 emit_byte(0xD3);
1197 emit_byte(0xE0 | dst->encoding());
1198 }
1199
1200
1201 void Assembler::shrdl(Register dst, Register src) {
1202 emit_byte(0x0F);
1203 emit_byte(0xAD);
1204 emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
1205 }
1206
1207
1208 void Assembler::shrl(Register dst, int imm8) {
1209 assert(isShiftCount(imm8), "illegal shift count");
1210 emit_byte(0xC1);
1211 emit_byte(0xE8 | dst->encoding());
1212 emit_byte(imm8);
1213 }
1214
1215
1216 void Assembler::shrl(Register dst) {
1217 emit_byte(0xD3);
1218 emit_byte(0xE8 | dst->encoding());
1219 }
1220
1221
1222 void Assembler::subl(Address dst, int imm32) {
1223 if (is8bit(imm32)) {
1224 InstructionMark im(this);
1225 emit_byte(0x83);
1226 emit_operand(rbp, dst);
1227 emit_byte(imm32 & 0xFF);
1228 } else {
1229 InstructionMark im(this);
1230 emit_byte(0x81);
1231 emit_operand(rbp, dst);
1232 emit_long(imm32);
1233 }
1234 }
1235
1236
1237 void Assembler::subl(Register dst, int imm32) {
1238 emit_arith(0x81, 0xE8, dst, imm32);
1239 }
1240
1241
1242 void Assembler::subl(Address dst, Register src) {
1243 InstructionMark im(this);
1244 emit_byte(0x29);
1245 emit_operand(src, dst);
1246 }
1247
1248
1249 void Assembler::subl(Register dst, Address src) {
1250 InstructionMark im(this);
1251 emit_byte(0x2B);
1252 emit_operand(dst, src);
1253 }
1254
1255
1256 void Assembler::subl(Register dst, Register src) {
1257 emit_arith(0x2B, 0xC0, dst, src);
1258 }
1259
1260
1261 void Assembler::testb(Register dst, int imm8) {
1262 assert(dst->has_byte_register(), "must have byte register");
1263 emit_arith_b(0xF6, 0xC0, dst, imm8);
1264 }
1265
1266
1267 void Assembler::testl(Register dst, int imm32) {
1268 // not using emit_arith because test
1269 // doesn't support sign-extension of
1270 // 8bit operands
1271 if (dst->encoding() == 0) {
1272 emit_byte(0xA9);
1273 } else {
1274 emit_byte(0xF7);
1275 emit_byte(0xC0 | dst->encoding());
1276 }
1277 emit_long(imm32);
1278 }
1279
1280
1281 void Assembler::testl(Register dst, Register src) {
1282 emit_arith(0x85, 0xC0, dst, src);
1283 }
1284
1285 void Assembler::testl(Register dst, Address src) {
1286 InstructionMark im(this);
1287 emit_byte(0x85);
1288 emit_operand(dst, src);
1289 }
1290
1291 void Assembler::xaddl(Address dst, Register src) {
1292 InstructionMark im(this);
1293 emit_byte(0x0F);
1294 emit_byte(0xC1);
1295 emit_operand(src, dst);
1296 }
1297
1298 void Assembler::xorl(Register dst, int imm32) {
1299 emit_arith(0x81, 0xF0, dst, imm32);
1300 }
1301
1302
1303 void Assembler::xorl(Register dst, Address src) {
1304 InstructionMark im(this);
1305 emit_byte(0x33);
1306 emit_operand(dst, src);
1307 }
1308
1309
1310 void Assembler::xorl(Register dst, Register src) {
1311 emit_arith(0x33, 0xC0, dst, src);
1312 }
1313
1314
1315 void Assembler::bswap(Register reg) {
1316 emit_byte(0x0F);
1317 emit_byte(0xC8 | reg->encoding());
1318 }
1319
1320
1321 void Assembler::lock() {
1322 if (Atomics & 1) {
1323 // Emit a NOP (0x90) in place of the lock prefix
1324 emit_byte(0x90);
1325 } else {
1326 emit_byte(0xF0);
1327 }
1328 }
1329
1330
1331 void Assembler::xchg(Register reg, Address adr) {
1332 InstructionMark im(this);
1333 emit_byte(0x87);
1334 emit_operand(reg, adr);
1335 }
1336
1337
1338 void Assembler::xchgl(Register dst, Register src) {
1339 emit_byte(0x87);
1340 emit_byte(0xc0 | dst->encoding() << 3 | src->encoding());
1341 }
1342
1343
1344 // The 32-bit cmpxchg compares the value at adr with the contents of rax;
1345 // if they are equal, reg is stored into adr, otherwise the value at adr is loaded into rax.
1346 // The ZF is set if the compared values were equal, and cleared otherwise.
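// Note: the compare-and-exchange is atomic across processors only when preceded by a lock prefix (see lock()).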
1347 void Assembler::cmpxchg(Register reg, Address adr) {
1348 if (Atomics & 2) {
1349 // caveat: no instructionmark, so this isn't relocatable.
1350 // Emit a synthetic, non-atomic, CAS equivalent.
1351 // Beware. The synthetic form sets all ICCs, not just ZF.
1352 // cmpxchg r,[m] is equivalent to rax = CAS(m, rax, r)
1353 cmpl (rax, adr) ;
1354 movl (rax, adr) ;
1355 if (reg != rax) {
1356 Label L ;
1357 jcc (Assembler::notEqual, L) ;
1358 movl (adr, reg) ;
1359 bind (L) ;
1360 }
1361 } else {
1362 InstructionMark im(this);
1363 emit_byte(0x0F);
1364 emit_byte(0xB1);
1365 emit_operand(reg, adr);
1366 }
1367 }
1368
1369 // The 64-bit cmpxchg compares the value at adr with the contents of rdx:rax,
1370 // and stores rcx:rbx into adr if so; otherwise, the value at adr is loaded
1371 // into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise.
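// As with cmpxchg, a lock prefix must precede it for multiprocessor atomicity.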
1372 void Assembler::cmpxchg8(Address adr) {
1373 InstructionMark im(this);
1374 emit_byte(0x0F);
1375 emit_byte(0xc7);
1376 emit_operand(rcx, adr);
1377 }
1378
1379 void Assembler::hlt() {
1380 emit_byte(0xF4);
1381 }
1382
1383
1384 void Assembler::addr_nop_4() {
1385 // 4 bytes: NOP DWORD PTR [EAX+0]
1386 emit_byte(0x0F);
1387 emit_byte(0x1F);
1388 emit_byte(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
1389 emit_byte(0); // 8-bit offset (1 byte)
1390 }
1391
1392 void Assembler::addr_nop_5() {
1393 // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bit offset
1394 emit_byte(0x0F);
1395 emit_byte(0x1F);
1396 emit_byte(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
1397 emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
1398 emit_byte(0); // 8-bit offset (1 byte)
1399 }
1400
1401 void Assembler::addr_nop_7() {
1402 // 7 bytes: NOP DWORD PTR [EAX+0] 32-bit offset
1403 emit_byte(0x0F);
1404 emit_byte(0x1F);
1405 emit_byte(0x80); // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
1406 emit_long(0); // 32-bit offset (4 bytes)
1407 }
1408
1409 void Assembler::addr_nop_8() {
1410 // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bit offset
1411 emit_byte(0x0F);
1412 emit_byte(0x1F);
1413 emit_byte(0x84); // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
1414 emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
1415 emit_long(0); // 32-bit offset (4 bytes)
1416 }
1417
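// Emit i bytes of padding, preferring multi-byte address nops where the CPU executes them efficiently.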
1418 void Assembler::nop(int i) {
1419 assert(i > 0, " ");
1420 if (UseAddressNop && VM_Version::is_intel()) {
1421 //
1422 // Using multi-byte nops "0x0F 0x1F [address]" for Intel
1423 // 1: 0x90
1424 // 2: 0x66 0x90
1425 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
1426 // 4: 0x0F 0x1F 0x40 0x00
1427 // 5: 0x0F 0x1F 0x44 0x00 0x00
1428 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
1429 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
1430 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
1431 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
1432 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
1433 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
1434
1435 // The rest of the encoding is Intel specific - don't use consecutive address nops
1436
1437 // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
1438 // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
1439 // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
1440 // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
1441
1442 while(i >= 15) {
1443 // For Intel don't generate consecutive address nops (mix with regular nops)
1444 i -= 15;
1445 emit_byte(0x66); // size prefix
1446 emit_byte(0x66); // size prefix
1447 emit_byte(0x66); // size prefix
1448 addr_nop_8();
1449 emit_byte(0x66); // size prefix
1450 emit_byte(0x66); // size prefix
1451 emit_byte(0x66); // size prefix
1452 emit_byte(0x90); // nop
1453 }
1454 switch (i) {
1455 case 14:
1456 emit_byte(0x66); // size prefix
1457 case 13:
1458 emit_byte(0x66); // size prefix
1459 case 12:
1460 addr_nop_8();
1461 emit_byte(0x66); // size prefix
1462 emit_byte(0x66); // size prefix
1463 emit_byte(0x66); // size prefix
1464 emit_byte(0x90); // nop
1465 break;
1466 case 11:
1467 emit_byte(0x66); // size prefix
1468 case 10:
1469 emit_byte(0x66); // size prefix
1470 case 9:
1471 emit_byte(0x66); // size prefix
1472 case 8:
1473 addr_nop_8();
1474 break;
1475 case 7:
1476 addr_nop_7();
1477 break;
1478 case 6:
1479 emit_byte(0x66); // size prefix
1480 case 5:
1481 addr_nop_5();
1482 break;
1483 case 4:
1484 addr_nop_4();
1485 break;
1486 case 3:
1487 // Don't use "0x0F 0x1F 0x00" - need patching safe padding
1488 emit_byte(0x66); // size prefix
1489 case 2:
1490 emit_byte(0x66); // size prefix
1491 case 1:
1492 emit_byte(0x90); // nop
1493 break;
1494 default:
1495 assert(i == 0, " ");
1496 }
1497 return;
1498 }
1499 if (UseAddressNop && VM_Version::is_amd()) {
1500 //
1501 // Using multi-byte nops "0x0F 0x1F [address]" for AMD.
1502 // 1: 0x90
1503 // 2: 0x66 0x90
1504 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
1505 // 4: 0x0F 0x1F 0x40 0x00
1506 // 5: 0x0F 0x1F 0x44 0x00 0x00
1507 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
1508 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
1509 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
1510 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
1511 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
1512 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
1513
1514 // The rest of the encoding is AMD specific - use consecutive address nops
1515
1516 // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
1517 // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
1518 // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
1519 // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
1520 // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
1521 // Size prefixes (0x66) are added for larger sizes
1522
1523 while(i >= 22) {
1524 i -= 11;
1525 emit_byte(0x66); // size prefix
1526 emit_byte(0x66); // size prefix
1527 emit_byte(0x66); // size prefix
1528 addr_nop_8();
1529 }
1530 // Generate first nop for size between 21-12
1531 switch (i) {
1532 case 21:
1533 i -= 1;
1534 emit_byte(0x66); // size prefix
1535 case 20:
1536 case 19:
1537 i -= 1;
1538 emit_byte(0x66); // size prefix
1539 case 18:
1540 case 17:
1541 i -= 1;
1542 emit_byte(0x66); // size prefix
1543 case 16:
1544 case 15:
1545 i -= 8;
1546 addr_nop_8();
1547 break;
1548 case 14:
1549 case 13:
1550 i -= 7;
1551 addr_nop_7();
1552 break;
1553 case 12:
1554 i -= 6;
1555 emit_byte(0x66); // size prefix
1556 addr_nop_5();
1557 break;
1558 default:
1559 assert(i < 12, " ");
1560 }
1561
1562 // Generate second nop for size between 11-1
1563 switch (i) {
1564 case 11:
1565 emit_byte(0x66); // size prefix
1566 case 10:
1567 emit_byte(0x66); // size prefix
1568 case 9:
1569 emit_byte(0x66); // size prefix
1570 case 8:
1571 addr_nop_8();
1572 break;
1573 case 7:
1574 addr_nop_7();
1575 break;
1576 case 6:
1577 emit_byte(0x66); // size prefix
1578 case 5:
1579 addr_nop_5();
1580 break;
1581 case 4:
1582 addr_nop_4();
1583 break;
1584 case 3:
1585 // Don't use "0x0F 0x1F 0x00" - need patching safe padding
1586 emit_byte(0x66); // size prefix
1587 case 2:
1588 emit_byte(0x66); // size prefix
1589 case 1:
1590 emit_byte(0x90); // nop
1591 break;
1592 default:
1593 assert(i == 0, " ");
1594 }
1595 return;
1596 }
1597
1598 // Using nops with size prefixes "0x66 0x90".
1599 // From AMD Optimization Guide:
1600 // 1: 0x90
1601 // 2: 0x66 0x90
1602 // 3: 0x66 0x66 0x90
1603 // 4: 0x66 0x66 0x66 0x90
1604 // 5: 0x66 0x66 0x90 0x66 0x90
1605 // 6: 0x66 0x66 0x90 0x66 0x66 0x90
1606 // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
1607 // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
1608 // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
1609 // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
1610 //
1611 while(i > 12) {
1612 i -= 4;
1613 emit_byte(0x66); // size prefix
1614 emit_byte(0x66);
1615 emit_byte(0x66);
1616 emit_byte(0x90); // nop
1617 }
1618 // 1 - 12 nops
1619 if(i > 8) {
1620 if(i > 9) {
1621 i -= 1;
1622 emit_byte(0x66);
1623 }
1624 i -= 3;
1625 emit_byte(0x66);
1626 emit_byte(0x66);
1627 emit_byte(0x90);
1628 }
1629 // 1 - 8 nops
1630 if(i > 4) {
1631 if(i > 6) {
1632 i -= 1;
1633 emit_byte(0x66);
1634 }
1635 i -= 3;
1636 emit_byte(0x66);
1637 emit_byte(0x66);
1638 emit_byte(0x90);
1639 }
1640 switch (i) {
1641 case 4:
1642 emit_byte(0x66);
1643 case 3:
1644 emit_byte(0x66);
1645 case 2:
1646 emit_byte(0x66);
1647 case 1:
1648 emit_byte(0x90);
1649 break;
1650 default:
1651 assert(i == 0, " ");
1652 }
1653 }
1654
1655 void Assembler::ret(int imm16) {
1656 if (imm16 == 0) {
1657 emit_byte(0xC3);
1658 } else {
1659 emit_byte(0xC2);
1660 emit_word(imm16);
1661 }
1662 }
1663
1664
1665 void Assembler::set_byte_if_not_zero(Register dst) {
1666 emit_byte(0x0F);
1667 emit_byte(0x95);
1668 emit_byte(0xE0 | dst->encoding());
1669 }
1670
1671
1672 // copies a single 32-bit word from [esi] to [edi]
1673 void Assembler::smovl() {
1674 emit_byte(0xA5);
1675 }
1676
1677 // copies data from [esi] to [edi] using rcx double words (m32)
1678 void Assembler::rep_movl() {
1679 emit_byte(0xF3);
1680 emit_byte(0xA5);
1681 }
1682
1683
1684 // stores the value in rax into rcx double words (m32) starting at [edi]
1685 void Assembler::rep_set() {
1686 emit_byte(0xF3);
1687 emit_byte(0xAB);
1688 }
1689
1690 // scans rcx double words (m32) at [edi] for an occurrence of rax
1691 void Assembler::repne_scan() {
1692 emit_byte(0xF2);
1693 emit_byte(0xAF);
1694 }
1695
1696
1697 void Assembler::setb(Condition cc, Register dst) {
1698 assert(0 <= cc && cc < 16, "illegal cc");
1699 emit_byte(0x0F);
1700 emit_byte(0x90 | cc);
1701 emit_byte(0xC0 | dst->encoding());
1702 }
1703
1704 void Assembler::cld() {
1705 emit_byte(0xfc);
1706 }
1707
1708 void Assembler::std() {
1709 emit_byte(0xfd);
1710 }
1711
1712 void Assembler::emit_raw (unsigned char b) {
1713 emit_byte (b) ;
1714 }
1715
1716 // Serializes memory.
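// On uniprocessors nothing is emitted; on MP systems either mfence or a locked add serves as the barrier.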
1717 void Assembler::membar() {
1718 // Memory barriers are only needed on multiprocessors
1719 if (os::is_MP()) {
1720 if( VM_Version::supports_sse2() ) {
1721 emit_byte( 0x0F ); // MFENCE; faster and clobbers no registers
1722 emit_byte( 0xAE );
1723 emit_byte( 0xF0 );
1724 } else {
1725 // All usable chips support "locked" instructions which suffice
1726 // as barriers, and are much faster than the alternative of
1727 // using the cpuid instruction. Here we use a locked add [esp],0.
1728 // This is conveniently otherwise a no-op except for blowing
1729 // flags (which we save and restore.)
1730 pushfd(); // Save eflags register
1731 lock();
1732 addl(Address(rsp, 0), 0);// Assert the lock# signal here
1733 popfd(); // Restore eflags register
1734 }
1735 }
1736 }
1737
1738 // Identify processor type and features
1739 void Assembler::cpuid() {
1740 // Note: we can't assert VM_Version::supports_cpuid() here
1741 // because this instruction is used in the processor
1742 // identification code.
1743 emit_byte( 0x0F );
1744 emit_byte( 0xA2 );
1745 }
1746
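// Call to a label: if the label is already bound the displacement is known now; otherwise a patch record is added and the displacement is filled in when the label is bound.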
1747 void Assembler::call(Label& L, relocInfo::relocType rtype) {
1748 if (L.is_bound()) {
1749 const int long_size = 5;
1750 int offs = target(L) - pc();
1751 assert(offs <= 0, "assembler error");
1752 InstructionMark im(this);
1753 // 1110 1000 #32-bit disp
1754 emit_byte(0xE8);
1755 emit_data(offs - long_size, rtype, 0);
1756 } else {
1757 InstructionMark im(this);
1758 // 1110 1000 #32-bit disp
1759 L.add_patch_at(code(), locator());
1760 emit_byte(0xE8);
1761 emit_data(int(0), rtype, 0);
1762 }
1763 }
1764
1765 void Assembler::call(Register dst) {
1766 emit_byte(0xFF);
1767 emit_byte(0xD0 | dst->encoding());
1768 }
1769
1770
1771 void Assembler::call(Address adr) {
1772 InstructionMark im(this);
1773 relocInfo::relocType rtype = adr.reloc();
1774 if (rtype != relocInfo::runtime_call_type) {
1775 emit_byte(0xFF);
1776 emit_operand(rdx, adr);
1777 } else {
1778 assert(false, "ack");
1779 }
1780
1781 }
1782
1783 void Assembler::call_literal(address dest, RelocationHolder const& rspec) {
1784 InstructionMark im(this);
1785 emit_byte(0xE8);
1786 intptr_t disp = dest - (_code_pos + sizeof(int32_t));
1787 assert(dest != NULL, "must have a target");
1788 emit_data(disp, rspec, call32_operand);
1789
1790 }
1791
1792 void Assembler::jmp(Register entry) {
1793 emit_byte(0xFF);
1794 emit_byte(0xE0 | entry->encoding());
1795 }
1796
1797
1798 void Assembler::jmp(Address adr) {
1799 InstructionMark im(this);
1800 emit_byte(0xFF);
1801 emit_operand(rsp, adr);
1802 }
1803
1804 void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
1805 InstructionMark im(this);
1806 emit_byte(0xE9);
1807 assert(dest != NULL, "must have a target");
1808 intptr_t disp = dest - (_code_pos + sizeof(int32_t));
1809 emit_data(disp, rspec.reloc(), call32_operand);
1810 }
1811
1812 void Assembler::jmp(Label& L, relocInfo::relocType rtype) {
1813 if (L.is_bound()) {
1814 address entry = target(L);
1815 assert(entry != NULL, "jmp most probably wrong");
1816 InstructionMark im(this);
1817 const int short_size = 2;
1818 const int long_size = 5;
1819 intptr_t offs = entry - _code_pos;
1820 if (rtype == relocInfo::none && is8bit(offs - short_size)) {
1821 emit_byte(0xEB);
1822 emit_byte((offs - short_size) & 0xFF);
1823 } else {
1824 emit_byte(0xE9);
1825 emit_long(offs - long_size);
1826 }
1827 } else {
1828 // By default, forward jumps are always 32-bit displacements, since
1829 // we can't yet know where the label will be bound. If you're sure that
1830 // the forward jump will not run beyond 256 bytes, use jmpb to
1831 // force an 8-bit displacement.
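// The 32-bit displacement emitted here (initially 0) is patched once the label is bound.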
1832 InstructionMark im(this);
1833 relocate(rtype);
1834 L.add_patch_at(code(), locator());
1835 emit_byte(0xE9);
1836 emit_long(0);
1837 }
1838 }
1839
1840 void Assembler::jmpb(Label& L) {
1841 if (L.is_bound()) {
1842 const int short_size = 2;
1843 address entry = target(L);
1844 assert(is8bit((entry - _code_pos) + short_size),
1845 "Dispacement too large for a short jmp");
1846 assert(entry != NULL, "jmp most probably wrong");
1847 intptr_t offs = entry - _code_pos;
1848 emit_byte(0xEB);
1849 emit_byte((offs - short_size) & 0xFF);
1850 } else {
1851 InstructionMark im(this);
1852 L.add_patch_at(code(), locator());
1853 emit_byte(0xEB);
1854 emit_byte(0);
1855 }
1856 }
1857
1858 void Assembler::jcc(Condition cc, Label& L, relocInfo::relocType rtype) {
1859 InstructionMark im(this);
1860 relocate(rtype);
1861 assert((0 <= cc) && (cc < 16), "illegal cc");
1862 if (L.is_bound()) {
1863 address dst = target(L);
1864 assert(dst != NULL, "jcc most probably wrong");
1865
1866 const int short_size = 2;
1867 const int long_size = 6;
1868 int offs = (int)dst - ((int)_code_pos);
1869 if (rtype == relocInfo::none && is8bit(offs - short_size)) {
1870 // 0111 tttn #8-bit disp
1871 emit_byte(0x70 | cc);
1872 emit_byte((offs - short_size) & 0xFF);
1873 } else {
1874 // 0000 1111 1000 tttn #32-bit disp
1875 emit_byte(0x0F);
1876 emit_byte(0x80 | cc);
1877 emit_long(offs - long_size);
1878 }
1879 } else {
1880 // Note: could eliminate cond. jumps to this jump if the condition
1881 // is the same; however, that seems to be a rather unlikely case.
1882 // Note: use jccb() if label to be bound is very close to get
1883 // an 8-bit displacement
1884 L.add_patch_at(code(), locator());
1885 emit_byte(0x0F);
1886 emit_byte(0x80 | cc);
1887 emit_long(0);
1888 }
1889 }
1890
1891 void Assembler::jccb(Condition cc, Label& L) {
1892 if (L.is_bound()) {
1893 const int short_size = 2;
1894 address entry = target(L);
1895 assert(is8bit((intptr_t)entry - ((intptr_t)_code_pos + short_size)),
1896 "Dispacement too large for a short jmp");
1897 intptr_t offs = (intptr_t)entry - (intptr_t)_code_pos;
1898 // 0111 tttn #8-bit disp
1899 emit_byte(0x70 | cc);
1900 emit_byte((offs - short_size) & 0xFF);
1902 } else {
1903 InstructionMark im(this);
1904 L.add_patch_at(code(), locator());
1905 emit_byte(0x70 | cc);
1906 emit_byte(0);
1907 }
1908 }
1909
1910 // FPU instructions
1911
1912 void Assembler::fld1() {
1913 emit_byte(0xD9);
1914 emit_byte(0xE8);
1915 }
1916
1917
1918 void Assembler::fldz() {
1919 emit_byte(0xD9);
1920 emit_byte(0xEE);
1921 }
1922
1923
1924 void Assembler::fld_s(Address adr) {
1925 InstructionMark im(this);
1926 emit_byte(0xD9);
1927 emit_operand(rax, adr);
1928 }
1929
1930
1931 void Assembler::fld_s (int index) {
1932 emit_farith(0xD9, 0xC0, index);
1933 }
1934
1935
1936 void Assembler::fld_d(Address adr) {
1937 InstructionMark im(this);
1938 emit_byte(0xDD);
1939 emit_operand(rax, adr);
1940 }
1941
1942
1943 void Assembler::fld_x(Address adr) {
1944 InstructionMark im(this);
1945 emit_byte(0xDB);
1946 emit_operand(rbp, adr);
1947 }
1948
1949
1950 void Assembler::fst_s(Address adr) {
1951 InstructionMark im(this);
1952 emit_byte(0xD9);
1953 emit_operand(rdx, adr);
1954 }
1955
1956
1957 void Assembler::fst_d(Address adr) {
1958 InstructionMark im(this);
1959 emit_byte(0xDD);
1960 emit_operand(rdx, adr);
1961 }
1962
1963
1964 void Assembler::fstp_s(Address adr) {
1965 InstructionMark im(this);
1966 emit_byte(0xD9);
1967 emit_operand(rbx, adr);
1968 }
1969
1970
1971 void Assembler::fstp_d(Address adr) {
1972 InstructionMark im(this);
1973 emit_byte(0xDD);
1974 emit_operand(rbx, adr);
1975 }
1976
1977
1978 void Assembler::fstp_x(Address adr) {
1979 InstructionMark im(this);
1980 emit_byte(0xDB);
1981 emit_operand(rdi, adr);
1982 }
1983
1984
1985 void Assembler::fstp_d(int index) {
1986 emit_farith(0xDD, 0xD8, index);
1987 }
1988
1989
1990 void Assembler::fild_s(Address adr) {
1991 InstructionMark im(this);
1992 emit_byte(0xDB);
1993 emit_operand(rax, adr);
1994 }
1995
1996
1997 void Assembler::fild_d(Address adr) {
1998 InstructionMark im(this);
1999 emit_byte(0xDF);
2000 emit_operand(rbp, adr);
2001 }
2002
2003
2004 void Assembler::fistp_s(Address adr) {
2005 InstructionMark im(this);
2006 emit_byte(0xDB);
2007 emit_operand(rbx, adr);
2008 }
2009
2010
2011 void Assembler::fistp_d(Address adr) {
2012 InstructionMark im(this);
2013 emit_byte(0xDF);
2014 emit_operand(rdi, adr);
2015 }
2016
2017
2018 void Assembler::fist_s(Address adr) {
2019 InstructionMark im(this);
2020 emit_byte(0xDB);
2021 emit_operand(rdx, adr);
2022 }
2023
2024
2025 void Assembler::fabs() {
2026 emit_byte(0xD9);
2027 emit_byte(0xE1);
2028 }
2029
2030
2031 void Assembler::fldln2() {
2032 emit_byte(0xD9);
2033 emit_byte(0xED);
2034 }
2035
2036 void Assembler::fyl2x() {
2037 emit_byte(0xD9);
2038 emit_byte(0xF1);
2039 }
2040
2041
2042 void Assembler::fldlg2() {
2043 emit_byte(0xD9);
2044 emit_byte(0xEC);
2045 }
2046
2047
2048 void Assembler::flog() {
2049 fldln2();
2050 fxch();
2051 fyl2x();
2052 }
2053
2054
2055 void Assembler::flog10() {
2056 fldlg2();
2057 fxch();
2058 fyl2x();
2059 }
2060
2061
2062 void Assembler::fsin() {
2063 emit_byte(0xD9);
2064 emit_byte(0xFE);
2065 }
2066
2067
2068 void Assembler::fcos() {
2069 emit_byte(0xD9);
2070 emit_byte(0xFF);
2071 }
2072
2073 void Assembler::ftan() {
2074 emit_byte(0xD9);
2075 emit_byte(0xF2);
2076 emit_byte(0xDD);
2077 emit_byte(0xD8);
2078 }
2079
2080 void Assembler::fsqrt() {
2081 emit_byte(0xD9);
2082 emit_byte(0xFA);
2083 }
2084
2085
2086 void Assembler::fchs() {
2087 emit_byte(0xD9);
2088 emit_byte(0xE0);
2089 }
2090
2091
2092 void Assembler::fadd_s(Address src) {
2093 InstructionMark im(this);
2094 emit_byte(0xD8);
2095 emit_operand(rax, src);
2096 }
2097
2098
2099 void Assembler::fadd_d(Address src) {
2100 InstructionMark im(this);
2101 emit_byte(0xDC);
2102 emit_operand(rax, src);
2103 }
2104
2105
2106 void Assembler::fadd(int i) {
2107 emit_farith(0xD8, 0xC0, i);
2108 }
2109
2110
2111 void Assembler::fadda(int i) {
2112 emit_farith(0xDC, 0xC0, i);
2113 }
2114
2115
2116 void Assembler::fsub_d(Address src) {
2117 InstructionMark im(this);
2118 emit_byte(0xDC);
2119 emit_operand(rsp, src);
2120 }
2121
2122
2123 void Assembler::fsub_s(Address src) {
2124 InstructionMark im(this);
2125 emit_byte(0xD8);
2126 emit_operand(rsp, src);
2127 }
2128
2129
2130 void Assembler::fsubr_s(Address src) {
2131 InstructionMark im(this);
2132 emit_byte(0xD8);
2133 emit_operand(rbp, src);
2134 }
2135
2136
2137 void Assembler::fsubr_d(Address src) {
2138 InstructionMark im(this);
2139 emit_byte(0xDC);
2140 emit_operand(rbp, src);
2141 }
2142
2143
2144 void Assembler::fmul_s(Address src) {
2145 InstructionMark im(this);
2146 emit_byte(0xD8);
2147 emit_operand(rcx, src);
2148 }
2149
2150
2151 void Assembler::fmul_d(Address src) {
2152 InstructionMark im(this);
2153 emit_byte(0xDC);
2154 emit_operand(rcx, src);
2155 }
2156
2157
2158 void Assembler::fmul(int i) {
2159 emit_farith(0xD8, 0xC8, i);
2160 }
2161
2162
2163 void Assembler::fmula(int i) {
2164 emit_farith(0xDC, 0xC8, i);
2165 }
2166
2167
2168 void Assembler::fdiv_s(Address src) {
2169 InstructionMark im(this);
2170 emit_byte(0xD8);
2171 emit_operand(rsi, src);
2172 }
2173
2174
2175 void Assembler::fdiv_d(Address src) {
2176 InstructionMark im(this);
2177 emit_byte(0xDC);
2178 emit_operand(rsi, src);
2179 }
2180
2181
2182 void Assembler::fdivr_s(Address src) {
2183 InstructionMark im(this);
2184 emit_byte(0xD8);
2185 emit_operand(rdi, src);
2186 }
2187
2188
2189 void Assembler::fdivr_d(Address src) {
2190 InstructionMark im(this);
2191 emit_byte(0xDC);
2192 emit_operand(rdi, src);
2193 }
2194
2195
2196 void Assembler::fsub(int i) {
2197 emit_farith(0xD8, 0xE0, i);
2198 }
2199
2200
2201 void Assembler::fsuba(int i) {
2202 emit_farith(0xDC, 0xE8, i);
2203 }
2204
2205
2206 void Assembler::fsubr(int i) {
2207 emit_farith(0xD8, 0xE8, i);
2208 }
2209
2210
2211 void Assembler::fsubra(int i) {
2212 emit_farith(0xDC, 0xE0, i);
2213 }
2214
2215
2216 void Assembler::fdiv(int i) {
2217 emit_farith(0xD8, 0xF0, i);
2218 }
2219
2220
2221 void Assembler::fdiva(int i) {
2222 emit_farith(0xDC, 0xF8, i);
2223 }
2224
2225
2226 void Assembler::fdivr(int i) {
2227 emit_farith(0xD8, 0xF8, i);
2228 }
2229
2230
2231 void Assembler::fdivra(int i) {
2232 emit_farith(0xDC, 0xF0, i);
2233 }
2234
2235
2236 // Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
2237 // is erroneous for some of the floating-point instructions below.
2238
2239 void Assembler::fdivp(int i) {
2240 emit_farith(0xDE, 0xF8, i); // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
2241 }
2242
2243
2244 void Assembler::fdivrp(int i) {
2245 emit_farith(0xDE, 0xF0, i); // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
2246 }
2247
2248
2249 void Assembler::fsubp(int i) {
2250 emit_farith(0xDE, 0xE8, i); // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
2251 }
2252
2253
2254 void Assembler::fsubrp(int i) {
2255 emit_farith(0xDE, 0xE0, i); // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
2256 }
2257
2258
2259 void Assembler::faddp(int i) {
2260 emit_farith(0xDE, 0xC0, i);
2261 }
2262
2263
2264 void Assembler::fmulp(int i) {
2265 emit_farith(0xDE, 0xC8, i);
2266 }
2267
2268
2269 void Assembler::fprem() {
2270 emit_byte(0xD9);
2271 emit_byte(0xF8);
2272 }
2273
2274
2275 void Assembler::fprem1() {
2276 emit_byte(0xD9);
2277 emit_byte(0xF5);
2278 }
2279
2280
2281 void Assembler::fxch(int i) {
2282 emit_farith(0xD9, 0xC8, i);
2283 }
2284
2285
2286 void Assembler::fincstp() {
2287 emit_byte(0xD9);
2288 emit_byte(0xF7);
2289 }
2290
2291
2292 void Assembler::fdecstp() {
2293 emit_byte(0xD9);
2294 emit_byte(0xF6);
2295 }
2296
2297
2298 void Assembler::ffree(int i) {
2299 emit_farith(0xDD, 0xC0, i);
2300 }
2301
2302
2303 void Assembler::fcomp_s(Address src) {
2304 InstructionMark im(this);
2305 emit_byte(0xD8);
2306 emit_operand(rbx, src);
2307 }
2308
2309
2310 void Assembler::fcomp_d(Address src) {
2311 InstructionMark im(this);
2312 emit_byte(0xDC);
2313 emit_operand(rbx, src);
2314 }
2315
2316
2317 void Assembler::fcom(int i) {
2318 emit_farith(0xD8, 0xD0, i);
2319 }
2320
2321
2322 void Assembler::fcomp(int i) {
2323 emit_farith(0xD8, 0xD8, i);
2324 }
2325
2326
2327 void Assembler::fcompp() {
2328 emit_byte(0xDE);
2329 emit_byte(0xD9);
2330 }
2331
2332
2333 void Assembler::fucomi(int i) {
2334 // make sure the instruction is supported (introduced for P6, together with cmov)
2335 guarantee(VM_Version::supports_cmov(), "illegal instruction");
2336 emit_farith(0xDB, 0xE8, i);
2337 }
2338
2339
2340 void Assembler::fucomip(int i) {
2341 // make sure the instruction is supported (introduced for P6, together with cmov)
2342 guarantee(VM_Version::supports_cmov(), "illegal instruction");
2343 emit_farith(0xDF, 0xE8, i);
2344 }
2345
2346
2347 void Assembler::ftst() {
2348 emit_byte(0xD9);
2349 emit_byte(0xE4);
2350 }
2351
2352
2353 void Assembler::fnstsw_ax() {
2354 emit_byte(0xDF);
2355 emit_byte(0xE0);
2356 }
2357
2358
2359 void Assembler::fwait() {
2360 emit_byte(0x9B);
2361 }
2362
2363
2364 void Assembler::finit() {
2365 emit_byte(0x9B);
2366 emit_byte(0xDB);
2367 emit_byte(0xE3);
2368 }
2369
2370
2371 void Assembler::fldcw(Address src) {
2372 InstructionMark im(this);
2373 emit_byte(0xD9);
2374 emit_operand(rbp, src);
2375 }
2376
2377
2378 void Assembler::fnstcw(Address src) {
2379 InstructionMark im(this);
2380 emit_byte(0x9B);
2381 emit_byte(0xD9);
2382 emit_operand(rdi, src);
2383 }
2384
2385 void Assembler::fnsave(Address dst) {
2386 InstructionMark im(this);
2387 emit_byte(0xDD);
2388 emit_operand(rsi, dst);
2389 }
2390
2391
2392 void Assembler::frstor(Address src) {
2393 InstructionMark im(this);
2394 emit_byte(0xDD);
2395 emit_operand(rsp, src);
2396 }
2397
2398
2399 void Assembler::fldenv(Address src) {
2400 InstructionMark im(this);
2401 emit_byte(0xD9);
2402 emit_operand(rsp, src);
2403 }
2404
2405
2406 void Assembler::sahf() {
2407 emit_byte(0x9E);
2408 }
2409
2410 // MMX operations
2411 void Assembler::emit_operand(MMXRegister reg, Address adr) {
2412 emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
2413 }
2414
2415 void Assembler::movq( MMXRegister dst, Address src ) {
2416 assert( VM_Version::supports_mmx(), "" );
2417 emit_byte(0x0F);
2418 emit_byte(0x6F);
2419 emit_operand(dst,src);
2420 }
2421
2422 void Assembler::movq( Address dst, MMXRegister src ) {
2423 assert( VM_Version::supports_mmx(), "" );
2424 emit_byte(0x0F);
2425 emit_byte(0x7F);
2426 emit_operand(src,dst);
2427 }
2428
2429 void Assembler::emms() {
2430 emit_byte(0x0F);
2431 emit_byte(0x77);
2432 }
2433
2434
2435
2436
2437 // SSE and SSE2 instructions
2438 inline void Assembler::emit_sse_operand(XMMRegister reg, Address adr) {
2439 assert(((Register)reg)->encoding() == reg->encoding(), "otherwise typecast is invalid");
2440 emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
2441 }
2442 inline void Assembler::emit_sse_operand(Register reg, Address adr) {
2443 emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
2444 }
2445
2446 inline void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
2447 emit_byte(0xC0 | dst->encoding() << 3 | src->encoding());
2448 }
2449 inline void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
2450 emit_byte(0xC0 | dst->encoding() << 3 | src->encoding());
2451 }
2452 inline void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
2453 emit_byte(0xC0 | dst->encoding() << 3 | src->encoding());
2454 }
2455
2456
2457 // Macro for creation of SSE2 instructions
2458 // The SSE2 instruction set is highly regular, so this macro saves
2459 // a lot of cut&paste
2460 // Each macro expansion creates two methods (same name with different
2461 // parameter list)
2462 //
2463 // Macro parameters:
2464 // * name: name of the created methods
2465 // * sse_version: either sse or sse2 for the assertion if instruction supported by processor
2466 // * prefix: first opcode byte of the instruction (or 0 if no prefix byte)
2467 // * opcode: last opcode byte of the instruction
2468 // * conversion instructions have parameters of type Register instead of XMMRegister,
2469 // so this can also be configured with the macro parameters
2470 #define emit_sse_instruction(name, sse_version, prefix, opcode, dst_register_type, src_register_type) \
2471 \
2472 void Assembler:: name (dst_register_type dst, Address src) { \
2473 assert(VM_Version::supports_##sse_version(), ""); \
2474 \
2475 InstructionMark im(this); \
2476 if (prefix != 0) emit_byte(prefix); \
2477 emit_byte(0x0F); \
2478 emit_byte(opcode); \
2479 emit_sse_operand(dst, src); \
2480 } \
2481 \
2482 void Assembler:: name (dst_register_type dst, src_register_type src) { \
2483 assert(VM_Version::supports_##sse_version(), ""); \
2484 \
2485 if (prefix != 0) emit_byte(prefix); \
2486 emit_byte(0x0F); \
2487 emit_byte(opcode); \
2488 emit_sse_operand(dst, src); \
2489 } \
2490
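// For illustration, the first expansion below,
//   emit_sse_instruction(addss, sse, 0xF3, 0x58, XMMRegister, XMMRegister)
// generates the equivalent of:
//
//   void Assembler::addss(XMMRegister dst, Address src) {
//     assert(VM_Version::supports_sse(), "");
//     InstructionMark im(this);
//     emit_byte(0xF3); emit_byte(0x0F); emit_byte(0x58);
//     emit_sse_operand(dst, src);
//   }
//
//   void Assembler::addss(XMMRegister dst, XMMRegister src) {
//     assert(VM_Version::supports_sse(), "");
//     emit_byte(0xF3); emit_byte(0x0F); emit_byte(0x58);
//     emit_sse_operand(dst, src);
//   }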
2491 emit_sse_instruction(addss, sse, 0xF3, 0x58, XMMRegister, XMMRegister);
2492 emit_sse_instruction(addsd, sse2, 0xF2, 0x58, XMMRegister, XMMRegister)
2493 emit_sse_instruction(subss, sse, 0xF3, 0x5C, XMMRegister, XMMRegister)
2494 emit_sse_instruction(subsd, sse2, 0xF2, 0x5C, XMMRegister, XMMRegister)
2495 emit_sse_instruction(mulss, sse, 0xF3, 0x59, XMMRegister, XMMRegister)
2496 emit_sse_instruction(mulsd, sse2, 0xF2, 0x59, XMMRegister, XMMRegister)
2497 emit_sse_instruction(divss, sse, 0xF3, 0x5E, XMMRegister, XMMRegister)
2498 emit_sse_instruction(divsd, sse2, 0xF2, 0x5E, XMMRegister, XMMRegister)
2499 emit_sse_instruction(sqrtss, sse, 0xF3, 0x51, XMMRegister, XMMRegister)
2500 emit_sse_instruction(sqrtsd, sse2, 0xF2, 0x51, XMMRegister, XMMRegister)
2501
2502 emit_sse_instruction(pxor, sse2, 0x66, 0xEF, XMMRegister, XMMRegister)
2503
2504 emit_sse_instruction(comiss, sse, 0, 0x2F, XMMRegister, XMMRegister)
2505 emit_sse_instruction(comisd, sse2, 0x66, 0x2F, XMMRegister, XMMRegister)
2506 emit_sse_instruction(ucomiss, sse, 0, 0x2E, XMMRegister, XMMRegister)
2507 emit_sse_instruction(ucomisd, sse2, 0x66, 0x2E, XMMRegister, XMMRegister)
2508
2509 emit_sse_instruction(cvtss2sd, sse2, 0xF3, 0x5A, XMMRegister, XMMRegister);
2510 emit_sse_instruction(cvtsd2ss, sse2, 0xF2, 0x5A, XMMRegister, XMMRegister)
2511 emit_sse_instruction(cvtsi2ss, sse, 0xF3, 0x2A, XMMRegister, Register);
2512 emit_sse_instruction(cvtsi2sd, sse2, 0xF2, 0x2A, XMMRegister, Register)
2513 emit_sse_instruction(cvtss2si, sse, 0xF3, 0x2D, Register, XMMRegister);
2514 emit_sse_instruction(cvtsd2si, sse2, 0xF2, 0x2D, Register, XMMRegister)
2515 emit_sse_instruction(cvttss2si, sse, 0xF3, 0x2C, Register, XMMRegister);
2516 emit_sse_instruction(cvttsd2si, sse2, 0xF2, 0x2C, Register, XMMRegister)
2517
2518 emit_sse_instruction(movss, sse, 0xF3, 0x10, XMMRegister, XMMRegister)
2519 emit_sse_instruction(movsd, sse2, 0xF2, 0x10, XMMRegister, XMMRegister)
2520
2521 emit_sse_instruction(movq, sse2, 0xF3, 0x7E, XMMRegister, XMMRegister);
2522 emit_sse_instruction(movd, sse2, 0x66, 0x6E, XMMRegister, Register);
2523 emit_sse_instruction(movdqa, sse2, 0x66, 0x6F, XMMRegister, XMMRegister);
2524
2525 emit_sse_instruction(punpcklbw, sse2, 0x66, 0x60, XMMRegister, XMMRegister);
2526
2527
2528 // Instructions not covered by the macro
2529 void Assembler::movq(Address dst, XMMRegister src) {
2530 assert(VM_Version::supports_sse2(), "");
2531
2532 InstructionMark im(this);
2533 emit_byte(0x66);
2534 emit_byte(0x0F);
2535 emit_byte(0xD6);
2536 emit_sse_operand(src, dst);
2537 }
2538
2539 void Assembler::movd(Address dst, XMMRegister src) {
2540 assert(VM_Version::supports_sse2(), "");
2541
2542 InstructionMark im(this);
2543 emit_byte(0x66);
2544 emit_byte(0x0F);
2545 emit_byte(0x7E);
2546 emit_sse_operand(src, dst);
2547 }
2548
2549 void Assembler::movd(Register dst, XMMRegister src) {
2550 assert(VM_Version::supports_sse2(), "");
2551
2552 emit_byte(0x66);
2553 emit_byte(0x0F);
2554 emit_byte(0x7E);
2555 emit_sse_operand(src, dst);
2556 }
2557
2558 void Assembler::movdqa(Address dst, XMMRegister src) {
2559 assert(VM_Version::supports_sse2(), "");
2560
2561 InstructionMark im(this);
2562 emit_byte(0x66);
2563 emit_byte(0x0F);
2564 emit_byte(0x7F);
2565 emit_sse_operand(src, dst);
2566 }
2567
2568 void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
2569 assert(isByte(mode), "invalid value");
2570 assert(VM_Version::supports_sse2(), "");
2571
2572 emit_byte(0x66);
2573 emit_byte(0x0F);
2574 emit_byte(0x70);
2575 emit_sse_operand(dst, src);
2576 emit_byte(mode & 0xFF);
2577 }
2578
2579 void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
2580 assert(isByte(mode), "invalid value");
2581 assert(VM_Version::supports_sse2(), "");
2582
2583 InstructionMark im(this);
2584 emit_byte(0x66);
2585 emit_byte(0x0F);
2586 emit_byte(0x70);
2587 emit_sse_operand(dst, src);
2588 emit_byte(mode & 0xFF);
2589 }
2590
2591 void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
2592 assert(isByte(mode), "invalid value");
2593 assert(VM_Version::supports_sse2(), "");
2594
2595 emit_byte(0xF2);
2596 emit_byte(0x0F);
2597 emit_byte(0x70);
2598 emit_sse_operand(dst, src);
2599 emit_byte(mode & 0xFF);
2600 }
2601
2602 void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
2603 assert(isByte(mode), "invalid value");
2604 assert(VM_Version::supports_sse2(), "");
2605
2606 InstructionMark im(this);
2607 emit_byte(0xF2);
2608 emit_byte(0x0F);
2609 emit_byte(0x70);
2610 emit_sse_operand(dst, src);
2611 emit_byte(mode & 0xFF);
2612 }
2613
2614 void Assembler::psrlq(XMMRegister dst, int shift) {
2615 assert(VM_Version::supports_sse2(), "");
2616
2617 emit_byte(0x66);
2618 emit_byte(0x0F);
2619 emit_byte(0x73);
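// xmm2 is not a real operand here: its encoding (2) supplies the /2 opcode
// extension in the ModRM byte (66 0F 73 /2 ib = psrlq xmm, imm8).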
2620 emit_sse_operand(xmm2, dst);
2621 emit_byte(shift);
2622 }
2623
2624 void Assembler::movss( Address dst, XMMRegister src ) {
2625 assert(VM_Version::supports_sse(), "");
2626
2627 InstructionMark im(this);
2628 emit_byte(0xF3); // single
2629 emit_byte(0x0F);
2630 emit_byte(0x11); // store
2631 emit_sse_operand(src, dst);
2632 }
2633
2634 void Assembler::movsd( Address dst, XMMRegister src ) {
2635 assert(VM_Version::supports_sse2(), "");
2636
2637 InstructionMark im(this);
2638 emit_byte(0xF2); // double
2639 emit_byte(0x0F);
2640 emit_byte(0x11); // store
2641 emit_sse_operand(src,dst);
2642 }
2643
2644 // Newer CPUs require the use of movaps and movapd to avoid a partial register stall
2645 // when moving between registers.
2646 void Assembler::movaps(XMMRegister dst, XMMRegister src) {
2647 assert(VM_Version::supports_sse(), "");
2648
2649 emit_byte(0x0F);
2650 emit_byte(0x28);
2651 emit_sse_operand(dst, src);
2652 }
2653 void Assembler::movapd(XMMRegister dst, XMMRegister src) {
2654 assert(VM_Version::supports_sse2(), "");
2655
2656 emit_byte(0x66);
2657 emit_byte(0x0F);
2658 emit_byte(0x28);
2659 emit_sse_operand(dst, src);
2660 }
2661
2662 // Newer CPUs require the use of movsd and movss to avoid a partial register stall
2663 // when loading from memory. However, for the old Opteron use movlpd instead of movsd.
2664 // The selection is done in MacroAssembler::movdbl() and movflt().
2665 void Assembler::movlpd(XMMRegister dst, Address src) {
2666 assert(VM_Version::supports_sse(), "");
2667
2668 InstructionMark im(this);
2669 emit_byte(0x66);
2670 emit_byte(0x0F);
2671 emit_byte(0x12);
2672 emit_sse_operand(dst, src);
2673 }
2674
2675
2676 emit_sse_instruction(andps, sse, 0, 0x54, XMMRegister, XMMRegister);
2677 emit_sse_instruction(andpd, sse2, 0x66, 0x54, XMMRegister, XMMRegister);
2678 emit_sse_instruction(andnps, sse, 0, 0x55, XMMRegister, XMMRegister);
2679 emit_sse_instruction(andnpd, sse2, 0x66, 0x55, XMMRegister, XMMRegister);
2680 emit_sse_instruction(orps, sse, 0, 0x56, XMMRegister, XMMRegister);
2681 emit_sse_instruction(orpd, sse2, 0x66, 0x56, XMMRegister, XMMRegister);
2682 emit_sse_instruction(xorps, sse, 0, 0x57, XMMRegister, XMMRegister);
2683 emit_sse_instruction(xorpd, sse2, 0x66, 0x57, XMMRegister, XMMRegister);
2684
2685
2686 void Assembler::ldmxcsr( Address src) {
2687 InstructionMark im(this);
2688 emit_byte(0x0F);
2689 emit_byte(0xAE);
2690 emit_operand(rdx /* 2 */, src);
2691 }
2692
2693 void Assembler::stmxcsr( Address dst) {
2694 InstructionMark im(this);
2695 emit_byte(0x0F);
2696 emit_byte(0xAE);
2697 emit_operand(rbx /* 3 */, dst);
2698 }
2699
2700 // Implementation of MacroAssembler
2701
2702 Address MacroAssembler::as_Address(AddressLiteral adr) {
2703 // amd64 always does this as a pc-relative address;
2704 // we can be absolute or displacement-based depending on the instruction type:
2705 // jmp/call are displacements, others are absolute
2706 assert(!adr.is_lval(), "must be rval");
2707
2708 return Address(adr.target(), adr.rspec());
2709 }
2710
2711 Address MacroAssembler::as_Address(ArrayAddress adr) {
2712 return Address::make_array(adr);
2713 }
2714
2715 void MacroAssembler::fat_nop() {
2716 // A 5 byte nop that is safe for patching (see patch_verified_entry)
2717 emit_byte(0x26); // es:
2718 emit_byte(0x2e); // cs:
2719 emit_byte(0x64); // fs:
2720 emit_byte(0x65); // gs:
2721 emit_byte(0x90);
2722 }
2723
2724 // 32-bit can do a case-table jump in one instruction, but we no longer allow the base
2725 // to be installed in the Address class
2726 void MacroAssembler::jump(ArrayAddress entry) {
2727 jmp(as_Address(entry));
2728 }
2729
2730 void MacroAssembler::jump(AddressLiteral dst) {
2731 jmp_literal(dst.target(), dst.rspec());
2732 }
2733
2734 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
2735 assert((0 <= cc) && (cc < 16), "illegal cc");
2736
2737 InstructionMark im(this);
2738
2739 relocInfo::relocType rtype = dst.reloc();
2740 relocate(rtype);
2741 const int short_size = 2;
2742 const int long_size = 6;
2743 int offs = (int)dst.target() - ((int)_code_pos);
2744 if (rtype == relocInfo::none && is8bit(offs - short_size)) {
2745 // 0111 tttn #8-bit disp
2746 emit_byte(0x70 | cc);
2747 emit_byte((offs - short_size) & 0xFF);
2748 } else {
2749 // 0000 1111 1000 tttn #32-bit disp
2750 emit_byte(0x0F);
2751 emit_byte(0x80 | cc);
2752 emit_long(offs - long_size);
2753 }
2754 }
2755
2756 // Calls
2757 void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
2758 Assembler::call(L, rtype);
2759 }
2760
2761 void MacroAssembler::call(Register entry) {
2762 Assembler::call(entry);
2763 }
2764
2765 void MacroAssembler::call(AddressLiteral entry) {
2766 Assembler::call_literal(entry.target(), entry.rspec());
2767 }
2768
2769
2770 void MacroAssembler::cmp8(AddressLiteral src1, int8_t imm) {
2771 Assembler::cmpb(as_Address(src1), imm);
2772 }
2773
2774 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) {
2775 Assembler::cmpl(as_Address(src1), imm);
2776 }
2777
2778 void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
2779 if (src2.is_lval()) {
2780 cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
2781 } else {
2782 Assembler::cmpl(src1, as_Address(src2));
2783 }
2784 }
2785
2786 void MacroAssembler::cmp32(Register src1, int32_t imm) {
2787 Assembler::cmpl(src1, imm);
2788 }
2789
2790 void MacroAssembler::cmp32(Register src1, Address src2) {
2791 Assembler::cmpl(src1, src2);
2792 }
2793
2794 void MacroAssembler::cmpoop(Address src1, jobject obj) {
2795 cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
2796 }
2797
2798 void MacroAssembler::cmpoop(Register src1, jobject obj) {
2799 cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
2800 }
2801
2802 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
2803 if (src2.is_lval()) {
2804 // compare the effective address of src2 to src1
2805 cmp_literal32(src1, (int32_t)src2.target(), src2.rspec());
2806 } else {
2807 Assembler::cmpl(src1, as_Address(src2));
2808 }
2809 }
2810
2811 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
2812 assert(src2.is_lval(), "not a mem-mem compare");
2813 cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
2814 }
2815
2816
2817 void MacroAssembler::cmpxchgptr(Register reg, AddressLiteral adr) {
2818 cmpxchg(reg, as_Address(adr));
2819 }
2820
2821 void MacroAssembler::increment(AddressLiteral dst) {
2822 increment(as_Address(dst));
2823 }
2824
2825 void MacroAssembler::increment(ArrayAddress dst) {
2826 increment(as_Address(dst));
2827 }
2828
2829 void MacroAssembler::lea(Register dst, AddressLiteral adr) {
2830 // leal(dst, as_Address(adr));
2831 // see note in movl as to why we must use a move
2832 mov_literal32(dst, (int32_t) adr.target(), adr.rspec());
2833 }
2834
2835 void MacroAssembler::lea(Address dst, AddressLiteral adr) {
2836 // leal(dst, as_Address(adr));
2837 // see note in movl as to why we must use a move
2838 mov_literal32(dst, (int32_t) adr.target(), adr.rspec());
2839 }
2840
2841 void MacroAssembler::mov32(AddressLiteral dst, Register src) {
2842 Assembler::movl(as_Address(dst), src);
2843 }
2844
2845 void MacroAssembler::mov32(Register dst, AddressLiteral src) {
2846 Assembler::movl(dst, as_Address(src));
2847 }
2848
2849 void MacroAssembler::movbyte(ArrayAddress dst, int src) {
2850 movb(as_Address(dst), src);
2851 }
2852
2853 void MacroAssembler::movoop(Address dst, jobject obj) {
2854 mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
2855 }
2856
2857 void MacroAssembler::movoop(Register dst, jobject obj) {
2858 mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
2859 }
2860
2861 void MacroAssembler::movptr(Register dst, AddressLiteral src) {
2862 if (src.is_lval()) {
2863 // essentially an lea
2864 mov_literal32(dst, (int32_t) src.target(), src.rspec());
2865 } else {
2866 // mov 32bits from an absolute address
2867 movl(dst, as_Address(src));
2868 }
2869 }
2870
2871 void MacroAssembler::movptr(ArrayAddress dst, Register src) {
2872 movl(as_Address(dst), src);
2873 }
2874
2875 void MacroAssembler::movptr(Register dst, ArrayAddress src) {
2876 movl(dst, as_Address(src));
2877 }
2878
2879 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
2880 movss(dst, as_Address(src));
2881 }
2882
2883 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
2884 if (UseXmmLoadAndClearUpper) { movsd (dst, as_Address(src)); return; }
2885 else { movlpd(dst, as_Address(src)); return; }
2886 }
2887
2888 void Assembler::pushoop(jobject obj) {
2889 push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
2890 }
2891
2892
2893 void MacroAssembler::pushptr(AddressLiteral src) {
2894 if (src.is_lval()) {
2895 push_literal32((int32_t)src.target(), src.rspec());
2896 } else {
2897 pushl(as_Address(src));
2898 }
2899 }
2900
2901 void MacroAssembler::test32(Register src1, AddressLiteral src2) {
2902 // src2 must be rval
2903 testl(src1, as_Address(src2));
2904 }
2905
2906 // FPU
2907
2908 void MacroAssembler::fld_x(AddressLiteral src) {
2909 Assembler::fld_x(as_Address(src));
2910 }
2911
2912 void MacroAssembler::fld_d(AddressLiteral src) {
2913 fld_d(as_Address(src));
2914 }
2915
2916 void MacroAssembler::fld_s(AddressLiteral src) {
2917 fld_s(as_Address(src));
2918 }
2919
2920 void MacroAssembler::fldcw(AddressLiteral src) {
2921 Assembler::fldcw(as_Address(src));
2922 }
2923
2924 void MacroAssembler::ldmxcsr(AddressLiteral src) {
2925 Assembler::ldmxcsr(as_Address(src));
2926 }
2927
2928 // SSE
2929
2930 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
2931 andpd(dst, as_Address(src));
2932 }
2933
2934 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
2935 comisd(dst, as_Address(src));
2936 }
2937
2938 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
2939 comiss(dst, as_Address(src));
2940 }
2941
2942 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
2943 movsd(dst, as_Address(src));
2944 }
2945
2946 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
2947 movss(dst, as_Address(src));
2948 }
2949
2950 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
2951 xorpd(dst, as_Address(src));
2952 }
2953
2954 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
2955 xorps(dst, as_Address(src));
2956 }
2957
2958 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
2959 ucomisd(dst, as_Address(src));
2960 }
2961
2962 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
2963 ucomiss(dst, as_Address(src));
2964 }
2965
2966 void MacroAssembler::null_check(Register reg, int offset) {
2967 if (needs_explicit_null_check(offset)) {
2968 // provoke OS NULL exception if reg = NULL by
2969 // accessing M[reg] w/o changing any (non-CC) registers
2970 cmpl(rax, Address(reg, 0));
2971 // Note: should probably use testl(rax, Address(reg, 0));
2972 // may be shorter code (however, this version of
2973 // testl needs to be implemented first)
2974 } else {
2975 // nothing to do, (later) access of M[reg + offset]
2976 // will provoke OS NULL exception if reg = NULL
2977 }
2978 }
2979
2980
2981 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
2982 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
2983 // and "3.9 Partial Register Penalties", p. 22.
2984 int off;
2985 if (VM_Version::is_P6() || src.uses(dst)) {
2986 off = offset();
2987 movzxb(dst, src);
2988 } else {
2989 xorl(dst, dst);
2990 off = offset();
2991 movb(dst, src);
2992 }
2993 return off;
2994 }
2995
2996
2997 int MacroAssembler::load_unsigned_word(Register dst, Address src) {
2998 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
2999 // and "3.9 Partial Register Penalties", p. 22.
3000 int off;
3001 if (VM_Version::is_P6() || src.uses(dst)) {
3002 off = offset();
3003 movzxw(dst, src);
3004 } else {
3005 xorl(dst, dst);
3006 off = offset();
3007 movw(dst, src);
3008 }
3009 return off;
3010 }
3011
3012
3013 int MacroAssembler::load_signed_byte(Register dst, Address src) {
3014 int off;
3015 if (VM_Version::is_P6()) {
3016 off = offset();
3017 movsxb(dst, src);
3018 } else {
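// pre-P6 path: zero-extend first, then shift the byte into the sign position
// and arithmetically back down, e.g. 0x000000FF << 24 = 0xFF000000 and
// 0xFF000000 >> 24 (arithmetic) = 0xFFFFFFFF = -1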
3019 off = load_unsigned_byte(dst, src);
3020 shll(dst, 24);
3021 sarl(dst, 24);
3022 }
3023 return off;
3024 }
3025
3026
3027 int MacroAssembler::load_signed_word(Register dst, Address src) {
3028 int off;
3029 if (VM_Version::is_P6()) {
3030 off = offset();
3031 movsxw(dst, src);
3032 } else {
3033 off = load_unsigned_word(dst, src);
3034 shll(dst, 16);
3035 sarl(dst, 16);
3036 }
3037 return off;
3038 }
3039
3040
3041 void MacroAssembler::extend_sign(Register hi, Register lo) {
3042 // According to Intel Doc. AP-526, "Integer Divide", p.18.
3043 if (VM_Version::is_P6() && hi == rdx && lo == rax) {
3044 cdql();
3045 } else {
3046 movl(hi, lo);
3047 sarl(hi, 31);
3048 }
3049 }
3050
3051
3052 void MacroAssembler::increment(Register reg, int value) {
3053 if (value == min_jint) {addl(reg, value); return; }
3054 if (value < 0) { decrement(reg, -value); return; }
3055 if (value == 0) { ; return; }
3056 if (value == 1 && UseIncDec) { incl(reg); return; }
3057 /* else */ { addl(reg, value) ; return; }
3058 }
3059
3060 void MacroAssembler::increment(Address dst, int value) {
3061 if (value == min_jint) {addl(dst, value); return; }
3062 if (value < 0) { decrement(dst, -value); return; }
3063 if (value == 0) { ; return; }
3064 if (value == 1 && UseIncDec) { incl(dst); return; }
3065 /* else */ { addl(dst, value) ; return; }
3066 }
3067
3068 void MacroAssembler::decrement(Register reg, int value) {
3069 if (value == min_jint) {subl(reg, value); return; }
3070 if (value < 0) { increment(reg, -value); return; }
3071 if (value == 0) { ; return; }
3072 if (value == 1 && UseIncDec) { decl(reg); return; }
3073 /* else */ { subl(reg, value) ; return; }
3074 }
3075
3076 void MacroAssembler::decrement(Address dst, int value) {
3077 if (value == min_jint) {subl(dst, value); return; }
3078 if (value < 0) { increment(dst, -value); return; }
3079 if (value == 0) { ; return; }
3080 if (value == 1 && UseIncDec) { decl(dst); return; }
3081 /* else */ { subl(dst, value) ; return; }
3082 }
3083
3084 void MacroAssembler::align(int modulus) {
3085 if (offset() % modulus != 0) nop(modulus - (offset() % modulus));
3086 }
3087
3088
3089 void MacroAssembler::enter() {
3090 pushl(rbp);
3091 movl(rbp, rsp);
3092 }
3093
3094
3095 void MacroAssembler::leave() {
3096 movl(rsp, rbp);
3097 popl(rbp);
3098 }
3099
3100 void MacroAssembler::set_last_Java_frame(Register java_thread,
3101 Register last_java_sp,
3102 Register last_java_fp,
3103 address last_java_pc) {
3104 // determine java_thread register
3105 if (!java_thread->is_valid()) {
3106 java_thread = rdi;
3107 get_thread(java_thread);
3108 }
3109 // determine last_java_sp register
3110 if (!last_java_sp->is_valid()) {
3111 last_java_sp = rsp;
3112 }
3113
3114 // last_java_fp is optional
3115
3116 if (last_java_fp->is_valid()) {
3117 movl(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
3118 }
3119
3120 // last_java_pc is optional
3121
3122 if (last_java_pc != NULL) {
3123 lea(Address(java_thread,
3124 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
3125 InternalAddress(last_java_pc));
3126
3127 }
3128 movl(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
3129 }
3130
3131 void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) {
3132 // determine java_thread register
3133 if (!java_thread->is_valid()) {
3134 java_thread = rdi;
3135 get_thread(java_thread);
3136 }
3137 // we must set sp to zero to clear frame
3138 movl(Address(java_thread, JavaThread::last_Java_sp_offset()), 0);
3139 if (clear_fp) {
3140 movl(Address(java_thread, JavaThread::last_Java_fp_offset()), 0);
3141 }
3142
3143 if (clear_pc)
3144 movl(Address(java_thread, JavaThread::last_Java_pc_offset()), 0);
3145
3146 }
3147
3148
3149
3150 // Implementation of call_VM versions
3151
3152 void MacroAssembler::call_VM_leaf_base(
3153 address entry_point,
3154 int number_of_arguments
3155 ) {
3156 call(RuntimeAddress(entry_point));
3157 increment(rsp, number_of_arguments * wordSize);
3158 }
3159
3160
3161 void MacroAssembler::call_VM_base(
3162 Register oop_result,
3163 Register java_thread,
3164 Register last_java_sp,
3165 address entry_point,
3166 int number_of_arguments,
3167 bool check_exceptions
3168 ) {
3169 // determine java_thread register
3170 if (!java_thread->is_valid()) {
3171 java_thread = rdi;
3172 get_thread(java_thread);
3173 }
3174 // determine last_java_sp register
3175 if (!last_java_sp->is_valid()) {
3176 last_java_sp = rsp;
3177 }
3178 // debugging support
3179 assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
3180 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
3181 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
3182 // push java thread (becomes first argument of C function)
3183 pushl(java_thread);
3184 // set last Java frame before call
3185 assert(last_java_sp != rbp, "this code doesn't work for last_java_sp == rbp, which currently can't portably work anyway since C2 doesn't save rbp");
3186 // Only interpreter should have to set fp
3187 set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);
3188 // do the call
3189 call(RuntimeAddress(entry_point));
3190 // restore the thread (cannot use the pushed argument since arguments
3191 // may be overwritten by C code generated by an optimizing compiler);
3192 // however, we can use the register value directly if it is callee saved.
3193 if (java_thread == rdi || java_thread == rsi) {
3194 // rdi & rsi are callee saved -> nothing to do
3195 #ifdef ASSERT
3196 guarantee(java_thread != rax, "change this code");
3197 pushl(rax);
3198 { Label L;
3199 get_thread(rax);
3200 cmpl(java_thread, rax);
3201 jcc(Assembler::equal, L);
3202 stop("MacroAssembler::call_VM_base: rdi not callee saved?");
3203 bind(L);
3204 }
3205 popl(rax);
3206 #endif
3207 } else {
3208 get_thread(java_thread);
3209 }
3210 // reset last Java frame
3211 // Only interpreter should have to clear fp
3212 reset_last_Java_frame(java_thread, true, false);
3213 // discard thread and arguments
3214 addl(rsp, (1 + number_of_arguments)*wordSize);
3215
3216 #ifndef CC_INTERP
3217 // C++ interp handles this in the interpreter
3218 check_and_handle_popframe(java_thread);
3219 check_and_handle_earlyret(java_thread);
3220 #endif /* CC_INTERP */
3221
3222 if (check_exceptions) {
3223 // check for pending exceptions (java_thread is set upon return)
3224 cmpl(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
3225 jump_cc(Assembler::notEqual,
3226 RuntimeAddress(StubRoutines::forward_exception_entry()));
3227 }
3228
3229 // get oop result if there is one and reset the value in the thread
3230 if (oop_result->is_valid()) {
3231 movl(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
3232 movl(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
3233 verify_oop(oop_result);
3234 }
3235 }
3236
3237
3238 void MacroAssembler::check_and_handle_popframe(Register java_thread) {
3239 }
3240
3241 void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
3242 }
3243
3244 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
3245 leal(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
3246 call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
3247 }
3248
3249
3250 void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
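// The out-of-line call below leaves a return address on the stack, so the
// last_java_sp computed in call_VM_helper (rsp + (1 + number_of_arguments) *
// wordSize) skips the pushed arguments and that return address, i.e. it
// recovers the caller's rsp at this call site.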
3251 Label C, E;
3252 call(C, relocInfo::none);
3253 jmp(E);
3254
3255 bind(C);
3256 call_VM_helper(oop_result, entry_point, 0, check_exceptions);
3257 ret(0);
3258
3259 bind(E);
3260 }
3261
3262
3263 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
3264 Label C, E;
3265 call(C, relocInfo::none);
3266 jmp(E);
3267
3268 bind(C);
3269 pushl(arg_1);
3270 call_VM_helper(oop_result, entry_point, 1, check_exceptions);
3271 ret(0);
3272
3273 bind(E);
3274 }
3275
3276
3277 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
3278 Label C, E;
3279 call(C, relocInfo::none);
3280 jmp(E);
3281
3282 bind(C);
3283 pushl(arg_2);
3284 pushl(arg_1);
3285 call_VM_helper(oop_result, entry_point, 2, check_exceptions);
3286 ret(0);
3287
3288 bind(E);
3289 }
3290
3291
3292 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
3293 Label C, E;
3294 call(C, relocInfo::none);
3295 jmp(E);
3296
3297 bind(C);
3298 pushl(arg_3);
3299 pushl(arg_2);
3300 pushl(arg_1);
3301 call_VM_helper(oop_result, entry_point, 3, check_exceptions);
3302 ret(0);
3303
3304 bind(E);
3305 }
3306
3307
3308 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
3309 call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
3310 }
3311
3312
3313 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
3314 pushl(arg_1);
3315 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
3316 }
3317
3318
3319 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
3320 pushl(arg_2);
3321 pushl(arg_1);
3322 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
3323 }
3324
3325
3326 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
3327 pushl(arg_3);
3328 pushl(arg_2);
3329 pushl(arg_1);
3330 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
3331 }
3332
3333
3334 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
3335 call_VM_leaf_base(entry_point, number_of_arguments);
3336 }
3337
3338
3339 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
3340 pushl(arg_1);
3341 call_VM_leaf(entry_point, 1);
3342 }
3343
3344
3345 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
3346 pushl(arg_2);
3347 pushl(arg_1);
3348 call_VM_leaf(entry_point, 2);
3349 }
3350
3351
3352 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
3353 pushl(arg_3);
3354 pushl(arg_2);
3355 pushl(arg_1);
3356 call_VM_leaf(entry_point, 3);
3357 }
3358
3359
3360 // Calls to C land
3361 //
3362 // When entering C land, the rbp and rsp of the last Java frame have to be recorded
3363 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp
3364 // has to be reset to 0. This is required to allow proper stack traversal.
3365
3366 void MacroAssembler::store_check(Register obj) {
3367 // Does a store check for the oop in register obj. The content of
3368 // register obj is destroyed afterwards.
3369 store_check_part_1(obj);
3370 store_check_part_2(obj);
3371 }
3372
3373
3374 void MacroAssembler::store_check(Register obj, Address dst) {
3375 store_check(obj);
3376 }
3377
3378
3379 // split the store check operation so that other instructions can be scheduled in between
3380 void MacroAssembler::store_check_part_1(Register obj) {
3381 BarrierSet* bs = Universe::heap()->barrier_set();
3382 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
3383 shrl(obj, CardTableModRefBS::card_shift);
3384 }
3385
3386
3387 void MacroAssembler::store_check_part_2(Register obj) {
3388 BarrierSet* bs = Universe::heap()->barrier_set();
3389 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
3390 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
3391 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
3392 ExternalAddress cardtable((address)ct->byte_map_base);
3393 Address index(noreg, obj, Address::times_1);
3394
3395 movb(as_Address(ArrayAddress(cardtable, index)), 0);
3396 }
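// Together, part 1 and part 2 amount to (a C-level sketch, not emitted code):
//   ct->byte_map_base[(uintptr_t)obj >> CardTableModRefBS::card_shift] = 0;
// i.e. the card covering the stored-into object is marked dirty.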
3397
3398
3399 void MacroAssembler::c2bool(Register x) {
3400 // implements x == 0 ? 0 : 1
3401 // note: must only look at least-significant byte of x
3402 // since C-style booleans are stored in one byte
3403 // only! (was bug)
3404 andl(x, 0xFF);
3405 setb(Assembler::notZero, x);
3406 }
3407
3408
3409 int MacroAssembler::corrected_idivl(Register reg) {
3410 // Full implementation of Java idiv and irem; checks for
3411 // special case as described in JVM spec., p.243 & p.271.
3412 // The function returns the (pc) offset of the idivl
3413 // instruction - may be needed for implicit exceptions.
3414 //
3415 //                normal case                      special case
3416 //
3417 // input : rax:  dividend                          min_int
3418 //         reg:  divisor (may not be rax/rdx)      -1
3419 //
3420 // output: rax:  quotient  (= rax idiv reg)        min_int
3421 //         rdx:  remainder (= rax irem reg)        0
3422 assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
3423 const int min_int = 0x80000000;
3424 Label normal_case, special_case;
3425
3426 // check for special case
3427 cmpl(rax, min_int);
3428 jcc(Assembler::notEqual, normal_case);
3429 xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
3430 cmpl(reg, -1);
3431 jcc(Assembler::equal, special_case);
3432
3433 // handle normal case
3434 bind(normal_case);
3435 cdql();
3436 int idivl_offset = offset();
3437 idivl(reg);
3438
3439 // normal and special case exit
3440 bind(special_case);
3441
3442 return idivl_offset;
3443 }
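// Illustration only -- a hypothetical helper (not part of the VM) showing the
// Java-level semantics reproduced by the generated code above:
//
//   void java_idiv_irem(jint dividend, jint divisor, jint& q, jint& r) {
//     if (dividend == min_jint && divisor == -1) { q = min_jint; r = 0; }
//     else { q = dividend / divisor; r = dividend % divisor; }
//   }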
3444
3445
3446 void MacroAssembler::lneg(Register hi, Register lo) {
3447 negl(lo);
3448 adcl(hi, 0);
3449 negl(hi);
3450 }
3451
3452
3453 void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
3454 // Multiplication of two Java long values stored on the stack
3455 // as illustrated below. Result is in rdx:rax.
3456 //
3457 // rsp ---> [  ??  ] \               \
3458 //            ....    | y_rsp_offset  |
3459 //          [ y_lo ] /  (in bytes)    | x_rsp_offset
3460 //          [ y_hi ]                  | (in bytes)
3461 //            ....                    |
3462 //          [ x_lo ]                 /
3463 //          [ x_hi ]
3464 //            ....
3465 //
3466 // Basic idea: lo(result) = lo(x_lo * y_lo)
3467 // hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
3468 Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
3469 Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
3470 Label quick;
3471 // load x_hi, y_hi and check if quick
3472 // multiplication is possible
3473 movl(rbx, x_hi);
3474 movl(rcx, y_hi);
3475 movl(rax, rbx);
3476 orl(rbx, rcx); // rbx = 0 <=> x_hi = 0 and y_hi = 0
3477 jcc(Assembler::zero, quick); // if rbx = 0 do quick multiply
3478 // do full multiplication
3479 // 1st step
3480 mull(y_lo); // x_hi * y_lo
3481 movl(rbx, rax); // save lo(x_hi * y_lo) in rbx
3482 // 2nd step
3483 movl(rax, x_lo);
3484 mull(rcx); // x_lo * y_hi
3485 addl(rbx, rax); // add lo(x_lo * y_hi) to rbx
3486 // 3rd step
3487 bind(quick); // note: rbx = 0 if quick multiply!
3488 movl(rax, x_lo);
3489 mull(y_lo); // x_lo * y_lo
3490 addl(rdx, rbx); // correct hi(x_lo * y_lo)
3491 }
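// For reference, the identity used above (all arithmetic mod 2^64):
//   (x_hi*2^32 + x_lo) * (y_hi*2^32 + y_lo)
//     == x_lo*y_lo + ((x_hi*y_lo + x_lo*y_hi) << 32)
// because the x_hi*y_hi term is shifted out entirely; hence the low word is
// lo(x_lo*y_lo) and the high word is hi(x_lo*y_lo) + lo(x_hi*y_lo) + lo(x_lo*y_hi).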
3492
3493
3494 void MacroAssembler::lshl(Register hi, Register lo) {
3495 // Java shift left long support (semantics as described in JVM spec., p.305)
3496 // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
3497 // shift value is in rcx !
3498 assert(hi != rcx, "must not use rcx");
3499 assert(lo != rcx, "must not use rcx");
3500 const Register s = rcx; // shift count
3501 const int n = BitsPerWord;
3502 Label L;
3503 andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
3504 cmpl(s, n); // if (s < n)
3505 jcc(Assembler::less, L); // else (s >= n)
3506 movl(hi, lo); // x := x << n
3507 xorl(lo, lo);
3508 // Note: subl(s, n) is not needed since the Intel shift instructions only use the shift count in rcx modulo n!
3509 bind(L); // s (mod n) < n
3510 shldl(hi, lo); // x := x << s
3511 shll(lo);
3512 }
3513
3514
3515 void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
3516 // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
3517 // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
3518 assert(hi != rcx, "must not use rcx");
3519 assert(lo != rcx, "must not use rcx");
3520 const Register s = rcx; // shift count
3521 const int n = BitsPerWord;
3522 Label L;
3523 andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
3524 cmpl(s, n); // if (s < n)
3525 jcc(Assembler::less, L); // else (s >= n)
3526 movl(lo, hi); // x := x >> n
3527 if (sign_extension) sarl(hi, 31);
3528 else xorl(hi, hi);
3529 // Note: subl(s, n) is not needed since the Intel shift instructions only use the shift count in rcx modulo n!
3530 bind(L); // s (mod n) < n
3531 shrdl(lo, hi); // x := x >> s
3532 if (sign_extension) sarl(hi);
3533 else shrl(hi);
3534 }
3535
3536
3537 // Note: y_lo will be destroyed
3538 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
3539 // Long compare for Java (semantics as described in JVM spec.)
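// Result (in x_hi): -1 if x < y, 0 if x == y, +1 if x > y
// (signed compare on the high words, unsigned compare on the low words).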
3540 Label high, low, done;
3541
3542 cmpl(x_hi, y_hi);
3543 jcc(Assembler::less, low);
3544 jcc(Assembler::greater, high);
3545 // x_hi is the return register
3546 xorl(x_hi, x_hi);
3547 cmpl(x_lo, y_lo);
3548 jcc(Assembler::below, low);
3549 jcc(Assembler::equal, done);
3550
3551 bind(high);
3552 xorl(x_hi, x_hi);
3553 increment(x_hi);
3554 jmp(done);
3555
3556 bind(low);
3557 xorl(x_hi, x_hi);
3558 decrement(x_hi);
3559
3560 bind(done);
3561 }
3562
3563
3564 void MacroAssembler::save_rax(Register tmp) {
3565 if (tmp == noreg) pushl(rax);
3566 else if (tmp != rax) movl(tmp, rax);
3567 }
3568
3569
3570 void MacroAssembler::restore_rax(Register tmp) {
3571 if (tmp == noreg) popl(rax);
3572 else if (tmp != rax) movl(rax, tmp);
3573 }
3574
3575
3576 void MacroAssembler::fremr(Register tmp) {
3577 save_rax(tmp);
3578 { Label L;
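// fprem leaves the FPU C2 flag set while the reduction is only partial;
// after fnstsw/sahf, C2 shows up as the parity flag, so loop until it clears.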
3579 bind(L);
3580 fprem();
3581 fwait(); fnstsw_ax();
3582 sahf();
3583 jcc(Assembler::parity, L);
3584 }
3585 restore_rax(tmp);
3586 // Result is in ST0.
3587 // Note: fxch & fpop to get rid of ST1
3588 // (otherwise FPU stack could overflow eventually)
3589 fxch(1);
3590 fpop();
3591 }
3592
3593
3594 static const double pi_4 = 0.7853981633974483;
3595
3596 void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) {
3597 // A hand-coded argument reduction for values with fabs in (pi/4, pi/2)
3598 // was attempted in this code; unfortunately it appears that the
3599 // switch to 80-bit precision and back causes this to be
3600 // unprofitable compared with simply performing a runtime call if
3601 // the argument is out of the (-pi/4, pi/4) range.
3602
3603 Register tmp = noreg;
3604 if (!VM_Version::supports_cmov()) {
3605 // fcmp needs a temporary so preserve rbx
3606 tmp = rbx;
3607 pushl(tmp);
3608 }
3609
3610 Label slow_case, done;
3611
3612 // x ?<= pi/4
3613 fld_d(ExternalAddress((address)&pi_4));
3614 fld_s(1); // Stack: X PI/4 X
3615 fabs(); // Stack: |X| PI/4 X
3616 fcmp(tmp);
3617 jcc(Assembler::above, slow_case);
3618
3619 // fastest case: -pi/4 <= x <= pi/4
3620 switch(trig) {
3621 case 's':
3622 fsin();
3623 break;
3624 case 'c':
3625 fcos();
3626 break;
3627 case 't':
3628 ftan();
3629 break;
3630 default:
3631 assert(false, "bad intrinsic");
3632 break;
3633 }
3634 jmp(done);
3635
3636 // slow case: runtime call
3637 bind(slow_case);
3638 // Preserve registers across runtime call
3639 pushad();
3640 int incoming_argument_and_return_value_offset = -1;
3641 if (num_fpu_regs_in_use > 1) {
3642 // Must preserve all other FPU regs (could alternatively convert
3643 // SharedRuntime::dsin and dcos into assembly routines known not to trash
3644 // FPU state, but can not trust C compiler)
3645 NEEDS_CLEANUP;
3646 // NOTE that in this case we also push the incoming argument to
3647 // the stack and restore it later; we also use this stack slot to
3648 // hold the return value from dsin or dcos.
3649 for (int i = 0; i < num_fpu_regs_in_use; i++) {
3650 subl(rsp, wordSize*2);
3651 fstp_d(Address(rsp, 0));
3652 }
3653 incoming_argument_and_return_value_offset = 2*wordSize*(num_fpu_regs_in_use-1);
3654 fld_d(Address(rsp, incoming_argument_and_return_value_offset));
3655 }
3656 subl(rsp, wordSize*2);
3657 fstp_d(Address(rsp, 0));
3658 // NOTE: we must not use call_VM_leaf here because that requires a
3659 // complete interpreter frame in debug mode -- same bug as 4387334
3660 NEEDS_CLEANUP;
3661 // Need to add stack banging before this runtime call if it needs to
3662 // be taken; however, there is no generic stack banging routine at
3663 // the MacroAssembler level
3664 switch(trig) {
3665 case 's':
3666 {
3667 call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dsin)));
3668 }
3669 break;
3670 case 'c':
3671 {
3672 call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dcos)));
3673 }
3674 break;
3675 case 't':
3676 {
3677 call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtan)));
3678 }
3679 break;
3680 default:
3681 assert(false, "bad intrinsic");
3682 break;
3683 }
3684 addl(rsp, wordSize * 2);
3685 if (num_fpu_regs_in_use > 1) {
3686 // Must save return value to stack and then restore entire FPU stack
3687 fstp_d(Address(rsp, incoming_argument_and_return_value_offset));
3688 for (int i = 0; i < num_fpu_regs_in_use; i++) {
3689 fld_d(Address(rsp, 0));
3690 addl(rsp, wordSize*2);
3691 }
3692 }
3693 popad();
3694
3695 // Come here with result in F-TOS
3696 bind(done);
3697
3698 if (tmp != noreg) {
3699 popl(tmp);
3700 }
3701 }
3702
3703 void MacroAssembler::jC2(Register tmp, Label& L) {
3704 // set parity bit if FPU flag C2 is set (via rax)
3705 save_rax(tmp);
3706 fwait(); fnstsw_ax();
3707 sahf();
3708 restore_rax(tmp);
3709 // branch
3710 jcc(Assembler::parity, L);
3711 }
3712
3713
3714 void MacroAssembler::jnC2(Register tmp, Label& L) {
3715 // set parity bit if FPU flag C2 is set (via rax)
3716 save_rax(tmp);
3717 fwait(); fnstsw_ax();
3718 sahf();
3719 restore_rax(tmp);
3720 // branch
3721 jcc(Assembler::noParity, L);
3722 }
3723
3724
3725 void MacroAssembler::fcmp(Register tmp) {
3726 fcmp(tmp, 1, true, true);
3727 }
3728
3729
3730 void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
3731 assert(!pop_right || pop_left, "usage error");
3732 if (VM_Version::supports_cmov()) {
3733 assert(tmp == noreg, "unneeded temp");
3734 if (pop_left) {
3735 fucomip(index);
3736 } else {
3737 fucomi(index);
3738 }
3739 if (pop_right) {
3740 fpop();
3741 }
3742 } else {
3743 assert(tmp != noreg, "need temp");
3744 if (pop_left) {
3745 if (pop_right) {
3746 fcompp();
3747 } else {
3748 fcomp(index);
3749 }
3750 } else {
3751 fcom(index);
3752 }
3753 // convert FPU condition into eflags condition via rax
3754 save_rax(tmp);
3755 fwait(); fnstsw_ax();
3756 sahf();
3757 restore_rax(tmp);
3758 }
3759 // condition codes set as follows:
3760 //
3761 // CF (corresponds to C0) if x < y
3762 // PF (corresponds to C2) if unordered
3763 // ZF (corresponds to C3) if x = y
3764 }
3765
3766
3767 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) {
3768 fcmp2int(dst, unordered_is_less, 1, true, true);
3769 }
3770
3771
3772 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
3773 fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right);
3774 Label L;
3775 if (unordered_is_less) {
3776 movl(dst, -1);
3777 jcc(Assembler::parity, L);
3778 jcc(Assembler::below , L);
3779 movl(dst, 0);
3780 jcc(Assembler::equal , L);
3781 increment(dst);
3782 } else { // unordered is greater
3783 movl(dst, 1);
3784 jcc(Assembler::parity, L);
3785 jcc(Assembler::above , L);
3786 movl(dst, 0);
3787 jcc(Assembler::equal , L);
3788 decrement(dst);
3789 }
3790 bind(L);
3791 }
3792
3793 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
3794 ucomiss(opr1, opr2);
3795
3796 Label L;
3797 if (unordered_is_less) {
3798 movl(dst, -1);
3799 jcc(Assembler::parity, L);
3800 jcc(Assembler::below , L);
3801 movl(dst, 0);
3802 jcc(Assembler::equal , L);
3803 increment(dst);
3804 } else { // unordered is greater
3805 movl(dst, 1);
3806 jcc(Assembler::parity, L);
3807 jcc(Assembler::above , L);
3808 movl(dst, 0);
3809 jcc(Assembler::equal , L);
3810 decrement(dst);
3811 }
3812 bind(L);
3813 }
3814
3815 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
3816 ucomisd(opr1, opr2);
3817
3818 Label L;
3819 if (unordered_is_less) {
3820 movl(dst, -1);
3821 jcc(Assembler::parity, L);
3822 jcc(Assembler::below , L);
3823 movl(dst, 0);
3824 jcc(Assembler::equal , L);
3825 increment(dst);
3826 } else { // unordered is greater
3827 movl(dst, 1);
3828 jcc(Assembler::parity, L);
3829 jcc(Assembler::above , L);
3830 movl(dst, 0);
3831 jcc(Assembler::equal , L);
3832 decrement(dst);
3833 }
3834 bind(L);
3835 }
3836
3837
3838
3839 void MacroAssembler::fpop() {
3840 ffree();
3841 fincstp();
3842 }
3843
3844
3845 void MacroAssembler::sign_extend_short(Register reg) {
3846 if (VM_Version::is_P6()) {
3847 movsxw(reg, reg);
3848 } else {
3849 shll(reg, 16);
3850 sarl(reg, 16);
3851 }
3852 }
3853
3854
3855 void MacroAssembler::sign_extend_byte(Register reg) {
3856 if (VM_Version::is_P6() && reg->has_byte_register()) {
3857 movsxb(reg, reg);
3858 } else {
3859 shll(reg, 24);
3860 sarl(reg, 24);
3861 }
3862 }
3863
3864
3865 void MacroAssembler::division_with_shift (Register reg, int shift_value) {
3866 assert (shift_value > 0, "illegal shift value");
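// Arithmetic shifting alone rounds toward minus infinity; adding
// (2^shift_value - 1) to negative inputs first gives Java-style truncation
// toward zero (e.g. shift_value == 2: -7 + 3 = -4, -4 >> 2 = -1 = -7/4).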
3867 Label _is_positive;
3868 testl (reg, reg);
3869 jcc (Assembler::positive, _is_positive);
3870 int offset = (1 << shift_value) - 1 ;
3871
3872 increment(reg, offset);
3873
3874 bind (_is_positive);
3875 sarl(reg, shift_value);
3876 }
3877
3878
3879 void MacroAssembler::round_to(Register reg, int modulus) {
3880 addl(reg, modulus - 1);
3881 andl(reg, -modulus);
3882 }
3883
3884 // C++ bool manipulation
3885
3886 void MacroAssembler::movbool(Register dst, Address src) {
3887 if(sizeof(bool) == 1)
3888 movb(dst, src);
3889 else if(sizeof(bool) == 2)
3890 movw(dst, src);
3891 else if(sizeof(bool) == 4)
3892 movl(dst, src);
3893 else
3894 // unsupported
3895 ShouldNotReachHere();
3896 }
3897
3898 void MacroAssembler::movbool(Address dst, bool boolconst) {
3899 if(sizeof(bool) == 1)
3900 movb(dst, (int) boolconst);
3901 else if(sizeof(bool) == 2)
3902 movw(dst, (int) boolconst);
3903 else if(sizeof(bool) == 4)
3904 movl(dst, (int) boolconst);
3905 else
3906 // unsupported
3907 ShouldNotReachHere();
3908 }
3909
3910 void MacroAssembler::movbool(Address dst, Register src) {
3911 if(sizeof(bool) == 1)
3912 movb(dst, src);
3913 else if(sizeof(bool) == 2)
3914 movw(dst, src);
3915 else if(sizeof(bool) == 4)
3916 movl(dst, src);
3917 else
3918 // unsupported
3919 ShouldNotReachHere();
3920 }
3921
3922 void MacroAssembler::testbool(Register dst) {
3923 if(sizeof(bool) == 1)
3924 testb(dst, (int) 0xff);
3925 else if(sizeof(bool) == 2) {
3926 // testw implementation needed for two byte bools
3927 ShouldNotReachHere();
3928 } else if(sizeof(bool) == 4)
3929 testl(dst, dst);
3930 else
3931 // unsupported
3932 ShouldNotReachHere();
3933 }
3934
3935 void MacroAssembler::verify_oop(Register reg, const char* s) {
3936 if (!VerifyOops) return;
3937 // Pass register number to verify_oop_subroutine
3938 char* b = new char[strlen(s) + 50];
3939 sprintf(b, "verify_oop: %s: %s", reg->name(), s);
3940 pushl(rax); // save rax
3941 pushl(reg); // pass register argument
3942 ExternalAddress buffer((address) b);
3943 pushptr(buffer.addr());
3944 // call indirectly to solve generation ordering problem
3945 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
3946 call(rax);
3947 }
3948
3949
3950 void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
3951 if (!VerifyOops) return;
3952 // QQQ fix this
3953 // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
3954 // Pass register number to verify_oop_subroutine
3955 char* b = new char[strlen(s) + 50];
3956 sprintf(b, "verify_oop_addr: %s", s);
3957 pushl(rax); // save rax
3958 // addr may contain rsp so we will have to adjust it based on the push
3959 // we just did
3960 if (addr.uses(rsp)) {
3961 leal(rax, addr);
3962 pushl(Address(rax, BytesPerWord));
3963 } else {
3964 pushl(addr);
3965 }
3966 ExternalAddress buffer((address) b);
3967 // pass msg argument
3968 pushptr(buffer.addr());
3969 // call indirectly to solve generation ordering problem
3970 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
3971 call(rax);
3972 // Caller pops the arguments and restores rax from the stack
3973 }
3974
3975
3976 void MacroAssembler::stop(const char* msg) {
3977 ExternalAddress message((address)msg);
3978 // push address of message
3979 pushptr(message.addr());
3980 { Label L; call(L, relocInfo::none); bind(L); } // push eip
3981 pushad(); // push registers
3982 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug)));
3983 hlt();
3984 }
3985
3986
3987 void MacroAssembler::warn(const char* msg) {
3988 push_CPU_state();
3989
3990 ExternalAddress message((address) msg);
3991 // push address of message
3992 pushptr(message.addr());
3993
3994 call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
3995 addl(rsp, wordSize); // discard argument
3996 pop_CPU_state();
3997 }
3998
3999
4000 void MacroAssembler::debug(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
4001 // In order to get locks to work, we need to fake an in_VM state
4002 JavaThread* thread = JavaThread::current();
4003 JavaThreadState saved_state = thread->thread_state();
4004 thread->set_thread_state(_thread_in_vm);
4005 if (ShowMessageBoxOnError) {
4006 JavaThread* thread = JavaThread::current();
4007 JavaThreadState saved_state = thread->thread_state();
4008 thread->set_thread_state(_thread_in_vm);
4009 ttyLocker ttyl;
4010 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
4011 BytecodeCounter::print();
4012 }
4013 // To see where a verify_oop failed, get $ebx+40/X for this frame.
4014 // This is the value of eip which points to where verify_oop will return.
4015 if (os::message_box(msg, "Execution stopped, print registers?")) {
4016 tty->print_cr("eip = 0x%08x", eip);
4017 tty->print_cr("rax, = 0x%08x", rax);
4018 tty->print_cr("rbx, = 0x%08x", rbx);
4019 tty->print_cr("rcx = 0x%08x", rcx);
4020 tty->print_cr("rdx = 0x%08x", rdx);
4021 tty->print_cr("rdi = 0x%08x", rdi);
4022 tty->print_cr("rsi = 0x%08x", rsi);
4023 tty->print_cr("rbp, = 0x%08x", rbp);
4024 tty->print_cr("rsp = 0x%08x", rsp);
4025 BREAKPOINT;
4026 }
4027 } else {
4028 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
4029 assert(false, "DEBUG MESSAGE");
4030 }
4031 ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
4032 }
4033
4034
4035
4036 void MacroAssembler::os_breakpoint() {
4037 // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
4038 // (e.g., MSVC can't call ps() otherwise)
4039 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
4040 }
4041
4042
4043 void MacroAssembler::push_fTOS() {
4044 subl(rsp, 2 * wordSize);
4045 fstp_d(Address(rsp, 0));
4046 }
4047
4048
4049 void MacroAssembler::pop_fTOS() {
4050 fld_d(Address(rsp, 0));
4051 addl(rsp, 2 * wordSize);
4052 }
4053
4054
4055 void MacroAssembler::empty_FPU_stack() {
4056 if (VM_Version::supports_mmx()) {
4057 emms();
4058 } else {
4059 for (int i = 8; i-- > 0; ) ffree(i);
4060 }
4061 }
4062
4063
4064 class ControlWord {
4065 public:
4066 int32_t _value;
4067
4068 int rounding_control() const { return (_value >> 10) & 3 ; }
4069 int precision_control() const { return (_value >> 8) & 3 ; }
4070 bool precision() const { return ((_value >> 5) & 1) != 0; }
4071 bool underflow() const { return ((_value >> 4) & 1) != 0; }
4072 bool overflow() const { return ((_value >> 3) & 1) != 0; }
4073 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
4074 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
4075 bool invalid() const { return ((_value >> 0) & 1) != 0; }
4076
4077 void print() const {
4078 // rounding control
4079 const char* rc;
4080 switch (rounding_control()) {
4081 case 0: rc = "round near"; break;
4082 case 1: rc = "round down"; break;
4083 case 2: rc = "round up "; break;
4084 case 3: rc = "chop "; break;
4085 };
4086 // precision control
4087 const char* pc;
4088 switch (precision_control()) {
4089 case 0: pc = "24 bits "; break;
4090 case 1: pc = "reserved"; break;
4091 case 2: pc = "53 bits "; break;
4092 case 3: pc = "64 bits "; break;
4093 };
4094 // flags
4095 char f[9];
4096 f[0] = ' ';
4097 f[1] = ' ';
4098 f[2] = (precision ()) ? 'P' : 'p';
4099 f[3] = (underflow ()) ? 'U' : 'u';
4100 f[4] = (overflow ()) ? 'O' : 'o';
4101 f[5] = (zero_divide ()) ? 'Z' : 'z';
4102 f[6] = (denormalized()) ? 'D' : 'd';
4103 f[7] = (invalid ()) ? 'I' : 'i';
4104 f[8] = '\x0';
4105 // output
4106 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
4107 }
4108
4109 };
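// Bit layout decoded by ControlWord above (low 16 bits of the x87 control word):
//   bits 11..10  rounding control     bits 9..8  precision control
//   bit 5  P mask   bit 4  U mask   bit 3  O mask
//   bit 2  Z mask   bit 1  D mask   bit 0  I mask
// For example, the common value 0x037F prints mask string "  PUOZDI"
// (all exceptions masked), rounding "round near" and precision "64 bits ".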
4110
4111
4112 class StatusWord {
4113 public:
4114 int32_t _value;
4115
4116 bool busy() const { return ((_value >> 15) & 1) != 0; }
4117 bool C3() const { return ((_value >> 14) & 1) != 0; }
4118 bool C2() const { return ((_value >> 10) & 1) != 0; }
4119 bool C1() const { return ((_value >> 9) & 1) != 0; }
4120 bool C0() const { return ((_value >> 8) & 1) != 0; }
4121 int top() const { return (_value >> 11) & 7 ; }
4122 bool error_status() const { return ((_value >> 7) & 1) != 0; }
4123 bool stack_fault() const { return ((_value >> 6) & 1) != 0; }
4124 bool precision() const { return ((_value >> 5) & 1) != 0; }
4125 bool underflow() const { return ((_value >> 4) & 1) != 0; }
4126 bool overflow() const { return ((_value >> 3) & 1) != 0; }
4127 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
4128 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
4129 bool invalid() const { return ((_value >> 0) & 1) != 0; }
4130
4131 void print() const {
4132 // condition codes
4133 char c[5];
4134 c[0] = (C3()) ? '3' : '-';
4135 c[1] = (C2()) ? '2' : '-';
4136 c[2] = (C1()) ? '1' : '-';
4137 c[3] = (C0()) ? '0' : '-';
4138 c[4] = '\x0';
4139 // flags
4140 char f[9];
4141 f[0] = (error_status()) ? 'E' : '-';
4142 f[1] = (stack_fault ()) ? 'S' : '-';
4143 f[2] = (precision ()) ? 'P' : '-';
4144 f[3] = (underflow ()) ? 'U' : '-';
4145 f[4] = (overflow ()) ? 'O' : '-';
4146 f[5] = (zero_divide ()) ? 'Z' : '-';
4147 f[6] = (denormalized()) ? 'D' : '-';
4148 f[7] = (invalid ()) ? 'I' : '-';
4149 f[8] = '\x0';
4150 // output
4151 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top());
4152 }
4153
4154 };
4155
4156
4157 class TagWord {
4158 public:
4159 int32_t _value;
4160
4161 int tag_at(int i) const { return (_value >> (i*2)) & 3; }
4162
4163 void print() const {
4164 printf("%04x", _value & 0xFFFF);
4165 }
4166
4167 };
4168
4169
4170 class FPU_Register {
4171 public:
4172 int32_t _m0;
4173 int32_t _m1;
4174 int16_t _ex;
4175
4176 bool is_indefinite() const {
4177 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
4178 }
4179
4180 void print() const {
4181 char sign = (_ex < 0) ? '-' : '+';
4182 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " ";
4183 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind);
4184 };
4185
4186 };
4187
4188
4189 class FPU_State {
4190 public:
4191 enum {
4192 register_size = 10,
4193 number_of_registers = 8,
4194 register_mask = 7
4195 };
4196
4197 ControlWord _control_word;
4198 StatusWord _status_word;
4199 TagWord _tag_word;
4200 int32_t _error_offset;
4201 int32_t _error_selector;
4202 int32_t _data_offset;
4203 int32_t _data_selector;
4204 int8_t _register[register_size * number_of_registers];
4205
4206 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
4207 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; }
4208
4209 const char* tag_as_string(int tag) const {
4210 switch (tag) {
4211 case 0: return "valid";
4212 case 1: return "zero";
4213 case 2: return "special";
4214 case 3: return "empty";
4215 }
4216 ShouldNotReachHere();
4217 return NULL;
4218 }
4219
4220 void print() const {
4221 // print computation registers
4222 { int t = _status_word.top();
4223 for (int i = 0; i < number_of_registers; i++) {
4224 int j = (i - t) & register_mask;
4225 printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
4226 st(j)->print();
4227 printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
4228 }
4229 }
4230 printf("\n");
4231 // print control registers
4232 printf("ctrl = "); _control_word.print(); printf("\n");
4233 printf("stat = "); _status_word .print(); printf("\n");
4234 printf("tags = "); _tag_word .print(); printf("\n");
4235 }
4236
4237 };
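// Sketch of the ST(i) -> physical register mapping used by tag_for_st above:
// with TOP = _status_word.top(), ST(i) lives in physical register
// (TOP + i) & register_mask, and its two-bit tag (0 valid, 1 zero, 2 special,
// 3 empty) is read from the tag word. For example, if TOP == 5 then ST(0) is
// r5, ST(2) is r7 and ST(3) wraps around to r0.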
4238
4239
4240 class Flag_Register {
4241 public:
4242 int32_t _value;
4243
4244 bool overflow() const { return ((_value >> 11) & 1) != 0; }
4245 bool direction() const { return ((_value >> 10) & 1) != 0; }
4246 bool sign() const { return ((_value >> 7) & 1) != 0; }
4247 bool zero() const { return ((_value >> 6) & 1) != 0; }
4248 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; }
4249 bool parity() const { return ((_value >> 2) & 1) != 0; }
4250 bool carry() const { return ((_value >> 0) & 1) != 0; }
4251
4252 void print() const {
4253 // flags
4254 char f[8];
4255 f[0] = (overflow ()) ? 'O' : '-';
4256 f[1] = (direction ()) ? 'D' : '-';
4257 f[2] = (sign ()) ? 'S' : '-';
4258 f[3] = (zero ()) ? 'Z' : '-';
4259 f[4] = (auxiliary_carry()) ? 'A' : '-';
4260 f[5] = (parity ()) ? 'P' : '-';
4261 f[6] = (carry ()) ? 'C' : '-';
4262 f[7] = '\x0';
4263 // output
4264 printf("%08x flags = %s", _value, f);
4265 }
4266
4267 };
4268
4269
4270 class IU_Register {
4271 public:
4272 int32_t _value;
4273
4274 void print() const {
4275 printf("%08x %11d", _value, _value);
4276 }
4277
4278 };
4279
4280
4281 class IU_State {
4282 public:
4283 Flag_Register _eflags;
4284 IU_Register _rdi;
4285 IU_Register _rsi;
4286 IU_Register _rbp;
4287 IU_Register _rsp;
4288 IU_Register _rbx;
4289 IU_Register _rdx;
4290 IU_Register _rcx;
4291 IU_Register _rax;
4292
4293 void print() const {
4294 // computation registers
4295 printf("rax = "); _rax.print(); printf("\n");
4296 printf("rbx = "); _rbx.print(); printf("\n");
4297 printf("rcx = "); _rcx.print(); printf("\n");
4298 printf("rdx = "); _rdx.print(); printf("\n");
4299 printf("rdi = "); _rdi.print(); printf("\n");
4300 printf("rsi = "); _rsi.print(); printf("\n");
4301 printf("rbp = "); _rbp.print(); printf("\n");
4302 printf("rsp = "); _rsp.print(); printf("\n");
4303 printf("\n");
4304 // control registers
4305 printf("flgs = "); _eflags.print(); printf("\n");
4306 }
4307 };
4308
4309
4310 class CPU_State {
4311 public:
4312 FPU_State _fpu_state;
4313 IU_State _iu_state;
4314
4315 void print() const {
4316 printf("--------------------------------------------------\n");
4317 _iu_state .print();
4318 printf("\n");
4319 _fpu_state.print();
4320 printf("--------------------------------------------------\n");
4321 }
4322
4323 };
4324
4325
4326 static void _print_CPU_state(CPU_State* state) {
4327 state->print();
4328 };
4329
4330
4331 void MacroAssembler::print_CPU_state() {
4332 push_CPU_state();
4333 pushl(rsp); // pass CPU state
4334 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
4335 addl(rsp, wordSize); // discard argument
4336 pop_CPU_state();
4337 }
4338
4339
4340 static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
4341 static int counter = 0;
4342 FPU_State* fs = &state->_fpu_state;
4343 counter++;
4344 // For leaf calls, only verify that the top few elements remain empty.
4345 // We only need 1 empty at the top for C2 code.
4346 if( stack_depth < 0 ) {
4347 if( fs->tag_for_st(7) != 3 ) {
4348 printf("FPR7 not empty\n");
4349 state->print();
4350 assert(false, "error");
4351 return false;
4352 }
4353 return true; // All other stack states do not matter
4354 }
4355
4356 assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std,
4357 "bad FPU control word");
4358
4359 // compute stack depth
4360 int i = 0;
4361 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++;
4362 int d = i;
4363 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
4364 // verify findings
4365 if (i != FPU_State::number_of_registers) {
4366 // stack not contiguous
4367 printf("%s: stack not contiguous at ST%d\n", s, i);
4368 state->print();
4369 assert(false, "error");
4370 return false;
4371 }
4372 // check if computed stack depth corresponds to expected stack depth
4373 if (stack_depth < 0) {
4374 // expected stack depth is -stack_depth or less
4375 if (d > -stack_depth) {
4376 // too many elements on the stack
4377 printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
4378 state->print();
4379 assert(false, "error");
4380 return false;
4381 }
4382 } else {
4383 // expected stack depth is stack_depth
4384 if (d != stack_depth) {
4385 // wrong stack depth
4386 printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
4387 state->print();
4388 assert(false, "error");
4389 return false;
4390 }
4391 }
4392 // everything is cool
4393 return true;
4394 }
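// Illustrative example of the depth scan above: for tags, from ST(0) upward,
// of valid, valid, empty, empty, empty, empty, empty, empty the first loop
// stops at i == 2 (so d == 2) and the second loop advances i to 8, so the
// stack is contiguous and a call with stack_depth == 2 passes. Any non-empty
// slot above an empty one leaves i < number_of_registers and triggers the
// "stack not contiguous" failure.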
4395
4396
4397 void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
4398 if (!VerifyFPU) return;
4399 push_CPU_state();
4400 pushl(rsp); // pass CPU state
4401 ExternalAddress msg((address) s);
4402 // pass message string s
4403 pushptr(msg.addr());
4404 pushl(stack_depth); // pass stack depth
4405 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
4406 addl(rsp, 3 * wordSize); // discard arguments
4407 // check for error
4408 { Label L;
4409 testl(rax, rax);
4410 jcc(Assembler::notZero, L);
4411 int3(); // break if error condition
4412 bind(L);
4413 }
4414 pop_CPU_state();
4415 }
4416
4417
4418 void MacroAssembler::push_IU_state() {
4419 pushad();
4420 pushfd();
4421 }
4422
4423
4424 void MacroAssembler::pop_IU_state() {
4425 popfd();
4426 popad();
4427 }
4428
4429
4430 void MacroAssembler::push_FPU_state() {
4431 subl(rsp, FPUStateSizeInWords * wordSize);
4432 fnsave(Address(rsp, 0));
4433 fwait();
4434 }
4435
4436
4437 void MacroAssembler::pop_FPU_state() {
4438 frstor(Address(rsp, 0));
4439 addl(rsp, FPUStateSizeInWords * wordSize);
4440 }
4441
4442
4443 void MacroAssembler::push_CPU_state() {
4444 push_IU_state();
4445 push_FPU_state();
4446 }
4447
4448
4449 void MacroAssembler::pop_CPU_state() {
4450 pop_FPU_state();
4451 pop_IU_state();
4452 }
4453
4454
4455 void MacroAssembler::push_callee_saved_registers() {
4456 pushl(rsi);
4457 pushl(rdi);
4458 pushl(rdx);
4459 pushl(rcx);
4460 }
4461
4462
4463 void MacroAssembler::pop_callee_saved_registers() {
4464 popl(rcx);
4465 popl(rdx);
4466 popl(rdi);
4467 popl(rsi);
4468 }
4469
4470
4471 void MacroAssembler::set_word_if_not_zero(Register dst) {
4472 xorl(dst, dst);
4473 set_byte_if_not_zero(dst);
4474 }
4475
4476 // Write serialization page so VM thread can do a pseudo remote membar.
4477 // We use the current thread pointer to calculate a thread specific
4478 // offset to write to within the page. This minimizes bus traffic
4479 // due to cache line collision.
4480 void MacroAssembler::serialize_memory(Register thread, Register tmp) {
4481 movl(tmp, thread);
4482 shrl(tmp, os::get_serialize_page_shift_count());
4483 andl(tmp, (os::vm_page_size() - sizeof(int)));
4484
4485 Address index(noreg, tmp, Address::times_1);
4486 ExternalAddress page(os::get_memory_serialize_page());
4487
4488 movptr(ArrayAddress(page, index), tmp);
4489 }
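// C-like sketch of the store emitted above (operand names only, not a real
// helper in this file):
//   offset = (thread >> os::get_serialize_page_shift_count())
//            & (os::vm_page_size() - sizeof(int));
//   *(intptr_t*) (os::get_memory_serialize_page() + offset) = offset;
// so each thread touches its own slot within the serialization page.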
4490
4491
4492 void MacroAssembler::verify_tlab() {
4493 #ifdef ASSERT
4494 if (UseTLAB && VerifyOops) {
4495 Label next, ok;
4496 Register t1 = rsi;
4497 Register thread_reg = rbx;
4498
4499 pushl(t1);
4500 pushl(thread_reg);
4501 get_thread(thread_reg);
4502
4503 movl(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
4504 cmpl(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
4505 jcc(Assembler::aboveEqual, next);
4506 stop("assert(top >= start)");
4507 should_not_reach_here();
4508
4509 bind(next);
4510 movl(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
4511 cmpl(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
4512 jcc(Assembler::aboveEqual, ok);
4513 stop("assert(top <= end)");
4514 should_not_reach_here();
4515
4516 bind(ok);
4517 popl(thread_reg);
4518 popl(t1);
4519 }
4520 #endif
4521 }
4522
4523
4524 // Defines obj, preserves var_size_in_bytes
4525 void MacroAssembler::eden_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes,
4526 Register t1, Label& slow_case) {
4527 assert(obj == rax, "obj must be in rax for cmpxchg");
4528 assert_different_registers(obj, var_size_in_bytes, t1);
4529 Register end = t1;
4530 Label retry;
4531 bind(retry);
4532 ExternalAddress heap_top((address) Universe::heap()->top_addr());
4533 movptr(obj, heap_top);
4534 if (var_size_in_bytes == noreg) {
4535 leal(end, Address(obj, con_size_in_bytes));
4536 } else {
4537 leal(end, Address(obj, var_size_in_bytes, Address::times_1));
4538 }
4539 // if end < obj then we wrapped around => object too long => slow case
4540 cmpl(end, obj);
4541 jcc(Assembler::below, slow_case);
4542 cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
4543 jcc(Assembler::above, slow_case);
4544 // Compare obj with the top addr, and if still equal, store the new top addr in
4545 // end at the address of the top addr pointer. Sets ZF if was equal, and clears
4546 // it otherwise. Use lock prefix for atomicity on MPs.
4547 if (os::is_MP()) {
4548 lock();
4549 }
4550 cmpxchgptr(end, heap_top);
4551 jcc(Assembler::notEqual, retry);
4552 }
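// C-like sketch (illustrative only) of the retry loop emitted above:
//   do {
//     obj = *Universe::heap()->top_addr();
//     end = obj + size;
//     if (end < obj)                            goto slow_case;  // wrapped: object too large
//     if (end > *Universe::heap()->end_addr())  goto slow_case;  // past the end of eden
//   } while (!CAS(top_addr, /* expected */ obj, /* new value */ end));
// cmpxchg uses rax as the expected value, which is why obj must be in rax.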
4553
4554
4555 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
4556 void MacroAssembler::tlab_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes,
4557 Register t1, Register t2, Label& slow_case) {
4558 assert_different_registers(obj, t1, t2);
4559 assert_different_registers(obj, var_size_in_bytes, t1);
4560 Register end = t2;
4561 Register thread = t1;
4562
4563 verify_tlab();
4564
4565 get_thread(thread);
4566
4567 movl(obj, Address(thread, JavaThread::tlab_top_offset()));
4568 if (var_size_in_bytes == noreg) {
4569 leal(end, Address(obj, con_size_in_bytes));
4570 } else {
4571 leal(end, Address(obj, var_size_in_bytes, Address::times_1));
4572 }
4573 cmpl(end, Address(thread, JavaThread::tlab_end_offset()));
4574 jcc(Assembler::above, slow_case);
4575
4576 // update the tlab top pointer
4577 movl(Address(thread, JavaThread::tlab_top_offset()), end);
4578
4579 // recover var_size_in_bytes if necessary
4580 if (var_size_in_bytes == end) {
4581 subl(var_size_in_bytes, obj);
4582 }
4583 verify_tlab();
4584 }
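// C-like sketch (illustrative only, field accessors approximate) of the
// bump-pointer fast path above:
//   obj = thread->tlab_top();
//   end = obj + size;
//   if (end > thread->tlab_end()) goto slow_case;
//   thread->set_tlab_top(end);     // plain store: the TLAB is thread-local, no CAS
//   return obj;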
4585
4586 // Preserves rbx and rdx.
4587 void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
4588 Register top = rax;
4589 Register t1 = rcx;
4590 Register t2 = rsi;
4591 Register thread_reg = rdi;
4592 assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
4593 Label do_refill, discard_tlab;
4594
4595 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
4596 // No allocation in the shared eden.
4597 jmp(slow_case);
4598 }
4599
4600 get_thread(thread_reg);
4601
4602 movl(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
4603 movl(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
4604
4605 // calculate amount of free space
4606 subl(t1, top);
4607 shrl(t1, LogHeapWordSize);
4608
4609 // Retain tlab and allocate object in shared space if
4610 // the amount free in the tlab is too large to discard.
4611 cmpl(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
4612 jcc(Assembler::lessEqual, discard_tlab);
4613
4614 // Retain
4615 movl(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment());
4616 addl(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2);
4617 if (TLABStats) {
4618 // increment number of slow_allocations
4619 addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1);
4620 }
4621 jmp(try_eden);
4622
4623 bind(discard_tlab);
4624 if (TLABStats) {
4625 // increment number of refills
4626 addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1);
4627 // accumulate wastage -- t1 is amount free in tlab
4628 addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1);
4629 }
4630
4631 // if tlab is currently allocated (top or end != null) then
4632 // fill [top, end + alignment_reserve) with array object
4633 testl (top, top);
4634 jcc(Assembler::zero, do_refill);
4635
4636 // set up the mark word
4637 movl(Address(top, oopDesc::mark_offset_in_bytes()), (int)markOopDesc::prototype()->copy_set_hash(0x2));
4638 // set the length to the remaining space
4639 subl(t1, typeArrayOopDesc::header_size(T_INT));
4640 addl(t1, ThreadLocalAllocBuffer::alignment_reserve());
4641 shll(t1, log2_intptr(HeapWordSize/sizeof(jint)));
4642 movl(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
4643 // set klass to intArrayKlass
4644 // dubious reloc why not an oop reloc?
4645 movptr(t1, ExternalAddress((address) Universe::intArrayKlassObj_addr()));
4646 movl(Address(top, oopDesc::klass_offset_in_bytes()), t1);
4647
4648 // refill the tlab with an eden allocation
4649 bind(do_refill);
4650 movl(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
4651 shll(t1, LogHeapWordSize);
4652 // add object_size ??
4653 eden_allocate(top, t1, 0, t2, slow_case);
4654
4655 // Check that t1 was preserved in eden_allocate.
4656 #ifdef ASSERT
4657 if (UseTLAB) {
4658 Label ok;
4659 Register tsize = rsi;
4660 assert_different_registers(tsize, thread_reg, t1);
4661 pushl(tsize);
4662 movl(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
4663 shll(tsize, LogHeapWordSize);
4664 cmpl(t1, tsize);
4665 jcc(Assembler::equal, ok);
4666 stop("assert(t1 != tlab size)");
4667 should_not_reach_here();
4668
4669 bind(ok);
4670 popl(tsize);
4671 }
4672 #endif
4673 movl(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
4674 movl(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
4675 addl(top, t1);
4676 subl(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
4677 movl(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
4678 verify_tlab();
4679 jmp(retry);
4680 }
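// Sketch of the retain-vs-discard policy implemented above, with
// free = (tlab_end - tlab_top) measured in heap words:
//   if (free > tlab_refill_waste_limit) {
//     // retain the TLAB: bump the waste limit and allocate this object in eden
//   } else {
//     // discard: overwrite [top, end + alignment_reserve) with an int[] filler
//     // object so the old TLAB remains a walkable part of the heap, then
//     // refill the TLAB from eden and retry the original allocation
//   }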
4681
4682
4683 int MacroAssembler::biased_locking_enter(Register lock_reg, Register obj_reg, Register swap_reg, Register tmp_reg,
4684 bool swap_reg_contains_mark,
4685 Label& done, Label* slow_case,
4686 BiasedLockingCounters* counters) {
4687 assert(UseBiasedLocking, "why call this otherwise?");
4688 assert(swap_reg == rax, "swap_reg must be rax for cmpxchg");
4689 assert_different_registers(lock_reg, obj_reg, swap_reg);
4690
4691 if (PrintBiasedLockingStatistics && counters == NULL)
4692 counters = BiasedLocking::counters();
4693
4694 bool need_tmp_reg = false;
4695 if (tmp_reg == noreg) {
4696 need_tmp_reg = true;
4697 tmp_reg = lock_reg;
4698 } else {
4699 assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
4700 }
4701 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
4702 Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
4703 Address klass_addr (obj_reg, oopDesc::klass_offset_in_bytes());
4704 Address saved_mark_addr(lock_reg, 0);
4705
4706 // Biased locking
4707 // See whether the lock is currently biased toward our thread and
4708 // whether the epoch is still valid
4709 // Note that the runtime guarantees sufficient alignment of JavaThread
4710 // pointers to allow age to be placed into low bits
4711 // First check to see whether biasing is even enabled for this object
4712 Label cas_label;
4713 int null_check_offset = -1;
4714 if (!swap_reg_contains_mark) {
4715 null_check_offset = offset();
4716 movl(swap_reg, mark_addr);
4717 }
4718 if (need_tmp_reg) {
4719 pushl(tmp_reg);
4720 }
4721 movl(tmp_reg, swap_reg);
4722 andl(tmp_reg, markOopDesc::biased_lock_mask_in_place);
4723 cmpl(tmp_reg, markOopDesc::biased_lock_pattern);
4724 if (need_tmp_reg) {
4725 popl(tmp_reg);
4726 }
4727 jcc(Assembler::notEqual, cas_label);
4728 // The bias pattern is present in the object's header. Need to check
4729 // whether the bias owner and the epoch are both still current.
4730 // Note that because there is no current thread register on x86 we
4731 // need to store off the mark word we read out of the object to
4732 // avoid reloading it and needing to recheck invariants below. This
4733 // store is unfortunate but it makes the overall code shorter and
4734 // simpler.
4735 movl(saved_mark_addr, swap_reg);
4736 if (need_tmp_reg) {
4737 pushl(tmp_reg);
4738 }
4739 get_thread(tmp_reg);
4740 xorl(swap_reg, tmp_reg);
4741 if (swap_reg_contains_mark) {
4742 null_check_offset = offset();
4743 }
4744 movl(tmp_reg, klass_addr);
4745 xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
4746 andl(swap_reg, ~((int) markOopDesc::age_mask_in_place));
4747 if (need_tmp_reg) {
4748 popl(tmp_reg);
4749 }
4750 if (counters != NULL) {
4751 cond_inc32(Assembler::zero,
4752 ExternalAddress((address)counters->biased_lock_entry_count_addr()));
4753 }
4754 jcc(Assembler::equal, done);
4755
4756 Label try_revoke_bias;
4757 Label try_rebias;
4758
4759 // At this point we know that the header has the bias pattern and
4760 // that we are not the bias owner in the current epoch. We need to
4761 // figure out more details about the state of the header in order to
4762 // know what operations can be legally performed on the object's
4763 // header.
4764
4765 // If the low three bits in the xor result aren't clear, that means
4766 // the prototype header is no longer biased and we have to revoke
4767 // the bias on this object.
4768 testl(swap_reg, markOopDesc::biased_lock_mask_in_place);
4769 jcc(Assembler::notZero, try_revoke_bias);
4770
4771 // Biasing is still enabled for this data type. See whether the
4772 // epoch of the current bias is still valid, meaning that the epoch
4773 // bits of the mark word are equal to the epoch bits of the
4774 // prototype header. (Note that the prototype header's epoch bits
4775 // only change at a safepoint.) If not, attempt to rebias the object
4776 // toward the current thread. Note that we must be absolutely sure
4777 // that the current epoch is invalid in order to do this because
4778 // otherwise the manipulations it performs on the mark word are
4779 // illegal.
4780 testl(swap_reg, markOopDesc::epoch_mask_in_place);
4781 jcc(Assembler::notZero, try_rebias);
4782
4783 // The epoch of the current bias is still valid but we know nothing
4784 // about the owner; it might be set or it might be clear. Try to
4785 // acquire the bias of the object using an atomic operation. If this
4786 // fails we will go in to the runtime to revoke the object's bias.
4787 // Note that we first construct the presumed unbiased header so we
4788 // don't accidentally blow away another thread's valid bias.
4789 movl(swap_reg, saved_mark_addr);
4790 andl(swap_reg,
4791 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
4792 if (need_tmp_reg) {
4793 pushl(tmp_reg);
4794 }
4795 get_thread(tmp_reg);
4796 orl(tmp_reg, swap_reg);
4797 if (os::is_MP()) {
4798 lock();
4799 }
4800 cmpxchg(tmp_reg, Address(obj_reg, 0));
4801 if (need_tmp_reg) {
4802 popl(tmp_reg);
4803 }
4804 // If the biasing toward our thread failed, this means that
4805 // another thread succeeded in biasing it toward itself and we
4806 // need to revoke that bias. The revocation will occur in the
4807 // interpreter runtime in the slow case.
4808 if (counters != NULL) {
4809 cond_inc32(Assembler::zero,
4810 ExternalAddress((address)counters->anonymously_biased_lock_entry_count_addr()));
4811 }
4812 if (slow_case != NULL) {
4813 jcc(Assembler::notZero, *slow_case);
4814 }
4815 jmp(done);
4816
4817 bind(try_rebias);
4818 // At this point we know the epoch has expired, meaning that the
4819 // current "bias owner", if any, is actually invalid. Under these
4820 // circumstances _only_, we are allowed to use the current header's
4821 // value as the comparison value when doing the cas to acquire the
4822 // bias in the current epoch. In other words, we allow transfer of
4823 // the bias from one thread to another directly in this situation.
4824 //
4825 // FIXME: due to a lack of registers we currently blow away the age
4826 // bits in this situation. Should attempt to preserve them.
4827 if (need_tmp_reg) {
4828 pushl(tmp_reg);
4829 }
4830 get_thread(tmp_reg);
4831 movl(swap_reg, klass_addr);
4832 orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
4833 movl(swap_reg, saved_mark_addr);
4834 if (os::is_MP()) {
4835 lock();
4836 }
4837 cmpxchg(tmp_reg, Address(obj_reg, 0));
4838 if (need_tmp_reg) {
4839 popl(tmp_reg);
4840 }
4841 // If the biasing toward our thread failed, then another thread
4842 // succeeded in biasing it toward itself and we need to revoke that
4843 // bias. The revocation will occur in the runtime in the slow case.
4844 if (counters != NULL) {
4845 cond_inc32(Assembler::zero,
4846 ExternalAddress((address)counters->rebiased_lock_entry_count_addr()));
4847 }
4848 if (slow_case != NULL) {
4849 jcc(Assembler::notZero, *slow_case);
4850 }
4851 jmp(done);
4852
4853 bind(try_revoke_bias);
4854 // The prototype mark in the klass doesn't have the bias bit set any
4855 // more, indicating that objects of this data type are not supposed
4856 // to be biased any more. We are going to try to reset the mark of
4857 // this object to the prototype value and fall through to the
4858 // CAS-based locking scheme. Note that if our CAS fails, it means
4859 // that another thread raced us for the privilege of revoking the
4860 // bias of this particular object, so it's okay to continue in the
4861 // normal locking code.
4862 //
4863 // FIXME: due to a lack of registers we currently blow away the age
4864 // bits in this situation. Should attempt to preserve them.
4865 movl(swap_reg, saved_mark_addr);
4866 if (need_tmp_reg) {
4867 pushl(tmp_reg);
4868 }
4869 movl(tmp_reg, klass_addr);
4870 movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
4871 if (os::is_MP()) {
4872 lock();
4873 }
4874 cmpxchg(tmp_reg, Address(obj_reg, 0));
4875 if (need_tmp_reg) {
4876 popl(tmp_reg);
4877 }
4878 // Fall through to the normal CAS-based lock, because no matter what
4879 // the result of the above CAS, some thread must have succeeded in
4880 // removing the bias bit from the object's header.
4881 if (counters != NULL) {
4882 cond_inc32(Assembler::zero,
4883 ExternalAddress((address)counters->revoked_lock_entry_count_addr()));
4884 }
4885
4886 bind(cas_label);
4887
4888 return null_check_offset;
4889 }
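// Sketch of the decision tree implemented above. With
//   x = (mark ^ thread ^ prototype_header) & ~age_mask_in_place
// the emitted code branches as follows:
//   x == 0                       -> already biased to this thread, done
//   (x & biased_lock_mask) != 0  -> klass no longer allows biasing: CAS the
//                                   prototype header in (revoke) and fall
//                                   through to the normal CAS-based lock
//   (x & epoch_mask) != 0        -> epoch expired: CAS in (thread | prototype)
//                                   to rebias toward the current thread
//   otherwise                    -> owner unset or another thread: CAS in
//                                   (thread | unbiased header bits); failure
//                                   goes to the runtime via slow_case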
4890
4891
4892 void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
4893 assert(UseBiasedLocking, "why call this otherwise?");
4894
4895 // Check for biased locking unlock case, which is a no-op
4896 // Note: we do not have to check the thread ID for two reasons.
4897 // First, the interpreter checks for IllegalMonitorStateException at
4898 // a higher level. Second, if the bias was revoked while we held the
4899 // lock, the object could not be rebiased toward another thread, so
4900 // the bias bit would be clear.
4901 movl(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
4902 andl(temp_reg, markOopDesc::biased_lock_mask_in_place);
4903 cmpl(temp_reg, markOopDesc::biased_lock_pattern);
4904 jcc(Assembler::equal, done);
4905 }
4906
4907
4908 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
4909 switch (cond) {
4910 // Note some conditions are synonyms for others
4911 case Assembler::zero: return Assembler::notZero;
4912 case Assembler::notZero: return Assembler::zero;
4913 case Assembler::less: return Assembler::greaterEqual;
4914 case Assembler::lessEqual: return Assembler::greater;
4915 case Assembler::greater: return Assembler::lessEqual;
4916 case Assembler::greaterEqual: return Assembler::less;
4917 case Assembler::below: return Assembler::aboveEqual;
4918 case Assembler::belowEqual: return Assembler::above;
4919 case Assembler::above: return Assembler::belowEqual;
4920 case Assembler::aboveEqual: return Assembler::below;
4921 case Assembler::overflow: return Assembler::noOverflow;
4922 case Assembler::noOverflow: return Assembler::overflow;
4923 case Assembler::negative: return Assembler::positive;
4924 case Assembler::positive: return Assembler::negative;
4925 case Assembler::parity: return Assembler::noParity;
4926 case Assembler::noParity: return Assembler::parity;
4927 }
4928 ShouldNotReachHere(); return Assembler::overflow;
4929 }
4930
4931
4932 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
4933 Condition negated_cond = negate_condition(cond);
4934 Label L;
4935 jcc(negated_cond, L);
4936 atomic_incl(counter_addr);
4937 bind(L);
4938 }
4939
4940 void MacroAssembler::atomic_incl(AddressLiteral counter_addr) {
4941 pushfd();
4942 if (os::is_MP())
4943 lock();
4944 increment(counter_addr);
4945 popfd();
4946 }
4947
4948 SkipIfEqual::SkipIfEqual(
4949 MacroAssembler* masm, const bool* flag_addr, bool value) {
4950 _masm = masm;
4951 _masm->cmp8(ExternalAddress((address)flag_addr), value);
4952 _masm->jcc(Assembler::equal, _label);
4953 }
4954
4955 SkipIfEqual::~SkipIfEqual() {
4956 _masm->bind(_label);
4957 }
4958
4959
4960 // Writes to stack successive pages until offset reached to check for
4961 // stack overflow + shadow pages. This clobbers tmp.
4962 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
4963 movl(tmp, rsp);
4964 // Bang stack for total size given plus shadow page size.
4965 // Bang one page at a time because large size can bang beyond yellow and
4966 // red zones.
4967 Label loop;
4968 bind(loop);
4969 movl(Address(tmp, (-os::vm_page_size())), size );
4970 subl(tmp, os::vm_page_size());
4971 subl(size, os::vm_page_size());
4972 jcc(Assembler::greater, loop);
4973
4974 // Bang down shadow pages too.
4975 // The -1 because we already subtracted 1 page.
4976 for (int i = 0; i < StackShadowPages - 1; i++) {
4977 movl(Address(tmp, (-i*os::vm_page_size())), size );
4978 }
4979 }
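// C-like sketch (illustrative only) of the banging pattern above:
//   p = rsp;
//   do {
//     *(p - page_size) = size;                  // one write per page
//     p -= page_size;  size -= page_size;
//   } while (size > 0);
//   for (i = 0; i < StackShadowPages - 1; i++)  // shadow pages; one was already
//     *(p - i * page_size) = size;              // covered by the loop above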