Mercurial > hg > truffle
annotate src/cpu/x86/vm/assembler_x86.cpp @ 642:660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
Summary: Use zero based compressed oops if java heap is below 32gb and unscaled compressed oops if java heap is below 4gb.
Reviewed-by: never, twisti, jcoomes, coleenp
author | kvn |
---|---|
date | Thu, 12 Mar 2009 10:37:46 -0700 |
parents | 337400e7a5dd |
children | c771b7f43bbf |
rev | line source |
---|---|
0 | 1 /* |
624 | 2 * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
304 | 26 #include "incls/_assembler_x86.cpp.incl" |
0 | 27 |
28 // Implementation of AddressLiteral | |
29 | |
// Build an AddressLiteral for `target`, attaching the relocation record
// appropriate for `rtype` so the embedded address can be found and patched
// later. relocInfo::none (and oop_type, see below) leave _rspec empty.
AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
  _is_lval = false;
  _target = target;
  switch (rtype) {
  case relocInfo::oop_type:
    // Oops are a special case. Normally they would be their own section
    // but in cases like icBuffer they are literals in the code stream that
    // we don't have a section for. We use none so that we get a literal address
    // which is always patchable.
    break;
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(target);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(target);
    break;
  case relocInfo::opt_virtual_call_type:
    _rspec = opt_virtual_call_Relocation::spec();
    break;
  case relocInfo::static_call_type:
    _rspec = static_call_Relocation::spec();
    break;
  case relocInfo::runtime_call_type:
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}
66 | |
67 // Implementation of Address | |
68 | |
304 | 69 #ifdef _LP64 |
70 | |
0 | 71 Address Address::make_array(ArrayAddress adr) { |
72 // Not implementable on 64bit machines | |
73 // Should have been handled higher up the call chain. | |
74 ShouldNotReachHere(); | |
304 | 75 return Address(); |
76 } | |
77 | |
// exceedingly dangerous constructor
// Builds a base/index-less Address whose displacement is `disp`, with a
// relocation derived from `rtype` pointing at `loc`. Only meaningful for
// the relocation kinds listed below.
Address::Address(int disp, address loc, relocInfo::relocType rtype) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = disp;
  switch (rtype) {
    case relocInfo::external_word_type:
      _rspec = external_word_Relocation::spec(loc);
      break;
    case relocInfo::internal_word_type:
      _rspec = internal_word_Relocation::spec(loc);
      break;
    case relocInfo::runtime_call_type:
      // HMM
      _rspec = runtime_call_Relocation::spec();
      break;
    case relocInfo::poll_type:
    case relocInfo::poll_return_type:
      _rspec = Relocation::spec_simple(rtype);
      break;
    case relocInfo::none:
      break;
    default:
      ShouldNotReachHere();
  }
}
105 #else // LP64 | |
106 | |
// 32-bit: fold an ArrayAddress (absolute base + scaled index) into a single
// Address whose disp32 is the absolute base address, keeping the base's
// relocation so the embedded address stays patchable.
Address Address::make_array(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(index._base, index._index, index._scale, (intptr_t) base.target());
  array._rspec = base._rspec;
  return array;
}
0 | 115 |
116 // exceedingly dangerous constructor | |
117 Address::Address(address loc, RelocationHolder spec) { | |
118 _base = noreg; | |
119 _index = noreg; | |
120 _scale = no_scale; | |
121 _disp = (intptr_t) loc; | |
122 _rspec = spec; | |
123 } | |
304 | 124 |
0 | 125 #endif // _LP64 |
126 | |
304 | 127 |
128 | |
0 | 129 // Convert the raw encoding form into the form expected by the constructor for |
130 // Address. An index of 4 (rsp) corresponds to having no index, so convert | |
131 // that to noreg for the Address constructor. | |
624 | 132 Address Address::make_raw(int base, int index, int scale, int disp, bool disp_is_oop) { |
133 RelocationHolder rspec; | |
134 if (disp_is_oop) { | |
135 rspec = Relocation::spec_simple(relocInfo::oop_type); | |
136 } | |
0 | 137 bool valid_index = index != rsp->encoding(); |
138 if (valid_index) { | |
139 Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp)); | |
624 | 140 madr._rspec = rspec; |
0 | 141 return madr; |
142 } else { | |
143 Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp)); | |
624 | 144 madr._rspec = rspec; |
0 | 145 return madr; |
146 } | |
147 } | |
148 | |
149 // Implementation of Assembler | |
150 | |
151 int AbstractAssembler::code_fill_byte() { | |
152 return (u_char)'\xF4'; // hlt | |
153 } | |
154 | |
155 // make this go away someday | |
156 void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) { | |
157 if (rtype == relocInfo::none) | |
158 emit_long(data); | |
159 else emit_data(data, Relocation::spec_simple(rtype), format); | |
160 } | |
161 | |
// Emit a 32-bit data word carrying relocation info `rspec`. The relocation
// is attached at the enclosing instruction's start (inst_mark), not at the
// word itself, because that is where the reloc iterator expects it.
void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  if (rspec.type() != relocInfo::none) {
#ifdef ASSERT
    check_relocation(rspec, format);
#endif
    // Do not use AbstractAssembler::relocate, which is not intended for
    // embedded words. Instead, relocate to the enclosing instruction.

    // hack. call32 is too wide for mask so use disp32
    if (format == call32_operand)
      code_section()->relocate(inst_mark(), rspec, disp32_operand);
    else
      code_section()->relocate(inst_mark(), rspec, format);
  }
  emit_long(data);
}
180 | |
304 | 181 static int encode(Register r) { |
182 int enc = r->encoding(); | |
183 if (enc >= 8) { | |
184 enc -= 8; | |
185 } | |
186 return enc; | |
187 } | |
188 | |
189 static int encode(XMMRegister r) { | |
190 int enc = r->encoding(); | |
191 if (enc >= 8) { | |
192 enc -= 8; | |
193 } | |
194 return enc; | |
195 } | |
0 | 196 |
// Emit a byte-sized arithmetic instruction: opcode `op1`, ModRM built from
// `op2` plus the register encoding, followed by an imm8.
void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
  assert(dst->has_byte_register(), "must have byte register");
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert(isByte(imm8), "not a byte");
  assert((op1 & 0x01) == 0, "should be 8bit operation");
  emit_byte(op1);
  emit_byte(op2 | encode(dst));
  emit_byte(imm8);
}
206 | |
207 | |
// Emit a 32-bit arithmetic instruction with an immediate. When the immediate
// fits in 8 bits, use the sign-extended imm8 form (op1 | 0x02) to save space.
void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_byte(op1 | 0x02); // set sign bit
    emit_byte(op2 | encode(dst));
    emit_byte(imm32 & 0xFF);
  } else {
    emit_byte(op1);
    emit_byte(op2 | encode(dst));
    emit_long(imm32);
  }
}
222 | |
223 // immediate-to-memory forms | |
// immediate-to-memory forms
// `rm` carries the opcode-extension digit for the ModRM reg field. The last
// argument to emit_operand is the size of the trailing immediate, needed for
// correct RIP-relative displacement on 64-bit.
void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_byte(op1 | 0x02); // set sign bit
    emit_operand(rm, adr, 1);
    emit_byte(imm32 & 0xFF);
  } else {
    emit_byte(op1);
    emit_operand(rm, adr, 4);
    emit_long(imm32);
  }
}
237 | |
// Arithmetic with an embedded oop immediate — 32-bit only (oops don't fit in
// an imm32 on 64-bit, hence the ShouldNotReachHere under LP64). The oop word
// gets an oop_type relocation so the GC can find and update it.
void Assembler::emit_arith(int op1, int op2, Register dst, jobject obj) {
  LP64_ONLY(ShouldNotReachHere());
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  InstructionMark im(this);
  emit_byte(op1);
  emit_byte(op2 | encode(dst));
  emit_data((intptr_t)obj, relocInfo::oop_type, 0);
}
248 | |
249 | |
// Register-to-register arithmetic: opcode byte then ModRM with dst in the
// reg field (bits 5:3) and src in the r/m field (bits 2:0).
void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  emit_byte(op1);
  emit_byte(op2 | encode(dst) << 3 | encode(src));
}
255 | |
256 | |
// Emit the ModRM byte, optional SIB byte, and displacement for a memory
// operand [base + index*scale + disp]. `reg` fills the ModRM reg/opcode
// field. `rip_relative_correction` accounts for immediate bytes emitted
// after the displacement when computing a RIP-relative offset (64-bit).
// NOTE(review): rbp/r13 as base cannot use the no-displacement encodings
// (those ModRM patterns mean "disp32 follows"), hence the special cases.
void Assembler::emit_operand(Register reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int rip_relative_correction) {
  relocInfo::relocType rtype = (relocInfo::relocType) rspec.type();

  // Encode the registers as needed in the fields they are used in

  int regenc = encode(reg) << 3;
  int indexenc = index->is_valid() ? encode(index) << 3 : 0;
  int baseenc = base->is_valid() ? encode(base) : 0;

  if (base->is_valid()) {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [base + index*scale + disp]
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base + index*scale]
        // [00 reg 100][ss index base]
        assert(index != rsp, "illegal addressing mode");
        emit_byte(0x04 | regenc);
        emit_byte(scale << 6 | indexenc | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [base + index*scale + imm8]
        // [01 reg 100][ss index base] imm8
        assert(index != rsp, "illegal addressing mode");
        emit_byte(0x44 | regenc);
        emit_byte(scale << 6 | indexenc | baseenc);
        emit_byte(disp & 0xFF);
      } else {
        // [base + index*scale + disp32]
        // [10 reg 100][ss index base] disp32
        assert(index != rsp, "illegal addressing mode");
        emit_byte(0x84 | regenc);
        emit_byte(scale << 6 | indexenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    } else if (base == rsp LP64_ONLY(|| base == r12)) {
      // [rsp + disp] — rsp/r12 as base always require a SIB byte.
      if (disp == 0 && rtype == relocInfo::none) {
        // [rsp]
        // [00 reg 100][00 100 100]
        emit_byte(0x04 | regenc);
        emit_byte(0x24);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [rsp + imm8]
        // [01 reg 100][00 100 100] disp8
        emit_byte(0x44 | regenc);
        emit_byte(0x24);
        emit_byte(disp & 0xFF);
      } else {
        // [rsp + imm32]
        // [10 reg 100][00 100 100] disp32
        emit_byte(0x84 | regenc);
        emit_byte(0x24);
        emit_data(disp, rspec, disp32_operand);
      }
    } else {
      // [base + disp]
      assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode");
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base]
        // [00 reg base]
        emit_byte(0x00 | regenc | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [base + disp8]
        // [01 reg base] disp8
        emit_byte(0x40 | regenc | baseenc);
        emit_byte(disp & 0xFF);
      } else {
        // [base + disp32]
        // [10 reg base] disp32
        emit_byte(0x80 | regenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    }
  } else {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [index*scale + disp]
      // [00 reg 100][ss index 101] disp32
      assert(index != rsp, "illegal addressing mode");
      emit_byte(0x04 | regenc);
      emit_byte(scale << 6 | indexenc | 0x05);
      emit_data(disp, rspec, disp32_operand);
    } else if (rtype != relocInfo::none ) {
      // [disp] (64bit) RIP-RELATIVE (32bit) abs
      // [00 000 101] disp32

      emit_byte(0x05 | regenc);
      // Note that the RIP-rel. correction applies to the generated
      // disp field, but _not_ to the target address in the rspec.

      // disp was created by converting the target address minus the pc
      // at the start of the instruction. That needs more correction here.
      // intptr_t disp = target - next_ip;
      assert(inst_mark() != NULL, "must be inside InstructionMark");
      address next_ip = pc() + sizeof(int32_t) + rip_relative_correction;
      int64_t adjusted = disp;
      // Do rip-rel adjustment for 64bit
      LP64_ONLY(adjusted -= (next_ip - inst_mark()));
      assert(is_simm32(adjusted),
             "must be 32bit offset (RIP relative address)");
      emit_data((int32_t) adjusted, rspec, disp32_operand);

    } else {
      // 32bit never did this, did everything as the rip-rel/disp code above
      // [disp] ABSOLUTE
      // [00 reg 100][00 100 101] disp32
      emit_byte(0x04 | regenc);
      emit_byte(0x25);
      emit_data(disp, rspec, disp32_operand);
    }
  }
}
374 | |
// XMM variant: the ModRM reg field is encoded identically for XMM and
// general registers, so delegate via a cast.
void Assembler::emit_operand(XMMRegister reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec) {
  emit_operand((Register)reg, base, index, scale, disp, rspec);
}
380 | |
// Secret local extension to Assembler::WhichOperand:
#define end_pc_operand (_WhichOperand_limit)

// Lightweight disassembler: walk the instruction at `inst` far enough to
// find the operand field requested by `which` (or the instruction's end).
// Used by relocation checking/patching; it must understand exactly the
// instruction shapes this assembler emits, nothing more.
address Assembler::locate_operand(address inst, WhichOperand which) {
  // Decode the given instruction, and return the address of
  // an embedded 32-bit operand word.

  // If "which" is disp32_operand, selects the displacement portion
  // of an effective address specifier.
  // If "which" is imm64_operand, selects the trailing immediate constant.
  // If "which" is call32_operand, selects the displacement of a call or jump.
  // Caller is responsible for ensuring that there is such an operand,
  // and that it is 32/64 bits wide.

  // If "which" is end_pc_operand, find the end of the instruction.

  address ip = inst;
  bool is_64bit = false;          // set when a REX.W prefix widens the operand

  debug_only(bool has_disp32 = false);
  int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn

 again_after_prefix:
  switch (0xFF & *ip++) {

  // These convenience macros generate groups of "case" labels for the switch.
#define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
#define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
             case (x)+4: case (x)+5: case (x)+6: case (x)+7
#define REP16(x) REP8((x)+0): \
              case REP8((x)+8)

  case CS_segment:
  case SS_segment:
  case DS_segment:
  case ES_segment:
  case FS_segment:
  case GS_segment:
    // Seems dubious
    LP64_ONLY(assert(false, "shouldn't have that prefix"));
    assert(ip == inst+1, "only one prefix allowed");
    goto again_after_prefix;

  case 0x67:
  case REX:
  case REX_B:
  case REX_X:
  case REX_XB:
  case REX_R:
  case REX_RB:
  case REX_RX:
  case REX_RXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    goto again_after_prefix;

  case REX_W:
  case REX_WB:
  case REX_WX:
  case REX_WXB:
  case REX_WR:
  case REX_WRB:
  case REX_WRX:
  case REX_WRXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    is_64bit = true;
    goto again_after_prefix;

  case 0xFF: // pushq a; decl a; incl a; call a; jmp a
  case 0x88: // movb a, r
  case 0x89: // movl a, r
  case 0x8A: // movb r, a
  case 0x8B: // movl r, a
  case 0x8F: // popl a
    debug_only(has_disp32 = true);
    break;

  case 0x68: // pushq #32
    if (which == end_pc_operand) {
      return ip + 4;
    }
    assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
    return ip; // not produced by emit_operand

  case 0x66: // movw ... (size prefix)
   again_after_size_prefix2:
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "64bit prefix found"));
      goto again_after_size_prefix2;
    case 0x8B: // movw r, a
    case 0x89: // movw a, r
      debug_only(has_disp32 = true);
      break;
    case 0xC7: // movw a, #16
      debug_only(has_disp32 = true);
      tail_size = 2;  // the imm16
      break;
    case 0x0F: // several SSE/SSE2 variants
      ip--;    // reparse the 0x0F
      goto again_after_prefix;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP8(0xB8): // movl/q r, #32/#64(oop?)
    if (which == end_pc_operand)  return ip + (is_64bit ? 8 : 4);
    // these asserts are somewhat nonsensical
#ifndef _LP64
    assert(which == imm_operand || which == disp32_operand, "");
#else
    assert((which == call32_operand || which == imm_operand) && is_64bit ||
           which == narrow_oop_operand && !is_64bit, "");
#endif // _LP64
    return ip;

  case 0x69: // imul r, a, #32
  case 0xC7: // movl a, #32(oop?)
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x0F: // movx..., etc.
    switch (0xFF & *ip++) {
    case 0x12: // movlps
    case 0x28: // movaps
    case 0x2E: // ucomiss
    case 0x2F: // comiss
    case 0x54: // andps
    case 0x55: // andnps
    case 0x56: // orps
    case 0x57: // xorps
    case 0x6E: // movd
    case 0x7E: // movd
    case 0xAE: // ldmxcsr a
      // 64bit side says it these have both operands but that doesn't
      // appear to be true
      debug_only(has_disp32 = true);
      break;

    case 0xAD: // shrd r, a, %cl
    case 0xAF: // imul r, a
    case 0xBE: // movsbl r, a (movsxb)
    case 0xBF: // movswl r, a (movsxw)
    case 0xB6: // movzbl r, a (movzxb)
    case 0xB7: // movzwl r, a (movzxw)
    case REP16(0x40): // cmovl cc, r, a
    case 0xB0: // cmpxchgb
    case 0xB1: // cmpxchg
    case 0xC1: // xaddl
    case 0xC7: // cmpxchg8
    case REP16(0x90): // setcc a
      debug_only(has_disp32 = true);
      // fall out of the switch to decode the address
      break;

    case 0xAC: // shrd r, a, #8
      debug_only(has_disp32 = true);
      tail_size = 1;  // the imm8
      break;

    case REP16(0x80): // jcc rdisp32
      if (which == end_pc_operand)  return ip + 4;
      assert(which == call32_operand, "jcc has no disp32 or imm");
      return ip;
    default:
      ShouldNotReachHere();
    }
    break;

  case 0x81: // addl a, #32; addl r, #32
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    // on 32bit in the case of cmpl, the imm might be an oop
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x83: // addl a, #8; addl r, #8
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1;
    break;

  case 0x9B:
    switch (0xFF & *ip++) {
    case 0xD9: // fnstcw a
      debug_only(has_disp32 = true);
      break;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
  case REP4(0x10): // adc...
  case REP4(0x20): // and...
  case REP4(0x30): // xor...
  case REP4(0x08): // or...
  case REP4(0x18): // sbb...
  case REP4(0x28): // sub...
  case 0xF7: // mull a
  case 0x8D: // lea r, a
  case 0x87: // xchg r, a
  case REP4(0x38): // cmp...
  case 0x85: // test r, a
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
  case 0xC6: // movb a, #8
  case 0x80: // cmpb a, #8
  case 0x6B: // imul r, a, #8
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1; // the imm8
    break;

  case 0xE8: // call rdisp32
  case 0xE9: // jmp  rdisp32
    if (which == end_pc_operand)  return ip + 4;
    assert(which == call32_operand, "call has no disp32 or imm");
    return ip;

  case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
  case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
  case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
  case 0xDD: // fld_d a; fst_d a; fstp_d a
  case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
  case 0xDF: // fild_d a; fistp_d a
  case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
  case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
  case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
    debug_only(has_disp32 = true);
    break;

  case 0xF0: // Lock
    assert(os::is_MP(), "only on MP");
    goto again_after_prefix;

  case 0xF3: // For SSE
  case 0xF2: // For SSE2
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "found 64bit prefix"));
      ip++;
    default:
      ip++;
    }
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  default:
    ShouldNotReachHere();

#undef REP8
#undef REP16
  }

  assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
#ifdef _LP64
  assert(which != imm_operand, "instruction is not a movq reg, imm64");
#else
  // assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
  assert(which != imm_operand || has_disp32, "instruction has no imm32 field");
#endif // LP64
  assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");

  // parse the output of emit_operand
  int op2 = 0xFF & *ip++;
  int base = op2 & 0x07;
  int op3 = -1;
  const int b100 = 4;
  const int b101 = 5;
  if (base == b100 && (op2 >> 6) != 3) {
    op3 = 0xFF & *ip++;
    base = op3 & 0x07;   // refetch the base
  }
  // now ip points at the disp (if any)

  switch (op2 >> 6) {
  case 0:
    // [00 reg  100][ss index base]
    // [00 reg  100][00   100  esp]
    // [00 reg base]
    // [00 reg  100][ss index  101][disp32]
    // [00 reg  101]               [disp32]

    if (base == b101) {
      if (which == disp32_operand)
        return ip;              // caller wants the disp32
      ip += 4;                  // skip the disp32
    }
    break;

  case 1:
    // [01 reg  100][ss index base][disp8]
    // [01 reg  100][00   100  esp][disp8]
    // [01 reg base]               [disp8]
    ip += 1;                    // skip the disp8
    break;

  case 2:
    // [10 reg  100][ss index base][disp32]
    // [10 reg  100][00   100  esp][disp32]
    // [10 reg base]               [disp32]
    if (which == disp32_operand)
      return ip;                // caller wants the disp32
    ip += 4;                    // skip the disp32
    break;

  case 3:
    // [11 reg base]  (not a memory addressing mode)
    break;
  }

  if (which == end_pc_operand) {
    return ip + tail_size;
  }

#ifdef _LP64
  assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32");
#else
  assert(which == imm_operand, "instruction has only an imm field");
#endif // LP64
  return ip;
}
736 | |
// Return the address of the instruction following `inst`, by asking
// locate_operand for the end-of-instruction pseudo-operand.
address Assembler::locate_next_instruction(address inst) {
  // Secretly share code with locate_operand:
  return locate_operand(inst, end_pc_operand);
}
741 | |
742 | |
#ifdef ASSERT
// Debug-only sanity check: verify that the relocation about to be recorded
// points exactly at the operand bytes locate_operand finds for the current
// instruction (i.e. the reloc and the emitted encoding agree).
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
  address inst = inst_mark();
  assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
  address opnd;

  Relocation* r = rspec.reloc();
  if (r->type() == relocInfo::none) {
    return;
  } else if (r->is_call() || format == call32_operand) {
    // assert(format == imm32_operand, "cannot specify a nonzero format");
    opnd = locate_operand(inst, call32_operand);
  } else if (r->is_data()) {
    assert(format == imm_operand || format == disp32_operand
           LP64_ONLY(|| format == narrow_oop_operand), "format ok");
    opnd = locate_operand(inst, (WhichOperand)format);
  } else {
    assert(format == imm_operand, "cannot specify a format");
    return;
  }
  assert(opnd == pc(), "must put operand where relocs can find it");
}
#endif // ASSERT
766 | |
// Emit a memory operand restricted to the low eight (non-REX) registers.
void Assembler::emit_operand32(Register reg, Address adr) {
  assert(reg->encoding() < 8, "no extended registers");
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}
773 | |
// Unpack an Address and forward to the full emit_operand.
void Assembler::emit_operand(Register reg, Address adr,
                             int rip_relative_correction) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               rip_relative_correction);
}
780 | |
// XMM destination with a memory source: unpack the Address and forward.
void Assembler::emit_operand(XMMRegister reg, Address adr) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}
785 | |
786 // MMX operations | |
// MMX operations
// MMX registers share the general-register field encoding; REX-extended
// base/index registers are not allowed here.
void Assembler::emit_operand(MMXRegister reg, Address adr) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}
791 | |
792 // work around gcc (3.2.1-7a) bug | |
// work around gcc (3.2.1-7a) bug
// Same as emit_operand(MMXRegister, Address) with the argument order swapped.
void Assembler::emit_operand(Address adr, MMXRegister reg) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}
797 | |
798 | |
// Emit a two-byte x87 instruction whose second byte selects FP stack slot `i`.
void Assembler::emit_farith(int b1, int b2, int i) {
  assert(isByte(b1) && isByte(b2), "wrong opcode");
  assert(0 <= i && i < 8, "illegal stack offset");
  emit_byte(b1);
  emit_byte(b2 + i);
}
805 | |
806 | |
304 | 807 // Now the Assembler instruction (identical for 32/64 bits) |
808 | |
// Now the Assembler instruction (identical for 32/64 bits)

// adcl dst, imm32 — add with carry, immediate form (0x81 family).
void Assembler::adcl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD0, dst, imm32);
}
813 | |
// adcl dst, [src] — add with carry from memory (opcode 0x13).
void Assembler::adcl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x13);
  emit_operand(dst, src);
}
820 | |
// adcl dst, src — add with carry, register-register form.
void Assembler::adcl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}
825 | |
// addl [dst], imm32 — rax supplies the ModRM opcode-extension digit for ADD.
void Assembler::addl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}
0 | 831 |
// addl [dst], src — add register into memory (opcode 0x01).
void Assembler::addl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_byte(0x01);
  emit_operand(src, dst);
}
838 | |
// addl dst, imm32 — add immediate to register (0x81 family).
void Assembler::addl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC0, dst, imm32);
}
843 | |
// addl dst, [src] — add memory into register (opcode 0x03).
void Assembler::addl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x03);
  emit_operand(dst, src);
}
850 | |
// addl dst, src — register-register add.
void Assembler::addl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}
855 | |
// Emit a single 4-byte multi-byte NOP (0F 1F /0 with an 8-bit displacement).
void Assembler::addr_nop_4() {
  // 4 bytes: NOP DWORD PTR [EAX+0]
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
  emit_byte(0);    // 8-bits offset (1 byte)
}
863 | |
// Emit a single 5-byte multi-byte NOP (0F 1F with SIB and an 8-bit disp).
void Assembler::addr_nop_5() {
  // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
  emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_byte(0);    // 8-bits offset (1 byte)
}
872 | |
873 void Assembler::addr_nop_7() { | |
874 // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset | |
875 emit_byte(0x0F); | |
876 emit_byte(0x1F); | |
877 emit_byte(0x80); // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc); | |
878 emit_long(0); // 32-bits offset (4 bytes) | |
879 } | |
880 | |
881 void Assembler::addr_nop_8() { | |
882 // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset | |
883 emit_byte(0x0F); | |
884 emit_byte(0x1F); | |
885 emit_byte(0x84); // emit_rm(cbuf, 0x2, EAX_enc, 0x4); | |
886 emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc); | |
887 emit_long(0); // 32-bits offset (4 bytes) | |
888 } | |
889 | |
// Scalar SSE adds: ADDSD (F2 0F 58, double) and ADDSS (F3 0F 58, float).
// On 32-bit VMs the SSE/SSE2 capability is asserted; on 64-bit it is
// architecturally guaranteed.

// addsd xmm, xmm
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);                 // mandatory prefix selecting the SD form
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x58);
  emit_byte(0xC0 | encode);        // ModRM: register-direct
}

// addsd xmm, m64
void Assembler::addsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x58);
  emit_operand(dst, src);
}

// addss xmm, xmm
void Assembler::addss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);                 // mandatory prefix selecting the SS form
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x58);
  emit_byte(0xC0 | encode);
}

// addss xmm, m32
void Assembler::addss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x58);
  emit_operand(dst, src);
}
927 | |
// AND -- 32-bit bitwise and, plus the packed-double ANDPD.

// and dst, imm32  (group-1 opcode 0x81 /4; ModRM base 0xE0 encodes /4).
void Assembler::andl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE0, dst, imm32);
}

// and dst, [src]  (opcode 0x23 /r).
void Assembler::andl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x23);
  emit_operand(dst, src);
}

// and dst, src  (register-register form of 0x23).
void Assembler::andl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}

// andpd xmm, m128  (66 0F 54) -- bitwise and of packed doubles.
void Assembler::andpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x54);
  emit_operand(dst, src);
}
954 | |
// bswap r32 -- reverse byte order of a 32-bit register (0F C8+rd).
void Assembler::bswapl(Register reg) { // bswap
  int encode = prefix_and_encode(reg->encoding());
  emit_byte(0x0F);
  emit_byte(0xC8 | encode);   // register number is folded into the opcode
}
960 | |
// call rel32 to a label. A bound label emits the final displacement; an
// unbound one records a patch site and emits a zero placeholder.
void Assembler::call(Label& L, relocInfo::relocType rtype) {
  // suspect disp32 is always good
  int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);

  if (L.is_bound()) {
    const int long_size = 5;                 // E8 + 4-byte displacement
    int offs = (int)( target(L) - pc() );
    assert(offs <= 0, "assembler error");    // bound labels are backward only
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    emit_byte(0xE8);
    // displacement is relative to the end of the instruction
    emit_data(offs - long_size, rtype, operand);
  } else {
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    L.add_patch_at(code(), locator());       // fix up when L is bound

    emit_byte(0xE8);
    emit_data(int(0), rtype, operand);       // placeholder displacement
  }
}
982 | |
// call r -- indirect call through a register (FF /2, register form).
void Assembler::call(Register dst) {
  // This was originally using a 32bit register encoding
  // and surely we want 64bit!
  // this is a 32bit encoding but in 64bit mode the default
  // operand size is 64bit so there is no need for the
  // wide prefix. So prefix only happens if we use the
  // new registers. Much like push/pop.
  int x = offset();   // only used by the commented-out sanity check below
  // this may be true but dbx disassembles it as if it
  // were 32bits...
  // int encode = prefix_and_encode(dst->encoding());
  // if (offset() != x) assert(dst->encoding() >= 8, "what?");
  int encode = prefixq_and_encode(dst->encoding());

  emit_byte(0xFF);
  emit_byte(0xD0 | encode);   // ModRM: /2 with register-direct dst
}
1000 | |
1001 | |
// call [adr] -- indirect call through memory (FF /2; rdx encodes the /2
// opcode extension, it is not an operand register).
void Assembler::call(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_byte(0xFF);
  emit_operand(rdx, adr);
}
1008 | |
// call rel32 to an absolute code address, with relocation info. The target
// must be reachable with a signed 32-bit displacement from the call site.
void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
  assert(entry != NULL, "call most probably wrong");
  InstructionMark im(this);
  emit_byte(0xE8);
  // displacement is relative to the byte following the 4-byte disp field
  intptr_t disp = entry - (_code_pos + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (call2)");
  // Technically, should use call32_operand, but this format is
  // implied by the fact that we're emitting a call instruction.

  int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
  emit_data((int) disp, rspec, operand);
}
1021 | |
// cdq -- sign-extend eax into edx:eax (used before idiv).
void Assembler::cdql() {
  emit_byte(0x99);
}
1025 | |
// cmovcc -- conditional move (0F 40+cc). CMOV is optional on old 32-bit
// CPUs, hence the guarantee; always present on 64-bit.

// cmovcc dst, src (register form)
void Assembler::cmovl(Condition cc, Register dst, Register src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x40 | cc);       // condition code is folded into the opcode
  emit_byte(0xC0 | encode);
}


// cmovcc dst, [src] (memory form)
// NOTE(review): unlike the other memory-operand emitters in this file this
// one has no InstructionMark, so the operand presumably must not carry a
// relocation -- confirm with callers.
void Assembler::cmovl(Condition cc, Register dst, Address src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x40 | cc);
  emit_operand(dst, src);
}
1042 | |
// CMP -- compare, setting flags only. rdi encodes the /7 opcode extension
// in the memory-immediate forms; it is not an operand register.

// cmp byte ptr [dst], imm8  (0x80 /7)
void Assembler::cmpb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x80);
  emit_operand(rdi, dst, 1);   // 1 = size of trailing immediate
  emit_byte(imm8);
}

// cmp dword ptr [dst], imm32  (0x81 /7)
void Assembler::cmpl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x81);
  emit_operand(rdi, dst, 4);
  emit_long(imm32);
}

// cmp dst, imm32  (0x81 /7 register form; ModRM base 0xF8 encodes /7)
void Assembler::cmpl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF8, dst, imm32);
}

// cmp dst, src  (opcode 0x3B /r)
void Assembler::cmpl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}


// cmp dst, [src]
void Assembler::cmpl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x3B);
  emit_operand(dst, src);
}

// cmp word ptr [dst], imm16  (66 81 /7). No REX prefix is emitted, so
// extended (r8-r15 based) addresses are explicitly ruled out.
void Assembler::cmpw(Address dst, int imm16) {
  InstructionMark im(this);
  assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers");
  emit_byte(0x66);             // operand-size override: 16-bit
  emit_byte(0x81);
  emit_operand(rdi, dst, 2);
  emit_word(imm16);
}
1085 | |
// The 32-bit cmpxchg compares the value at adr with the contents of rax,
// and stores reg into adr if so; otherwise, the value at adr is loaded into rax,.
// The ZF is set if the compared values were equal, and cleared otherwise.
void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
  if (Atomics & 2) {
    // Diagnostic mode (Atomics flag bit 1): substitute an equivalent
    // but NON-ATOMIC instruction sequence.
    // caveat: no instructionmark, so this isn't relocatable.
    // Emit a synthetic, non-atomic, CAS equivalent.
    // Beware. The synthetic form sets all ICCs, not just ZF.
    // cmpxchg r,[m] is equivalent to rax, = CAS (m, rax, r)
    cmpl(rax, adr);
    movl(rax, adr);
    if (reg != rax) {
      Label L ;
      jcc(Assembler::notEqual, L);
      movl(adr, reg);
      bind(L);
    }
  } else {
    // Real hardware cmpxchg: 0F B1 /r. Note: the lock prefix is emitted
    // separately by callers (see lock()).
    InstructionMark im(this);
    prefix(adr, reg);
    emit_byte(0x0F);
    emit_byte(0xB1);
    emit_operand(reg, adr);
  }
}
1111 | |
// comisd xmm, m64 -- ordered scalar double compare, sets EFLAGS.
// Encoded as 66 + the comiss opcode (0F 2F).
void Assembler::comisd(XMMRegister dst, Address src) {
  // NOTE: dbx seems to decode this as comiss even though the
  // 0x66 is there. Strangly ucomisd comes out correct
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  comiss(dst, src);   // delegates; comiss supplies the InstructionMark
}

// comiss xmm, m32 -- ordered scalar float compare, sets EFLAGS (0F 2F).
void Assembler::comiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));

  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x2F);
  emit_operand(dst, src);
}
1129 | |
// SSE/SSE2 conversion instructions (register-register forms). Each is the
// standard mandatory-prefix + 0F + opcode encoding; the "l" suffix on the
// integer forms denotes 32-bit GP-register operands.

// cvtdq2pd xmm, xmm -- packed int32 -> packed double (F3 0F E6)
void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xE6);
  emit_byte(0xC0 | encode);
}

// cvtdq2ps xmm, xmm -- packed int32 -> packed float (0F 5B, no prefix)
void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5B);
  emit_byte(0xC0 | encode);
}

// cvtsd2ss xmm, xmm -- scalar double -> scalar float (F2 0F 5A)
void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5A);
  emit_byte(0xC0 | encode);
}

// cvtsi2sd xmm, r32 -- int32 -> scalar double (F2 0F 2A)
void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}

// cvtsi2ss xmm, r32 -- int32 -> scalar float (F3 0F 2A)
void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}

// cvtss2sd xmm, xmm -- scalar float -> scalar double (F3 0F 5A)
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5A);
  emit_byte(0xC0 | encode);
}

// cvttsd2si r32, xmm -- scalar double -> int32, truncating (F2 0F 2C)
void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}

// cvttss2si r32, xmm -- scalar float -> int32, truncating (F3 0F 2C)
void Assembler::cvttss2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}
1200 | |
// dec dword ptr [dst]  (FF /1; rcx encodes the /1 extension).
void Assembler::decl(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrement() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xFF);
  emit_operand(rcx, dst);
}
1208 | |
// Scalar SSE divides: DIVSD (F2 0F 5E, double) and DIVSS (F3 0F 5E, float).

// divsd xmm, m64
void Assembler::divsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x5E);
  emit_operand(dst, src);
}

// divsd xmm, xmm
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5E);
  emit_byte(0xC0 | encode);
}

// divss xmm, m32
void Assembler::divss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x5E);
  emit_operand(dst, src);
}

// divss xmm, xmm
void Assembler::divss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5E);
  emit_byte(0xC0 | encode);
}
1246 | |
// emms -- empty MMX state, restoring the x87 tag word (0F 77).
void Assembler::emms() {
  NOT_LP64(assert(VM_Version::supports_mmx(), ""));
  emit_byte(0x0F);
  emit_byte(0x77);
}

// hlt -- halt; used as an illegal/stop filler, traps if ever executed.
void Assembler::hlt() {
  emit_byte(0xF4);
}
1256 | |
// idiv src -- signed 32-bit divide of edx:eax by src (F7 /7).
void Assembler::idivl(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_byte(0xF7);
  emit_byte(0xF8 | encode);   // ModRM: /7 register-direct
}

// imul dst, src -- two-operand signed multiply (0F AF /r).
void Assembler::imull(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xAF);
  emit_byte(0xC0 | encode);
}


// imul dst, src, imm -- three-operand signed multiply; uses the short
// sign-extended imm8 form (0x6B) when the immediate fits, else 0x69.
void Assembler::imull(Register dst, Register src, int value) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_byte(0x6B);
    emit_byte(0xC0 | encode);
    emit_byte(value);
  } else {
    emit_byte(0x69);
    emit_byte(0xC0 | encode);
    emit_long(value);
  }
}
1283 | |
// inc dword ptr [dst]  (FF /0; rax encodes the /0 extension).
void Assembler::incl(Address dst) {
  // Don't use it directly. Use MacroAssembler::increment() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xFF);
  emit_operand(rax, dst);
}
1291 | |
// jcc -- conditional jump to a label. Bound backward targets within reach
// (and with no relocation) use the 2-byte short form; everything else,
// including all unbound forward jumps, uses the 6-byte rel32 form.
void Assembler::jcc(Condition cc, Label& L, relocInfo::relocType rtype) {
  InstructionMark im(this);
  relocate(rtype);
  assert((0 <= cc) && (cc < 16), "illegal cc");
  if (L.is_bound()) {
    address dst = target(L);
    assert(dst != NULL, "jcc most probably wrong");

    const int short_size = 2;
    const int long_size = 6;
    intptr_t offs = (intptr_t)dst - (intptr_t)_code_pos;
    // displacement is measured from the end of the instruction, hence
    // the "- short_size" / "- long_size" adjustments
    if (rtype == relocInfo::none && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_byte(0x70 | cc);
      emit_byte((offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      assert(is_simm32(offs - long_size),
             "must be 32bit offset (call4)");
      emit_byte(0x0F);
      emit_byte(0x80 | cc);
      emit_long(offs - long_size);
    }
  } else {
    // Note: could eliminate cond. jumps to this jump if condition
    // is the same however, seems to be rather unlikely case.
    // Note: use jccb() if label to be bound is very close to get
    // an 8-bit displacement
    L.add_patch_at(code(), locator());
    emit_byte(0x0F);
    emit_byte(0x80 | cc);
    emit_long(0);   // placeholder, patched when L binds
  }
}
1326 | |
// jccb -- conditional jump forced to the 2-byte short (rel8) form. The
// caller guarantees the target is within 8-bit range; for a bound label
// this is asserted here, for a forward label the patch code checks later.
void Assembler::jccb(Condition cc, Label& L) {
  if (L.is_bound()) {
    const int short_size = 2;
    address entry = target(L);
    // the emitted displacement is relative to the instruction end
    assert(is8bit((intptr_t)entry - ((intptr_t)_code_pos + short_size)),
           "Dispacement too large for a short jmp");
    intptr_t offs = (intptr_t)entry - (intptr_t)_code_pos;
    // 0111 tttn #8-bit disp
    emit_byte(0x70 | cc);
    emit_byte((offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_byte(0x70 | cc);
    emit_byte(0);   // placeholder, patched when L binds
  }
}
1344 | |
// jmp [adr] -- indirect jump through memory (FF /4; rsp encodes the /4
// opcode extension, it is not an operand register).
void Assembler::jmp(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_byte(0xFF);
  emit_operand(rsp, adr);
}
1351 | |
// jmp -- unconditional jump to a label. Bound targets in rel8 range (and
// with no relocation) use the 2-byte EB form; otherwise the 5-byte E9
// rel32 form. Unbound forward jumps always reserve the rel32 form.
void Assembler::jmp(Label& L, relocInfo::relocType rtype) {
  if (L.is_bound()) {
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
    InstructionMark im(this);
    const int short_size = 2;
    const int long_size = 5;
    intptr_t offs = entry - _code_pos;
    // displacement is measured from the end of the instruction
    if (rtype == relocInfo::none && is8bit(offs - short_size)) {
      emit_byte(0xEB);
      emit_byte((offs - short_size) & 0xFF);
    } else {
      emit_byte(0xE9);
      emit_long(offs - long_size);
    }
  } else {
    // By default, forward jumps are always 32-bit displacements, since
    // we can't yet know where the label will be bound.  If you're sure that
    // the forward jump will not run beyond 256 bytes, use jmpb to
    // force an 8-bit displacement.
    InstructionMark im(this);
    relocate(rtype);
    L.add_patch_at(code(), locator());
    emit_byte(0xE9);
    emit_long(0);   // placeholder, patched when L binds
  }
}
1379 | |
// jmp r -- indirect jump through a register (FF /4, register form).
void Assembler::jmp(Register entry) {
  int encode = prefix_and_encode(entry->encoding());
  emit_byte(0xFF);
  emit_byte(0xE0 | encode);   // ModRM: /4 register-direct
}
1385 | |
// jmp rel32 to an absolute code address, with relocation info. The target
// must be within signed 32-bit reach of the jump site.
void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_byte(0xE9);
  assert(dest != NULL, "must have a target");
  // displacement is relative to the byte following the 4-byte disp field
  intptr_t disp = dest - (_code_pos + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (jmp)");
  emit_data(disp, rspec.reloc(), call32_operand);
}
1394 | |
1395 void Assembler::jmpb(Label& L) { | |
1396 if (L.is_bound()) { | |
1397 const int short_size = 2; | |
1398 address entry = target(L); | |
1399 assert(is8bit((entry - _code_pos) + short_size), | |
1400 "Dispacement too large for a short jmp"); | |
1401 assert(entry != NULL, "jmp most probably wrong"); | |
1402 intptr_t offs = entry - _code_pos; | |
1403 emit_byte(0xEB); | |
1404 emit_byte((offs - short_size) & 0xFF); | |
1405 } else { | |
1406 InstructionMark im(this); | |
1407 L.add_patch_at(code(), locator()); | |
1408 emit_byte(0xEB); | |
1409 emit_byte(0); | |
1410 } | |
1411 } | |
1412 | |
// ldmxcsr m32 -- load the MXCSR control/status register (0F AE /2;
// as_Register(2) supplies the /2 opcode extension).
void Assembler::ldmxcsr( Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(src);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(as_Register(2), src);
}
1421 | |
// lea dst, [src] -- 32-bit effective-address computation (8D /r).
// On 64-bit an addr32 (0x67) override forces 32-bit address arithmetic
// so the result matches the 32-bit semantics.
void Assembler::leal(Register dst, Address src) {
  InstructionMark im(this);
#ifdef _LP64
  emit_byte(0x67); // addr32
  prefix(src, dst);
#endif // _LP64
  emit_byte(0x8D);
  emit_operand(dst, src);
}
1431 | |
// lock -- emit the LOCK prefix (F0) for the following instruction.
// Diagnostic mode (Atomics flag bit 0) substitutes a NOP instead,
// deliberately removing atomicity for testing.
void Assembler::lock() {
  if (Atomics & 1) {
    // Emit either nothing, a NOP, or a NOP: prefix
    emit_byte(0x90) ;
  } else {
    emit_byte(0xF0);
  }
}
1440 | |
// Serializes memory.
void Assembler::mfence() {
  // Memory barriers are only needed on multiprocessors
  if (os::is_MP()) {
    // 64-bit always has SSE2, so LP64 unconditionally takes the mfence path.
    if( LP64_ONLY(true ||) VM_Version::supports_sse2() ) {
      emit_byte( 0x0F );                // MFENCE; faster blows no regs
      emit_byte( 0xAE );
      emit_byte( 0xF0 );
    } else {
      // All usable chips support "locked" instructions which suffice
      // as barriers, and are much faster than the alternative of
      // using cpuid instruction. We use here a locked add [esp],0.
      // This is conveniently otherwise a no-op except for blowing
      // flags (which we save and restore.)
      pushf();                // Save eflags register
      lock();
      addl(Address(rsp, 0), 0);// Assert the lock# signal here
      popf();                 // Restore eflags register
    }
  }
}
1462 | |
// mov -- pointer-sized register move: movq on 64-bit, movl on 32-bit.
void Assembler::mov(Register dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}
1466 | |
// movapd xmm, xmm -- aligned packed-double move (66 0F 28 /r).
// The REX prefix is built by hand here: one of REX_R/REX_B/REX_RB is
// emitted when either register is in the extended xmm8-xmm15 range, and
// the corresponding encoding is reduced to its low 3 bits for the ModRM.
void Assembler::movapd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int dstenc = dst->encoding();
  int srcenc = src->encoding();
  emit_byte(0x66);
  if (dstenc < 8) {
    if (srcenc >= 8) {
      prefix(REX_B);      // extend the ModRM r/m field (src)
      srcenc -= 8;
    }
  } else {
    if (srcenc < 8) {
      prefix(REX_R);      // extend the ModRM reg field (dst)
    } else {
      prefix(REX_RB);     // extend both fields
      srcenc -= 8;
    }
    dstenc -= 8;
  }
  emit_byte(0x0F);
  emit_byte(0x28);
  emit_byte(0xC0 | dstenc << 3 | srcenc);   // register-direct ModRM
}

// movaps xmm, xmm -- aligned packed-float move (0F 28 /r, no 66 prefix);
// otherwise identical REX handling to movapd above.
void Assembler::movaps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int dstenc = dst->encoding();
  int srcenc = src->encoding();
  if (dstenc < 8) {
    if (srcenc >= 8) {
      prefix(REX_B);
      srcenc -= 8;
    }
  } else {
    if (srcenc < 8) {
      prefix(REX_R);
    } else {
      prefix(REX_RB);
      srcenc -= 8;
    }
    dstenc -= 8;
  }
  emit_byte(0x0F);
  emit_byte(0x28);
  emit_byte(0xC0 | dstenc << 3 | srcenc);
}
1513 | |
// MOVB -- 8-bit moves. On 32-bit VMs only al/bl/cl/dl are byte-addressable,
// hence the has_byte_register asserts; the 'true' argument to prefix
// requests byte-register REX handling on 64-bit.

// mov r8, [src]  (opcode 0x8A /r)
void Assembler::movb(Register dst, Address src) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  InstructionMark im(this);
  prefix(src, dst, true);   // true: byte instruction
  emit_byte(0x8A);
  emit_operand(dst, src);
}


// mov byte ptr [dst], imm8  (opcode 0xC6 /0)
void Assembler::movb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xC6);
  emit_operand(rax, dst, 1);   // rax encodes /0; 1 = immediate size
  emit_byte(imm8);
}


// mov byte ptr [dst], r8  (opcode 0x88 /r)
void Assembler::movb(Address dst, Register src) {
  assert(src->has_byte_register(), "must have byte register");
  InstructionMark im(this);
  prefix(dst, src, true);   // true: byte instruction
  emit_byte(0x88);
  emit_operand(src, dst);
}
1539 | |
// movd -- 32-bit moves between GP and XMM registers (66 0F 6E / 66 0F 7E).

// movd xmm, r32
void Assembler::movdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x6E);
  emit_byte(0xC0 | encode);
}

// movd r32, xmm
void Assembler::movdl(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  // swap src/dst to get correct prefix (0F 7E encodes the xmm source in
  // the ModRM reg field)
  int encode = prefix_and_encode(src->encoding(), dst->encoding());
  emit_byte(0x0F);
  emit_byte(0x7E);
  emit_byte(0xC0 | encode);
}
1558 | |
// 128-bit XMM moves: MOVDQA (66 0F 6F/7F, aligned) and MOVDQU
// (F3 0F 6F/7F, unaligned).

// movdqa xmm, m128
void Assembler::movdqa(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x6F);
  emit_operand(dst, src);
}

// movdqa xmm, xmm
// NOTE(review): uses prefixq_and_encode (REX.W) unlike the memory forms,
// which use the plain prefix; REX.W is presumably ignored by this opcode
// -- confirm the asymmetry is intentional.
void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x6F);
  emit_byte(0xC0 | encode);
}

// movdqa m128, xmm
void Assembler::movdqa(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0x7F);
  emit_operand(src, dst);
}

// movdqu xmm, m128
void Assembler::movdqu(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x6F);
  emit_operand(dst, src);
}

// movdqu xmm, xmm
// NOTE(review): same REX.W asymmetry as movdqa(XMM, XMM) above.
void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x6F);
  emit_byte(0xC0 | encode);
}

// movdqu m128, xmm
void Assembler::movdqu(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0x7F);
  emit_operand(src, dst);
}
1616 | |
// MOVL -- 32-bit moves. Uses zero extension on 64bit: writing a 32-bit
// register implicitly clears the upper 32 bits.

// mov dst, imm32  (B8+rd imm32; register number folded into the opcode)
void Assembler::movl(Register dst, int32_t imm32) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_long(imm32);
}

// mov dst, src  (opcode 0x8B /r)
void Assembler::movl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x8B);
  emit_byte(0xC0 | encode);
}

// mov dst, [src]
void Assembler::movl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x8B);
  emit_operand(dst, src);
}

// mov dword ptr [dst], imm32  (opcode 0xC7 /0; rax encodes /0)
void Assembler::movl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xC7);
  emit_operand(rax, dst, 4);   // 4 = size of trailing immediate
  emit_long(imm32);
}

// mov [dst], src  (opcode 0x89 /r)
void Assembler::movl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_byte(0x89);
  emit_operand(src, dst);
}
1652 | |
// New cpus require to use movsd and movss to avoid partial register stall
// when loading from memory. But for old Opteron use movlpd instead of movsd.
// The selection is done in MacroAssembler::movdbl() and movflt().

// movlpd xmm, m64 -- load low quadword (66 0F 12).
void Assembler::movlpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x12);
  emit_operand(dst, src);
}
1665 | |
// MOVQ -- 64-bit moves via MMX (0F 6F/7F) or XMM (F3 0F 7E / 66 0F D6)
// registers. The MMX forms emit no prefix and are 32-bit-VM only paths.

// movq mmx, m64
void Assembler::movq( MMXRegister dst, Address src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_byte(0x0F);
  emit_byte(0x6F);
  emit_operand(dst, src);
}

// movq m64, mmx
void Assembler::movq( Address dst, MMXRegister src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_byte(0x0F);
  emit_byte(0x7F);
  // workaround gcc (3.2.1-7a) bug
  // In that version of gcc with only an emit_operand(MMX, Address)
  // gcc will tail jump and try and reverse the parameters completely
  // obliterating dst in the process. By having a version available
  // that doesn't need to swap the args at the tail jump the bug is
  // avoided.
  emit_operand(dst, src);
}

// movq xmm, m64  (F3 0F 7E; zeroes the upper half of the xmm)
void Assembler::movq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x7E);
  emit_operand(dst, src);
}

// movq m64, xmm  (66 0F D6)
void Assembler::movq(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0xD6);
  emit_operand(src, dst);
}
1705 | |
// movsx r32, r/m8 -- sign-extending byte load (0F BE).

void Assembler::movsbl(Register dst, Address src) { // movsxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xBE);
  emit_operand(dst, src);
}

void Assembler::movsbl(Register dst, Register src) { // movsxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);   // true: byte source
  emit_byte(0x0F);
  emit_byte(0xBE);
  emit_byte(0xC0 | encode);
}
1721 | |
// Scalar SSE moves: MOVSD (F2 0F 10/11, double) and MOVSS
// (F3 0F 10/11, float). Opcode 0x10 loads, 0x11 stores.

// movsd xmm, xmm
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x10);
  emit_byte(0xC0 | encode);
}

// movsd xmm, m64
void Assembler::movsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x10);
  emit_operand(dst, src);
}

// movsd m64, xmm
void Assembler::movsd(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0x11);
  emit_operand(src, dst);
}

// movss xmm, xmm
void Assembler::movss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x10);
  emit_byte(0xC0 | encode);
}

// movss xmm, m32
void Assembler::movss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x10);
  emit_operand(dst, src);
}

// movss m32, xmm
void Assembler::movss(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0x11);
  emit_operand(src, dst);
}
1779 | |
// movsx r32, r/m16 -- sign-extending word load (0F BF).

void Assembler::movswl(Register dst, Address src) { // movsxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xBF);
  emit_operand(dst, src);
}

void Assembler::movswl(Register dst, Register src) { // movsxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xBF);
  emit_byte(0xC0 | encode);
}
1794 | |
// MOVW -- 16-bit moves; the 0x66 operand-size override selects the
// 16-bit forms of the ordinary mov opcodes.

// mov word ptr [dst], imm16  (66 C7 /0)
void Assembler::movw(Address dst, int imm16) {
  InstructionMark im(this);

  emit_byte(0x66); // switch to 16-bit mode
  prefix(dst);
  emit_byte(0xC7);
  emit_operand(rax, dst, 2);   // rax encodes /0; 2 = immediate size
  emit_word(imm16);
}

// mov r16, [src]  (66 8B /r)
void Assembler::movw(Register dst, Address src) {
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x8B);
  emit_operand(dst, src);
}

// mov [dst], r16  (66 89 /r)
void Assembler::movw(Address dst, Register src) {
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(dst, src);
  emit_byte(0x89);
  emit_operand(src, dst);
}
1820 | |
1821 void Assembler::movzbl(Register dst, Address src) { // movzxb | |
1822 InstructionMark im(this); | |
1823 prefix(src, dst); | |
1824 emit_byte(0x0F); | |
1825 emit_byte(0xB6); | |
1826 emit_operand(dst, src); | |
1827 } | |
1828 | |
1829 void Assembler::movzbl(Register dst, Register src) { // movzxb | |
1830 NOT_LP64(assert(src->has_byte_register(), "must have byte register")); | |
1831 int encode = prefix_and_encode(dst->encoding(), src->encoding(), true); | |
1832 emit_byte(0x0F); | |
1833 emit_byte(0xB6); | |
1834 emit_byte(0xC0 | encode); | |
1835 } | |
1836 | |
1837 void Assembler::movzwl(Register dst, Address src) { // movzxw | |
1838 InstructionMark im(this); | |
1839 prefix(src, dst); | |
1840 emit_byte(0x0F); | |
1841 emit_byte(0xB7); | |
1842 emit_operand(dst, src); | |
1843 } | |
1844 | |
1845 void Assembler::movzwl(Register dst, Register src) { // movzxw | |
1846 int encode = prefix_and_encode(dst->encoding(), src->encoding()); | |
1847 emit_byte(0x0F); | |
1848 emit_byte(0xB7); | |
1849 emit_byte(0xC0 | encode); | |
1850 } | |
1851 | |
1852 void Assembler::mull(Address src) { | |
1853 InstructionMark im(this); | |
1854 prefix(src); | |
1855 emit_byte(0xF7); | |
1856 emit_operand(rsp, src); | |
1857 } | |
1858 | |
1859 void Assembler::mull(Register src) { | |
1860 int encode = prefix_and_encode(src->encoding()); | |
1861 emit_byte(0xF7); | |
1862 emit_byte(0xE0 | encode); | |
1863 } | |
1864 | |
1865 void Assembler::mulsd(XMMRegister dst, Address src) { | |
1866 NOT_LP64(assert(VM_Version::supports_sse2(), "")); | |
1867 InstructionMark im(this); | |
1868 emit_byte(0xF2); | |
1869 prefix(src, dst); | |
1870 emit_byte(0x0F); | |
1871 emit_byte(0x59); | |
1872 emit_operand(dst, src); | |
1873 } | |
1874 | |
1875 void Assembler::mulsd(XMMRegister dst, XMMRegister src) { | |
1876 NOT_LP64(assert(VM_Version::supports_sse2(), "")); | |
1877 emit_byte(0xF2); | |
1878 int encode = prefix_and_encode(dst->encoding(), src->encoding()); | |
1879 emit_byte(0x0F); | |
1880 emit_byte(0x59); | |
1881 emit_byte(0xC0 | encode); | |
1882 } | |
1883 | |
1884 void Assembler::mulss(XMMRegister dst, Address src) { | |
1885 NOT_LP64(assert(VM_Version::supports_sse(), "")); | |
1886 InstructionMark im(this); | |
1887 emit_byte(0xF3); | |
1888 prefix(src, dst); | |
1889 emit_byte(0x0F); | |
1890 emit_byte(0x59); | |
1891 emit_operand(dst, src); | |
1892 } | |
1893 | |
1894 void Assembler::mulss(XMMRegister dst, XMMRegister src) { | |
1895 NOT_LP64(assert(VM_Version::supports_sse(), "")); | |
1896 emit_byte(0xF3); | |
1897 int encode = prefix_and_encode(dst->encoding(), src->encoding()); | |
1898 emit_byte(0x0F); | |
1899 emit_byte(0x59); | |
1900 emit_byte(0xC0 | encode); | |
1901 } | |
1902 | |
1903 void Assembler::negl(Register dst) { | |
1904 int encode = prefix_and_encode(dst->encoding()); | |
1905 emit_byte(0xF7); | |
1906 emit_byte(0xD8 | encode); | |
1907 } | |
1908 | |
0 | 1909 void Assembler::nop(int i) { |
304 | 1910 #ifdef ASSERT |
0 | 1911 assert(i > 0, " "); |
304 | 1912 // The fancy nops aren't currently recognized by debuggers making it a |
1913 // pain to disassemble code while debugging. If asserts are on clearly | |
1914 // speed is not an issue so simply use the single byte traditional nop | |
1915 // to do alignment. | |
1916 | |
1917 for (; i > 0 ; i--) emit_byte(0x90); | |
1918 return; | |
1919 | |
1920 #endif // ASSERT | |
1921 | |
0 | 1922 if (UseAddressNop && VM_Version::is_intel()) { |
1923 // | |
1924 // Using multi-bytes nops "0x0F 0x1F [address]" for Intel | |
1925 // 1: 0x90 | |
1926 // 2: 0x66 0x90 | |
1927 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) | |
1928 // 4: 0x0F 0x1F 0x40 0x00 | |
1929 // 5: 0x0F 0x1F 0x44 0x00 0x00 | |
1930 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 | |
1931 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 | |
1932 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 | |
1933 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 | |
1934 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 | |
1935 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 | |
1936 | |
1937 // The rest coding is Intel specific - don't use consecutive address nops | |
1938 | |
1939 // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 | |
1940 // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 | |
1941 // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 | |
1942 // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 | |
1943 | |
1944 while(i >= 15) { | |
1945 // For Intel don't generate consecutive addess nops (mix with regular nops) | |
1946 i -= 15; | |
1947 emit_byte(0x66); // size prefix | |
1948 emit_byte(0x66); // size prefix | |
1949 emit_byte(0x66); // size prefix | |
1950 addr_nop_8(); | |
1951 emit_byte(0x66); // size prefix | |
1952 emit_byte(0x66); // size prefix | |
1953 emit_byte(0x66); // size prefix | |
1954 emit_byte(0x90); // nop | |
1955 } | |
1956 switch (i) { | |
1957 case 14: | |
1958 emit_byte(0x66); // size prefix | |
1959 case 13: | |
1960 emit_byte(0x66); // size prefix | |
1961 case 12: | |
1962 addr_nop_8(); | |
1963 emit_byte(0x66); // size prefix | |
1964 emit_byte(0x66); // size prefix | |
1965 emit_byte(0x66); // size prefix | |
1966 emit_byte(0x90); // nop | |
1967 break; | |
1968 case 11: | |
1969 emit_byte(0x66); // size prefix | |
1970 case 10: | |
1971 emit_byte(0x66); // size prefix | |
1972 case 9: | |
1973 emit_byte(0x66); // size prefix | |
1974 case 8: | |
1975 addr_nop_8(); | |
1976 break; | |
1977 case 7: | |
1978 addr_nop_7(); | |
1979 break; | |
1980 case 6: | |
1981 emit_byte(0x66); // size prefix | |
1982 case 5: | |
1983 addr_nop_5(); | |
1984 break; | |
1985 case 4: | |
1986 addr_nop_4(); | |
1987 break; | |
1988 case 3: | |
1989 // Don't use "0x0F 0x1F 0x00" - need patching safe padding | |
1990 emit_byte(0x66); // size prefix | |
1991 case 2: | |
1992 emit_byte(0x66); // size prefix | |
1993 case 1: | |
1994 emit_byte(0x90); // nop | |
1995 break; | |
1996 default: | |
1997 assert(i == 0, " "); | |
1998 } | |
1999 return; | |
2000 } | |
2001 if (UseAddressNop && VM_Version::is_amd()) { | |
2002 // | |
2003 // Using multi-bytes nops "0x0F 0x1F [address]" for AMD. | |
2004 // 1: 0x90 | |
2005 // 2: 0x66 0x90 | |
2006 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) | |
2007 // 4: 0x0F 0x1F 0x40 0x00 | |
2008 // 5: 0x0F 0x1F 0x44 0x00 0x00 | |
2009 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 | |
2010 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 | |
2011 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 | |
2012 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 | |
2013 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 | |
2014 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 | |
2015 | |
2016 // The rest coding is AMD specific - use consecutive address nops | |
2017 | |
2018 // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00 | |
2019 // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00 | |
2020 // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 | |
2021 // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 | |
2022 // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 | |
2023 // Size prefixes (0x66) are added for larger sizes | |
2024 | |
2025 while(i >= 22) { | |
2026 i -= 11; | |
2027 emit_byte(0x66); // size prefix | |
2028 emit_byte(0x66); // size prefix | |
2029 emit_byte(0x66); // size prefix | |
2030 addr_nop_8(); | |
2031 } | |
2032 // Generate first nop for size between 21-12 | |
2033 switch (i) { | |
2034 case 21: | |
2035 i -= 1; | |
2036 emit_byte(0x66); // size prefix | |
2037 case 20: | |
2038 case 19: | |
2039 i -= 1; | |
2040 emit_byte(0x66); // size prefix | |
2041 case 18: | |
2042 case 17: | |
2043 i -= 1; | |
2044 emit_byte(0x66); // size prefix | |
2045 case 16: | |
2046 case 15: | |
2047 i -= 8; | |
2048 addr_nop_8(); | |
2049 break; | |
2050 case 14: | |
2051 case 13: | |
2052 i -= 7; | |
2053 addr_nop_7(); | |
2054 break; | |
2055 case 12: | |
2056 i -= 6; | |
2057 emit_byte(0x66); // size prefix | |
2058 addr_nop_5(); | |
2059 break; | |
2060 default: | |
2061 assert(i < 12, " "); | |
2062 } | |
2063 | |
2064 // Generate second nop for size between 11-1 | |
2065 switch (i) { | |
2066 case 11: | |
2067 emit_byte(0x66); // size prefix | |
2068 case 10: | |
2069 emit_byte(0x66); // size prefix | |
2070 case 9: | |
2071 emit_byte(0x66); // size prefix | |
2072 case 8: | |
2073 addr_nop_8(); | |
2074 break; | |
2075 case 7: | |
2076 addr_nop_7(); | |
2077 break; | |
2078 case 6: | |
2079 emit_byte(0x66); // size prefix | |
2080 case 5: | |
2081 addr_nop_5(); | |
2082 break; | |
2083 case 4: | |
2084 addr_nop_4(); | |
2085 break; | |
2086 case 3: | |
2087 // Don't use "0x0F 0x1F 0x00" - need patching safe padding | |
2088 emit_byte(0x66); // size prefix | |
2089 case 2: | |
2090 emit_byte(0x66); // size prefix | |
2091 case 1: | |
2092 emit_byte(0x90); // nop | |
2093 break; | |
2094 default: | |
2095 assert(i == 0, " "); | |
2096 } | |
2097 return; | |
2098 } | |
2099 | |
2100 // Using nops with size prefixes "0x66 0x90". | |
2101 // From AMD Optimization Guide: | |
2102 // 1: 0x90 | |
2103 // 2: 0x66 0x90 | |
2104 // 3: 0x66 0x66 0x90 | |
2105 // 4: 0x66 0x66 0x66 0x90 | |
2106 // 5: 0x66 0x66 0x90 0x66 0x90 | |
2107 // 6: 0x66 0x66 0x90 0x66 0x66 0x90 | |
2108 // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 | |
2109 // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90 | |
2110 // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 | |
2111 // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 | |
2112 // | |
2113 while(i > 12) { | |
2114 i -= 4; | |
2115 emit_byte(0x66); // size prefix | |
2116 emit_byte(0x66); | |
2117 emit_byte(0x66); | |
2118 emit_byte(0x90); // nop | |
2119 } | |
2120 // 1 - 12 nops | |
2121 if(i > 8) { | |
2122 if(i > 9) { | |
2123 i -= 1; | |
2124 emit_byte(0x66); | |
2125 } | |
2126 i -= 3; | |
2127 emit_byte(0x66); | |
2128 emit_byte(0x66); | |
2129 emit_byte(0x90); | |
2130 } | |
2131 // 1 - 8 nops | |
2132 if(i > 4) { | |
2133 if(i > 6) { | |
2134 i -= 1; | |
2135 emit_byte(0x66); | |
2136 } | |
2137 i -= 3; | |
2138 emit_byte(0x66); | |
2139 emit_byte(0x66); | |
2140 emit_byte(0x90); | |
2141 } | |
2142 switch (i) { | |
2143 case 4: | |
2144 emit_byte(0x66); | |
2145 case 3: | |
2146 emit_byte(0x66); | |
2147 case 2: | |
2148 emit_byte(0x66); | |
2149 case 1: | |
2150 emit_byte(0x90); | |
2151 break; | |
2152 default: | |
2153 assert(i == 0, " "); | |
2154 } | |
2155 } | |
2156 | |
304 | 2157 void Assembler::notl(Register dst) { |
2158 int encode = prefix_and_encode(dst->encoding()); | |
2159 emit_byte(0xF7); | |
2160 emit_byte(0xD0 | encode ); | |
2161 } | |
2162 | |
2163 void Assembler::orl(Address dst, int32_t imm32) { | |
2164 InstructionMark im(this); | |
2165 prefix(dst); | |
2166 emit_byte(0x81); | |
2167 emit_operand(rcx, dst, 4); | |
2168 emit_long(imm32); | |
2169 } | |
2170 | |
2171 void Assembler::orl(Register dst, int32_t imm32) { | |
2172 prefix(dst); | |
2173 emit_arith(0x81, 0xC8, dst, imm32); | |
2174 } | |
2175 | |
2176 | |
2177 void Assembler::orl(Register dst, Address src) { | |
2178 InstructionMark im(this); | |
2179 prefix(src, dst); | |
2180 emit_byte(0x0B); | |
2181 emit_operand(dst, src); | |
2182 } | |
2183 | |
2184 | |
2185 void Assembler::orl(Register dst, Register src) { | |
2186 (void) prefix_and_encode(dst->encoding(), src->encoding()); | |
2187 emit_arith(0x0B, 0xC0, dst, src); | |
2188 } | |
2189 | |
2190 // generic | |
2191 void Assembler::pop(Register dst) { | |
2192 int encode = prefix_and_encode(dst->encoding()); | |
2193 emit_byte(0x58 | encode); | |
2194 } | |
2195 | |
2196 void Assembler::popf() { | |
2197 emit_byte(0x9D); | |
2198 } | |
2199 | |
2200 void Assembler::popl(Address dst) { | |
2201 // NOTE: this will adjust stack by 8byte on 64bits | |
2202 InstructionMark im(this); | |
2203 prefix(dst); | |
2204 emit_byte(0x8F); | |
2205 emit_operand(rax, dst); | |
2206 } | |
2207 | |
2208 void Assembler::prefetch_prefix(Address src) { | |
2209 prefix(src); | |
2210 emit_byte(0x0F); | |
2211 } | |
2212 | |
2213 void Assembler::prefetchnta(Address src) { | |
2214 NOT_LP64(assert(VM_Version::supports_sse2(), "must support")); | |
2215 InstructionMark im(this); | |
2216 prefetch_prefix(src); | |
2217 emit_byte(0x18); | |
2218 emit_operand(rax, src); // 0, src | |
2219 } | |
2220 | |
2221 void Assembler::prefetchr(Address src) { | |
2222 NOT_LP64(assert(VM_Version::supports_3dnow(), "must support")); | |
2223 InstructionMark im(this); | |
2224 prefetch_prefix(src); | |
2225 emit_byte(0x0D); | |
2226 emit_operand(rax, src); // 0, src | |
2227 } | |
2228 | |
2229 void Assembler::prefetcht0(Address src) { | |
2230 NOT_LP64(assert(VM_Version::supports_sse(), "must support")); | |
2231 InstructionMark im(this); | |
2232 prefetch_prefix(src); | |
2233 emit_byte(0x18); | |
2234 emit_operand(rcx, src); // 1, src | |
2235 } | |
2236 | |
2237 void Assembler::prefetcht1(Address src) { | |
2238 NOT_LP64(assert(VM_Version::supports_sse(), "must support")); | |
2239 InstructionMark im(this); | |
2240 prefetch_prefix(src); | |
2241 emit_byte(0x18); | |
2242 emit_operand(rdx, src); // 2, src | |
2243 } | |
2244 | |
2245 void Assembler::prefetcht2(Address src) { | |
2246 NOT_LP64(assert(VM_Version::supports_sse(), "must support")); | |
2247 InstructionMark im(this); | |
2248 prefetch_prefix(src); | |
2249 emit_byte(0x18); | |
2250 emit_operand(rbx, src); // 3, src | |
2251 } | |
2252 | |
2253 void Assembler::prefetchw(Address src) { | |
2254 NOT_LP64(assert(VM_Version::supports_3dnow(), "must support")); | |
2255 InstructionMark im(this); | |
2256 prefetch_prefix(src); | |
2257 emit_byte(0x0D); | |
2258 emit_operand(rcx, src); // 1, src | |
2259 } | |
2260 | |
2261 void Assembler::prefix(Prefix p) { | |
2262 a_byte(p); | |
2263 } | |
2264 | |
2265 void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) { | |
2266 assert(isByte(mode), "invalid value"); | |
2267 NOT_LP64(assert(VM_Version::supports_sse2(), "")); | |
2268 | |
2269 emit_byte(0x66); | |
2270 int encode = prefix_and_encode(dst->encoding(), src->encoding()); | |
2271 emit_byte(0x0F); | |
2272 emit_byte(0x70); | |
2273 emit_byte(0xC0 | encode); | |
2274 emit_byte(mode & 0xFF); | |
2275 | |
2276 } | |
2277 | |
2278 void Assembler::pshufd(XMMRegister dst, Address src, int mode) { | |
2279 assert(isByte(mode), "invalid value"); | |
2280 NOT_LP64(assert(VM_Version::supports_sse2(), "")); | |
2281 | |
2282 InstructionMark im(this); | |
2283 emit_byte(0x66); | |
2284 prefix(src, dst); | |
2285 emit_byte(0x0F); | |
2286 emit_byte(0x70); | |
2287 emit_operand(dst, src); | |
2288 emit_byte(mode & 0xFF); | |
2289 } | |
2290 | |
2291 void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) { | |
2292 assert(isByte(mode), "invalid value"); | |
2293 NOT_LP64(assert(VM_Version::supports_sse2(), "")); | |
2294 | |
2295 emit_byte(0xF2); | |
2296 int encode = prefix_and_encode(dst->encoding(), src->encoding()); | |
2297 emit_byte(0x0F); | |
2298 emit_byte(0x70); | |
2299 emit_byte(0xC0 | encode); | |
2300 emit_byte(mode & 0xFF); | |
2301 } | |
2302 | |
2303 void Assembler::pshuflw(XMMRegister dst, Address src, int mode) { | |
2304 assert(isByte(mode), "invalid value"); | |
2305 NOT_LP64(assert(VM_Version::supports_sse2(), "")); | |
2306 | |
2307 InstructionMark im(this); | |
2308 emit_byte(0xF2); | |
2309 prefix(src, dst); // QQ new | |
2310 emit_byte(0x0F); | |
2311 emit_byte(0x70); | |
2312 emit_operand(dst, src); | |
2313 emit_byte(mode & 0xFF); | |
2314 } | |
2315 | |
2316 void Assembler::psrlq(XMMRegister dst, int shift) { | |
2317 // HMM Table D-1 says sse2 or mmx | |
2318 NOT_LP64(assert(VM_Version::supports_sse(), "")); | |
2319 | |
2320 int encode = prefixq_and_encode(xmm2->encoding(), dst->encoding()); | |
2321 emit_byte(0x66); | |
2322 emit_byte(0x0F); | |
2323 emit_byte(0x73); | |
2324 emit_byte(0xC0 | encode); | |
2325 emit_byte(shift); | |
2326 } | |
2327 | |
2328 void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) { | |
2329 NOT_LP64(assert(VM_Version::supports_sse2(), "")); | |
2330 emit_byte(0x66); | |
2331 int encode = prefix_and_encode(dst->encoding(), src->encoding()); | |
2332 emit_byte(0x0F); | |
2333 emit_byte(0x60); | |
2334 emit_byte(0xC0 | encode); | |
2335 } | |
2336 | |
2337 void Assembler::push(int32_t imm32) { | |
2338 // in 64bits we push 64bits onto the stack but only | |
2339 // take a 32bit immediate | |
2340 emit_byte(0x68); | |
2341 emit_long(imm32); | |
2342 } | |
2343 | |
2344 void Assembler::push(Register src) { | |
2345 int encode = prefix_and_encode(src->encoding()); | |
2346 | |
2347 emit_byte(0x50 | encode); | |
2348 } | |
2349 | |
2350 void Assembler::pushf() { | |
2351 emit_byte(0x9C); | |
2352 } | |
2353 | |
2354 void Assembler::pushl(Address src) { | |
2355 // Note this will push 64bit on 64bit | |
2356 InstructionMark im(this); | |
2357 prefix(src); | |
2358 emit_byte(0xFF); | |
2359 emit_operand(rsi, src); | |
2360 } | |
2361 | |
2362 void Assembler::pxor(XMMRegister dst, Address src) { | |
2363 NOT_LP64(assert(VM_Version::supports_sse2(), "")); | |
2364 InstructionMark im(this); | |
2365 emit_byte(0x66); | |
2366 prefix(src, dst); | |
2367 emit_byte(0x0F); | |
2368 emit_byte(0xEF); | |
2369 emit_operand(dst, src); | |
2370 } | |
2371 | |
2372 void Assembler::pxor(XMMRegister dst, XMMRegister src) { | |
2373 NOT_LP64(assert(VM_Version::supports_sse2(), "")); | |
2374 InstructionMark im(this); | |
2375 emit_byte(0x66); | |
2376 int encode = prefix_and_encode(dst->encoding(), src->encoding()); | |
2377 emit_byte(0x0F); | |
2378 emit_byte(0xEF); | |
2379 emit_byte(0xC0 | encode); | |
2380 } | |
2381 | |
2382 void Assembler::rcll(Register dst, int imm8) { | |
2383 assert(isShiftCount(imm8), "illegal shift count"); | |
2384 int encode = prefix_and_encode(dst->encoding()); | |
2385 if (imm8 == 1) { | |
2386 emit_byte(0xD1); | |
2387 emit_byte(0xD0 | encode); | |
2388 } else { | |
2389 emit_byte(0xC1); | |
2390 emit_byte(0xD0 | encode); | |
2391 emit_byte(imm8); | |
2392 } | |
2393 } | |
2394 | |
2395 // copies data from [esi] to [edi] using rcx pointer sized words | |
2396 // generic | |
2397 void Assembler::rep_mov() { | |
2398 emit_byte(0xF3); | |
2399 // MOVSQ | |
2400 LP64_ONLY(prefix(REX_W)); | |
2401 emit_byte(0xA5); | |
2402 } | |
2403 | |
2404 // sets rcx pointer sized words with rax, value at [edi] | |
2405 // generic | |
2406 void Assembler::rep_set() { // rep_set | |
2407 emit_byte(0xF3); | |
2408 // STOSQ | |
2409 LP64_ONLY(prefix(REX_W)); | |
2410 emit_byte(0xAB); | |
2411 } | |
2412 | |
2413 // scans rcx pointer sized words at [edi] for occurance of rax, | |
2414 // generic | |
2415 void Assembler::repne_scan() { // repne_scan | |
2416 emit_byte(0xF2); | |
2417 // SCASQ | |
2418 LP64_ONLY(prefix(REX_W)); | |
2419 emit_byte(0xAF); | |
2420 } | |
2421 | |
2422 #ifdef _LP64 | |
2423 // scans rcx 4 byte words at [edi] for occurance of rax, | |
2424 // generic | |
2425 void Assembler::repne_scanl() { // repne_scan | |
2426 emit_byte(0xF2); | |
2427 // SCASL | |
2428 emit_byte(0xAF); | |
2429 } | |
2430 #endif | |
2431 | |
0 | 2432 void Assembler::ret(int imm16) { |
2433 if (imm16 == 0) { | |
2434 emit_byte(0xC3); | |
2435 } else { | |
2436 emit_byte(0xC2); | |
2437 emit_word(imm16); | |
2438 } | |
2439 } | |
2440 | |
304 | 2441 void Assembler::sahf() { |
2442 #ifdef _LP64 | |
2443 // Not supported in 64bit mode | |
2444 ShouldNotReachHere(); | |
2445 #endif | |
2446 emit_byte(0x9E); | |
2447 } | |
2448 | |
2449 void Assembler::sarl(Register dst, int imm8) { | |
2450 int encode = prefix_and_encode(dst->encoding()); | |
2451 assert(isShiftCount(imm8), "illegal shift count"); | |
2452 if (imm8 == 1) { | |
2453 emit_byte(0xD1); | |
2454 emit_byte(0xF8 | encode); | |
2455 } else { | |
2456 emit_byte(0xC1); | |
2457 emit_byte(0xF8 | encode); | |
2458 emit_byte(imm8); | |
2459 } | |
2460 } | |
2461 | |
2462 void Assembler::sarl(Register dst) { | |
2463 int encode = prefix_and_encode(dst->encoding()); | |
2464 emit_byte(0xD3); | |
2465 emit_byte(0xF8 | encode); | |
2466 } | |
2467 | |
2468 void Assembler::sbbl(Address dst, int32_t imm32) { | |
2469 InstructionMark im(this); | |
2470 prefix(dst); | |
2471 emit_arith_operand(0x81, rbx, dst, imm32); | |
2472 } | |
2473 | |
2474 void Assembler::sbbl(Register dst, int32_t imm32) { | |
2475 prefix(dst); | |
2476 emit_arith(0x81, 0xD8, dst, imm32); | |
2477 } | |
2478 | |
2479 | |
2480 void Assembler::sbbl(Register dst, Address src) { | |
2481 InstructionMark im(this); | |
2482 prefix(src, dst); | |
2483 emit_byte(0x1B); | |
2484 emit_operand(dst, src); | |
2485 } | |
2486 | |
2487 void Assembler::sbbl(Register dst, Register src) { | |
2488 (void) prefix_and_encode(dst->encoding(), src->encoding()); | |
2489 emit_arith(0x1B, 0xC0, dst, src); | |
2490 } | |
2491 | |
2492 void Assembler::setb(Condition cc, Register dst) { | |
2493 assert(0 <= cc && cc < 16, "illegal cc"); | |
2494 int encode = prefix_and_encode(dst->encoding(), true); | |
0 | 2495 emit_byte(0x0F); |
304 | 2496 emit_byte(0x90 | cc); |
2497 emit_byte(0xC0 | encode); | |
2498 } | |
2499 | |
2500 void Assembler::shll(Register dst, int imm8) { | |
2501 assert(isShiftCount(imm8), "illegal shift count"); | |
2502 int encode = prefix_and_encode(dst->encoding()); | |
2503 if (imm8 == 1 ) { | |
2504 emit_byte(0xD1); | |
2505 emit_byte(0xE0 | encode); | |
2506 } else { | |
2507 emit_byte(0xC1); | |
2508 emit_byte(0xE0 | encode); | |
2509 emit_byte(imm8); | |
2510 } | |
2511 } | |
2512 | |
2513 void Assembler::shll(Register dst) { | |
2514 int encode = prefix_and_encode(dst->encoding()); | |
2515 emit_byte(0xD3); | |
2516 emit_byte(0xE0 | encode); | |
2517 } | |
2518 | |
2519 void Assembler::shrl(Register dst, int imm8) { | |
2520 assert(isShiftCount(imm8), "illegal shift count"); | |
2521 int encode = prefix_and_encode(dst->encoding()); | |
2522 emit_byte(0xC1); | |
2523 emit_byte(0xE8 | encode); | |
2524 emit_byte(imm8); | |
2525 } | |
2526 | |
2527 void Assembler::shrl(Register dst) { | |
2528 int encode = prefix_and_encode(dst->encoding()); | |
2529 emit_byte(0xD3); | |
2530 emit_byte(0xE8 | encode); | |
2531 } | |
0 | 2532 |
2533 // copies a single word from [esi] to [edi] | |
2534 void Assembler::smovl() { | |
2535 emit_byte(0xA5); | |
2536 } | |
2537 | |
304 | 2538 void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) { |
2539 // HMM Table D-1 says sse2 | |
2540 // NOT_LP64(assert(VM_Version::supports_sse(), "")); | |
2541 NOT_LP64(assert(VM_Version::supports_sse2(), "")); | |
2542 emit_byte(0xF2); | |
2543 int encode = prefix_and_encode(dst->encoding(), src->encoding()); | |
2544 emit_byte(0x0F); | |
2545 emit_byte(0x51); | |
2546 emit_byte(0xC0 | encode); | |
2547 } | |
2548 | |
2549 void Assembler::stmxcsr( Address dst) { | |
2550 NOT_LP64(assert(VM_Version::supports_sse(), "")); | |
2551 InstructionMark im(this); | |
2552 prefix(dst); | |
2553 emit_byte(0x0F); | |
2554 emit_byte(0xAE); | |
2555 emit_operand(as_Register(3), dst); | |
2556 } | |
2557 | |
2558 void Assembler::subl(Address dst, int32_t imm32) { | |
2559 InstructionMark im(this); | |
2560 prefix(dst); | |
2561 if (is8bit(imm32)) { | |
2562 emit_byte(0x83); | |
2563 emit_operand(rbp, dst, 1); | |
2564 emit_byte(imm32 & 0xFF); | |
2565 } else { | |
2566 emit_byte(0x81); | |
2567 emit_operand(rbp, dst, 4); | |
2568 emit_long(imm32); | |
2569 } | |
2570 } | |
2571 | |
2572 void Assembler::subl(Register dst, int32_t imm32) { | |
2573 prefix(dst); | |
2574 emit_arith(0x81, 0xE8, dst, imm32); | |
2575 } | |
2576 | |
2577 void Assembler::subl(Address dst, Register src) { | |
2578 InstructionMark im(this); | |
2579 prefix(dst, src); | |
2580 emit_byte(0x29); | |
2581 emit_operand(src, dst); | |
2582 } | |
2583 | |
2584 void Assembler::subl(Register dst, Address src) { | |
2585 InstructionMark im(this); | |
2586 prefix(src, dst); | |
2587 emit_byte(0x2B); | |
2588 emit_operand(dst, src); | |
2589 } | |
2590 | |
2591 void Assembler::subl(Register dst, Register src) { | |
2592 (void) prefix_and_encode(dst->encoding(), src->encoding()); | |
2593 emit_arith(0x2B, 0xC0, dst, src); | |
2594 } | |
2595 | |
2596 void Assembler::subsd(XMMRegister dst, XMMRegister src) { | |
2597 NOT_LP64(assert(VM_Version::supports_sse2(), "")); | |
2598 emit_byte(0xF2); | |
2599 int encode = prefix_and_encode(dst->encoding(), src->encoding()); | |
2600 emit_byte(0x0F); | |
2601 emit_byte(0x5C); | |
2602 emit_byte(0xC0 | encode); | |
2603 } | |
2604 | |
2605 void Assembler::subsd(XMMRegister dst, Address src) { | |
2606 NOT_LP64(assert(VM_Version::supports_sse2(), "")); | |
2607 InstructionMark im(this); | |
2608 emit_byte(0xF2); | |
2609 prefix(src, dst); | |
2610 emit_byte(0x0F); | |
2611 emit_byte(0x5C); | |
2612 emit_operand(dst, src); | |
2613 } | |
2614 | |
2615 void Assembler::subss(XMMRegister dst, XMMRegister src) { | |
2616 NOT_LP64(assert(VM_Version::supports_sse(), "")); | |
0 | 2617 emit_byte(0xF3); |
304 | 2618 int encode = prefix_and_encode(dst->encoding(), src->encoding()); |
2619 emit_byte(0x0F); | |
2620 emit_byte(0x5C); | |
2621 emit_byte(0xC0 | encode); | |
2622 } | |
2623 | |
2624 void Assembler::subss(XMMRegister dst, Address src) { | |
2625 NOT_LP64(assert(VM_Version::supports_sse(), "")); | |
2626 InstructionMark im(this); | |
2627 emit_byte(0xF3); | |
2628 prefix(src, dst); | |
2629 emit_byte(0x0F); | |
2630 emit_byte(0x5C); | |
2631 emit_operand(dst, src); | |
2632 } | |
2633 | |
2634 void Assembler::testb(Register dst, int imm8) { | |
2635 NOT_LP64(assert(dst->has_byte_register(), "must have byte register")); | |
2636 (void) prefix_and_encode(dst->encoding(), true); | |
2637 emit_arith_b(0xF6, 0xC0, dst, imm8); | |
2638 } | |
2639 | |
2640 void Assembler::testl(Register dst, int32_t imm32) { | |
2641 // not using emit_arith because test | |
2642 // doesn't support sign-extension of | |
2643 // 8bit operands | |
2644 int encode = dst->encoding(); | |
2645 if (encode == 0) { | |
2646 emit_byte(0xA9); | |
2647 } else { | |
2648 encode = prefix_and_encode(encode); | |
2649 emit_byte(0xF7); | |
2650 emit_byte(0xC0 | encode); | |
2651 } | |
2652 emit_long(imm32); | |
2653 } | |
2654 | |
2655 void Assembler::testl(Register dst, Register src) { | |
2656 (void) prefix_and_encode(dst->encoding(), src->encoding()); | |
2657 emit_arith(0x85, 0xC0, dst, src); | |
2658 } | |
2659 | |
2660 void Assembler::testl(Register dst, Address src) { | |
2661 InstructionMark im(this); | |
2662 prefix(src, dst); | |
2663 emit_byte(0x85); | |
2664 emit_operand(dst, src); | |
2665 } | |
2666 | |
2667 void Assembler::ucomisd(XMMRegister dst, Address src) { | |
2668 NOT_LP64(assert(VM_Version::supports_sse2(), "")); | |
2669 emit_byte(0x66); | |
2670 ucomiss(dst, src); | |
2671 } | |
2672 | |
2673 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) { | |
2674 NOT_LP64(assert(VM_Version::supports_sse2(), "")); | |
2675 emit_byte(0x66); | |
2676 ucomiss(dst, src); | |
2677 } | |
2678 | |
2679 void Assembler::ucomiss(XMMRegister dst, Address src) { | |
2680 NOT_LP64(assert(VM_Version::supports_sse(), "")); | |
2681 | |
2682 InstructionMark im(this); | |
2683 prefix(src, dst); | |
2684 emit_byte(0x0F); | |
2685 emit_byte(0x2E); | |
2686 emit_operand(dst, src); | |
2687 } | |
2688 | |
2689 void Assembler::ucomiss(XMMRegister dst, XMMRegister src) { | |
2690 NOT_LP64(assert(VM_Version::supports_sse(), "")); | |
2691 int encode = prefix_and_encode(dst->encoding(), src->encoding()); | |
2692 emit_byte(0x0F); | |
2693 emit_byte(0x2E); | |
2694 emit_byte(0xC0 | encode); | |
2695 } | |
2696 | |
2697 | |
2698 void Assembler::xaddl(Address dst, Register src) { | |
2699 InstructionMark im(this); | |
2700 prefix(dst, src); | |
0 | 2701 emit_byte(0x0F); |
304 | 2702 emit_byte(0xC1); |
2703 emit_operand(src, dst); | |
2704 } | |
2705 | |
2706 void Assembler::xchgl(Register dst, Address src) { // xchg | |
2707 InstructionMark im(this); | |
2708 prefix(src, dst); | |
2709 emit_byte(0x87); | |
2710 emit_operand(dst, src); | |
2711 } | |
2712 | |
2713 void Assembler::xchgl(Register dst, Register src) { | |
2714 int encode = prefix_and_encode(dst->encoding(), src->encoding()); | |
2715 emit_byte(0x87); | |
2716 emit_byte(0xc0 | encode); | |
2717 } | |
2718 | |
2719 void Assembler::xorl(Register dst, int32_t imm32) { | |
2720 prefix(dst); | |
2721 emit_arith(0x81, 0xF0, dst, imm32); | |
2722 } | |
2723 | |
2724 void Assembler::xorl(Register dst, Address src) { | |
2725 InstructionMark im(this); | |
2726 prefix(src, dst); | |
2727 emit_byte(0x33); | |
2728 emit_operand(dst, src); | |
2729 } | |
2730 | |
2731 void Assembler::xorl(Register dst, Register src) { | |
2732 (void) prefix_and_encode(dst->encoding(), src->encoding()); | |
2733 emit_arith(0x33, 0xC0, dst, src); | |
2734 } | |
2735 | |
2736 void Assembler::xorpd(XMMRegister dst, XMMRegister src) { | |
2737 NOT_LP64(assert(VM_Version::supports_sse2(), "")); | |
2738 emit_byte(0x66); | |
2739 xorps(dst, src); | |
2740 } | |
2741 | |
2742 void Assembler::xorpd(XMMRegister dst, Address src) { | |
2743 NOT_LP64(assert(VM_Version::supports_sse2(), "")); | |
2744 InstructionMark im(this); | |
2745 emit_byte(0x66); | |
2746 prefix(src, dst); | |
2747 emit_byte(0x0F); | |
2748 emit_byte(0x57); | |
2749 emit_operand(dst, src); | |
2750 } | |
2751 | |
2752 | |
2753 void Assembler::xorps(XMMRegister dst, XMMRegister src) { | |
2754 NOT_LP64(assert(VM_Version::supports_sse(), "")); | |
2755 int encode = prefix_and_encode(dst->encoding(), src->encoding()); | |
2756 emit_byte(0x0F); | |
2757 emit_byte(0x57); | |
2758 emit_byte(0xC0 | encode); | |
2759 } | |
2760 | |
2761 void Assembler::xorps(XMMRegister dst, Address src) { | |
2762 NOT_LP64(assert(VM_Version::supports_sse(), "")); | |
2763 InstructionMark im(this); | |
2764 prefix(src, dst); | |
2765 emit_byte(0x0F); | |
2766 emit_byte(0x57); | |
2767 emit_operand(dst, src); | |
2768 } | |
2769 | |
2770 #ifndef _LP64 | |
2771 // 32bit only pieces of the assembler | |
2772 | |
2773 void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) { | |
2774 // NO PREFIX AS NEVER 64BIT | |
2775 InstructionMark im(this); | |
2776 emit_byte(0x81); | |
2777 emit_byte(0xF8 | src1->encoding()); | |
2778 emit_data(imm32, rspec, 0); | |
2779 } | |
2780 | |
2781 void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) { | |
2782 // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs | |
2783 InstructionMark im(this); | |
2784 emit_byte(0x81); | |
2785 emit_operand(rdi, src1); | |
2786 emit_data(imm32, rspec, 0); | |
2787 } | |
2788 | |
2789 // The 64-bit (32bit platform) cmpxchg compares the value at adr with the contents of rdx:rax, | |
2790 // and stores rcx:rbx into adr if so; otherwise, the value at adr is loaded | |
2791 // into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise. | |
2792 void Assembler::cmpxchg8(Address adr) { | |
2793 InstructionMark im(this); | |
2794 emit_byte(0x0F); | |
2795 emit_byte(0xc7); | |
2796 emit_operand(rcx, adr); | |
2797 } | |
2798 | |
2799 void Assembler::decl(Register dst) { | |
2800 // Don't use it directly. Use MacroAssembler::decrementl() instead. | |
2801 emit_byte(0x48 | dst->encoding()); | |
2802 } | |
2803 | |
2804 #endif // _LP64 | |
2805 | |
// 64bit typically doesn't use the x87 but needs to for the trig funcs

// fabs: ST(0) <- |ST(0)|.  D9 E1.
void Assembler::fabs() {
  emit_byte(0xD9);
  emit_byte(0xE1);
}

// fadd st, st(i).  D8 C0+i.
void Assembler::fadd(int i) {
  emit_farith(0xD8, 0xC0, i);
}

// fadd m64real.  DC /0 (rax supplies the /0 extension).
void Assembler::fadd_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rax, src);
}

// fadd m32real.  D8 /0.
void Assembler::fadd_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rax, src);
}

// fadd st(i), st ("fadd to accumulator").  DC C0+i.
void Assembler::fadda(int i) {
  emit_farith(0xDC, 0xC0, i);
}

// faddp st(i), st and pop.  DE C0+i.
void Assembler::faddp(int i) {
  emit_farith(0xDE, 0xC0, i);
}

// fchs: ST(0) <- -ST(0).  D9 E0.
void Assembler::fchs() {
  emit_byte(0xD9);
  emit_byte(0xE0);
}

// fcom st(i): compare ST(0) with ST(i), set FPU flags.  D8 D0+i.
void Assembler::fcom(int i) {
  emit_farith(0xD8, 0xD0, i);
}

// fcomp st(i): compare and pop.  D8 D8+i.
void Assembler::fcomp(int i) {
  emit_farith(0xD8, 0xD8, i);
}

// fcomp m64real.  DC /3 (rbx supplies the /3 extension).
void Assembler::fcomp_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rbx, src);
}

// fcomp m32real.  D8 /3.
void Assembler::fcomp_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rbx, src);
}

// fcompp: compare ST(0) with ST(1) and pop both.  DE D9.
void Assembler::fcompp() {
  emit_byte(0xDE);
  emit_byte(0xD9);
}

// fcos: ST(0) <- cos(ST(0)).  D9 FF.
void Assembler::fcos() {
  emit_byte(0xD9);
  emit_byte(0xFF);
}
2871 | |
// fdecstp: decrement the FPU register-stack top pointer.  D9 F6.
void Assembler::fdecstp() {
  emit_byte(0xD9);
  emit_byte(0xF6);
}

// fdiv st, st(i).  D8 F0+i.
void Assembler::fdiv(int i) {
  emit_farith(0xD8, 0xF0, i);
}

// fdiv m64real.  DC /6 (rsi supplies the /6 extension).
void Assembler::fdiv_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rsi, src);
}

// fdiv m32real.  D8 /6.
void Assembler::fdiv_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rsi, src);
}

// fdiv st(i), st ("to accumulator").  DC F8+i.
void Assembler::fdiva(int i) {
  emit_farith(0xDC, 0xF8, i);
}

// Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
// is erroneous for some of the floating-point instructions below.

void Assembler::fdivp(int i) {
  emit_farith(0xDE, 0xF8, i);  // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
}

// fdivr st, st(i): reversed divide.  D8 F8+i.
void Assembler::fdivr(int i) {
  emit_farith(0xD8, 0xF8, i);
}

// fdivr m64real.  DC /7 (rdi supplies the /7 extension).
void Assembler::fdivr_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rdi, src);
}

// fdivr m32real.  D8 /7.
void Assembler::fdivr_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rdi, src);
}

// fdivr st(i), st ("to accumulator").  DC F0+i.
void Assembler::fdivra(int i) {
  emit_farith(0xDC, 0xF0, i);
}

void Assembler::fdivrp(int i) {
  emit_farith(0xDE, 0xF0, i);  // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
}

// ffree st(i): mark register as empty.  DD C0+i.
void Assembler::ffree(int i) {
  emit_farith(0xDD, 0xC0, i);
}

// fild m64int: load 64-bit integer.  DF /5 (rbp supplies the /5 extension).
void Assembler::fild_d(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDF);
  emit_operand32(rbp, adr);
}

// fild m32int: load 32-bit integer.  DB /0.
void Assembler::fild_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDB);
  emit_operand32(rax, adr);
}

// fincstp: increment the FPU register-stack top pointer.  D9 F7.
void Assembler::fincstp() {
  emit_byte(0xD9);
  emit_byte(0xF7);
}

// finit: wait-form FPU init (9B is the FWAIT prefix, then FNINIT).
void Assembler::finit() {
  emit_byte(0x9B);
  emit_byte(0xDB);
  emit_byte(0xE3);
}

// fist m32int: store ST(0) as 32-bit integer, no pop.  DB /2 (rdx = /2).
void Assembler::fist_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDB);
  emit_operand32(rdx, adr);
}

// fistp m64int: store as 64-bit integer and pop.  DF /7 (rdi = /7).
void Assembler::fistp_d(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDF);
  emit_operand32(rdi, adr);
}

// fistp m32int: store as 32-bit integer and pop.  DB /3 (rbx = /3).
void Assembler::fistp_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDB);
  emit_operand32(rbx, adr);
}
0 | 2972 |
// fld1: push +1.0.  D9 E8.
void Assembler::fld1() {
  emit_byte(0xD9);
  emit_byte(0xE8);
}

// fld m64real.  DD /0 (rax supplies the /0 extension).
void Assembler::fld_d(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDD);
  emit_operand32(rax, adr);
}

// fld m32real.  D9 /0.
void Assembler::fld_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xD9);
  emit_operand32(rax, adr);
}


// fld st(index): push a copy of ST(index).  D9 C0+i.
void Assembler::fld_s(int index) {
  emit_farith(0xD9, 0xC0, index);
}

// fld m80real: load 80-bit extended precision.  DB /5 (rbp = /5).
void Assembler::fld_x(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDB);
  emit_operand32(rbp, adr);
}

// fldcw m16: load the FPU control word.  D9 /5.
void Assembler::fldcw(Address src) {
  InstructionMark im(this);
  emit_byte(0xd9);
  emit_operand32(rbp, src);
}

// fldenv m: load the FPU environment.  D9 /4 (rsp = /4).
void Assembler::fldenv(Address src) {
  InstructionMark im(this);
  emit_byte(0xD9);
  emit_operand32(rsp, src);
}

// fldlg2: push log10(2).  D9 EC.
void Assembler::fldlg2() {
  emit_byte(0xD9);
  emit_byte(0xEC);
}

// fldln2: push ln(2).  D9 ED.
void Assembler::fldln2() {
  emit_byte(0xD9);
  emit_byte(0xED);
}

// fldz: push +0.0.  D9 EE.
void Assembler::fldz() {
  emit_byte(0xD9);
  emit_byte(0xEE);
}

// Pseudo-instruction: natural log.  ln(x) = ln(2) * log2(x),
// computed via fyl2x with ln(2) in ST(1) and x in ST(0).
void Assembler::flog() {
  fldln2();
  fxch();
  fyl2x();
}

// Pseudo-instruction: base-10 log.  log10(x) = log10(2) * log2(x).
void Assembler::flog10() {
  fldlg2();
  fxch();
  fyl2x();
}
3039 | |
// fmul st, st(i).  D8 C8+i.
void Assembler::fmul(int i) {
  emit_farith(0xD8, 0xC8, i);
}

// fmul m64real.  DC /1 (rcx supplies the /1 extension).
void Assembler::fmul_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rcx, src);
}

// fmul m32real.  D8 /1.
void Assembler::fmul_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rcx, src);
}

// fmul st(i), st ("to accumulator").  DC C8+i.
void Assembler::fmula(int i) {
  emit_farith(0xDC, 0xC8, i);
}

// fmulp st(i), st and pop.  DE C8+i.
void Assembler::fmulp(int i) {
  emit_farith(0xDE, 0xC8, i);
}

// fnsave m: save FPU state without a preceding wait.  DD /6 (rsi = /6).
void Assembler::fnsave(Address dst) {
  InstructionMark im(this);
  emit_byte(0xDD);
  emit_operand32(rsi, dst);
}

// Store the FPU control word.  NOTE(review): despite the "fnstcw" name this
// emits the wait form (9B D9 /7, i.e. FSTCW); the no-wait form would omit 9B.
void Assembler::fnstcw(Address src) {
  InstructionMark im(this);
  emit_byte(0x9B);
  emit_byte(0xD9);
  emit_operand32(rdi, src);
}
3076 | |
3077 void Assembler::fnstsw_ax() { | |
3078 emit_byte(0xdF); | |
3079 emit_byte(0xE0); | |
3080 } | |
3081 | |
// fprem: partial remainder (truncating, 8087-compatible).  D9 F8.
void Assembler::fprem() {
  emit_byte(0xD9);
  emit_byte(0xF8);
}

// fprem1: IEEE partial remainder (round-to-nearest).  D9 F5.
void Assembler::fprem1() {
  emit_byte(0xD9);
  emit_byte(0xF5);
}

// frstor m: restore FPU state saved by fnsave.  DD /4 (rsp = /4).
void Assembler::frstor(Address src) {
  InstructionMark im(this);
  emit_byte(0xDD);
  emit_operand32(rsp, src);
}

// fsin: ST(0) <- sin(ST(0)).  D9 FE.
void Assembler::fsin() {
  emit_byte(0xD9);
  emit_byte(0xFE);
}
3102 | |
// fsqrt: ST(0) <- sqrt(ST(0)).  D9 FA.
void Assembler::fsqrt() {
  emit_byte(0xD9);
  emit_byte(0xFA);
}

// fst m64real: store ST(0), no pop.  DD /2 (rdx supplies the /2 extension).
void Assembler::fst_d(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDD);
  emit_operand32(rdx, adr);
}

// fst m32real.  D9 /2.
void Assembler::fst_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xD9);
  emit_operand32(rdx, adr);
}

// fstp m64real: store and pop.  DD /3 (rbx = /3).
void Assembler::fstp_d(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDD);
  emit_operand32(rbx, adr);
}

// fstp st(index): copy ST(0) to ST(index) and pop.  DD D8+i.
void Assembler::fstp_d(int index) {
  emit_farith(0xDD, 0xD8, index);
}

// fstp m32real: store and pop.  D9 /3.
void Assembler::fstp_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xD9);
  emit_operand32(rbx, adr);
}

// fstp m80real: store 80-bit extended and pop.  DB /7 (rdi = /7).
void Assembler::fstp_x(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDB);
  emit_operand32(rdi, adr);
}

// fsub st, st(i).  D8 E0+i.
void Assembler::fsub(int i) {
  emit_farith(0xD8, 0xE0, i);
}

// fsub m64real.  DC /4 (rsp = /4).
void Assembler::fsub_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rsp, src);
}

// fsub m32real.  D8 /4.
void Assembler::fsub_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rsp, src);
}

// fsub st(i), st ("to accumulator").  DC E8+i.
void Assembler::fsuba(int i) {
  emit_farith(0xDC, 0xE8, i);
}

void Assembler::fsubp(int i) {
  emit_farith(0xDE, 0xE8, i);  // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
}

// fsubr st, st(i): reversed subtract.  D8 E8+i.
void Assembler::fsubr(int i) {
  emit_farith(0xD8, 0xE8, i);
}

// fsubr m64real.  DC /5 (rbp = /5).
void Assembler::fsubr_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rbp, src);
}

// fsubr m32real.  D8 /5.
void Assembler::fsubr_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rbp, src);
}

// fsubr st(i), st ("to accumulator").  DC E0+i.
void Assembler::fsubra(int i) {
  emit_farith(0xDC, 0xE0, i);
}

void Assembler::fsubrp(int i) {
  emit_farith(0xDE, 0xE0, i);  // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
}

// Pseudo-instruction: tan.  FPTAN (D9 F2) leaves tan in ST(1) and pushes 1.0,
// so FSTP ST(0) (DD D8) pops the 1.0 leaving the result on top.
void Assembler::ftan() {
  emit_byte(0xD9);
  emit_byte(0xF2);
  emit_byte(0xDD);
  emit_byte(0xD8);
}

// ftst: compare ST(0) against 0.0, set FPU condition codes.  D9 E4.
void Assembler::ftst() {
  emit_byte(0xD9);
  emit_byte(0xE4);
}
0 | 3201 |
// fucomi st, st(i): unordered compare setting EFLAGS directly.  DB E8+i.
void Assembler::fucomi(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDB, 0xE8, i);
}

// fucomip st, st(i): as fucomi, then pop.  DF E8+i.
void Assembler::fucomip(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDF, 0xE8, i);
}

// fwait: wait for pending unmasked FPU exceptions.  9B.
void Assembler::fwait() {
  emit_byte(0x9B);
}

// fxch st(i): exchange ST(0) and ST(i).  D9 C8+i.
void Assembler::fxch(int i) {
  emit_farith(0xD9, 0xC8, i);
}

// fyl2x: ST(1) <- ST(1) * log2(ST(0)), then pop.  D9 F1.
void Assembler::fyl2x() {
  emit_byte(0xD9);
  emit_byte(0xF1);
}
3226 | |
3227 | |
3228 #ifndef _LP64 | |
3229 | |
// One-byte inc reg form (40+rd); only legal in 32-bit mode (REX opcodes in 64-bit).
void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  emit_byte(0x40 | dst->encoding());
}

// Pointer-sized lea is 32-bit lea on this platform.
void Assembler::lea(Register dst, Address src) {
  leal(dst, src);
}

// mov m32, imm32 with relocation on the immediate.  C7 /0 id.
void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_byte(0xC7);
  emit_operand(rax, dst);
  emit_data((int)imm32, rspec, 0);
}
3245 | |
// mov reg, imm32 with relocation on the immediate.  B8+rd id.
// prefix_and_encode is a no-op on 32-bit but keeps the code shape shared
// with the 64-bit assembler.
void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_data((int)imm32, rspec, 0);
}
304 | 3252 |
// popad: pop all eight 32-bit GPRs.  61.  (32-bit mode only.)
void Assembler::popa() { // 32bit
  emit_byte(0x61);
}

// push imm32 with relocation on the immediate.  68 id.
void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_byte(0x68);
  emit_data(imm32, rspec, 0);
}

// pushad: push all eight 32-bit GPRs.  60.  (32-bit mode only.)
void Assembler::pusha() { // 32bit
  emit_byte(0x60);
}

// setne dst_byte: dst <- (ZF == 0).  0F 95 /r with mod=11.
void Assembler::set_byte_if_not_zero(Register dst) {
  emit_byte(0x0F);
  emit_byte(0x95);
  emit_byte(0xE0 | dst->encoding());
}

// shld dst, src, cl: double-precision shift left.  0F A5 /r.
void Assembler::shldl(Register dst, Register src) {
  emit_byte(0x0F);
  emit_byte(0xA5);
  emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
}

// shrd dst, src, cl: double-precision shift right.  0F AD /r.
void Assembler::shrdl(Register dst, Register src) {
  emit_byte(0x0F);
  emit_byte(0xAD);
  emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
}
3284 | |
3285 #else // LP64 | |
3286 | |
3287 // 64bit only pieces of the assembler | |
3288 // This should only be used by 64bit instructions that can use rip-relative | |
3289 // it cannot be used by instructions that want an immediate value. | |
3290 | |
// Decide whether the target of 'adr' can be addressed with a rip-relative
// 32-bit displacement from anywhere code might end up in the code cache.
// Returns false when the caller must materialize a full 64-bit literal.
bool Assembler::reachable(AddressLiteral adr) {
  int64_t disp;
  // None will force a 64bit literal to the code stream. Likely a placeholder
  // for something that will be patched later and we need to certain it will
  // always be reachable.
  if (adr.reloc() == relocInfo::none) {
    return false;
  }
  if (adr.reloc() == relocInfo::internal_word_type) {
    // This should be rip relative and easily reachable.
    return true;
  }
  if (adr.reloc() == relocInfo::virtual_call_type ||
      adr.reloc() == relocInfo::opt_virtual_call_type ||
      adr.reloc() == relocInfo::static_call_type ||
      adr.reloc() == relocInfo::static_stub_type ) {
    // This should be rip relative within the code cache and easily
    // reachable until we get huge code caches. (At which point
    // ic code is going to have issues).
    return true;
  }
  if (adr.reloc() != relocInfo::external_word_type &&
      adr.reloc() != relocInfo::poll_return_type &&  // these are really external_word but need special
      adr.reloc() != relocInfo::poll_type &&         // relocs to identify them
      adr.reloc() != relocInfo::runtime_call_type ) {
    return false;
  }

  // Stress the correction code
  if (ForceUnreachable) {
    // Must be runtimecall reloc, see if it is in the codecache
    // Flipping stuff in the codecache to be unreachable causes issues
    // with things like inline caches where the additional instructions
    // are not handled.
    if (CodeCache::find_blob(adr._target) == NULL) {
      return false;
    }
  }
  // For external_word_type/runtime_call_type if it is reachable from where we
  // are now (possibly a temp buffer) and where we might end up
  // anywhere in the codeCache then we are always reachable.
  // This would have to change if we ever save/restore shared code
  // to be more pessimistic.
  // Check from both extremes of the code cache: if the target is within
  // +/-2GB of both bounds it is within range of any instruction in it.
  disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;
  disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;

  disp = (int64_t)adr._target - ((int64_t)_code_pos + sizeof(int));

  // Because rip relative is a disp + address_of_next_instruction and we
  // don't know the value of address_of_next_instruction we apply a fudge factor
  // to make sure we will be ok no matter the size of the instruction we get placed into.
  // We don't have to fudge the checks above here because they are already worst case.

  // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp , 4-byte literal
  // + 4 because better safe than sorry.
  const int fudge = 12 + 4;
  if (disp < 0) {
    disp -= fudge;
  } else {
    disp += fudge;
  }
  return is_simm32(disp);
}
3357 | |
// Emit a 64-bit datum with a simple relocation built from 'rtype'.
// relocInfo::none skips relocation entirely.
void Assembler::emit_data64(jlong data,
                            relocInfo::relocType rtype,
                            int format) {
  if (rtype == relocInfo::none) {
    emit_long64(data);
  } else {
    emit_data64(data, Relocation::spec_simple(rtype), format);
  }
}

// Emit a 64-bit datum carrying full relocation info.  Must be called
// inside an InstructionMark so the reloc can anchor on the instruction.
void Assembler::emit_data64(jlong data,
                            RelocationHolder const& rspec,
                            int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(imm_operand == format, "must be immediate");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  // Do not use AbstractAssembler::relocate, which is not intended for
  // embedded words. Instead, relocate to the enclosing instruction.
  code_section()->relocate(inst_mark(), rspec, format);
#ifdef ASSERT
  check_relocation(rspec, format);
#endif
  emit_long64(data);
}
3382 | |
3383 int Assembler::prefix_and_encode(int reg_enc, bool byteinst) { | |
3384 if (reg_enc >= 8) { | |
3385 prefix(REX_B); | |
3386 reg_enc -= 8; | |
3387 } else if (byteinst && reg_enc >= 4) { | |
3388 prefix(REX); | |
3389 } | |
3390 return reg_enc; | |
3391 } | |
3392 | |
3393 int Assembler::prefixq_and_encode(int reg_enc) { | |
3394 if (reg_enc < 8) { | |
3395 prefix(REX_W); | |
3396 } else { | |
3397 prefix(REX_WB); | |
3398 reg_enc -= 8; | |
3399 } | |
3400 return reg_enc; | |
3401 } | |
3402 | |
// Emit the REX prefix needed for a reg-reg form (dst in ModRM.reg, src in
// ModRM.rm) and return the combined reg/rm bits for the ModRM byte.
int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) {
  if (dst_enc < 8) {
    if (src_enc >= 8) {
      prefix(REX_B);
      src_enc -= 8;
    } else if (byteinst && src_enc >= 4) {
      // spl/bpl/sil/dil need a bare REX in byte instructions.
      prefix(REX);
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_R);
    } else {
      prefix(REX_RB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}

// Same as above but with the mandatory REX.W bit for 64-bit operand size.
int Assembler::prefixq_and_encode(int dst_enc, int src_enc) {
  if (dst_enc < 8) {
    if (src_enc < 8) {
      prefix(REX_W);
    } else {
      prefix(REX_WB);
      src_enc -= 8;
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_WR);
    } else {
      prefix(REX_WRB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}
3442 | |
// REX.B for a single register operand encoded in the opcode or ModRM.rm.
void Assembler::prefix(Register reg) {
  if (reg->encoding() >= 8) {
    prefix(REX_B);
  }
}

// REX bits needed by a memory operand alone: B for an extended base,
// X for an extended index.  Emits nothing if neither is extended.
void Assembler::prefix(Address adr) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      prefix(REX_XB);
    } else {
      prefix(REX_B);
    }
  } else {
    if (adr.index_needs_rex()) {
      prefix(REX_X);
    }
  }
}

// 64-bit operand size: always emits a REX with W set, plus B/X as needed.
void Assembler::prefixq(Address adr) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      prefix(REX_WXB);
    } else {
      prefix(REX_WB);
    }
  } else {
    if (adr.index_needs_rex()) {
      prefix(REX_WX);
    } else {
      prefix(REX_W);
    }
  }
}
3478 | |
3479 | |
3480 void Assembler::prefix(Address adr, Register reg, bool byteinst) { | |
3481 if (reg->encoding() < 8) { | |
3482 if (adr.base_needs_rex()) { | |
3483 if (adr.index_needs_rex()) { | |
3484 prefix(REX_XB); | |
3485 } else { | |
3486 prefix(REX_B); | |
3487 } | |
3488 } else { | |
3489 if (adr.index_needs_rex()) { | |
3490 prefix(REX_X); | |
3491 } else if (reg->encoding() >= 4 ) { | |
3492 prefix(REX); | |
3493 } | |
3494 } | |
3495 } else { | |
3496 if (adr.base_needs_rex()) { | |
3497 if (adr.index_needs_rex()) { | |
3498 prefix(REX_RXB); | |
3499 } else { | |
3500 prefix(REX_RB); | |
3501 } | |
3502 } else { | |
3503 if (adr.index_needs_rex()) { | |
3504 prefix(REX_RX); | |
3505 } else { | |
3506 prefix(REX_R); | |
3507 } | |
3508 } | |
3509 } | |
3510 } | |
3511 | |
// 64-bit form of prefix(Address, Register): always sets REX.W, plus
// R/X/B bits for extended reg/index/base as required.
void Assembler::prefixq(Address adr, Register src) {
  if (src->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WXB);
      } else {
        prefix(REX_WB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WX);
      } else {
        prefix(REX_W);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WRXB);
      } else {
        prefix(REX_WRB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WRX);
      } else {
        prefix(REX_WR);
      }
    }
  }
}
3543 | |
// REX prefix for a memory operand plus an XMM register in ModRM.reg.
// XMM registers never need the byte-instruction special case.
void Assembler::prefix(Address adr, XMMRegister reg) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}
3573 | |
3574 void Assembler::adcq(Register dst, int32_t imm32) { | |
3575 (void) prefixq_and_encode(dst->encoding()); | |
3576 emit_arith(0x81, 0xD0, dst, imm32); | |
3577 } | |
3578 | |
3579 void Assembler::adcq(Register dst, Address src) { | |
3580 InstructionMark im(this); | |
3581 prefixq(src, dst); | |
3582 emit_byte(0x13); | |
3583 emit_operand(dst, src); | |
3584 } | |
3585 | |
3586 void Assembler::adcq(Register dst, Register src) { | |
3587 (int) prefixq_and_encode(dst->encoding(), src->encoding()); | |
3588 emit_arith(0x13, 0xC0, dst, src); | |
3589 } | |
3590 | |
// add m64, imm32 (sign-extended); emit_arith_operand picks the 8-bit
// immediate form when it fits.  REX.W 81/83 /0.
void Assembler::addq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rax, dst,imm32);
}

// add m64, reg64.  REX.W 01 /r.
void Assembler::addq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_byte(0x01);
  emit_operand(src, dst);
}

// add reg64, imm32 (sign-extended).  REX.W 81 /0 id.
void Assembler::addq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC0, dst, imm32);
}

// add reg64, m64.  REX.W 03 /r.
void Assembler::addq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x03);
  emit_operand(dst, src);
}

// add reg64, reg64.  REX.W 03 /r.
void Assembler::addq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}
3620 | |
3621 void Assembler::andq(Register dst, int32_t imm32) { | |
3622 (void) prefixq_and_encode(dst->encoding()); | |
3623 emit_arith(0x81, 0xE0, dst, imm32); | |
3624 } | |
3625 | |
3626 void Assembler::andq(Register dst, Address src) { | |
3627 InstructionMark im(this); | |
3628 prefixq(src, dst); | |
3629 emit_byte(0x23); | |
3630 emit_operand(dst, src); | |
3631 } | |
3632 | |
3633 void Assembler::andq(Register dst, Register src) { | |
3634 (int) prefixq_and_encode(dst->encoding(), src->encoding()); | |
3635 emit_arith(0x23, 0xC0, dst, src); | |
3636 } | |
3637 | |
// bswap reg64: byte-swap all 8 bytes.  REX.W 0F C8+rd.
void Assembler::bswapq(Register reg) {
  int encode = prefixq_and_encode(reg->encoding());
  emit_byte(0x0F);
  emit_byte(0xC8 | encode);
}

// cqo: sign-extend rax into rdx:rax.  REX.W 99.
void Assembler::cdqq() {
  prefix(REX_W);
  emit_byte(0x99);
}

// clflush m: flush the cache line containing adr.  0F AE /7 (rdi = /7).
void Assembler::clflush(Address adr) {
  prefix(adr);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(rdi, adr);
}
3655 | |
// cmovcc reg64, reg64: conditional move.  REX.W 0F 40+cc /r.
void Assembler::cmovq(Condition cc, Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x40 | cc);
  emit_byte(0xC0 | encode);
}

// cmovcc reg64, m64.  REX.W 0F 40+cc /r.
void Assembler::cmovq(Condition cc, Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x0F);
  emit_byte(0x40 | cc);
  emit_operand(dst, src);
}
3670 | |
// cmp m64, imm32 (sign-extended).  REX.W 81 /7 id; the trailing '4' tells
// emit_operand a 4-byte immediate follows (for rip-relative fixups).
void Assembler::cmpq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0x81);
  emit_operand(rdi, dst, 4);
  emit_long(imm32);
}

// cmp reg64, imm32 (sign-extended).  REX.W 81 /7 id.
void Assembler::cmpq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xF8, dst, imm32);
}

// NOTE(review): this emits 0x3B (cmp reg, r/m) with src in the reg field,
// i.e. it compares src against [dst] rather than the 0x39 mem,reg form —
// callers appear to rely on these flag semantics; confirm before changing.
void Assembler::cmpq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_byte(0x3B);
  emit_operand(src, dst);
}

// cmp reg64, reg64.  REX.W 3B /r.
void Assembler::cmpq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}

// cmp reg64, m64.  REX.W 3B /r.
void Assembler::cmpq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x3B);
  emit_operand(dst, src);
}
3702 | |
// cmpxchg m64, reg64: compare rax with [adr]; if equal store reg, else load
// [adr] into rax.  REX.W 0F B1 /r.  Callers add LOCK themselves if needed.
void Assembler::cmpxchgq(Register reg, Address adr) {
  InstructionMark im(this);
  prefixq(adr, reg);
  emit_byte(0x0F);
  emit_byte(0xB1);
  emit_operand(reg, adr);
}
3710 | |
// cvtsi2sd xmm, reg64: signed 64-bit int to double.  F2 REX.W 0F 2A /r.
// The F2/F3 mandatory prefix must precede the REX prefix.
void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}

// cvtsi2ss xmm, reg64: signed 64-bit int to float.  F3 REX.W 0F 2A /r.
void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}

// cvttsd2si reg64, xmm: double to 64-bit int, truncating.  F2 REX.W 0F 2C /r.
void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}

// cvttss2si reg64, xmm: float to 64-bit int, truncating.  F3 REX.W 0F 2C /r.
void Assembler::cvttss2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}
3746 | |
// dec reg32.  FF /1 with mod=11.
void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xC8 | encode);
}

// dec reg64.  REX.W FF /1 with mod=11.
void Assembler::decq(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xC8 | encode);
}

// dec m64.  REX.W FF /1 (rcx supplies the /1 extension).
void Assembler::decq(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0xFF);
  emit_operand(rcx, dst);
}
3770 | |
// fxrstor m: restore FPU/SSE state saved by fxsave.  0F AE /1.
void Assembler::fxrstor(Address src) {
  prefixq(src);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(as_Register(1), src);  // /1 opcode extension
}

// fxsave m: save FPU/SSE state.  0F AE /0.
void Assembler::fxsave(Address dst) {
  prefixq(dst);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(as_Register(0), dst);  // /0 opcode extension
}
3784 | |
// idiv reg64: signed divide rdx:rax by src; quotient->rax, remainder->rdx.
// REX.W F7 /7.
void Assembler::idivq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_byte(0xF7);
  emit_byte(0xF8 | encode);
}

// imul reg64, reg64.  REX.W 0F AF /r.
void Assembler::imulq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xAF);
  emit_byte(0xC0 | encode);
}

// imul reg64, reg64, imm: three-operand form.  Uses the short 6B /r ib
// encoding when the immediate fits in a signed byte, else 69 /r id.
void Assembler::imulq(Register dst, Register src, int value) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_byte(0x6B);
    emit_byte(0xC0 | encode);
    emit_byte(value);
  } else {
    emit_byte(0x69);
    emit_byte(0xC0 | encode);
    emit_long(value);
  }
}
3810 | |
// inc reg32.  FF /0 with mod=11.
void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xC0 | encode);
}

// inc reg64.  REX.W FF /0 with mod=11.
void Assembler::incq(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xC0 | encode);
}

// inc m64.  REX.W FF /0 (rax supplies the /0 extension).
void Assembler::incq(Address dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0xFF);
  emit_operand(rax, dst);
}
3834 | |
// Pointer-sized lea is 64-bit lea on this platform.
void Assembler::lea(Register dst, Address src) {
  leaq(dst, src);
}

// lea reg64, m.  REX.W 8D /r.
void Assembler::leaq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x8D);
  emit_operand(dst, src);
}

// mov reg64, imm64: full 64-bit immediate, no relocation.  REX.W B8+rd io.
void Assembler::mov64(Register dst, int64_t imm64) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_long64(imm64);
}

// mov reg64, imm64 with relocation info attached to the immediate.
void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_data64(imm64, rspec);
}
3859 | |
// mov reg32, imm32 carrying a narrow (compressed) oop: the 32-bit
// immediate is a compressed oop the GC must be able to find and patch,
// hence the narrow_oop_operand relocation format.  B8+rd id (no REX.W:
// narrow oops are 32-bit).
void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3866 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3867 void Assembler::mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3868 InstructionMark im(this); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3869 prefix(dst); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3870 emit_byte(0xC7); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3871 emit_operand(rax, dst, 4); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3872 emit_data((int)imm32, rspec, narrow_oop_operand); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3873 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3874 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3875 void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3876 InstructionMark im(this); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3877 int encode = prefix_and_encode(src1->encoding()); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3878 emit_byte(0x81); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3879 emit_byte(0xF8 | encode); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3880 emit_data((int)imm32, rspec, narrow_oop_operand); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3881 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3882 |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3883 void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) { |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3884 InstructionMark im(this); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3885 prefix(src1); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3886 emit_byte(0x81); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3887 emit_operand(rax, src1, 4); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3888 emit_data((int)imm32, rspec, narrow_oop_operand); |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3889 } |
660978a2a31a
6791178: Specialize for zero as the compressed oop vm heap base
kvn
parents:
624
diff
changeset
|
3890 |
304 | 3891 void Assembler::movdq(XMMRegister dst, Register src) { |
3892 // table D-1 says MMX/SSE2 | |
3893 NOT_LP64(assert(VM_Version::supports_sse2() || VM_Version::supports_mmx(), "")); | |
0 | 3894 emit_byte(0x66); |
304 | 3895 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); |
0 | 3896 emit_byte(0x0F); |
304 | 3897 emit_byte(0x6E); |
3898 emit_byte(0xC0 | encode); | |
3899 } | |
3900 | |
3901 void Assembler::movdq(Register dst, XMMRegister src) { | |
3902 // table D-1 says MMX/SSE2 | |
3903 NOT_LP64(assert(VM_Version::supports_sse2() || VM_Version::supports_mmx(), "")); | |
0 | 3904 emit_byte(0x66); |
304 | 3905 // swap src/dst to get correct prefix |
3906 int encode = prefixq_and_encode(src->encoding(), dst->encoding()); | |
0 | 3907 emit_byte(0x0F); |
3908 emit_byte(0x7E); | |
304 | 3909 emit_byte(0xC0 | encode); |
3910 } | |
3911 | |
3912 void Assembler::movq(Register dst, Register src) { | |
3913 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); | |
3914 emit_byte(0x8B); | |
3915 emit_byte(0xC0 | encode); | |
3916 } | |
3917 | |
3918 void Assembler::movq(Register dst, Address src) { | |
3919 InstructionMark im(this); | |
3920 prefixq(src, dst); | |
3921 emit_byte(0x8B); | |
3922 emit_operand(dst, src); | |
3923 } | |
3924 | |
3925 void Assembler::movq(Address dst, Register src) { | |
3926 InstructionMark im(this); | |
3927 prefixq(dst, src); | |
3928 emit_byte(0x89); | |
3929 emit_operand(src, dst); | |
3930 } | |
3931 | |
624 | 3932 void Assembler::movsbq(Register dst, Address src) { |
3933 InstructionMark im(this); | |
3934 prefixq(src, dst); | |
3935 emit_byte(0x0F); | |
3936 emit_byte(0xBE); | |
3937 emit_operand(dst, src); | |
3938 } | |
3939 | |
3940 void Assembler::movsbq(Register dst, Register src) { | |
3941 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); | |
3942 emit_byte(0x0F); | |
3943 emit_byte(0xBE); | |
3944 emit_byte(0xC0 | encode); | |
3945 } | |
3946 | |
304 | 3947 void Assembler::movslq(Register dst, int32_t imm32) { |
3948 // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx) | |
3949 // and movslq(r8, 3); as movl $0x0000000048000000,(%rbx) | |
3950 // as a result we shouldn't use until tested at runtime... | |
3951 ShouldNotReachHere(); | |
3952 InstructionMark im(this); | |
3953 int encode = prefixq_and_encode(dst->encoding()); | |
3954 emit_byte(0xC7 | encode); | |
3955 emit_long(imm32); | |
3956 } | |
3957 | |
3958 void Assembler::movslq(Address dst, int32_t imm32) { | |
3959 assert(is_simm32(imm32), "lost bits"); | |
3960 InstructionMark im(this); | |
3961 prefixq(dst); | |
3962 emit_byte(0xC7); | |
3963 emit_operand(rax, dst, 4); | |
3964 emit_long(imm32); | |
3965 } | |
3966 | |
3967 void Assembler::movslq(Register dst, Address src) { | |
3968 InstructionMark im(this); | |
3969 prefixq(src, dst); | |
3970 emit_byte(0x63); | |
3971 emit_operand(dst, src); | |
3972 } | |
3973 | |
3974 void Assembler::movslq(Register dst, Register src) { | |
3975 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); | |
3976 emit_byte(0x63); | |
3977 emit_byte(0xC0 | encode); | |
3978 } | |
3979 | |
624 | 3980 void Assembler::movswq(Register dst, Address src) { |
3981 InstructionMark im(this); | |
3982 prefixq(src, dst); | |
3983 emit_byte(0x0F); | |
3984 emit_byte(0xBF); | |
3985 emit_operand(dst, src); | |
3986 } | |
3987 | |
3988 void Assembler::movswq(Register dst, Register src) { | |
3989 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); | |
3990 emit_byte(0x0F); | |
3991 emit_byte(0xBF); | |
3992 emit_byte(0xC0 | encode); | |
3993 } | |
3994 | |
3995 void Assembler::movzbq(Register dst, Address src) { | |
3996 InstructionMark im(this); | |
3997 prefixq(src, dst); | |
3998 emit_byte(0x0F); | |
3999 emit_byte(0xB6); | |
4000 emit_operand(dst, src); | |
4001 } | |
4002 | |
4003 void Assembler::movzbq(Register dst, Register src) { | |
4004 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); | |
4005 emit_byte(0x0F); | |
4006 emit_byte(0xB6); | |
4007 emit_byte(0xC0 | encode); | |
4008 } | |
4009 | |
4010 void Assembler::movzwq(Register dst, Address src) { | |
4011 InstructionMark im(this); | |
4012 prefixq(src, dst); | |
4013 emit_byte(0x0F); | |
4014 emit_byte(0xB7); | |
4015 emit_operand(dst, src); | |
4016 } | |
4017 | |
4018 void Assembler::movzwq(Register dst, Register src) { | |
4019 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); | |
4020 emit_byte(0x0F); | |
4021 emit_byte(0xB7); | |
4022 emit_byte(0xC0 | encode); | |
4023 } | |
4024 | |
304 | 4025 void Assembler::negq(Register dst) { |
4026 int encode = prefixq_and_encode(dst->encoding()); | |
4027 emit_byte(0xF7); | |
4028 emit_byte(0xD8 | encode); | |
4029 } | |
4030 | |
4031 void Assembler::notq(Register dst) { | |
4032 int encode = prefixq_and_encode(dst->encoding()); | |
4033 emit_byte(0xF7); | |
4034 emit_byte(0xD0 | encode); | |
4035 } | |
4036 | |
4037 void Assembler::orq(Address dst, int32_t imm32) { | |
4038 InstructionMark im(this); | |
4039 prefixq(dst); | |
4040 emit_byte(0x81); | |
4041 emit_operand(rcx, dst, 4); | |
4042 emit_long(imm32); | |
4043 } | |
4044 | |
4045 void Assembler::orq(Register dst, int32_t imm32) { | |
4046 (void) prefixq_and_encode(dst->encoding()); | |
4047 emit_arith(0x81, 0xC8, dst, imm32); | |
4048 } | |
4049 | |
4050 void Assembler::orq(Register dst, Address src) { | |
4051 InstructionMark im(this); | |
4052 prefixq(src, dst); | |
4053 emit_byte(0x0B); | |
4054 emit_operand(dst, src); | |
4055 } | |
4056 | |
4057 void Assembler::orq(Register dst, Register src) { | |
4058 (void) prefixq_and_encode(dst->encoding(), src->encoding()); | |
4059 emit_arith(0x0B, 0xC0, dst, src); | |
4060 } | |
4061 | |
4062 void Assembler::popa() { // 64bit | |
4063 movq(r15, Address(rsp, 0)); | |
4064 movq(r14, Address(rsp, wordSize)); | |
4065 movq(r13, Address(rsp, 2 * wordSize)); | |
4066 movq(r12, Address(rsp, 3 * wordSize)); | |
4067 movq(r11, Address(rsp, 4 * wordSize)); | |
4068 movq(r10, Address(rsp, 5 * wordSize)); | |
4069 movq(r9, Address(rsp, 6 * wordSize)); | |
4070 movq(r8, Address(rsp, 7 * wordSize)); | |
4071 movq(rdi, Address(rsp, 8 * wordSize)); | |
4072 movq(rsi, Address(rsp, 9 * wordSize)); | |
4073 movq(rbp, Address(rsp, 10 * wordSize)); | |
4074 // skip rsp | |
4075 movq(rbx, Address(rsp, 12 * wordSize)); | |
4076 movq(rdx, Address(rsp, 13 * wordSize)); | |
4077 movq(rcx, Address(rsp, 14 * wordSize)); | |
4078 movq(rax, Address(rsp, 15 * wordSize)); | |
4079 | |
4080 addq(rsp, 16 * wordSize); | |
4081 } | |
4082 | |
4083 void Assembler::popq(Address dst) { | |
4084 InstructionMark im(this); | |
4085 prefixq(dst); | |
4086 emit_byte(0x8F); | |
4087 emit_operand(rax, dst); | |
4088 } | |
4089 | |
4090 void Assembler::pusha() { // 64bit | |
4091 // we have to store original rsp. ABI says that 128 bytes | |
4092 // below rsp are local scratch. | |
4093 movq(Address(rsp, -5 * wordSize), rsp); | |
4094 | |
4095 subq(rsp, 16 * wordSize); | |
4096 | |
4097 movq(Address(rsp, 15 * wordSize), rax); | |
4098 movq(Address(rsp, 14 * wordSize), rcx); | |
4099 movq(Address(rsp, 13 * wordSize), rdx); | |
4100 movq(Address(rsp, 12 * wordSize), rbx); | |
4101 // skip rsp | |
4102 movq(Address(rsp, 10 * wordSize), rbp); | |
4103 movq(Address(rsp, 9 * wordSize), rsi); | |
4104 movq(Address(rsp, 8 * wordSize), rdi); | |
4105 movq(Address(rsp, 7 * wordSize), r8); | |
4106 movq(Address(rsp, 6 * wordSize), r9); | |
4107 movq(Address(rsp, 5 * wordSize), r10); | |
4108 movq(Address(rsp, 4 * wordSize), r11); | |
4109 movq(Address(rsp, 3 * wordSize), r12); | |
4110 movq(Address(rsp, 2 * wordSize), r13); | |
4111 movq(Address(rsp, wordSize), r14); | |
4112 movq(Address(rsp, 0), r15); | |
4113 } | |
4114 | |
4115 void Assembler::pushq(Address src) { | |
4116 InstructionMark im(this); | |
4117 prefixq(src); | |
4118 emit_byte(0xFF); | |
4119 emit_operand(rsi, src); | |
4120 } | |
4121 | |
4122 void Assembler::rclq(Register dst, int imm8) { | |
4123 assert(isShiftCount(imm8 >> 1), "illegal shift count"); | |
4124 int encode = prefixq_and_encode(dst->encoding()); | |
4125 if (imm8 == 1) { | |
4126 emit_byte(0xD1); | |
4127 emit_byte(0xD0 | encode); | |
4128 } else { | |
4129 emit_byte(0xC1); | |
4130 emit_byte(0xD0 | encode); | |
4131 emit_byte(imm8); | |
4132 } | |
4133 } | |
4134 void Assembler::sarq(Register dst, int imm8) { | |
4135 assert(isShiftCount(imm8 >> 1), "illegal shift count"); | |
4136 int encode = prefixq_and_encode(dst->encoding()); | |
4137 if (imm8 == 1) { | |
4138 emit_byte(0xD1); | |
4139 emit_byte(0xF8 | encode); | |
4140 } else { | |
4141 emit_byte(0xC1); | |
4142 emit_byte(0xF8 | encode); | |
4143 emit_byte(imm8); | |
4144 } | |
4145 } | |
4146 | |
4147 void Assembler::sarq(Register dst) { | |
4148 int encode = prefixq_and_encode(dst->encoding()); | |
4149 emit_byte(0xD3); | |
4150 emit_byte(0xF8 | encode); | |
4151 } | |
4152 void Assembler::sbbq(Address dst, int32_t imm32) { | |
4153 InstructionMark im(this); | |
4154 prefixq(dst); | |
4155 emit_arith_operand(0x81, rbx, dst, imm32); | |
4156 } | |
4157 | |
4158 void Assembler::sbbq(Register dst, int32_t imm32) { | |
4159 (void) prefixq_and_encode(dst->encoding()); | |
4160 emit_arith(0x81, 0xD8, dst, imm32); | |
4161 } | |
4162 | |
4163 void Assembler::sbbq(Register dst, Address src) { | |
4164 InstructionMark im(this); | |
4165 prefixq(src, dst); | |
4166 emit_byte(0x1B); | |
4167 emit_operand(dst, src); | |
4168 } | |
4169 | |
4170 void Assembler::sbbq(Register dst, Register src) { | |
4171 (void) prefixq_and_encode(dst->encoding(), src->encoding()); | |
4172 emit_arith(0x1B, 0xC0, dst, src); | |
4173 } | |
4174 | |
4175 void Assembler::shlq(Register dst, int imm8) { | |
4176 assert(isShiftCount(imm8 >> 1), "illegal shift count"); | |
4177 int encode = prefixq_and_encode(dst->encoding()); | |
4178 if (imm8 == 1) { | |
4179 emit_byte(0xD1); | |
4180 emit_byte(0xE0 | encode); | |
4181 } else { | |
4182 emit_byte(0xC1); | |
4183 emit_byte(0xE0 | encode); | |
4184 emit_byte(imm8); | |
4185 } | |
4186 } | |
4187 | |
4188 void Assembler::shlq(Register dst) { | |
4189 int encode = prefixq_and_encode(dst->encoding()); | |
4190 emit_byte(0xD3); | |
4191 emit_byte(0xE0 | encode); | |
4192 } | |
4193 | |
4194 void Assembler::shrq(Register dst, int imm8) { | |
4195 assert(isShiftCount(imm8 >> 1), "illegal shift count"); | |
4196 int encode = prefixq_and_encode(dst->encoding()); | |
4197 emit_byte(0xC1); | |
4198 emit_byte(0xE8 | encode); | |
4199 emit_byte(imm8); | |
4200 } | |
4201 | |
4202 void Assembler::shrq(Register dst) { | |
4203 int encode = prefixq_and_encode(dst->encoding()); | |
4204 emit_byte(0xD3); | |
4205 emit_byte(0xE8 | encode); | |
4206 } | |
4207 | |
4208 void Assembler::sqrtsd(XMMRegister dst, Address src) { | |
4209 NOT_LP64(assert(VM_Version::supports_sse2(), "")); | |
0 | 4210 InstructionMark im(this); |
4211 emit_byte(0xF2); | |
304 | 4212 prefix(src, dst); |
0 | 4213 emit_byte(0x0F); |
304 | 4214 emit_byte(0x51); |
4215 emit_operand(dst, src); | |
4216 } | |
4217 | |
4218 void Assembler::subq(Address dst, int32_t imm32) { | |
4219 InstructionMark im(this); | |
4220 prefixq(dst); | |
4221 if (is8bit(imm32)) { | |
4222 emit_byte(0x83); | |
4223 emit_operand(rbp, dst, 1); | |
4224 emit_byte(imm32 & 0xFF); | |
4225 } else { | |
4226 emit_byte(0x81); | |
4227 emit_operand(rbp, dst, 4); | |
4228 emit_long(imm32); | |
4229 } | |
4230 } | |
4231 | |
4232 void Assembler::subq(Register dst, int32_t imm32) { | |
4233 (void) prefixq_and_encode(dst->encoding()); | |
4234 emit_arith(0x81, 0xE8, dst, imm32); | |
4235 } | |
4236 | |
4237 void Assembler::subq(Address dst, Register src) { | |
4238 InstructionMark im(this); | |
4239 prefixq(dst, src); | |
4240 emit_byte(0x29); | |
4241 emit_operand(src, dst); | |
4242 } | |
4243 | |
4244 void Assembler::subq(Register dst, Address src) { | |
4245 InstructionMark im(this); | |
4246 prefixq(src, dst); | |
4247 emit_byte(0x2B); | |
4248 emit_operand(dst, src); | |
4249 } | |
4250 | |
4251 void Assembler::subq(Register dst, Register src) { | |
4252 (void) prefixq_and_encode(dst->encoding(), src->encoding()); | |
4253 emit_arith(0x2B, 0xC0, dst, src); | |
4254 } | |
4255 | |
4256 void Assembler::testq(Register dst, int32_t imm32) { | |
4257 // not using emit_arith because test | |
4258 // doesn't support sign-extension of | |
4259 // 8bit operands | |
4260 int encode = dst->encoding(); | |
4261 if (encode == 0) { | |
4262 prefix(REX_W); | |
4263 emit_byte(0xA9); | |
4264 } else { | |
4265 encode = prefixq_and_encode(encode); | |
4266 emit_byte(0xF7); | |
4267 emit_byte(0xC0 | encode); | |
4268 } | |
4269 emit_long(imm32); | |
4270 } | |
4271 | |
4272 void Assembler::testq(Register dst, Register src) { | |
4273 (void) prefixq_and_encode(dst->encoding(), src->encoding()); | |
4274 emit_arith(0x85, 0xC0, dst, src); | |
4275 } | |
4276 | |
4277 void Assembler::xaddq(Address dst, Register src) { | |
4278 InstructionMark im(this); | |
4279 prefixq(dst, src); | |
71 | 4280 emit_byte(0x0F); |
304 | 4281 emit_byte(0xC1); |
4282 emit_operand(src, dst); | |
4283 } | |
4284 | |
4285 void Assembler::xchgq(Register dst, Address src) { | |
4286 InstructionMark im(this); | |
4287 prefixq(src, dst); | |
4288 emit_byte(0x87); | |
4289 emit_operand(dst, src); | |
4290 } | |
4291 | |
4292 void Assembler::xchgq(Register dst, Register src) { | |
4293 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); | |
4294 emit_byte(0x87); | |
4295 emit_byte(0xc0 | encode); | |
4296 } | |
4297 | |
4298 void Assembler::xorq(Register dst, Register src) { | |
4299 (void) prefixq_and_encode(dst->encoding(), src->encoding()); | |
4300 emit_arith(0x33, 0xC0, dst, src); | |
4301 } | |
4302 | |
4303 void Assembler::xorq(Register dst, Address src) { | |
4304 InstructionMark im(this); | |
4305 prefixq(src, dst); | |
4306 emit_byte(0x33); | |
4307 emit_operand(dst, src); | |
4308 } | |
4309 | |
4310 #endif // !LP64 | |
4311 | |
// Negation table for condition codes: reverse[cc] is the condition that is
// true exactly when cc is false. Indexed by the x86 condition encoding
// (each even/odd pair 2k, 2k+1 are mutual complements).
static Assembler::Condition reverse[] = {
    Assembler::noOverflow   /* overflow      = 0x0 */ ,
    Assembler::overflow     /* noOverflow    = 0x1 */ ,
    Assembler::aboveEqual   /* carrySet      = 0x2, below         = 0x2 */ ,
    Assembler::below        /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,
    Assembler::notZero      /* zero          = 0x4, equal         = 0x4 */ ,
    Assembler::zero         /* notZero       = 0x5, notEqual      = 0x5 */ ,
    Assembler::above        /* belowEqual    = 0x6 */ ,
    Assembler::belowEqual   /* above         = 0x7 */ ,
    Assembler::positive     /* negative      = 0x8 */ ,
    Assembler::negative     /* positive      = 0x9 */ ,
    Assembler::noParity     /* parity        = 0xa */ ,
    Assembler::parity       /* noParity      = 0xb */ ,
    Assembler::greaterEqual /* less          = 0xc */ ,
    Assembler::less         /* greaterEqual  = 0xd */ ,
    Assembler::greater      /* lessEqual     = 0xe */ ,
    Assembler::lessEqual    /* greater       = 0xf, */

};
4331 | |
0 | 4332 |
4333 // Implementation of MacroAssembler | |
4334 | |
304 | 4335 // First all the versions that have distinct versions depending on 32/64 bit |
4336 // Unless the difference is trivial (1 line or so). | |
4337 | |
4338 #ifndef _LP64 | |
4339 | |
4340 // 32bit versions | |
4341 | |
0 | 4342 Address MacroAssembler::as_Address(AddressLiteral adr) { |
4343 return Address(adr.target(), adr.rspec()); | |
4344 } | |
4345 | |
4346 Address MacroAssembler::as_Address(ArrayAddress adr) { | |
4347 return Address::make_array(adr); | |
4348 } | |
4349 | |
// Biased-locking fast path (32-bit version).
//
//   lock_reg  - points at the on-stack lock record; also doubles as scratch
//               (saved/restored via push/pop) when tmp_reg == noreg
//   obj_reg   - the object being locked
//   swap_reg  - must be rax (cmpxchg implicitly compares against rax)
//   tmp_reg   - optional scratch register, or noreg
//   swap_reg_contains_mark - true if the caller already loaded the object's
//               mark word into swap_reg
//   done      - jumped to when the lock is successfully biased to this thread
//   slow_case - if non-NULL, jumped to when a CAS loses and revocation or
//               rebias must be handled in the runtime
//   counters  - optional statistics block to bump
//
// Falls through to the internal cas_label when biasing does not apply and
// the normal CAS-based locking should proceed. Returns the code offset at
// which the mark word is loaded through obj_reg (recorded in
// null_check_offset; presumably consumed by callers for implicit null
// checks -- confirm at call sites).
int MacroAssembler::biased_locking_enter(Register lock_reg,
                                         Register obj_reg,
                                         Register swap_reg,
                                         Register tmp_reg,
                                         bool swap_reg_contains_mark,
                                         Label& done,
                                         Label* slow_case,
                                         BiasedLockingCounters* counters) {
  assert(UseBiasedLocking, "why call this otherwise?");
  assert(swap_reg == rax, "swap_reg must be rax, for cmpxchg");
  assert_different_registers(lock_reg, obj_reg, swap_reg);

  if (PrintBiasedLockingStatistics && counters == NULL)
    counters = BiasedLocking::counters();

  // With no dedicated scratch register, reuse lock_reg and preserve its
  // value with push/pop around each use.
  bool need_tmp_reg = false;
  if (tmp_reg == noreg) {
    need_tmp_reg = true;
    tmp_reg = lock_reg;
  } else {
    assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
  }
  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
  Address klass_addr     (obj_reg, oopDesc::klass_offset_in_bytes());
  Address saved_mark_addr(lock_reg, 0);

  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  // First check to see whether biasing is even enabled for this object
  Label cas_label;
  int null_check_offset = -1;
  if (!swap_reg_contains_mark) {
    null_check_offset = offset();
    movl(swap_reg, mark_addr);
  }
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  movl(tmp_reg, swap_reg);
  andl(tmp_reg, markOopDesc::biased_lock_mask_in_place);
  cmpl(tmp_reg, markOopDesc::biased_lock_pattern);
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  jcc(Assembler::notEqual, cas_label);
  // The bias pattern is present in the object's header. Need to check
  // whether the bias owner and the epoch are both still current.
  // Note that because there is no current thread register on x86 we
  // need to store off the mark word we read out of the object to
  // avoid reloading it and needing to recheck invariants below. This
  // store is unfortunate but it makes the overall code shorter and
  // simpler.
  movl(saved_mark_addr, swap_reg);
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  get_thread(tmp_reg);
  // XOR the mark word with (thread | prototype header): a zero result
  // (ignoring age bits) means the bias owner and epoch are both current.
  xorl(swap_reg, tmp_reg);
  if (swap_reg_contains_mark) {
    null_check_offset = offset();
  }
  movl(tmp_reg, klass_addr);
  xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
  andl(swap_reg, ~((int) markOopDesc::age_mask_in_place));
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address)counters->biased_lock_entry_count_addr()));
  }
  jcc(Assembler::equal, done);

  Label try_revoke_bias;
  Label try_rebias;

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  testl(swap_reg, markOopDesc::biased_lock_mask_in_place);
  jcc(Assembler::notZero, try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
  testl(swap_reg, markOopDesc::epoch_mask_in_place);
  jcc(Assembler::notZero, try_rebias);

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go in to the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  movl(swap_reg, saved_mark_addr);
  andl(swap_reg,
       markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  get_thread(tmp_reg);
  orl(tmp_reg, swap_reg);
  if (os::is_MP()) {
    lock();
  }
  // cmpxchg: compares rax (swap_reg, the presumed unbiased header) with the
  // mark word; on match installs tmp_reg (thread | unbiased header).
  cmpxchgptr(tmp_reg, Address(obj_reg, 0));
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  // If the biasing toward our thread failed, this means that
  // another thread succeeded in biasing it toward itself and we
  // need to revoke that bias. The revocation will occur in the
  // interpreter runtime in the slow case.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address)counters->anonymously_biased_lock_entry_count_addr()));
  }
  if (slow_case != NULL) {
    jcc(Assembler::notZero, *slow_case);
  }
  jmp(done);

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  get_thread(tmp_reg);
  movl(swap_reg, klass_addr);
  orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
  movl(swap_reg, saved_mark_addr);
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(tmp_reg, Address(obj_reg, 0));
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  // If the biasing toward our thread failed, then another thread
  // succeeded in biasing it toward itself and we need to revoke that
  // bias. The revocation will occur in the runtime in the slow case.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address)counters->rebiased_lock_entry_count_addr()));
  }
  if (slow_case != NULL) {
    jcc(Assembler::notZero, *slow_case);
  }
  jmp(done);

  bind(try_revoke_bias);
  // The prototype mark in the klass doesn't have the bias bit set any
  // more, indicating that objects of this data type are not supposed
  // to be biased any more. We are going to try to reset the mark of
  // this object to the prototype value and fall through to the
  // CAS-based locking scheme. Note that if our CAS fails, it means
  // that another thread raced us for the privilege of revoking the
  // bias of this particular object, so it's okay to continue in the
  // normal locking code.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  movl(swap_reg, saved_mark_addr);
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  movl(tmp_reg, klass_addr);
  movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(tmp_reg, Address(obj_reg, 0));
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  // Fall through to the normal CAS-based lock, because no matter what
  // the result of the above CAS, some thread must have succeeded in
  // removing the bias bit from the object's header.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address)counters->revoked_lock_entry_count_addr()));
  }

  bind(cas_label);

  return null_check_offset;
}
4561 void MacroAssembler::call_VM_leaf_base(address entry_point, | |
4562 int number_of_arguments) { | |
4563 call(RuntimeAddress(entry_point)); | |
4564 increment(rsp, number_of_arguments * wordSize); | |
4565 } | |
4566 | |
4567 void MacroAssembler::cmpoop(Address src1, jobject obj) { | |
4568 cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate()); | |
4569 } | |
4570 | |
4571 void MacroAssembler::cmpoop(Register src1, jobject obj) { | |
4572 cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate()); | |
4573 } | |
4574 | |
4575 void MacroAssembler::extend_sign(Register hi, Register lo) { | |
4576 // According to Intel Doc. AP-526, "Integer Divide", p.18. | |
4577 if (VM_Version::is_P6() && hi == rdx && lo == rax) { | |
4578 cdql(); | |
4579 } else { | |
4580 movl(hi, lo); | |
4581 sarl(hi, 31); | |
4582 } | |
4583 } | |
4584 | |
0 | 4585 void MacroAssembler::fat_nop() { |
4586 // A 5 byte nop that is safe for patching (see patch_verified_entry) | |
4587 emit_byte(0x26); // es: | |
4588 emit_byte(0x2e); // cs: | |
4589 emit_byte(0x64); // fs: | |
4590 emit_byte(0x65); // gs: | |
4591 emit_byte(0x90); | |
4592 } | |
4593 | |
304 | 4594 void MacroAssembler::jC2(Register tmp, Label& L) { |
4595 // set parity bit if FPU flag C2 is set (via rax) | |
4596 save_rax(tmp); | |
4597 fwait(); fnstsw_ax(); | |
4598 sahf(); | |
4599 restore_rax(tmp); | |
4600 // branch | |
4601 jcc(Assembler::parity, L); | |
4602 } | |
4603 | |
4604 void MacroAssembler::jnC2(Register tmp, Label& L) { | |
4605 // set parity bit if FPU flag C2 is set (via rax) | |
4606 save_rax(tmp); | |
4607 fwait(); fnstsw_ax(); | |
4608 sahf(); | |
4609 restore_rax(tmp); | |
4610 // branch | |
4611 jcc(Assembler::noParity, L); | |
4612 } | |
4613 | |
0 | 4614 // 32bit can do a case table jump in one instruction but we no longer allow the base |
4615 // to be installed in the Address class | |
4616 void MacroAssembler::jump(ArrayAddress entry) { | |
4617 jmp(as_Address(entry)); | |
4618 } | |
4619 | |
// Note: y_lo will be destroyed
// Compares the long x (x_hi:x_lo) against y (y_hi:y_lo) and leaves
// -1/0/+1 in x_hi, matching Java's lcmp semantics.
void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  // Long compare for Java (semantics as described in JVM spec.)
  Label high, low, done;

  cmpl(x_hi, y_hi);                // signed compare of the high halves
  jcc(Assembler::less, low);
  jcc(Assembler::greater, high);
  // x_hi is the return register
  xorl(x_hi, x_hi);                // high halves equal: tentative result 0
  cmpl(x_lo, y_lo);                // unsigned compare of the low halves
  jcc(Assembler::below, low);
  jcc(Assembler::equal, done);

  bind(high);                      // x > y: result +1
  xorl(x_hi, x_hi);
  increment(x_hi);
  jmp(done);

  bind(low);                       // x < y: result -1
  xorl(x_hi, x_hi);
  decrementl(x_hi);

  bind(done);
}
4645 | |
// Loads the literal address (with its relocation info) into dst.
void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal32(dst, (int32_t)src.target(), src.rspec());
}
4649 | |
// Stores the literal address (with its relocation info) to a memory slot.
void MacroAssembler::lea(Address dst, AddressLiteral adr) {
  // leal(dst, as_Address(adr));
  // see note in movl as to why we must use a move
  mov_literal32(dst, (int32_t) adr.target(), adr.rspec());
}
4655 | |
// Tears down a standard frame: restore rsp from rbp, then pop the saved rbp.
void MacroAssembler::leave() {
  mov(rsp, rbp);
  pop(rbp);
}
0 | 4660 |
// 64-bit multiply of two Java longs taken from the stack; result in rdx:rax.
// Clobbers rbx and rcx.
void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
  // Multiplication of two Java long values stored on the stack
  // as illustrated below. Result is in rdx:rax.
  //
  // rsp ---> [  ??  ]  \                \
  //            ....     | y_rsp_offset   |
  //          [ y_lo ]  /  (in bytes)     | x_rsp_offset
  //          [ y_hi ]                    | (in bytes)
  //            ....                      |
  //          [ x_lo ]                   /
  //          [ x_hi ]
  //            ....
  //
  // Basic idea: lo(result) = lo(x_lo * y_lo)
  //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
  Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
  Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
  Label quick;
  // load x_hi, y_hi and check if quick
  // multiplication is possible
  movl(rbx, x_hi);
  movl(rcx, y_hi);
  movl(rax, rbx);
  orl(rbx, rcx);                                 // rbx, = 0 <=> x_hi = 0 and y_hi = 0
  jcc(Assembler::zero, quick);                   // if rbx, = 0 do quick multiply
  // do full multiplication
  // 1st step
  mull(y_lo);                                    // x_hi * y_lo
  movl(rbx, rax);                                // save lo(x_hi * y_lo) in rbx,
  // 2nd step
  movl(rax, x_lo);
  mull(rcx);                                     // x_lo * y_hi
  addl(rbx, rax);                                // add lo(x_lo * y_hi) to rbx,
  // 3rd step
  bind(quick);                                   // note: rbx, = 0 if quick multiply!
  movl(rax, x_lo);
  mull(y_lo);                                    // x_lo * y_lo
  addl(rdx, rbx);                                // correct hi(x_lo * y_lo)
}
4700 | |
// Two's-complement negation of the long in hi:lo
// (neg lo; then hi := -(hi + carry)).
void MacroAssembler::lneg(Register hi, Register lo) {
  negl(lo);
  adcl(hi, 0);   // propagate borrow from the low word
  negl(hi);
}
0 | 4706 |
// Long left shift of hi:lo by the count in rcx (Java << semantics:
// count taken mod 64).
void MacroAssembler::lshl(Register hi, Register lo) {
  // Java shift left long support (semantics as described in JVM spec., p.305)
  // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
  // shift value is in rcx !
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;                        // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                                    // if (s < n)
  jcc(Assembler::less, L);                       // else (s >= n)
  movl(hi, lo);                                  // x := x << n
  xorl(lo, lo);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                                       // s (mod n) < n
  shldl(hi, lo);                                 // x := x << s
  shll(lo);
}
4726 | |
4727 | |
// Long right shift of hi:lo by the count in rcx. sign_extension selects
// Java >> (arithmetic) vs >>> (logical) semantics; count taken mod 64.
void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
  // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
  // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;                        // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                                    // if (s < n)
  jcc(Assembler::less, L);                       // else (s >= n)
  movl(lo, hi);                                  // x := x >> n
  if (sign_extension) sarl(hi, 31);
  else                xorl(hi, hi);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                                       // s (mod n) < n
  shrdl(lo, hi);                                 // x := x >> s
  if (sign_extension) sarl(hi);
  else                shrl(hi);
}
4748 | |
// Loads an oop constant into a register, recording an immediate-oop relocation.
void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

// Stores an oop constant to memory, recording an immediate-oop relocation.
void MacroAssembler::movoop(Address dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

// Loads either the literal's address (lval) or the value it points to.
void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal32(dst, (intptr_t)src.target(), src.rspec());
  } else {
    movl(dst, as_Address(src));
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src) {
  movl(as_Address(dst), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movl(dst, as_Address(src));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Address dst, intptr_t src) {
  movl(dst, src);
}
4777 | |
4778 | |
// Loads a double from an absolute address into an XMM register (32-bit:
// any address is directly encodable, no reachability check needed).
void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
  movsd(dst, as_Address(src));
}
4782 | |
// Pops rcx, rdx, rdi, rsi — exact reverse of push_callee_saved_registers.
void MacroAssembler::pop_callee_saved_registers() {
  pop(rcx);
  pop(rdx);
  pop(rdi);
  pop(rsi);
}
4789 | |
// Reloads the FPU top-of-stack double saved by push_fTOS and releases
// its two-word stack slot.
void MacroAssembler::pop_fTOS() {
  fld_d(Address(rsp, 0));
  addl(rsp, 2 * wordSize);   // a double occupies two 32-bit words
}
4794 | |
// Pushes rsi, rdi, rdx, rcx — mirror order of pop_callee_saved_registers.
void MacroAssembler::push_callee_saved_registers() {
  push(rsi);
  push(rdi);
  push(rdx);
  push(rcx);
}
4801 | |
// Spills (and pops) the FPU top-of-stack double into a freshly
// allocated two-word stack slot; paired with pop_fTOS.
void MacroAssembler::push_fTOS() {
  subl(rsp, 2 * wordSize);
  fstp_d(Address(rsp, 0));
}
4806 | |
4807 | |
// Pushes an oop constant, recording an immediate-oop relocation.
void MacroAssembler::pushoop(jobject obj) {
  push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
}
4811 | |
4812 | |
// Pushes either the literal's address (lval) or the word it points to.
void MacroAssembler::pushptr(AddressLiteral src) {
  if (src.is_lval()) {
    push_literal32((int32_t)src.target(), src.rspec());
  } else {
    pushl(as_Address(src));
  }
}
4820 | |
// dst := (ZF clear) ? 1 : 0, as a full word. Reads the current flags,
// so must directly follow the instruction that set them.
void MacroAssembler::set_word_if_not_zero(Register dst) {
  xorl(dst, dst);             // clear upper bytes without touching ZF semantics needed below? no — xor sets flags; setcc below reads flags set *before*? see note
  set_byte_if_not_zero(dst);
}
4825 | |
// 32-bit C calling convention passes all arguments on the stack, so each
// pass_argN helper simply pushes its register (callers push in reverse order).
static void pass_arg0(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}
4841 | |
4842 #ifndef PRODUCT | |
4843 extern "C" void findpc(intptr_t x); | |
4844 #endif | |
4845 | |
// Runtime target of MacroAssembler::stop() (32-bit): parameters are the
// register values and faulting eip that stop() pushed. Optionally shows a
// dialog and dumps the register state, then breakpoints.
void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
  // In order to get locks to work, we need to fake a in_VM state
  JavaThread* thread = JavaThread::current();
  JavaThreadState saved_state = thread->thread_state();
  thread->set_thread_state(_thread_in_vm);
  if (ShowMessageBoxOnError) {
    // NOTE(review): these three declarations shadow the identical outer ones
    // above and look redundant — confirm against upstream before removing.
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      ttyLocker ttyl;
      tty->print_cr("eip = 0x%08x", eip);
#ifndef PRODUCT
      tty->cr();
      findpc(eip);
      tty->cr();
#endif
      tty->print_cr("rax, = 0x%08x", rax);
      tty->print_cr("rbx, = 0x%08x", rbx);
      tty->print_cr("rcx = 0x%08x", rcx);
      tty->print_cr("rdx = 0x%08x", rdx);
      tty->print_cr("rdi = 0x%08x", rdi);
      tty->print_cr("rsi = 0x%08x", rsi);
      tty->print_cr("rbp, = 0x%08x", rbp);
      tty->print_cr("rsp = 0x%08x", rsp);
      BREAKPOINT;
    }
  } else {
    ttyLocker ttyl;
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
    assert(false, "DEBUG MESSAGE");
  }
  ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
}
4886 | |
// Emits code that halts the VM with 'msg': pushes the message address and a
// fake return eip, saves all registers, and calls debug32 (whose parameters
// are read straight off this stack layout), then hlt()s.
void MacroAssembler::stop(const char* msg) {
  ExternalAddress message((address)msg);
  // push address of message
  pushptr(message.addr());
  { Label L; call(L, relocInfo::none); bind(L); }               // push eip
  pusha();                                                      // push registers
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
  hlt();
}
4896 | |
// Emits code that prints a warning via the runtime 'warning' function,
// preserving the full CPU state around the call.
void MacroAssembler::warn(const char* msg) {
  push_CPU_state();

  ExternalAddress message((address) msg);
  // push address of message
  pushptr(message.addr());

  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
  addl(rsp, wordSize);       // discard argument
  pop_CPU_state();
}
4908 | |
4909 #else // _LP64 | |
4910 | |
4911 // 64 bit versions | |
4912 | |
// Converts a reachable AddressLiteral into a rip-relative Address
// (disp computed from the current pc).
Address MacroAssembler::as_Address(AddressLiteral adr) {
  // amd64 always does this as a pc-rel
  // we can be absolute or disp based on the instruction type
  // jmp/call are displacements others are absolute
  assert(!adr.is_lval(), "must be rval");
  assert(reachable(adr), "must be");
  return Address((int32_t)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc());

}
4922 | |
// Materializes an ArrayAddress: loads the base into rscratch1 (clobbered!)
// and returns a register-indexed Address over it.
Address MacroAssembler::as_Address(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  lea(rscratch1, base);
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(rscratch1, index._index, index._scale, index._disp);
  return array;
}
4931 | |
4932 int MacroAssembler::biased_locking_enter(Register lock_reg, | |
4933 Register obj_reg, | |
4934 Register swap_reg, | |
4935 Register tmp_reg, | |
4936 bool swap_reg_contains_mark, | |
4937 Label& done, | |
4938 Label* slow_case, | |
4939 BiasedLockingCounters* counters) { | |
4940 assert(UseBiasedLocking, "why call this otherwise?"); | |
4941 assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq"); | |
4942 assert(tmp_reg != noreg, "tmp_reg must be supplied"); | |
4943 assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg); | |
4944 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); | |
4945 Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes()); | |
4946 Address saved_mark_addr(lock_reg, 0); | |
4947 | |
4948 if (PrintBiasedLockingStatistics && counters == NULL) | |
4949 counters = BiasedLocking::counters(); | |
4950 | |
4951 // Biased locking | |
4952 // See whether the lock is currently biased toward our thread and | |
4953 // whether the epoch is still valid | |
4954 // Note that the runtime guarantees sufficient alignment of JavaThread | |
4955 // pointers to allow age to be placed into low bits | |
4956 // First check to see whether biasing is even enabled for this object | |
4957 Label cas_label; | |
4958 int null_check_offset = -1; | |
4959 if (!swap_reg_contains_mark) { | |
4960 null_check_offset = offset(); | |
4961 movq(swap_reg, mark_addr); | |
4962 } | |
4963 movq(tmp_reg, swap_reg); | |
4964 andq(tmp_reg, markOopDesc::biased_lock_mask_in_place); | |
4965 cmpq(tmp_reg, markOopDesc::biased_lock_pattern); | |
4966 jcc(Assembler::notEqual, cas_label); | |
4967 // The bias pattern is present in the object's header. Need to check | |
4968 // whether the bias owner and the epoch are both still current. | |
4969 load_prototype_header(tmp_reg, obj_reg); | |
4970 orq(tmp_reg, r15_thread); | |
4971 xorq(tmp_reg, swap_reg); | |
4972 andq(tmp_reg, ~((int) markOopDesc::age_mask_in_place)); | |
4973 if (counters != NULL) { | |
4974 cond_inc32(Assembler::zero, | |
4975 ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr())); | |
4976 } | |
0 | 4977 jcc(Assembler::equal, done); |
4978 | |
304 | 4979 Label try_revoke_bias; |
4980 Label try_rebias; | |
4981 | |
4982 // At this point we know that the header has the bias pattern and | |
4983 // that we are not the bias owner in the current epoch. We need to | |
4984 // figure out more details about the state of the header in order to | |
4985 // know what operations can be legally performed on the object's | |
4986 // header. | |
4987 | |
4988 // If the low three bits in the xor result aren't clear, that means | |
4989 // the prototype header is no longer biased and we have to revoke | |
4990 // the bias on this object. | |
4991 testq(tmp_reg, markOopDesc::biased_lock_mask_in_place); | |
4992 jcc(Assembler::notZero, try_revoke_bias); | |
4993 | |
4994 // Biasing is still enabled for this data type. See whether the | |
4995 // epoch of the current bias is still valid, meaning that the epoch | |
4996 // bits of the mark word are equal to the epoch bits of the | |
4997 // prototype header. (Note that the prototype header's epoch bits | |
4998 // only change at a safepoint.) If not, attempt to rebias the object | |
4999 // toward the current thread. Note that we must be absolutely sure | |
5000 // that the current epoch is invalid in order to do this because | |
5001 // otherwise the manipulations it performs on the mark word are | |
5002 // illegal. | |
5003 testq(tmp_reg, markOopDesc::epoch_mask_in_place); | |
5004 jcc(Assembler::notZero, try_rebias); | |
5005 | |
5006 // The epoch of the current bias is still valid but we know nothing | |
5007 // about the owner; it might be set or it might be clear. Try to | |
5008 // acquire the bias of the object using an atomic operation. If this | |
5009 // fails we will go in to the runtime to revoke the object's bias. | |
5010 // Note that we first construct the presumed unbiased header so we | |
5011 // don't accidentally blow away another thread's valid bias. | |
5012 andq(swap_reg, | |
5013 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place); | |
5014 movq(tmp_reg, swap_reg); | |
5015 orq(tmp_reg, r15_thread); | |
5016 if (os::is_MP()) { | |
5017 lock(); | |
5018 } | |
5019 cmpxchgq(tmp_reg, Address(obj_reg, 0)); | |
5020 // If the biasing toward our thread failed, this means that | |
5021 // another thread succeeded in biasing it toward itself and we | |
5022 // need to revoke that bias. The revocation will occur in the | |
5023 // interpreter runtime in the slow case. | |
5024 if (counters != NULL) { | |
5025 cond_inc32(Assembler::zero, | |
5026 ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr())); | |
5027 } | |
5028 if (slow_case != NULL) { | |
5029 jcc(Assembler::notZero, *slow_case); | |
5030 } | |
0 | 5031 jmp(done); |
5032 | |
304 | 5033 bind(try_rebias); |
5034 // At this point we know the epoch has expired, meaning that the | |
5035 // current "bias owner", if any, is actually invalid. Under these | |
5036 // circumstances _only_, we are allowed to use the current header's | |
5037 // value as the comparison value when doing the cas to acquire the | |
5038 // bias in the current epoch. In other words, we allow transfer of | |
5039 // the bias from one thread to another directly in this situation. | |
5040 // | |
5041 // FIXME: due to a lack of registers we currently blow away the age | |
5042 // bits in this situation. Should attempt to preserve them. | |
5043 load_prototype_header(tmp_reg, obj_reg); | |
5044 orq(tmp_reg, r15_thread); | |
5045 if (os::is_MP()) { | |
5046 lock(); | |
5047 } | |
5048 cmpxchgq(tmp_reg, Address(obj_reg, 0)); | |
5049 // If the biasing toward our thread failed, then another thread | |
5050 // succeeded in biasing it toward itself and we need to revoke that | |
5051 // bias. The revocation will occur in the runtime in the slow case. | |
5052 if (counters != NULL) { | |
5053 cond_inc32(Assembler::zero, | |
5054 ExternalAddress((address) counters->rebiased_lock_entry_count_addr())); | |
5055 } | |
5056 if (slow_case != NULL) { | |
5057 jcc(Assembler::notZero, *slow_case); | |
0 | 5058 } |
5059 jmp(done); | |
5060 | |
304 | 5061 bind(try_revoke_bias); |
5062 // The prototype mark in the klass doesn't have the bias bit set any | |
5063 // more, indicating that objects of this data type are not supposed | |
5064 // to be biased any more. We are going to try to reset the mark of | |
5065 // this object to the prototype value and fall through to the | |
5066 // CAS-based locking scheme. Note that if our CAS fails, it means | |
5067 // that another thread raced us for the privilege of revoking the | |
5068 // bias of this particular object, so it's okay to continue in the | |
5069 // normal locking code. | |
5070 // | |
5071 // FIXME: due to a lack of registers we currently blow away the age | |
5072 // bits in this situation. Should attempt to preserve them. | |
5073 load_prototype_header(tmp_reg, obj_reg); | |
5074 if (os::is_MP()) { | |
5075 lock(); | |
5076 } | |
5077 cmpxchgq(tmp_reg, Address(obj_reg, 0)); | |
5078 // Fall through to the normal CAS-based lock, because no matter what | |
5079 // the result of the above CAS, some thread must have succeeded in | |
5080 // removing the bias bit from the object's header. | |
5081 if (counters != NULL) { | |
5082 cond_inc32(Assembler::zero, | |
5083 ExternalAddress((address) counters->revoked_lock_entry_count_addr())); | |
5084 } | |
5085 | |
5086 bind(cas_label); | |
5087 | |
5088 return null_check_offset; | |
5089 } | |
5090 | |
// Calls a leaf (no safepoint/oop-map) runtime entry, keeping rsp 16-byte
// aligned at the call. Arguments are assumed to already be in the C
// argument registers; num_args is only used for the Win64 home-space check.
void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
  Label L, E;

#ifdef _WIN64
  // Windows always allocates space for it's register args
  assert(num_args <= 4, "only register arguments supported");
  subq(rsp,  frame::arg_reg_save_area_bytes);
#endif

  // Align stack if necessary
  testl(rsp, 15);
  jcc(Assembler::zero, L);

  subq(rsp, 8);              // rsp was 8 mod 16: pad to a 16-byte boundary
  {
    call(RuntimeAddress(entry_point));
  }
  addq(rsp, 8);
  jmp(E);

  bind(L);                   // already aligned
  {
    call(RuntimeAddress(entry_point));
  }

  bind(E);

#ifdef _WIN64
  // restore stack pointer
  addq(rsp, frame::arg_reg_save_area_bytes);
#endif

}
5124 | |
// 64-bit compare against a memory literal; falls back to loading the
// address into rscratch1 when the literal is not rip-reachable.
void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
  assert(!src2.is_lval(), "should use cmpptr");

  if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch1, src2);
    Assembler::cmpq(src1, Address(rscratch1, 0));
  }
}
5135 | |
// Emits ldiv/lrem with the JVM-mandated min_long / -1 special case
// (idivq would #DE-overflow on it). Returns the pc offset of idivq for
// implicit divide-by-zero exception mapping.
int MacroAssembler::corrected_idivq(Register reg) {
  // Full implementation of Java ldiv and lrem; checks for special
  // case as described in JVM spec., p.243 & p.271. The function
  // returns the (pc) offset of the idivl instruction - may be needed
  // for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax: dividend                         min_long
  //         reg: divisor (may not be eax/edx)     -1
  //
  // output: rax: quotient  (= rax idiv reg)       min_long
  //         rdx: remainder (= rax irem reg)       0
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  static const int64_t min_long = 0x8000000000000000;
  Label normal_case, special_case;

  // check for special case
  cmp64(rax, ExternalAddress((address) &min_long));
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx);     // prepare rdx for possible special case (where
                      // remainder = 0)
  cmpq(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdqq();             // sign-extend rax into rdx:rax before dividing
  int idivq_offset = offset();
  idivq(reg);

  // normal and special case exit
  bind(special_case);

  return idivq_offset;
}
5172 | |
5173 void MacroAssembler::decrementq(Register reg, int value) { | |
5174 if (value == min_jint) { subq(reg, value); return; } | |
5175 if (value < 0) { incrementq(reg, -value); return; } | |
5176 if (value == 0) { ; return; } | |
5177 if (value == 1 && UseIncDec) { decq(reg) ; return; } | |
5178 /* else */ { subq(reg, value) ; return; } | |
5179 } | |
5180 | |
5181 void MacroAssembler::decrementq(Address dst, int value) { | |
5182 if (value == min_jint) { subq(dst, value); return; } | |
5183 if (value < 0) { incrementq(dst, -value); return; } | |
5184 if (value == 0) { ; return; } | |
5185 if (value == 1 && UseIncDec) { decq(dst) ; return; } | |
5186 /* else */ { subq(dst, value) ; return; } | |
5187 } | |
5188 | |
// Emits a 5-byte patchable nop sequence (66 66 90 66 90).
void MacroAssembler::fat_nop() {
  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  // Recommended sequence from 'Software Optimization Guide for the AMD
  // Hammer Processor'
  emit_byte(0x66);
  emit_byte(0x66);
  emit_byte(0x90);
  emit_byte(0x66);
  emit_byte(0x90);
}
5199 | |
5200 void MacroAssembler::incrementq(Register reg, int value) { | |
5201 if (value == min_jint) { addq(reg, value); return; } | |
5202 if (value < 0) { decrementq(reg, -value); return; } | |
5203 if (value == 0) { ; return; } | |
5204 if (value == 1 && UseIncDec) { incq(reg) ; return; } | |
5205 /* else */ { addq(reg, value) ; return; } | |
5206 } | |
5207 | |
5208 void MacroAssembler::incrementq(Address dst, int value) { | |
5209 if (value == min_jint) { addq(dst, value); return; } | |
5210 if (value < 0) { decrementq(dst, -value); return; } | |
5211 if (value == 0) { ; return; } | |
5212 if (value == 1 && UseIncDec) { incq(dst) ; return; } | |
5213 /* else */ { addq(dst, value) ; return; } | |
5214 } | |
5215 | |
// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
// 64-bit case-table jump: load the table base into rscratch1 and jump
// through the indexed entry.
void MacroAssembler::jump(ArrayAddress entry) {
  lea(rscratch1, entry.base());
  Address dispatch = entry.index();
  assert(dispatch._base == noreg, "must be");
  dispatch._base = rscratch1;
  jmp(dispatch);
}
5225 | |
// Not used on 64-bit (longs live in single registers); kept to satisfy
// the shared interface.
void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  cmpq(x_lo, y_lo);
}
5230 | |
// Loads the 64-bit literal address (with relocation info) into dst.
void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal64(dst, (intptr_t)src.target(), src.rspec());
}
5234 | |
// Stores the 64-bit literal address to memory via rscratch1 (clobbered),
// since there is no 64-bit immediate-to-memory move.
void MacroAssembler::lea(Address dst, AddressLiteral adr) {
  mov_literal64(rscratch1, (intptr_t)adr.target(), adr.rspec());
  movptr(dst, rscratch1);
}
5239 | |
// Frame teardown using the hardware LEAVE instruction.
void MacroAssembler::leave() {
  // %%% is this really better? Why not on 32bit too?
  emit_byte(0xC9); // LEAVE
}
5244 | |
// Not used on 64-bit (longs live in single registers); kept to satisfy
// the shared interface.
void MacroAssembler::lneg(Register hi, Register lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  negq(lo);
}
5249 | |
// Loads an oop constant into a register, recording an immediate-oop relocation.
void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
}

// Stores an oop constant to memory via rscratch1 (clobbered) — no 64-bit
// immediate-to-memory move exists.
void MacroAssembler::movoop(Address dst, jobject obj) {
  mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  movq(dst, rscratch1);
}
5258 | |
// Loads either the literal's address (lval) or the value it points to;
// uses rscratch1 (clobbered) when the target is not rip-reachable.
void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  } else {
    if (reachable(src)) {
      movq(dst, as_Address(src));
    } else {
      lea(rscratch1, src);
      movq(dst, Address(rscratch1,0));
    }
  }
}
5271 | |
void MacroAssembler::movptr(ArrayAddress dst, Register src) {
  movq(as_Address(dst), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movq(dst, as_Address(src));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
// (full 64-bit immediate is staged through rscratch1, which is clobbered).
void MacroAssembler::movptr(Address dst, intptr_t src) {
  mov64(rscratch1, src);
  movq(dst, rscratch1);
}

// These are mostly for initializing NULL
void MacroAssembler::movptr(Address dst, int32_t src) {
  movslq(dst, src);   // store the sign-extended 32-bit immediate as 64 bits
}

void MacroAssembler::movptr(Register dst, int32_t src) {
  mov64(dst, (intptr_t)src);
}
5294 | |
// Pushes an oop constant; staged through rscratch1 (clobbered) since
// push takes no 64-bit immediate.
void MacroAssembler::pushoop(jobject obj) {
  movoop(rscratch1, obj);
  push(rscratch1);
}
5299 | |
// Pushes either the literal's address (lval) or the word it points to.
// Clobbers rscratch1.
void MacroAssembler::pushptr(AddressLiteral src) {
  lea(rscratch1, src);
  if (src.is_lval()) {
    push(rscratch1);
  } else {
    pushq(Address(rscratch1, 0));
  }
}
5308 | |
// Clears the thread's last-Java-frame anchor: sp always, fp/pc optionally.
void MacroAssembler::reset_last_Java_frame(bool clear_fp,
                                           bool clear_pc) {
  // we must set sp to zero to clear frame
  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
  }

  if (clear_pc) {
    movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
  }
}
5323 | |
// Records the thread's last-Java-frame anchor. last_java_sp defaults to the
// current rsp; fp and pc are stored only when supplied. Clobbers rscratch1
// when last_java_pc is given. The sp store is intentionally last, since a
// non-zero last_Java_sp is what marks the anchor as "walkable".
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc) {
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
           last_java_fp);
  }

  // last_java_pc is optional
  if (last_java_pc != NULL) {
    Address java_pc(r15_thread,
                    JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
    lea(rscratch1, InternalAddress(last_java_pc));
    movptr(java_pc, rscratch1);
  }

  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
}
5348 | |
5349 static void pass_arg0(MacroAssembler* masm, Register arg) { | |
5350 if (c_rarg0 != arg ) { | |
5351 masm->mov(c_rarg0, arg); | |
5352 } | |
5353 } | |
5354 | |
5355 static void pass_arg1(MacroAssembler* masm, Register arg) { | |
5356 if (c_rarg1 != arg ) { | |
5357 masm->mov(c_rarg1, arg); | |
5358 } | |
5359 } | |
5360 | |
5361 static void pass_arg2(MacroAssembler* masm, Register arg) { | |
5362 if (c_rarg2 != arg ) { | |
5363 masm->mov(c_rarg2, arg); | |
5364 } | |
5365 } | |
5366 | |
5367 static void pass_arg3(MacroAssembler* masm, Register arg) { | |
5368 if (c_rarg3 != arg ) { | |
5369 masm->mov(c_rarg3, arg); | |
5370 } | |
5371 } | |
5372 | |
// Emits code that halts the VM with 'msg': saves all GPRs with pusha, passes
// msg / faulting rip / a pointer to the saved registers to debug64, then hlt()s.
void MacroAssembler::stop(const char* msg) {
  address rip = pc();
  pusha(); // get regs on stack
  lea(c_rarg0, ExternalAddress((address) msg));
  lea(c_rarg1, InternalAddress(rip));
  movq(c_rarg2, rsp); // pass pointer to regs array
  andq(rsp, -16); // align stack as required by ABI
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  hlt();
}
5383 | |
// Emits code that prints a warning via the runtime 'warning' function.
// r12 temporarily holds the original rsp so it can be restored after the
// forced 16-byte alignment; full CPU state is preserved around the call.
void MacroAssembler::warn(const char* msg) {
  push(r12);
  movq(r12, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call

  push_CPU_state();   // keeps alignment at 16 bytes
  lea(c_rarg0, ExternalAddress((address) msg));
  call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
  pop_CPU_state();

  movq(rsp, r12);
  pop(r12);
}
5397 | |
5398 #ifndef PRODUCT | |
5399 extern "C" void findpc(intptr_t x); | |
5400 #endif | |
5401 | |
// Runtime target of MacroAssembler::stop() (64-bit). regs[] points at the
// pusha frame, so the indices run opposite to push order: regs[15] is rax
// down to regs[0] = r15 (see the labelled prints below). Optionally shows a
// dialog and dumps the register state, then breakpoints.
void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake a in_VM state
  if (ShowMessageBoxOnError ) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // XXX correct this offset for amd64
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      ttyLocker ttyl;
      tty->print_cr("rip = 0x%016lx", pc);
#ifndef PRODUCT
      tty->cr();
      findpc(pc);
      tty->cr();
#endif
      tty->print_cr("rax = 0x%016lx", regs[15]);
      tty->print_cr("rbx = 0x%016lx", regs[12]);
      tty->print_cr("rcx = 0x%016lx", regs[14]);
      tty->print_cr("rdx = 0x%016lx", regs[13]);
      tty->print_cr("rdi = 0x%016lx", regs[8]);
      tty->print_cr("rsi = 0x%016lx", regs[9]);
      tty->print_cr("rbp = 0x%016lx", regs[10]);
      tty->print_cr("rsp = 0x%016lx", regs[11]);
      tty->print_cr("r8  = 0x%016lx", regs[7]);
      tty->print_cr("r9  = 0x%016lx", regs[6]);
      tty->print_cr("r10 = 0x%016lx", regs[5]);
      tty->print_cr("r11 = 0x%016lx", regs[4]);
      tty->print_cr("r12 = 0x%016lx", regs[3]);
      tty->print_cr("r13 = 0x%016lx", regs[2]);
      tty->print_cr("r14 = 0x%016lx", regs[1]);
      tty->print_cr("r15 = 0x%016lx", regs[0]);
      BREAKPOINT;
    }
    ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
  } else {
    ttyLocker ttyl;
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
                    msg);
  }
}
5450 | |
5451 #endif // _LP64 | |
5452 | |
5453 // Now versions that are common to 32/64 bit | |
5454 | |
// Pointer-width add: addq on 64-bit, addl on 32-bit.
void MacroAssembler::addptr(Register dst, int32_t imm32) {
  LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
}

void MacroAssembler::addptr(Register dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addptr(Address dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}
5466 | |
5467 void MacroAssembler::align(int modulus) { | |
5468 if (offset() % modulus != 0) { | |
5469 nop(modulus - (offset() % modulus)); | |
5470 } | |
5471 } | |
5472 | |
5473 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) { | |
5474 andpd(dst, as_Address(src)); | |
5475 } | |
5476 | |
5477 void MacroAssembler::andptr(Register dst, int32_t imm32) { | |
5478 LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32)); | |
5479 } | |
5480 | |
// Atomically increment the 32-bit counter at counter_addr.
// pushf/popf preserve the caller's eflags, which the increment clobbers;
// the lock prefix is only emitted on multiprocessor systems.
void MacroAssembler::atomic_incl(AddressLiteral counter_addr) {
  pushf();
  if (os::is_MP())
    lock();
  incrementl(counter_addr);
  popf();
}
5488 | |
// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages. This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  movptr(tmp, rsp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  bind(loop);
  movl(Address(tmp, (-os::vm_page_size())), size );
  subptr(tmp, os::vm_page_size());
  subl(size, os::vm_page_size());
  jcc(Assembler::greater, loop);

  // Bang down shadow pages too.
  // The -1 because we already subtracted 1 page.
  // NOTE(review): i starts at 0, so the first store lands at offset 0 from
  // tmp (a page already banged by the loop above) — presumably harmless
  // re-banging rather than an off-by-one; confirm against the shadow-page
  // layout before changing.
  for (int i = 0; i< StackShadowPages-1; i++) {
    // this could be any sized move but this is can be a debugging crumb
    // so the bigger the better.
    movptr(Address(tmp, (-i*os::vm_page_size())), size );
  }
}
5511 | |
// Fast-path unlock for a biased-locked object: if the mark word still
// carries the biased-lock pattern, unlocking is a no-op and we branch
// straight to done.  Clobbers temp_reg.
void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
  assert(UseBiasedLocking, "why call this otherwise?");

  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
  movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
  cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
  jcc(Assembler::equal, done);
}
5526 | |
// Normalize a C-style boolean in x to exactly 0 or 1.
void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  andl(x, 0xFF);                 // mask to the low byte, setting ZF
  setb(Assembler::notZero, x);   // x = (low byte != 0) ? 1 : 0
}
5535 | |
// Wouldn't need if AddressLiteral version had new name
void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
  Assembler::call(L, rtype);
}

// Indirect call through a register.
void MacroAssembler::call(Register entry) {
  Assembler::call(entry);
}

// Call to an absolute address; if the target is out of direct-call range
// (64-bit only), route through rscratch1.
void MacroAssembler::call(AddressLiteral entry) {
  if (reachable(entry)) {
    Assembler::call_literal(entry.target(), entry.rspec());
  } else {
    lea(rscratch1, entry);
    Assembler::call(rscratch1);
  }
}
5553 | |
// Implementation of call_VM versions
//
// Each overload emits: call C / jmp E / C: <args + helper + ret> / E:.
// The intermediate "call C" pushes a return address that call_VM_helper
// later reads back as last_Java_pc (see the comment in call_VM_helper).

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  // Args are passed last-to-first so an earlier arg register is not
  // clobbered before it is read; the asserts check for such smashes.
  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));

  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
  pass_arg3(this, arg_3);

  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
  ret(0);

  bind(E);
}
5632 | |
// call_VM variants that take an explicit last_java_sp instead of deriving
// it; these go straight to call_VM_base (no intermediate call/ret dance).

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  // On 64-bit the thread is pinned in r15; on 32-bit call_VM_base loads it.
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
  call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  // Args are passed last-to-first; asserts guard against clobbering an
  // argument register before it has been read.
  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
  pass_arg3(this, arg_3);
  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}
5679 | |
// Common implementation behind all call_VM variants: establishes the
// thread argument and last-Java-frame bookkeeping, performs the call,
// restores the thread, checks for pending exceptions, and fetches an
// oop result from the thread if one was requested.
void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
#ifdef _LP64
    java_thread = r15_thread;
#else
    java_thread = rdi;
    get_thread(java_thread);
#endif // LP64
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }
  // debugging support
  assert(number_of_arguments >= 0   , "cannot have negative number of arguments");
  LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
  assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)
  NOT_LP64(push(java_thread); number_of_arguments++);
  LP64_ONLY(mov(c_rarg0, r15_thread));

  // set last Java frame before call
  assert(last_java_sp != rbp, "can't use ebp/rbp");

  // Only interpreter should have to set fp
  set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);

  // restore the thread (cannot use the pushed argument since arguments
  // may be overwritten by C code generated by an optimizing compiler);
  // however can use the register value directly if it is callee saved.
  if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
    // rdi & rsi (also r15) are callee saved -> nothing to do
#ifdef ASSERT
    // Verify the callee-saved assumption: the register should still hold
    // the current thread after the C call.
    guarantee(java_thread != rax, "change this code");
    push(rax);
    { Label L;
      get_thread(rax);
      cmpptr(java_thread, rax);
      jcc(Assembler::equal, L);
      stop("MacroAssembler::call_VM_base: rdi not callee saved?");
      bind(L);
    }
    pop(rax);
#endif
  } else {
    get_thread(java_thread);
  }
  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(java_thread, true, false);

#ifndef CC_INTERP
   // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);
#endif /* CC_INTERP */

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
#ifndef _LP64
    jump_cc(Assembler::notEqual,
            RuntimeAddress(StubRoutines::forward_exception_entry()));
#else
    // This used to conditionally jump to forward_exception however it is
    // possible if we relocate that the branch will not reach. So we must jump
    // around so we can always reach

    Label ok;
    jcc(Assembler::equal, ok);
    jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    bind(ok);
#endif // LP64
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
    movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
    verify_oop(oop_result, "broken oop in call_VM_base");
  }
}
5774 | |
// Bridge between the call_VM overloads and call_VM_base: computes the
// value to record as last_Java_sp, then delegates.
void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {

  // Calculate the value for last_Java_sp
  // somewhat subtle. call_VM does an intermediate call
  // which places a return address on the stack just under the
  // stack pointer as the user finsihed with it. This allows
  // use to retrieve last_Java_pc from last_Java_sp[-1].
  // On 32bit we then have to push additional args on the stack to accomplish
  // the actual requested call. On 64bit call_VM only can use register args
  // so the only extra space is the return address that call_VM created.
  // This hopefully explains the calculations here.

#ifdef _LP64
  // We've pushed one address, correct last_Java_sp
  lea(rax, Address(rsp, wordSize));
#else
  // Skip the pushed return address plus the stacked arguments.
  lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
#endif // LP64

  call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);

}
5797 | |
// Leaf calls into the VM runtime: no last-Java-frame bookkeeping and no
// exception check.  Arguments are passed last-to-first so an earlier
// argument register is not clobbered before it is read.

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {

  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 2);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);
  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 3);
}
5824 | |
// Intentionally empty hooks called from call_VM_base after a VM call.
// NOTE(review): presumably specialized elsewhere (e.g. by the interpreter's
// macro assembler) to handle early-return and pop-frame requests — confirm
// against the subclass before relying on no-op behavior here.
void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
}

void MacroAssembler::check_and_handle_popframe(Register java_thread) {
}
5830 | |
// 32-bit compares.  The AddressLiteral forms fall back to materializing
// the address in rscratch1 when the literal is out of rip-relative range
// (64-bit only; reachable() is always true on 32-bit).

void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) {
  if (reachable(src1)) {
    cmpl(as_Address(src1), imm);
  } else {
    lea(rscratch1, src1);
    cmpl(Address(rscratch1, 0), imm);
  }
}

void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
  assert(!src2.is_lval(), "use cmpptr");
  if (reachable(src2)) {
    cmpl(src1, as_Address(src2));
  } else {
    lea(rscratch1, src2);
    cmpl(src1, Address(rscratch1, 0));
  }
}

void MacroAssembler::cmp32(Register src1, int32_t imm) {
  Assembler::cmpl(src1, imm);
}

void MacroAssembler::cmp32(Register src1, Address src2) {
  Assembler::cmpl(src1, src2);
}
5857 | |
5858 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { | |
5859 ucomisd(opr1, opr2); | |
5860 | |
5861 Label L; | |
5862 if (unordered_is_less) { | |
5863 movl(dst, -1); | |
5864 jcc(Assembler::parity, L); | |
5865 jcc(Assembler::below , L); | |
5866 movl(dst, 0); | |
5867 jcc(Assembler::equal , L); | |
5868 increment(dst); | |
5869 } else { // unordered is greater | |
5870 movl(dst, 1); | |
5871 jcc(Assembler::parity, L); | |
5872 jcc(Assembler::above , L); | |
5873 movl(dst, 0); | |
5874 jcc(Assembler::equal , L); | |
5875 decrementl(dst); | |
5876 } | |
5877 bind(L); | |
5878 } | |
5879 | |
5880 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { | |
5881 ucomiss(opr1, opr2); | |
5882 | |
5883 Label L; | |
5884 if (unordered_is_less) { | |
5885 movl(dst, -1); | |
5886 jcc(Assembler::parity, L); | |
5887 jcc(Assembler::below , L); | |
5888 movl(dst, 0); | |
5889 jcc(Assembler::equal , L); | |
5890 increment(dst); | |
5891 } else { // unordered is greater | |
5892 movl(dst, 1); | |
5893 jcc(Assembler::parity, L); | |
5894 jcc(Assembler::above , L); | |
5895 movl(dst, 0); | |
5896 jcc(Assembler::equal , L); | |
5897 decrementl(dst); | |
5898 } | |
5899 bind(L); | |
5900 } | |
5901 | |
5902 | |
// Byte compare against a memory literal, with the usual out-of-range
// fallback through rscratch1 (64-bit only).
void MacroAssembler::cmp8(AddressLiteral src1, int imm) {
  if (reachable(src1)) {
    cmpb(as_Address(src1), imm);
  } else {
    lea(rscratch1, src1);
    cmpb(Address(rscratch1, 0), imm);
  }
}

// Pointer-sized compare of a register against an AddressLiteral.  An
// is_lval literal means "compare against the address itself", not the
// value stored there.
void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
#ifdef _LP64
  if (src2.is_lval()) {
    movptr(rscratch1, src2);
    Assembler::cmpq(src1, rscratch1);
  } else if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch1, src2);
    Assembler::cmpq(src1, Address(rscratch1, 0));
  }
#else
  if (src2.is_lval()) {
    // 32-bit can embed the address as an immediate with relocation.
    cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
  } else {
    cmpl(src1, as_Address(src2));
  }
#endif // _LP64
}

// Pointer-sized compare of a memory operand against a literal address.
void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
  assert(src2.is_lval(), "not a mem-mem compare");
#ifdef _LP64
  // moves src2's literal address
  movptr(rscratch1, src2);
  Assembler::cmpq(src1, rscratch1);
#else
  cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
#endif // _LP64
}

// Atomic compare-and-exchange of reg with the word at adr (rax holds the
// expected value, per the cmpxchg instruction's contract).
void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
  if (reachable(adr)) {
    if (os::is_MP())
      lock();
    cmpxchgptr(reg, as_Address(adr));
  } else {
    lea(rscratch1, adr);
    if (os::is_MP())
      lock();
    cmpxchgptr(reg, Address(rscratch1, 0));
  }
}

void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
  LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr));
}
5959 | |
5960 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) { | |
5961 comisd(dst, as_Address(src)); | |
5962 } | |
5963 | |
5964 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) { | |
5965 comiss(dst, as_Address(src)); | |
5966 } | |
5967 | |
5968 | |
// Atomically increment the counter only when cond holds: jump around the
// increment on the negated condition.
void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
  Condition negated_cond = negate_condition(cond);
  Label L;
  jcc(negated_cond, L);
  atomic_incl(counter_addr);
  bind(L);
}
5976 | |
int MacroAssembler::corrected_idivl(Register reg) {
  // Full implementation of Java idiv and irem; checks for
  // special case as described in JVM spec., p.243 & p.271.
  // The function returns the (pc) offset of the idivl
  // instruction - may be needed for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax,: dividend                         min_int
  //         reg: divisor   (may not be rax,/rdx)   -1
  //
  // output: rax,: quotient  (= rax, idiv reg)       min_int
  //         rdx: remainder (= rax, irem reg)       0
  assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register");
  const int min_int = 0x80000000;
  Label normal_case, special_case;

  // check for special case: min_int / -1 would overflow and #DE in idiv,
  // so it is routed around the divide with quotient=min_int, remainder=0.
  cmpl(rax, min_int);
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
  cmpl(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdql();          // sign-extend rax into rdx:rax for the signed divide
  int idivl_offset = offset();
  idivl(reg);

  // normal and special case exit
  bind(special_case);

  return idivl_offset;
}
6012 | |
6013 | |
6014 | |
// 32-bit decrement by a constant, choosing the shortest encoding:
// dec for 1 (when UseIncDec), sub otherwise; negative values delegate to
// incrementl.  min_jint is subtracted directly since -min_jint overflows.
void MacroAssembler::decrementl(Register reg, int value) {
  if (value == min_jint) {subl(reg, value) ; return; }
  if (value <  0) { incrementl(reg, -value); return; }
  if (value ==  0) {                        ; return; }
  if (value ==  1 && UseIncDec) { decl(reg) ; return; }
  /* else */      { subl(reg, value)       ; return; }
}

void MacroAssembler::decrementl(Address dst, int value) {
  if (value == min_jint) {subl(dst, value) ; return; }
  if (value <  0) { incrementl(dst, -value); return; }
  if (value ==  0) {                        ; return; }
  if (value ==  1 && UseIncDec) { decl(dst) ; return; }
  /* else */      { subl(dst, value)       ; return; }
}

// Signed division by 2^shift_value via arithmetic shift.  Negative
// dividends are biased by (2^shift - 1) first so the shift rounds toward
// zero like idiv, not toward negative infinity.
void MacroAssembler::division_with_shift (Register reg, int shift_value) {
  assert (shift_value > 0, "illegal shift value");
  Label _is_positive;
  testl (reg, reg);
  jcc (Assembler::positive, _is_positive);
  int offset = (1 << shift_value) - 1 ;

  if (offset == 1) {
    incrementl(reg);
  } else {
    addl(reg, offset);
  }

  bind (_is_positive);
  sarl(reg, shift_value);
}
6047 | |
6048 // !defined(COMPILER2) is because of stupid core builds | |
6049 #if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2) | |
6050 void MacroAssembler::empty_FPU_stack() { | |
6051 if (VM_Version::supports_mmx()) { | |
6052 emms(); | |
6053 } else { | |
6054 for (int i = 8; i-- > 0; ) ffree(i); | |
6055 } | |
6056 } | |
6057 #endif // !LP64 || C1 || !C2 | |
6058 | |
6059 | |
// Defines obj, preserves var_size_in_bytes
//
// Inline eden allocation via a CAS loop on the heap top pointer.  The new
// object's address lands in obj (must be rax for cmpxchg); on contention
// the CAS fails and we retry; on overflow/exhaustion we jump to slow_case.
void MacroAssembler::eden_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, var_size_in_bytes, t1);
  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
    jmp(slow_case);
  } else {
    Register end = t1;
    Label retry;
    bind(retry);
    ExternalAddress heap_top((address) Universe::heap()->top_addr());
    movptr(obj, heap_top);
    // end = obj + size (size is either a compile-time constant or a register)
    if (var_size_in_bytes == noreg) {
      lea(end, Address(obj, con_size_in_bytes));
    } else {
      lea(end, Address(obj, var_size_in_bytes, Address::times_1));
    }
    // if end < obj then we wrapped around => object too long => slow case
    cmpptr(end, obj);
    jcc(Assembler::below, slow_case);
    cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
    jcc(Assembler::above, slow_case);
    // Compare obj with the top addr, and if still equal, store the new top addr in
    // end at the address of the top addr pointer. Sets ZF if was equal, and clears
    // it otherwise. Use lock prefix for atomicity on MPs.
    locked_cmpxchgptr(end, heap_top);
    jcc(Assembler::notEqual, retry);
  }
}
6093 | |
// Standard frame prologue: save caller's frame pointer, establish ours.
void MacroAssembler::enter() {
  push(rbp);
  mov(rbp, rsp);
}
0 | 6098 |
// x87 compare of ST0 against ST(1), popping both operands.
void MacroAssembler::fcmp(Register tmp) {
  fcmp(tmp, 1, true, true);
}

// x87 compare of ST0 against ST(index).  On P6+ (cmov support) the
// fucomi family writes eflags directly; otherwise the FPU status word is
// transferred to eflags via rax (sahf), which is why a temp is required.
void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
  assert(!pop_right || pop_left, "usage error");
  if (VM_Version::supports_cmov()) {
    assert(tmp == noreg, "unneeded temp");
    if (pop_left) {
      fucomip(index);
    } else {
      fucomi(index);
    }
    if (pop_right) {
      fpop();
    }
  } else {
    assert(tmp != noreg, "need temp");
    if (pop_left) {
      if (pop_right) {
        fcompp();
      } else {
        fcomp(index);
      }
    } else {
      fcom(index);
    }
    // convert FPU condition into eflags condition via rax,
    save_rax(tmp);
    fwait(); fnstsw_ax();
    sahf();
    restore_rax(tmp);
  }
  // condition codes set as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
}

// x87 compare with three-way integer result; pops both operands.
void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) {
  fcmp2int(dst, unordered_is_less, 1, true, true);
}

// dst = -1/0/+1 for ST0 <,==,> ST(index); NaN maps to -1 or +1 per
// unordered_is_less (same flag-decoding scheme as cmpsd2int).
void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
  fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right);
  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}
6163 | |
// x87 loads from literal addresses.
// NOTE(review): these call as_Address unconditionally, i.e. they assume
// the literal is rip-reachable on 64-bit — confirm callers only pass
// reachable constants.
void MacroAssembler::fld_d(AddressLiteral src) {
  fld_d(as_Address(src));
}

void MacroAssembler::fld_s(AddressLiteral src) {
  fld_s(as_Address(src));
}

void MacroAssembler::fld_x(AddressLiteral src) {
  Assembler::fld_x(as_Address(src));
}

// Load the x87 control word.
void MacroAssembler::fldcw(AddressLiteral src) {
  Assembler::fldcw(as_Address(src));
}

// Pop the x87 stack: free ST0 and advance the stack top pointer.
void MacroAssembler::fpop() {
  ffree();
  fincstp();
}

// Partial remainder of ST0 by ST1, iterating fprem until the C2 status
// bit (0x400, "reduction incomplete") clears.  Needs a temp to preserve
// rax across the status-word transfer.
void MacroAssembler::fremr(Register tmp) {
  save_rax(tmp);
  { Label L;
    bind(L);
    fprem();
    fwait(); fnstsw_ax();
#ifdef _LP64
    testl(rax, 0x400);
    jcc(Assembler::notEqual, L);
#else
    sahf();
    jcc(Assembler::parity, L);
#endif // _LP64
  }
  restore_rax(tmp);
  // Result is in ST0.
  // Note: fxch & fpop to get rid of ST1
  // (otherwise FPU stack could overflow eventually)
  fxch(1);
  fpop();
}
6206 | |
6207 | |
// 32-bit increment of a memory literal, with the out-of-range fallback
// through rscratch1 (64-bit only).
void MacroAssembler::incrementl(AddressLiteral dst) {
  if (reachable(dst)) {
    incrementl(as_Address(dst));
  } else {
    lea(rscratch1, dst);
    incrementl(Address(rscratch1, 0));
  }
}

void MacroAssembler::incrementl(ArrayAddress dst) {
  incrementl(as_Address(dst));
}

// 32-bit increment by a constant, choosing the shortest encoding; the
// mirror of decrementl above.  min_jint is added directly since negating
// it overflows.
void MacroAssembler::incrementl(Register reg, int value) {
  if (value == min_jint) {addl(reg, value) ; return; }
  if (value <  0) { decrementl(reg, -value); return; }
  if (value ==  0) {                        ; return; }
  if (value ==  1 && UseIncDec) { incl(reg) ; return; }
  /* else */      { addl(reg, value)       ; return; }
}

void MacroAssembler::incrementl(Address dst, int value) {
  if (value == min_jint) {addl(dst, value) ; return; }
  if (value <  0) { decrementl(dst, -value); return; }
  if (value ==  0) {                        ; return; }
  if (value ==  1 && UseIncDec) { incl(dst) ; return; }
  /* else */      { addl(dst, value)       ; return; }
}
6236 | |
// Unconditional jump to a literal address; indirect through rscratch1 if
// the target is out of direct-branch range (64-bit only).
void MacroAssembler::jump(AddressLiteral dst) {
  if (reachable(dst)) {
    jmp_literal(dst.target(), dst.rspec());
  } else {
    lea(rscratch1, dst);
    jmp(rscratch1);
  }
}

// Conditional jump to a literal address.  Reachable targets are encoded
// directly (short 2-byte form when the displacement fits in 8 bits and no
// relocation is needed, else the 6-byte long form).  Unreachable targets
// are handled by reversing the condition around an indirect jump.
void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
  if (reachable(dst)) {
    InstructionMark im(this);
    relocate(dst.reloc());
    const int short_size = 2;
    const int long_size = 6;
    // Displacement is relative to the end of the emitted instruction.
    int offs = (intptr_t)dst.target() - ((intptr_t)_code_pos);
    if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_byte(0x70 | cc);
      emit_byte((offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      emit_byte(0x0F);
      emit_byte(0x80 | cc);
      emit_long(offs - long_size);
    }
  } else {
#ifdef ASSERT
    warning("reversing conditional branch");
#endif /* ASSERT */
    Label skip;
    jccb(reverse[cc], skip);
    lea(rscratch1, dst);
    Assembler::jmp(rscratch1);
    bind(skip);
  }
}

// Load the SSE control/status register (MXCSR) from memory, with the
// usual rscratch1 fallback for out-of-range literals.
void MacroAssembler::ldmxcsr(AddressLiteral src) {
  if (reachable(src)) {
    Assembler::ldmxcsr(as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::ldmxcsr(Address(rscratch1, 0));
  }
}
6283 | |
// Sign-extending byte load.  Returns the code offset of the load itself
// (used by callers that need the pc of a potentially faulting access).
// Pre-P6 32-bit chips lack movsx, so emit a zero-extending load plus a
// shift pair instead.
int MacroAssembler::load_signed_byte(Register dst, Address src) {
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    off = offset();
    movsbl(dst, src); // movsxb
  } else {
    off = load_unsigned_byte(dst, src);
    shll(dst, 24);
    sarl(dst, 24);
  }
  return off;
}

// Note: load_signed_short used to be called load_signed_word.
// Although the 'w' in x86 opcodes refers to the term "word" in the assembler
// manual, which means 16 bits, that usage is found nowhere in HotSpot code.
// The term "word" in HotSpot means a 32- or 64-bit machine word.
int MacroAssembler::load_signed_short(Register dst, Address src) {
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    // This is dubious to me since it seems safe to do a signed 16 => 64 bit
    // version but this is what 64bit has always done. This seems to imply
    // that users are only using 32bits worth.
    off = offset();
    movswl(dst, src); // movsxw
  } else {
    off = load_unsigned_short(dst, src);
    shll(dst, 16);
    sarl(dst, 16);
  }
  return off;
}

// Zero-extending byte load; returns the offset of the load instruction.
int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
  // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
  // and "3.9 Partial Register Penalties", p. 22).
  int off;
  if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) {
    off = offset();
    movzbl(dst, src); // movzxb
  } else {
    // Pre-P6 fallback: clear then partial-register load (avoided above
    // when src uses dst, since the xor would clobber the address).
    xorl(dst, dst);
    off = offset();
    movb(dst, src);
  }
  return off;
}

// Note: load_unsigned_short used to be called load_unsigned_word.
int MacroAssembler::load_unsigned_short(Register dst, Address src) {
  // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
  // and "3.9 Partial Register Penalties", p. 22).
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
    off = offset();
    movzwl(dst, src); // movzxw
  } else {
    xorl(dst, dst);
    off = offset();
    movw(dst, src);
  }
  return off;
}
6347 | |
622
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
// Loads a value of 'size_in_bytes' (1, 2, 4, or 8) from 'src' into 'dst',
// sign-extending when 'is_signed' is true and zero-extending otherwise.
// Dispatch trick: the switch key is size_in_bytes XOR (is_signed ? -1 : 0),
// so signed sizes select the bitwise-complement cases (~8, ~4, ~2, ~1) and
// a single switch handles both size and signedness.
void MacroAssembler::load_sized_value(Register dst, Address src,
                                      int size_in_bytes, bool is_signed) {
  switch (size_in_bytes ^ (is_signed ? -1 : 0)) {
#ifndef _LP64
  // For case 8, caller is responsible for manually loading
  // the second word into another register.
  case ~8: // fall through:
  case 8: movl( dst, src ); break;
#else
  case ~8: // fall through:
  case 8: movq( dst, src ); break;
#endif
  case ~4: // fall through:
  case 4: movl( dst, src ); break;
  case ~2: load_signed_short( dst, src ); break;
  case 2: load_unsigned_short( dst, src ); break;
  case ~1: load_signed_byte( dst, src ); break;
  case 1: load_unsigned_byte( dst, src ); break;
  default: ShouldNotReachHere();
  }
}
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
6369 |
304 | 6370 void MacroAssembler::mov32(AddressLiteral dst, Register src) { |
6371 if (reachable(dst)) { | |
6372 movl(as_Address(dst), src); | |
6373 } else { | |
6374 lea(rscratch1, dst); | |
6375 movl(Address(rscratch1, 0), src); | |
6376 } | |
6377 } | |
6378 | |
6379 void MacroAssembler::mov32(Register dst, AddressLiteral src) { | |
6380 if (reachable(src)) { | |
6381 movl(dst, as_Address(src)); | |
6382 } else { | |
6383 lea(rscratch1, src); | |
6384 movl(dst, Address(rscratch1, 0)); | |
6385 } | |
0 | 6386 } |
6387 | |
6388 // C++ bool manipulation | |
6389 | |
6390 void MacroAssembler::movbool(Register dst, Address src) { | |
6391 if(sizeof(bool) == 1) | |
6392 movb(dst, src); | |
6393 else if(sizeof(bool) == 2) | |
6394 movw(dst, src); | |
6395 else if(sizeof(bool) == 4) | |
6396 movl(dst, src); | |
6397 else | |
6398 // unsupported | |
6399 ShouldNotReachHere(); | |
6400 } | |
6401 | |
6402 void MacroAssembler::movbool(Address dst, bool boolconst) { | |
6403 if(sizeof(bool) == 1) | |
6404 movb(dst, (int) boolconst); | |
6405 else if(sizeof(bool) == 2) | |
6406 movw(dst, (int) boolconst); | |
6407 else if(sizeof(bool) == 4) | |
6408 movl(dst, (int) boolconst); | |
6409 else | |
6410 // unsupported | |
6411 ShouldNotReachHere(); | |
6412 } | |
6413 | |
6414 void MacroAssembler::movbool(Address dst, Register src) { | |
6415 if(sizeof(bool) == 1) | |
6416 movb(dst, src); | |
6417 else if(sizeof(bool) == 2) | |
6418 movw(dst, src); | |
6419 else if(sizeof(bool) == 4) | |
6420 movl(dst, src); | |
6421 else | |
6422 // unsupported | |
6423 ShouldNotReachHere(); | |
6424 } | |
6425 | |
// Stores the byte constant 'src' at the array address 'dst'.
void MacroAssembler::movbyte(ArrayAddress dst, int src) {
  movb(as_Address(dst), src);
}
6429 | |
6430 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) { | |
6431 if (reachable(src)) { | |
6432 if (UseXmmLoadAndClearUpper) { | |
6433 movsd (dst, as_Address(src)); | |
6434 } else { | |
6435 movlpd(dst, as_Address(src)); | |
6436 } | |
6437 } else { | |
6438 lea(rscratch1, src); | |
6439 if (UseXmmLoadAndClearUpper) { | |
6440 movsd (dst, Address(rscratch1, 0)); | |
6441 } else { | |
6442 movlpd(dst, Address(rscratch1, 0)); | |
6443 } | |
6444 } | |
6445 } | |
6446 | |
6447 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) { | |
6448 if (reachable(src)) { | |
6449 movss(dst, as_Address(src)); | |
6450 } else { | |
6451 lea(rscratch1, src); | |
6452 movss(dst, Address(rscratch1, 0)); | |
6453 } | |
6454 } | |
6455 | |
6456 void MacroAssembler::movptr(Register dst, Register src) { | |
6457 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); | |
6458 } | |
6459 | |
6460 void MacroAssembler::movptr(Register dst, Address src) { | |
6461 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); | |
6462 } | |
6463 | |
6464 // src should NEVER be a real pointer. Use AddressLiteral for true pointers | |
6465 void MacroAssembler::movptr(Register dst, intptr_t src) { | |
6466 LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src)); | |
6467 } | |
6468 | |
6469 void MacroAssembler::movptr(Address dst, Register src) { | |
6470 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); | |
6471 } | |
6472 | |
6473 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) { | |
6474 if (reachable(src)) { | |
6475 movss(dst, as_Address(src)); | |
6476 } else { | |
6477 lea(rscratch1, src); | |
6478 movss(dst, Address(rscratch1, 0)); | |
6479 } | |
6480 } | |
6481 | |
6482 void MacroAssembler::null_check(Register reg, int offset) { | |
6483 if (needs_explicit_null_check(offset)) { | |
6484 // provoke OS NULL exception if reg = NULL by | |
6485 // accessing M[reg] w/o changing any (non-CC) registers | |
6486 // NOTE: cmpl is plenty here to provoke a segv | |
6487 cmpptr(rax, Address(reg, 0)); | |
6488 // Note: should probably use testl(rax, Address(reg, 0)); | |
6489 // may be shorter code (however, this version of | |
6490 // testl needs to be implemented first) | |
6491 } else { | |
6492 // nothing to do, (later) access of M[reg + offset] | |
6493 // will provoke OS NULL exception if reg = NULL | |
6494 } | |
6495 } | |
6496 | |
// Emits a call to os::breakpoint() rather than a raw breakpoint instruction.
void MacroAssembler::os_breakpoint() {
  // instead of directly emitting a breakpoint, call os:breakpoint for better debugability
  // (e.g., MSVC can't call ps() otherwise)
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
}
6502 | |
// Restores FPU then integer state; mirror image of push_CPU_state.
void MacroAssembler::pop_CPU_state() {
  pop_FPU_state();
  pop_IU_state();
}
6507 | |
6508 void MacroAssembler::pop_FPU_state() { | |
6509 NOT_LP64(frstor(Address(rsp, 0));) | |
6510 LP64_ONLY(fxrstor(Address(rsp, 0));) | |
6511 addptr(rsp, FPUStateSizeInWords * wordSize); | |
6512 } | |
6513 | |
6514 void MacroAssembler::pop_IU_state() { | |
6515 popa(); | |
6516 LP64_ONLY(addq(rsp, 8)); | |
6517 popf(); | |
6518 } | |
6519 | |
// Save Integer and Float state
// Warning: Stack must be 16 byte aligned (64bit)
void MacroAssembler::push_CPU_state() {
  push_IU_state();
  push_FPU_state();
}
6526 | |
// Saves the FPU/SSE state into a stack area of FPUStateSizeInWords words;
// restored by pop_FPU_state.
void MacroAssembler::push_FPU_state() {
  subptr(rsp, FPUStateSizeInWords * wordSize);
#ifndef _LP64
  fnsave(Address(rsp, 0));
  fwait();
#else
  fxsave(Address(rsp, 0));
#endif // LP64
}
6536 | |
6537 void MacroAssembler::push_IU_state() { | |
6538 // Push flags first because pusha kills them | |
6539 pushf(); | |
6540 // Make sure rsp stays 16-byte aligned | |
6541 LP64_ONLY(subq(rsp, 8)); | |
6542 pusha(); | |
6543 } | |
6544 | |
// Clears the thread's last-Java-frame anchor on return from C land:
// always zeroes last_Java_sp, and optionally last_Java_fp / last_Java_pc.
// If 'java_thread' is not a valid register, the current thread is loaded
// into rdi first.
void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rdi;
    get_thread(java_thread);
  }
  // we must set sp to zero to clear frame
  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
  if (clear_fp) {
    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
  }

  if (clear_pc)
    movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);

}
6561 | |
6562 void MacroAssembler::restore_rax(Register tmp) { | |
6563 if (tmp == noreg) pop(rax); | |
6564 else if (tmp != rax) mov(rax, tmp); | |
6565 } | |
6566 | |
// Rounds 'reg' up to the next multiple of 'modulus'; only correct when
// 'modulus' is a power of two (add modulus-1, then mask low bits).
void MacroAssembler::round_to(Register reg, int modulus) {
  addptr(reg, modulus - 1);
  andptr(reg, -modulus);
}
6571 | |
6572 void MacroAssembler::save_rax(Register tmp) { | |
6573 if (tmp == noreg) push(rax); | |
6574 else if (tmp != rax) mov(tmp, rax); | |
6575 } | |
6576 | |
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp) {
  // Hash the thread pointer into an int-aligned offset within the page.
  movl(tmp, thread);
  shrl(tmp, os::get_serialize_page_shift_count());
  andl(tmp, (os::vm_page_size() - sizeof(int)));

  Address index(noreg, tmp, Address::times_1);
  ExternalAddress page(os::get_memory_serialize_page());

  // Size of store must match masking code above
  movl(as_Address(ArrayAddress(page, index)), tmp);
}
6592 | |
// Calls to C land
//
// When entering C land, the rbp, & rsp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
void MacroAssembler::set_last_Java_frame(Register java_thread,
                                         Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rdi;
    get_thread(java_thread);
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }

  // last_java_fp is optional

  if (last_java_fp->is_valid()) {
    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
  }

  // last_java_pc is optional

  if (last_java_pc != NULL) {
    lea(Address(java_thread,
                 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
        InternalAddress(last_java_pc));

  }
  // Store sp last: the anchor is considered walkable once sp is non-null.
  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
}
6628 | |
6629 void MacroAssembler::shlptr(Register dst, int imm8) { | |
6630 LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8)); | |
6631 } | |
6632 | |
6633 void MacroAssembler::shrptr(Register dst, int imm8) { | |
6634 LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8)); | |
6635 } | |
6636 | |
// Sign-extends the low byte of 'reg' through the full (32-bit) register.
void MacroAssembler::sign_extend_byte(Register reg) {
  if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) {
    movsbl(reg, reg); // movsxb
  } else {
    // No byte form of the register (or pre-P6): shift up and arithmetic
    // shift back down to replicate the sign bit.
    shll(reg, 24);
    sarl(reg, 24);
  }
}
6645 | |
// Sign-extends the low 16 bits of 'reg' through the full (32-bit) register.
void MacroAssembler::sign_extend_short(Register reg) {
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    movswl(reg, reg); // movsxw
  } else {
    // Pre-P6: shift up and arithmetic shift back down to replicate the
    // sign bit.
    shll(reg, 16);
    sarl(reg, 16);
  }
}
6654 | |
362 | 6655 ////////////////////////////////////////////////////////////////////////////////// |
6656 #ifndef SERIALGC | |
6657 | |
// G1 pre-barrier: if concurrent marking is active, records the value
// currently stored at (obj, 0) into the thread's SATB mark queue before it
// is overwritten. Falls back to the g1_wb_pre runtime call when the queue
// buffer is full. 'tosca_live' indicates rax holds a live value that must
// be preserved across the runtime call. On 64-bit the thread register is
// implicitly r15_thread.
void MacroAssembler::g1_write_barrier_pre(Register obj,
#ifndef _LP64
                                          Register thread,
#endif
                                          Register tmp,
                                          Register tmp2,
                                          bool tosca_live) {
  LP64_ONLY(Register thread = r15_thread;)
  Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                       PtrQueue::byte_offset_of_active()));

  Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                       PtrQueue::byte_offset_of_index()));
  Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                       PtrQueue::byte_offset_of_buf()));


  Label done;
  Label runtime;

  // if (!marking_in_progress) goto done;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    cmpl(in_progress, 0);
  } else {
    assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
    cmpb(in_progress, 0);
  }
  jcc(Assembler::equal, done);

  // if (x.f == NULL) goto done;
  cmpptr(Address(obj, 0), NULL_WORD);
  jcc(Assembler::equal, done);

  // Can we store original value in the thread's buffer?
  // (index == 0 means the buffer is full.)

  LP64_ONLY(movslq(tmp, index);)
  movptr(tmp2, Address(obj, 0));
#ifdef _LP64
  cmpq(tmp, 0);
#else
  cmpl(index, 0);
#endif
  jcc(Assembler::equal, runtime);
  // The queue index counts down; store the old value at buffer[new index].
#ifdef _LP64
  subq(tmp, wordSize);
  movl(index, tmp);
  addq(tmp, buffer);
#else
  subl(index, wordSize);
  movl(tmp, buffer);
  addl(tmp, index);
#endif
  movptr(Address(tmp, 0), tmp2);
  jmp(done);
  bind(runtime);
  // save the live input values
  if(tosca_live) push(rax);
  push(obj);
#ifdef _LP64
  movq(c_rarg0, Address(obj, 0));
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, r15_thread);
#else
  push(thread);
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), tmp2, thread);
  pop(thread);
#endif
  pop(obj);
  if(tosca_live) pop(rax);
  bind(done);

}
6729 | |
// G1 post-barrier: after storing 'new_val' at 'store_addr', dirties the
// card for the store address and enqueues it on the thread's dirty-card
// queue when the store crosses heap regions and is non-NULL. Falls back to
// the g1_wb_post runtime call when the queue buffer is full. On 64-bit the
// thread register is implicitly r15_thread. Clobbers tmp and tmp2.
void MacroAssembler::g1_write_barrier_post(Register store_addr,
                                           Register new_val,
#ifndef _LP64
                                           Register thread,
#endif
                                           Register tmp,
                                           Register tmp2) {

  LP64_ONLY(Register thread = r15_thread;)
  Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                       PtrQueue::byte_offset_of_index()));
  Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                       PtrQueue::byte_offset_of_buf()));
  BarrierSet* bs = Universe::heap()->barrier_set();
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  Label done;
  Label runtime;

  // Does store cross heap regions?
  // (XOR of the two addresses has bits above the region size iff they
  // are in different regions.)

  movptr(tmp, store_addr);
  xorptr(tmp, new_val);
  shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
  jcc(Assembler::equal, done);

  // crosses regions, storing NULL?

  cmpptr(new_val, (int32_t) NULL_WORD);
  jcc(Assembler::equal, done);

  // storing region crossing non-NULL, is card already dirty?

  ExternalAddress cardtable((address) ct->byte_map_base);
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
#ifdef _LP64
  const Register card_addr = tmp;

  movq(card_addr, store_addr);
  shrq(card_addr, CardTableModRefBS::card_shift);

  lea(tmp2, cardtable);

  // get the address of the card
  addq(card_addr, tmp2);
#else
  const Register card_index = tmp;

  movl(card_index, store_addr);
  shrl(card_index, CardTableModRefBS::card_shift);

  Address index(noreg, card_index, Address::times_1);
  const Register card_addr = tmp;
  lea(card_addr, as_Address(ArrayAddress(cardtable, index)));
#endif
  cmpb(Address(card_addr, 0), 0);
  jcc(Assembler::equal, done);

  // storing a region crossing, non-NULL oop, card is clean.
  // dirty card and log.

  movb(Address(card_addr, 0), 0);

  // Enqueue the card on the thread's dirty-card queue; index == 0 means
  // the buffer is full, so go to the runtime.
  cmpl(queue_index, 0);
  jcc(Assembler::equal, runtime);
  subl(queue_index, wordSize);
  movptr(tmp2, buffer);
#ifdef _LP64
  movslq(rscratch1, queue_index);
  addq(tmp2, rscratch1);
  movq(Address(tmp2, 0), card_addr);
#else
  addl(tmp2, queue_index);
  movl(Address(tmp2, 0), card_index);
#endif
  jmp(done);

  bind(runtime);
  // save the live input values
  push(store_addr);
  push(new_val);
#ifdef _LP64
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, r15_thread);
#else
  push(thread);
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
  pop(thread);
#endif
  pop(new_val);
  pop(store_addr);

  bind(done);

}
6823 | |
6824 #endif // SERIALGC | |
6825 ////////////////////////////////////////////////////////////////////////////////// | |
6826 | |
6827 | |
void MacroAssembler::store_check(Register obj) {
  // Does a store check for the oop in register obj. The content of
  // register obj is destroyed afterwards.
  store_check_part_1(obj);
  store_check_part_2(obj);
}
6834 | |
// Overload taking the destination address; 'dst' is unused because the
// card is derived from 'obj' itself. Destroys 'obj'.
void MacroAssembler::store_check(Register obj, Address dst) {
  store_check(obj);
}
6838 | |
6839 | |
// split the store check operation so that other instructions can be scheduled inbetween
// Part 1: converts the oop in 'obj' into its card-table index (destroys obj).
void MacroAssembler::store_check_part_1(Register obj) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  shrptr(obj, CardTableModRefBS::card_shift);
}
6846 | |
// Part 2: dirties the card whose index was computed by store_check_part_1
// (expects 'obj' to already hold the card index).
void MacroAssembler::store_check_part_2(Register obj) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

  // The calculation for byte_map_base is as follows:
  // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  // So this essentially converts an address to a displacement and
  // it will never need to be relocated. On 64bit however the value may be too
  // large for a 32bit displacement

  intptr_t disp = (intptr_t) ct->byte_map_base;
  if (is_simm32(disp)) {
    Address cardtable(noreg, obj, Address::times_1, disp);
    movb(cardtable, 0);
  } else {
    // By doing it as an ExternalAddress disp could be converted to a rip-relative
    // displacement and done in a single instruction given favorable mapping and
    // a smarter version of as_Address. Worst case it is two instructions which
    // is no worse off then loading disp into a register and doing as a simple
    // Address() as above.
    // We can't do as ExternalAddress as the only style since if disp == 0 we'll
    // assert since NULL isn't acceptable in a reloci (see 6644928). In any case
    // in some cases we'll get a single instruction version.

    ExternalAddress cardtable((address)disp);
    Address index(noreg, obj, Address::times_1);
    movb(as_Address(ArrayAddress(cardtable, index)), 0);
  }
}
6878 | |
6879 void MacroAssembler::subptr(Register dst, int32_t imm32) { | |
6880 LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32)); | |
6881 } | |
6882 | |
6883 void MacroAssembler::subptr(Register dst, Register src) { | |
6884 LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); | |
6885 } | |
6886 | |
6887 void MacroAssembler::test32(Register src1, AddressLiteral src2) { | |
6888 // src2 must be rval | |
6889 | |
6890 if (reachable(src2)) { | |
6891 testl(src1, as_Address(src2)); | |
6892 } else { | |
6893 lea(rscratch1, src2); | |
6894 testl(src1, Address(rscratch1, 0)); | |
6895 } | |
6896 } | |
6897 | |
6898 // C++ bool manipulation | |
0 | 6899 void MacroAssembler::testbool(Register dst) { |
6900 if(sizeof(bool) == 1) | |
304 | 6901 testb(dst, 0xff); |
0 | 6902 else if(sizeof(bool) == 2) { |
6903 // testw implementation needed for two byte bools | |
6904 ShouldNotReachHere(); | |
6905 } else if(sizeof(bool) == 4) | |
6906 testl(dst, dst); | |
6907 else | |
6908 // unsupported | |
6909 ShouldNotReachHere(); | |
6910 } | |
6911 | |
304 | 6912 void MacroAssembler::testptr(Register dst, Register src) { |
6913 LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src)); | |
6914 } | |
6915 | |
// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
// Bump-pointer allocation from the current thread's TLAB: on success 'obj'
// holds the new object start and tlab_top is advanced; on failure control
// transfers to 'slow_case'. Size is either the constant 'con_size_in_bytes'
// or the register 'var_size_in_bytes' (when != noreg).
void MacroAssembler::tlab_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Register t2,
                                   Label& slow_case) {
  assert_different_registers(obj, t1, t2);
  assert_different_registers(obj, var_size_in_bytes, t1);
  Register end = t2;
  Register thread = NOT_LP64(t1) LP64_ONLY(r15_thread);

  verify_tlab();

  NOT_LP64(get_thread(thread));

  movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    lea(end, Address(obj, con_size_in_bytes));
  } else {
    lea(end, Address(obj, var_size_in_bytes, Address::times_1));
  }
  // Bail out to the slow path if the new top would pass tlab_end.
  cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
  jcc(Assembler::above, slow_case);

  // update the tlab top pointer
  movptr(Address(thread, JavaThread::tlab_top_offset()), end);

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    subptr(var_size_in_bytes, obj);
  }
  verify_tlab();
}
6950 | |
// Preserves rbx, and rdx.
// Refills the current thread's TLAB from eden, or decides to keep the old
// TLAB and allocate directly in eden ('try_eden') when the remaining free
// space exceeds the refill-waste limit. A discarded TLAB's leftover space
// is filled with a dummy int-array so heap walkers see a parsable heap.
// Jumps to 'retry' after a successful refill, or 'slow_case' when inline
// contiguous allocation is unavailable or eden allocation fails.
void MacroAssembler::tlab_refill(Label& retry,
                                 Label& try_eden,
                                 Label& slow_case) {
  Register top = rax;
  Register t1  = rcx;
  Register t2  = rsi;
  Register thread_reg = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
  Label do_refill, discard_tlab;

  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
    // No allocation in the shared eden.
    jmp(slow_case);
  }

  NOT_LP64(get_thread(thread_reg));

  movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
  movptr(t1,  Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));

  // calculate amount of free space
  subptr(t1, top);
  shrptr(t1, LogHeapWordSize);

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
  jcc(Assembler::lessEqual, discard_tlab);

  // Retain
  // %%% yuck as movptr...
  movptr(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
  addptr(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2);
  if (TLABStats) {
    // increment number of slow_allocations
    addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1);
  }
  jmp(try_eden);

  bind(discard_tlab);
  if (TLABStats) {
    // increment number of refills
    addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1);
    // accumulate wastage -- t1 is amount free in tlab
    addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1);
  }

  // if tlab is currently allocated (top or end != null) then
  // fill [top, end + alignment_reserve) with array object
  testptr (top, top);
  jcc(Assembler::zero, do_refill);

  // set up the mark word
  movptr(Address(top, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
  // set the length to the remaining space
  subptr(t1, typeArrayOopDesc::header_size(T_INT));
  addptr(t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
  shlptr(t1, log2_intptr(HeapWordSize/sizeof(jint)));
  movptr(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
  // set klass to intArrayKlass
  // dubious reloc why not an oop reloc?
  movptr(t1, ExternalAddress((address) Universe::intArrayKlassObj_addr()));
  // store klass last.  concurrent gcs assumes klass length is valid if
  // klass field is not null.
  store_klass(top, t1);

  // refill the tlab with an eden allocation
  bind(do_refill);
  movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
  shlptr(t1, LogHeapWordSize);
  // add object_size ??
  eden_allocate(top, t1, 0, t2, slow_case);

  // Check that t1 was preserved in eden_allocate.
#ifdef ASSERT
  if (UseTLAB) {
    Label ok;
    Register tsize = rsi;
    assert_different_registers(tsize, thread_reg, t1);
    push(tsize);
    movptr(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
    shlptr(tsize, LogHeapWordSize);
    cmpptr(t1, tsize);
    jcc(Assembler::equal, ok);
    stop("assert(t1 != tlab size)");
    should_not_reach_here();

    bind(ok);
    pop(tsize);
  }
#endif
  // Install the new TLAB bounds: [top, top + size - alignment_reserve).
  movptr(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
  movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
  addptr(top, t1);
  subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
  movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
  verify_tlab();
  jmp(retry);
}
7051 | |
7052 static const double pi_4 = 0.7853981633974483; | |
7053 | |
7054 void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) { | |
7055 // A hand-coded argument reduction for values in fabs(pi/4, pi/2) | |
7056 // was attempted in this code; unfortunately it appears that the | |
7057 // switch to 80-bit precision and back causes this to be | |
7058 // unprofitable compared with simply performing a runtime call if | |
7059 // the argument is out of the (-pi/4, pi/4) range. | |
7060 | |
7061 Register tmp = noreg; | |
7062 if (!VM_Version::supports_cmov()) { | |
7063 // fcmp needs a temporary so preserve rbx, | |
7064 tmp = rbx; | |
7065 push(tmp); | |
7066 } | |
7067 | |
7068 Label slow_case, done; | |
7069 | |
520
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7070 ExternalAddress pi4_adr = (address)&pi_4; |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7071 if (reachable(pi4_adr)) { |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7072 // x ?<= pi/4 |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7073 fld_d(pi4_adr); |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7074 fld_s(1); // Stack: X PI/4 X |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7075 fabs(); // Stack: |X| PI/4 X |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7076 fcmp(tmp); |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7077 jcc(Assembler::above, slow_case); |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7078 |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7079 // fastest case: -pi/4 <= x <= pi/4 |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7080 switch(trig) { |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7081 case 's': |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7082 fsin(); |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7083 break; |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7084 case 'c': |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7085 fcos(); |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7086 break; |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7087 case 't': |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7088 ftan(); |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7089 break; |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7090 default: |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7091 assert(false, "bad intrinsic"); |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7092 break; |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7093 } |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7094 jmp(done); |
52a431267315
6791168: Fix invalid code in bytecodeInterpreter that can cause gcc ICE
coleenp
parents:
512
diff
changeset
|
7095 } |
304 | 7096 |
7097 // slow case: runtime call | |
7098 bind(slow_case); | |
7099 // Preserve registers across runtime call | |
7100 pusha(); | |
7101 int incoming_argument_and_return_value_offset = -1; | |
7102 if (num_fpu_regs_in_use > 1) { | |
7103 // Must preserve all other FPU regs (could alternatively convert | |
7104 // SharedRuntime::dsin and dcos into assembly routines known not to trash | |
7105 // FPU state, but can not trust C compiler) | |
7106 NEEDS_CLEANUP; | |
7107 // NOTE that in this case we also push the incoming argument to | |
7108 // the stack and restore it later; we also use this stack slot to | |
7109 // hold the return value from dsin or dcos. | |
7110 for (int i = 0; i < num_fpu_regs_in_use; i++) { | |
7111 subptr(rsp, sizeof(jdouble)); | |
7112 fstp_d(Address(rsp, 0)); | |
7113 } | |
7114 incoming_argument_and_return_value_offset = sizeof(jdouble)*(num_fpu_regs_in_use-1); | |
7115 fld_d(Address(rsp, incoming_argument_and_return_value_offset)); | |
7116 } | |
7117 subptr(rsp, sizeof(jdouble)); | |
7118 fstp_d(Address(rsp, 0)); | |
7119 #ifdef _LP64 | |
7120 movdbl(xmm0, Address(rsp, 0)); | |
7121 #endif // _LP64 | |
7122 | |
7123 // NOTE: we must not use call_VM_leaf here because that requires a | |
7124 // complete interpreter frame in debug mode -- same bug as 4387334 | |
7125 // MacroAssembler::call_VM_leaf_base is perfectly safe and will | |
7126 // do proper 64bit abi | |
7127 | |
7128 NEEDS_CLEANUP; | |
7129 // Need to add stack banging before this runtime call if it needs to | |
7130 // be taken; however, there is no generic stack banging routine at | |
7131 // the MacroAssembler level | |
7132 switch(trig) { | |
7133 case 's': | |
7134 { | |
7135 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), 0); | |
7136 } | |
7137 break; | |
7138 case 'c': | |
7139 { | |
7140 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), 0); | |
7141 } | |
7142 break; | |
7143 case 't': | |
7144 { | |
7145 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), 0); | |
7146 } | |
7147 break; | |
7148 default: | |
7149 assert(false, "bad intrinsic"); | |
7150 break; | |
7151 } | |
7152 #ifdef _LP64 | |
7153 movsd(Address(rsp, 0), xmm0); | |
7154 fld_d(Address(rsp, 0)); | |
7155 #endif // _LP64 | |
7156 addptr(rsp, sizeof(jdouble)); | |
7157 if (num_fpu_regs_in_use > 1) { | |
7158 // Must save return value to stack and then restore entire FPU stack | |
7159 fstp_d(Address(rsp, incoming_argument_and_return_value_offset)); | |
7160 for (int i = 0; i < num_fpu_regs_in_use; i++) { | |
7161 fld_d(Address(rsp, 0)); | |
7162 addptr(rsp, sizeof(jdouble)); | |
7163 } | |
7164 } | |
7165 popa(); | |
7166 | |
7167 // Come here with result in F-TOS | |
7168 bind(done); | |
7169 | |
7170 if (tmp != noreg) { | |
7171 pop(tmp); | |
7172 } | |
7173 } | |
7174 | |
7175 | |
623
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7176 // Look up the method for a megamorphic invokeinterface call. |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7177 // The target method is determined by <intf_klass, itable_index>. |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7178 // The receiver klass is in recv_klass. |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7179 // On success, the result will be in method_result, and execution falls through. |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7180 // On failure, execution transfers to the given label. |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7181 void MacroAssembler::lookup_interface_method(Register recv_klass, |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7182 Register intf_klass, |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7183 RegisterConstant itable_index, |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7184 Register method_result, |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7185 Register scan_temp, |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7186 Label& L_no_such_interface) { |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7187 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7188 assert(itable_index.is_constant() || itable_index.as_register() == method_result, |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7189 "caller must use same register for non-constant itable index as for method"); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7190 |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7191 // Compute start of first itableOffsetEntry (which is at the end of the vtable) |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7192 int vtable_base = instanceKlass::vtable_start_offset() * wordSize; |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7193 int itentry_off = itableMethodEntry::method_offset_in_bytes(); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7194 int scan_step = itableOffsetEntry::size() * wordSize; |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7195 int vte_size = vtableEntry::size() * wordSize; |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7196 Address::ScaleFactor times_vte_scale = Address::times_ptr; |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7197 assert(vte_size == wordSize, "else adjust times_vte_scale"); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7198 |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7199 movl(scan_temp, Address(recv_klass, instanceKlass::vtable_length_offset() * wordSize)); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7200 |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7201 // %%% Could store the aligned, prescaled offset in the klassoop. |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7202 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base)); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7203 if (HeapWordsPerLong > 1) { |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7204 // Round up to align_object_offset boundary |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7205 // see code for instanceKlass::start_of_itable! |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7206 round_to(scan_temp, BytesPerLong); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7207 } |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7208 |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7209 // Adjust recv_klass by scaled itable_index, so we can free itable_index. |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7210 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7211 lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off)); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7212 |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7213 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7214 // if (scan->interface() == intf) { |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7215 // result = (klass + scan->offset() + itable_index); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7216 // } |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7217 // } |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7218 Label search, found_method; |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7219 |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7220 for (int peel = 1; peel >= 0; peel--) { |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7221 movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes())); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7222 cmpptr(intf_klass, method_result); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7223 |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7224 if (peel) { |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7225 jccb(Assembler::equal, found_method); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7226 } else { |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7227 jccb(Assembler::notEqual, search); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7228 // (invert the test to fall through to found_method...) |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7229 } |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7230 |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7231 if (!peel) break; |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7232 |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7233 bind(search); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7234 |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7235 // Check that the previous entry is non-null. A null entry means that |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7236 // the receiver class doesn't implement the interface, and wasn't the |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7237 // same as when the caller was compiled. |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7238 testptr(method_result, method_result); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7239 jcc(Assembler::zero, L_no_such_interface); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7240 addptr(scan_temp, scan_step); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7241 } |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7242 |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7243 bind(found_method); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7244 |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7245 // Got a hit. |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7246 movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes())); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7247 movptr(method_result, Address(recv_klass, scan_temp, Address::times_1)); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7248 } |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7249 |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
7250 |
304 | 7251 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) { |
7252 ucomisd(dst, as_Address(src)); | |
7253 } | |
7254 | |
7255 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) { | |
7256 ucomiss(dst, as_Address(src)); | |
7257 } | |
7258 | |
7259 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) { | |
7260 if (reachable(src)) { | |
7261 xorpd(dst, as_Address(src)); | |
7262 } else { | |
7263 lea(rscratch1, src); | |
7264 xorpd(dst, Address(rscratch1, 0)); | |
7265 } | |
7266 } | |
7267 | |
7268 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) { | |
7269 if (reachable(src)) { | |
7270 xorps(dst, as_Address(src)); | |
7271 } else { | |
7272 lea(rscratch1, src); | |
7273 xorps(dst, Address(rscratch1, 0)); | |
7274 } | |
7275 } | |
7276 | |
0 | 7277 void MacroAssembler::verify_oop(Register reg, const char* s) { |
7278 if (!VerifyOops) return; | |
304 | 7279 |
0 | 7280 // Pass register number to verify_oop_subroutine |
7281 char* b = new char[strlen(s) + 50]; | |
7282 sprintf(b, "verify_oop: %s: %s", reg->name(), s); | |
304 | 7283 push(rax); // save rax, |
7284 push(reg); // pass register argument | |
0 | 7285 ExternalAddress buffer((address) b); |
304 | 7286 // avoid using pushptr, as it modifies scratch registers |
7287 // and our contract is not to modify anything | |
7288 movptr(rax, buffer.addr()); | |
7289 push(rax); | |
0 | 7290 // call indirectly to solve generation ordering problem |
7291 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); | |
7292 call(rax); | |
7293 } | |
7294 | |
7295 | |
622
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7296 RegisterConstant MacroAssembler::delayed_value(intptr_t* delayed_value_addr, |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7297 Register tmp, |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7298 int offset) { |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7299 intptr_t value = *delayed_value_addr; |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7300 if (value != 0) |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7301 return RegisterConstant(value + offset); |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7302 |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7303 // load indirectly to solve generation ordering problem |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7304 movptr(tmp, ExternalAddress((address) delayed_value_addr)); |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7305 |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7306 #ifdef ASSERT |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7307 Label L; |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7308 testl(tmp, tmp); |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7309 jccb(Assembler::notZero, L); |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7310 hlt(); |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7311 bind(L); |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7312 #endif |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7313 |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7314 if (offset != 0) |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7315 addptr(tmp, offset); |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7316 |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7317 return RegisterConstant(tmp); |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7318 } |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7319 |
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
606
diff
changeset
|
7320 |
0 | 7321 void MacroAssembler::verify_oop_addr(Address addr, const char* s) { |
7322 if (!VerifyOops) return; | |
304 | 7323 |
0 | 7324 // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord); |
7325 // Pass register number to verify_oop_subroutine | |
7326 char* b = new char[strlen(s) + 50]; | |
7327 sprintf(b, "verify_oop_addr: %s", s); | |
304 | 7328 |
7329 push(rax); // save rax, | |
0 | 7330 // addr may contain rsp so we will have to adjust it based on the push |
7331 // we just did | |
304 | 7332 // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which |
7333 // stores rax into addr which is backwards of what was intended. | |
0 | 7334 if (addr.uses(rsp)) { |
304 | 7335 lea(rax, addr); |
7336 pushptr(Address(rax, BytesPerWord)); | |
0 | 7337 } else { |
304 | 7338 pushptr(addr); |
7339 } | |
7340 | |
0 | 7341 ExternalAddress buffer((address) b); |
7342 // pass msg argument | |
304 | 7343 // avoid using pushptr, as it modifies scratch registers |
7344 // and our contract is not to modify anything | |
7345 movptr(rax, buffer.addr()); | |
7346 push(rax); | |
7347 | |
0 | 7348 // call indirectly to solve generation ordering problem |
7349 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); | |
7350 call(rax); | |
7351 // Caller pops the arguments and restores rax, from the stack | |
7352 } | |
7353 | |
304 | 7354 void MacroAssembler::verify_tlab() { |
7355 #ifdef ASSERT | |
7356 if (UseTLAB && VerifyOops) { | |
7357 Label next, ok; | |
7358 Register t1 = rsi; | |
7359 Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread); | |
7360 | |
7361 push(t1); | |
7362 NOT_LP64(push(thread_reg)); | |
7363 NOT_LP64(get_thread(thread_reg)); | |
7364 | |
7365 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); | |
7366 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset()))); | |
7367 jcc(Assembler::aboveEqual, next); | |
7368 stop("assert(top >= start)"); | |
7369 should_not_reach_here(); | |
7370 | |
7371 bind(next); | |
7372 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset()))); | |
7373 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); | |
7374 jcc(Assembler::aboveEqual, ok); | |
7375 stop("assert(top <= end)"); | |
7376 should_not_reach_here(); | |
7377 | |
7378 bind(ok); | |
7379 NOT_LP64(pop(thread_reg)); | |
7380 pop(t1); | |
7381 } | |
7382 #endif | |
7383 } | |
0 | 7384 |
// Decoded view of the x87 FPU control word (only the low 16 bits are
// significant). Used by the FPU-state debug printers below.
class ControlWord {
 public:
  int32_t _value;

  // Control fields.
  int  rounding_control()  const { return (_value >> 10) & 3; }
  int  precision_control() const { return (_value >>  8) & 3; }
  // Exception masks (bit set = exception masked).
  bool precision()    const { return ((_value >> 5) & 1) != 0; }
  bool underflow()    const { return ((_value >> 4) & 1) != 0; }
  bool overflow()     const { return ((_value >> 3) & 1) != 0; }
  bool zero_divide()  const { return ((_value >> 2) & 1) != 0; }
  bool denormalized() const { return ((_value >> 1) & 1) != 0; }
  bool invalid()      const { return ((_value >> 0) & 1) != 0; }

  void print() const {
    // rounding control (2-bit field, all four cases covered)
    const char* rc = NULL;
    switch (rounding_control()) {
      case 0: rc = "round near"; break;
      case 1: rc = "round down"; break;
      case 2: rc = "round up "; break;
      case 3: rc = "chop "; break;
    }
    // precision control (2-bit field, all four cases covered)
    const char* pc = NULL;
    switch (precision_control()) {
      case 0: pc = "24 bits "; break;
      case 1: pc = "reserved"; break;
      case 2: pc = "53 bits "; break;
      case 3: pc = "64 bits "; break;
    }
    // mask flags: upper-case letter when the mask bit is set
    char f[9];
    f[0] = ' ';
    f[1] = ' ';
    f[2] = (precision   ()) ? 'P' : 'p';
    f[3] = (underflow   ()) ? 'U' : 'u';
    f[4] = (overflow    ()) ? 'O' : 'o';
    f[5] = (zero_divide ()) ? 'Z' : 'z';
    f[6] = (denormalized()) ? 'D' : 'd';
    f[7] = (invalid     ()) ? 'I' : 'i';
    f[8] = '\x0';
    printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
  }
};
7431 | |
// Decoded view of the x87 FPU status word (only the low 16 bits are
// significant).
class StatusWord {
 public:
  int32_t _value;

  bool busy()         const { return ((_value >> 15) & 1) != 0; }
  // Condition codes.
  bool C3()           const { return ((_value >> 14) & 1) != 0; }
  bool C2()           const { return ((_value >> 10) & 1) != 0; }
  bool C1()           const { return ((_value >>  9) & 1) != 0; }
  bool C0()           const { return ((_value >>  8) & 1) != 0; }
  // Index of the top-of-stack register (bits 11-13).
  int  top()          const { return (_value >> 11) & 7; }
  // Exception/status flags.
  bool error_status() const { return ((_value >> 7) & 1) != 0; }
  bool stack_fault()  const { return ((_value >> 6) & 1) != 0; }
  bool precision()    const { return ((_value >> 5) & 1) != 0; }
  bool underflow()    const { return ((_value >> 4) & 1) != 0; }
  bool overflow()     const { return ((_value >> 3) & 1) != 0; }
  bool zero_divide()  const { return ((_value >> 2) & 1) != 0; }
  bool denormalized() const { return ((_value >> 1) & 1) != 0; }
  bool invalid()      const { return ((_value >> 0) & 1) != 0; }

  void print() const {
    // condition codes: digit when set, '-' when clear
    char c[5];
    c[0] = (C3()) ? '3' : '-';
    c[1] = (C2()) ? '2' : '-';
    c[2] = (C1()) ? '1' : '-';
    c[3] = (C0()) ? '0' : '-';
    c[4] = '\x0';
    // exception flags: letter when raised, '-' when clear
    char f[9];
    f[0] = (error_status()) ? 'E' : '-';
    f[1] = (stack_fault ()) ? 'S' : '-';
    f[2] = (precision   ()) ? 'P' : '-';
    f[3] = (underflow   ()) ? 'U' : '-';
    f[4] = (overflow    ()) ? 'O' : '-';
    f[5] = (zero_divide ()) ? 'Z' : '-';
    f[6] = (denormalized()) ? 'D' : '-';
    f[7] = (invalid     ()) ? 'I' : '-';
    f[8] = '\x0';
    printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top());
  }
};
7475 | |
// Decoded view of the x87 tag word: two tag bits per physical register.
class TagWord {
 public:
  int32_t _value;

  // Tag for physical register i (0 = valid, 1 = zero, 2 = special, 3 = empty).
  int tag_at(int i) const { return (_value >> (i * 2)) & 3; }

  void print() const {
    printf("%04x", _value & 0xFFFF);
  }
};
7487 | |
// Raw image of an 80-bit x87 register: 64-bit mantissa split across
// _m1 (high) and _m0 (low), plus the 16-bit sign/exponent field _ex.
// Field order matters: this struct is overlaid on saved FPU state.
class FPU_Register {
 public:
  int32_t _m0;   // mantissa, low 32 bits
  int32_t _m1;   // mantissa, high 32 bits
  int16_t _ex;   // sign bit + 15-bit exponent

  // True for the x87 "indefinite" QNaN pattern produced by invalid ops.
  bool is_indefinite() const {
    return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
  }

  void print() const {
    char sign = (_ex < 0) ? '-' : '+';
    const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " ";
    printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind);
  }
};
7505 | |
7506 class FPU_State { | |
7507 public: | |
7508 enum { | |
7509 register_size = 10, | |
7510 number_of_registers = 8, | |
7511 register_mask = 7 | |
7512 }; | |
7513 | |
7514 ControlWord _control_word; | |
7515 StatusWord _status_word; | |
7516 TagWord _tag_word; | |
7517 int32_t _error_offset; | |
7518 int32_t _error_selector; | |
7519 int32_t _data_offset; | |
7520 int32_t _data_selector; | |
7521 int8_t _register[register_size * number_of_registers]; | |
7522 | |
7523 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); } | |
7524 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; } | |
7525 | |
7526 const char* tag_as_string(int tag) const { | |
7527 switch (tag) { | |
7528 case 0: return "valid"; | |
7529 case 1: return "zero"; | |
7530 case 2: return "special"; | |
7531 case 3: return "empty"; | |
7532 } | |
7533 ShouldNotReachHere() | |
7534 return NULL; | |
7535 } | |
7536 | |
7537 void print() const { | |
7538 // print computation registers | |
7539 { int t = _status_word.top(); | |
7540 for (int i = 0; i < number_of_registers; i++) { | |
7541 int j = (i - t) & register_mask; | |
7542 printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j); | |
7543 st(j)->print(); | |
7544 printf(" %s\n", tag_as_string(_tag_word.tag_at(i))); | |
7545 } | |
7546 } | |
7547 printf("\n"); | |
7548 // print control registers | |
7549 printf("ctrl = "); _control_word.print(); printf("\n"); | |
7550 printf("stat = "); _status_word .print(); printf("\n"); | |
7551 printf("tags = "); _tag_word .print(); printf("\n"); | |
7552 } | |
7553 | |
7554 }; | |
7555 | |
// Image of the EFLAGS register with accessors for the individual
// condition bits, used for debug printing.
class Flag_Register {
 public:
  int32_t _value;

  bool overflow()        const { return ((_value >> 11) & 1) != 0; }
  bool direction()       const { return ((_value >> 10) & 1) != 0; }
  bool sign()            const { return ((_value >>  7) & 1) != 0; }
  bool zero()            const { return ((_value >>  6) & 1) != 0; }
  bool auxiliary_carry() const { return ((_value >>  4) & 1) != 0; }
  bool parity()          const { return ((_value >>  2) & 1) != 0; }
  bool carry()           const { return ((_value >>  0) & 1) != 0; }

  // Prints the raw value followed by a 7-character summary string with
  // one letter per set flag ('-' for clear bits).
  void print() const {
    char buf[8];
    buf[0] = overflow()        ? 'O' : '-';
    buf[1] = direction()       ? 'D' : '-';
    buf[2] = sign()            ? 'S' : '-';
    buf[3] = zero()            ? 'Z' : '-';
    buf[4] = auxiliary_carry() ? 'A' : '-';
    buf[5] = parity()          ? 'P' : '-';
    buf[6] = carry()           ? 'C' : '-';
    buf[7] = '\0';
    printf("%08x flags = %s", _value, buf);
  }

};
7584 | |
// A single 32-bit integer-unit register, printed as zero-padded hex
// followed by the signed decimal value.
class IU_Register {
 public:
  int32_t _value;

  void print() const { printf("%08x %11d", _value, _value); }

};
7594 | |
7595 class IU_State { | |
7596 public: | |
7597 Flag_Register _eflags; | |
7598 IU_Register _rdi; | |
7599 IU_Register _rsi; | |
7600 IU_Register _rbp; | |
7601 IU_Register _rsp; | |
7602 IU_Register _rbx; | |
7603 IU_Register _rdx; | |
7604 IU_Register _rcx; | |
7605 IU_Register _rax; | |
7606 | |
7607 void print() const { | |
7608 // computation registers | |
7609 printf("rax, = "); _rax.print(); printf("\n"); | |
7610 printf("rbx, = "); _rbx.print(); printf("\n"); | |
7611 printf("rcx = "); _rcx.print(); printf("\n"); | |
7612 printf("rdx = "); _rdx.print(); printf("\n"); | |
7613 printf("rdi = "); _rdi.print(); printf("\n"); | |
7614 printf("rsi = "); _rsi.print(); printf("\n"); | |
7615 printf("rbp, = "); _rbp.print(); printf("\n"); | |
7616 printf("rsp = "); _rsp.print(); printf("\n"); | |
7617 printf("\n"); | |
7618 // control registers | |
7619 printf("flgs = "); _eflags.print(); printf("\n"); | |
7620 } | |
7621 }; | |
7622 | |
7623 | |
7624 class CPU_State { | |
7625 public: | |
7626 FPU_State _fpu_state; | |
7627 IU_State _iu_state; | |
7628 | |
7629 void print() const { | |
7630 printf("--------------------------------------------------\n"); | |
7631 _iu_state .print(); | |
7632 printf("\n"); | |
7633 _fpu_state.print(); | |
7634 printf("--------------------------------------------------\n"); | |
7635 } | |
7636 | |
7637 }; | |
7638 | |
7639 | |
7640 static void _print_CPU_state(CPU_State* state) { | |
7641 state->print(); | |
7642 }; | |
7643 | |
7644 | |
// Emits code that dumps the complete CPU state (integer + FPU) to stdout
// at runtime: saves all registers, calls _print_CPU_state with a pointer
// to the saved frame, then restores everything. Debugging aid only.
void MacroAssembler::print_CPU_state() {
  push_CPU_state();
  push(rsp);                // pass CPU state
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
  addptr(rsp, wordSize);    // discard argument
  pop_CPU_state();
}
7652 | |
7653 | |
7654 static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) { | |
7655 static int counter = 0; | |
7656 FPU_State* fs = &state->_fpu_state; | |
7657 counter++; | |
7658 // For leaf calls, only verify that the top few elements remain empty. | |
7659 // We only need 1 empty at the top for C2 code. | |
7660 if( stack_depth < 0 ) { | |
7661 if( fs->tag_for_st(7) != 3 ) { | |
7662 printf("FPR7 not empty\n"); | |
7663 state->print(); | |
7664 assert(false, "error"); | |
7665 return false; | |
7666 } | |
7667 return true; // All other stack states do not matter | |
7668 } | |
7669 | |
7670 assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std, | |
7671 "bad FPU control word"); | |
7672 | |
7673 // compute stack depth | |
7674 int i = 0; | |
7675 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++; | |
7676 int d = i; | |
7677 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++; | |
7678 // verify findings | |
7679 if (i != FPU_State::number_of_registers) { | |
7680 // stack not contiguous | |
7681 printf("%s: stack not contiguous at ST%d\n", s, i); | |
7682 state->print(); | |
7683 assert(false, "error"); | |
7684 return false; | |
7685 } | |
7686 // check if computed stack depth corresponds to expected stack depth | |
7687 if (stack_depth < 0) { | |
7688 // expected stack depth is -stack_depth or less | |
7689 if (d > -stack_depth) { | |
7690 // too many elements on the stack | |
7691 printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d); | |
7692 state->print(); | |
7693 assert(false, "error"); | |
7694 return false; | |
7695 } | |
7696 } else { | |
7697 // expected stack depth is stack_depth | |
7698 if (d != stack_depth) { | |
7699 // wrong stack depth | |
7700 printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d); | |
7701 state->print(); | |
7702 assert(false, "error"); | |
7703 return false; | |
7704 } | |
7705 } | |
7706 // everything is cool | |
7707 return true; | |
7708 } | |
7709 | |
7710 | |
// Emits code that verifies the x87 stack discipline at runtime (guarded
// by the VerifyFPU flag): saves all registers, calls _verify_FPU with the
// expected stack depth, a message string, and a pointer to the saved
// state, then breaks into the debugger (int3) if verification failed.
void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
  if (!VerifyFPU) return;
  push_CPU_state();
  push(rsp);                // pass CPU state
  ExternalAddress msg((address) s);
  // pass message string s
  pushptr(msg.addr());
  push(stack_depth);        // pass stack depth
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
  addptr(rsp, 3 * wordSize);   // discard arguments
  // check for error: _verify_FPU returns its bool result in rax
  { Label L;
    testl(rax, rax);
    jcc(Assembler::notZero, L);
    int3();                  // break if error condition
    bind(L);
  }
  pop_CPU_state();
}
7730 | |
// Loads the klass pointer of the object in src into dst. With compressed
// oops on 64-bit, the 32-bit narrow klass ref is loaded and decoded
// (it is never null, hence decode_heap_oop_not_null).
void MacroAssembler::load_klass(Register dst, Register src) {
#ifdef _LP64
  if (UseCompressedOops) {
    movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    decode_heap_oop_not_null(dst);
  } else
#endif
  movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
}
7740 | |
// Loads the prototype header word of src's klass into dst, clobbering dst.
// With compressed oops the narrow klass ref is decoded inline: when the
// shift is nonzero the decode is folded into the scaled-index address off
// r12 (heap base); with zero-base/zero-shift oops the narrow value is the
// address itself.
void MacroAssembler::load_prototype_header(Register dst, Register src) {
#ifdef _LP64
  if (UseCompressedOops) {
    assert (Universe::heap() != NULL, "java heap should be initialized");
    // load the 32-bit narrow klass reference
    movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    if (Universe::narrow_oop_shift() != 0) {
      // decode via base + (narrow << 3) folded into the addressing mode
      assert(Address::times_8 == LogMinObjAlignmentInBytes &&
             Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
      movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
    } else {
      // unscaled, zero-based oops: the narrow value is the full address
      movq(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
    }
  } else
#endif
  {
    movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    movptr(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
  }
}
7760 | |
// Stores the klass pointer in src into the header of the object in dst.
// NOTE: with compressed oops, src is compressed in place and is therefore
// clobbered by this call.
void MacroAssembler::store_klass(Register dst, Register src) {
#ifdef _LP64
  if (UseCompressedOops) {
    encode_heap_oop_not_null(src);
    movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
  } else
#endif
  movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
}
7770 | |
#ifdef _LP64
// Stores src into the 32-bit gap left beside the narrow klass field when
// compressed oops shrink the header (callers appear to pass zero — verify
// at call sites). No-op when compressed oops are off (no gap exists).
void MacroAssembler::store_klass_gap(Register dst, Register src) {
  if (UseCompressedOops) {
    // Store to klass gap in destination
    movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
  }
}
7778 | |
// Loads a (possibly null) heap oop from src into dst, decoding the
// 32-bit narrow form when compressed oops are enabled.
void MacroAssembler::load_heap_oop(Register dst, Address src) {
  if (UseCompressedOops) {
    movl(dst, src);
    decode_heap_oop(dst);
  } else {
    movq(dst, src);
  }
}
7787 | |
// Stores the (possibly null) heap oop in src to dst, encoding it to the
// 32-bit narrow form when compressed oops are enabled.
// NOTE: in the compressed case src is encoded in place (clobbered), and
// src must not be one of the registers forming the dst address.
void MacroAssembler::store_heap_oop(Address dst, Register src) {
  if (UseCompressedOops) {
    assert(!dst.uses(src), "not enough registers");
    encode_heap_oop(src);
    movl(dst, src);
  } else {
    movq(dst, src);
  }
}
7797 | |
// Algorithm must match oop.inline.hpp encode_heap_oop.
// Compresses the (possibly null) oop in r in place: subtract the heap
// base (if any) and shift right by the alignment (if any). Null encodes
// to zero.
void MacroAssembler::encode_heap_oop(Register r) {
  assert (UseCompressedOops, "should be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  if (Universe::narrow_oop_base() == NULL) {
    // zero-based oops: no base to subtract, at most a shift
    verify_oop(r, "broken oop in encode_heap_oop");
    if (Universe::narrow_oop_shift() != 0) {
      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
      shrq(r, LogMinObjAlignmentInBytes);
    }
    return;
  }
#ifdef ASSERT
  if (CheckCompressedOops) {
    // verify r12 still holds the narrow oop base
    Label ok;
    push(rscratch1); // cmpptr trashes rscratch1
    cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr()));
    jcc(Assembler::equal, ok);
    stop("MacroAssembler::encode_heap_oop: heap base corrupted?");
    bind(ok);
    pop(rscratch1);
  }
#endif
  verify_oop(r, "broken oop in encode_heap_oop");
  // map null to the heap base first so the subtraction below yields zero
  testq(r, r);
  cmovq(Assembler::equal, r, r12_heapbase);
  subq(r, r12_heapbase);
  shrq(r, LogMinObjAlignmentInBytes);
}
7827 | |
// Compresses the oop in r in place; r must be non-null, which lets the
// null-check/cmov of encode_heap_oop be omitted.
void MacroAssembler::encode_heap_oop_not_null(Register r) {
  assert (UseCompressedOops, "should be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
#ifdef ASSERT
  if (CheckCompressedOops) {
    // runtime check of the non-null precondition
    Label ok;
    testq(r, r);
    jcc(Assembler::notEqual, ok);
    stop("null oop passed to encode_heap_oop_not_null");
    bind(ok);
  }
#endif
  verify_oop(r, "broken oop in encode_heap_oop_not_null");
  if (Universe::narrow_oop_base() != NULL) {
    subq(r, r12_heapbase);
  }
  if (Universe::narrow_oop_shift() != 0) {
    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    shrq(r, LogMinObjAlignmentInBytes);
  }
}
7849 | |
// Two-register variant: compresses the non-null oop in src into dst,
// leaving src untouched when dst != src.
void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
  assert (UseCompressedOops, "should be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
#ifdef ASSERT
  if (CheckCompressedOops) {
    // runtime check of the non-null precondition
    Label ok;
    testq(src, src);
    jcc(Assembler::notEqual, ok);
    stop("null oop passed to encode_heap_oop_not_null2");
    bind(ok);
  }
#endif
  verify_oop(src, "broken oop in encode_heap_oop_not_null2");
  if (dst != src) {
    movq(dst, src);
  }
  if (Universe::narrow_oop_base() != NULL) {
    subq(dst, r12_heapbase);
  }
  if (Universe::narrow_oop_shift() != 0) {
    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    shrq(dst, LogMinObjAlignmentInBytes);
  }
}
7874 | |
// Decompresses the (possibly null) narrow oop in r in place: shift left
// by the alignment and add the heap base, preserving null as null.
void MacroAssembler::decode_heap_oop(Register r) {
  assert (UseCompressedOops, "should be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  if (Universe::narrow_oop_base() == NULL) {
    // zero-based oops: at most a shift, and null stays null for free
    if (Universe::narrow_oop_shift() != 0) {
      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
      shlq(r, LogMinObjAlignmentInBytes);
    }
    verify_oop(r, "broken oop in decode_heap_oop");
    return;
  }
#ifdef ASSERT
  if (CheckCompressedOops) {
    // verify r12 still holds the narrow oop base
    Label ok;
    push(rscratch1);
    cmpptr(r12_heapbase,
           ExternalAddress((address)Universe::narrow_oop_base_addr()));
    jcc(Assembler::equal, ok);
    stop("MacroAssembler::decode_heap_oop: heap base corrupted?");
    bind(ok);
    pop(rscratch1);
  }
#endif

  Label done;
  // shlq sets ZF from its result: a zero result means the narrow oop was
  // null, so skip adding the heap base and leave r == 0.
  shlq(r, LogMinObjAlignmentInBytes);
  jccb(Assembler::equal, done);
  addq(r, r12_heapbase);
#if 0
  // alternate decoding probably a wash.
  testq(r, r);
  jccb(Assembler::equal, done);
  leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
#endif
  bind(done);
  verify_oop(r, "broken oop in decode_heap_oop");
}
7912 | |
// Decompresses the non-null narrow oop in r in place. Instruction count
// here is part of externally-measured code size, so no extra checks.
void MacroAssembler::decode_heap_oop_not_null(Register r) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  if (Universe::narrow_oop_base() == NULL) {
    if (Universe::narrow_oop_shift() != 0) {
      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
      shlq(r, LogMinObjAlignmentInBytes);
    }
  } else {
    // base + (r << 3) in a single lea; non-null precondition means the
    // null-preserving branch of decode_heap_oop is unnecessary
    assert (Address::times_8 == LogMinObjAlignmentInBytes &&
            Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
    leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
  }
}
7930 | |
// Two-register variant: decompresses the non-null narrow oop in src into
// dst. When the shift is zero (which per the branch below implies no base
// adjustment is emitted — presumably base is also null then; confirm), the
// decode is a plain move.
void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  if (Universe::narrow_oop_shift() != 0) {
    // base + (src << 3) in a single lea
    assert (Address::times_8 == LogMinObjAlignmentInBytes &&
            Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
    leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
  } else if (dst != src) {
    movq(dst, src);
  }
}
7945 | |
// Emits a move of the 32-bit narrow form of obj into dst, recorded with
// an oop relocation so the GC can patch the embedded immediate.
void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  mov_narrow_oop(dst, oop_index, rspec);
}
// Memory-destination variant: emits a store of the 32-bit narrow form of
// obj to dst, recorded with an oop relocation for later patching.
void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  mov_narrow_oop(dst, oop_index, rspec);
}
// Emits a compare of the register dst against the 32-bit narrow form of
// obj, recorded with an oop relocation so the immediate can be patched.
void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  Assembler::cmp_narrow_oop(dst, oop_index, rspec);
}
// Memory-operand variant: emits a compare of the narrow oop stored at dst
// against the 32-bit narrow form of obj, with an oop relocation.
void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  Assembler::cmp_narrow_oop(dst, oop_index, rspec);
}
7981 | |
// Reloads r12 with the current narrow-oop heap base from the VM, for use
// after code that may have clobbered r12. No-op without compressed oops.
void MacroAssembler::reinit_heapbase() {
  if (UseCompressedOops) {
    movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr()));
  }
}
7987 #endif // _LP64 | |
0 | 7988 |
7989 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) { | |
7990 switch (cond) { | |
7991 // Note some conditions are synonyms for others | |
7992 case Assembler::zero: return Assembler::notZero; | |
7993 case Assembler::notZero: return Assembler::zero; | |
7994 case Assembler::less: return Assembler::greaterEqual; | |
7995 case Assembler::lessEqual: return Assembler::greater; | |
7996 case Assembler::greater: return Assembler::lessEqual; | |
7997 case Assembler::greaterEqual: return Assembler::less; | |
7998 case Assembler::below: return Assembler::aboveEqual; | |
7999 case Assembler::belowEqual: return Assembler::above; | |
8000 case Assembler::above: return Assembler::belowEqual; | |
8001 case Assembler::aboveEqual: return Assembler::below; | |
8002 case Assembler::overflow: return Assembler::noOverflow; | |
8003 case Assembler::noOverflow: return Assembler::overflow; | |
8004 case Assembler::negative: return Assembler::positive; | |
8005 case Assembler::positive: return Assembler::negative; | |
8006 case Assembler::parity: return Assembler::noParity; | |
8007 case Assembler::noParity: return Assembler::parity; | |
8008 } | |
8009 ShouldNotReachHere(); return Assembler::overflow; | |
8010 } | |
8011 | |
// RAII guard: emits a compare of the byte flag at *flag_addr against
// value and a conditional jump that skips all code emitted between
// construction and destruction when they are equal.
SkipIfEqual::SkipIfEqual(
    MacroAssembler* masm, const bool* flag_addr, bool value) {
  _masm = masm;
  _masm->cmp8(ExternalAddress((address)flag_addr), value);
  _masm->jcc(Assembler::equal, _label);
}
8018 | |
// Binds the skip label: the guarded region emitted since the constructor
// ends here and is jumped over when the flag equaled the given value.
SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}