comparison src/cpu/x86/vm/assembler_x86.cpp @ 7212:291ffc492eb6

Merge with http://hg.openjdk.java.net/hsx/hsx25/hotspot/
author Doug Simon <doug.simon@oracle.com>
date Fri, 14 Dec 2012 14:35:13 +0100
parents e522a00b91aa cd3d6a6b95d9
children 989155e2d07a
comparing 7163:2ed8d74e5984 with 7212:291ffc492eb6
@@ -21 +21 @@
  * questions.
  *
  */

 #include "precompiled.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/assembler.hpp"
+#include "asm/assembler.inline.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "interpreter/interpreter.hpp"
 #include "memory/cardTableModRefBS.hpp"
 #include "memory/resourceArea.hpp"
 #include "prims/methodHandles.hpp"
@@ -1152 +1153 @@

 void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
   assert(entry != NULL, "call most probably wrong");
   InstructionMark im(this);
   emit_byte(0xE8);
-  intptr_t disp = entry - (_code_pos + sizeof(int32_t));
+  intptr_t disp = entry - (pc() + sizeof(int32_t));
   assert(is_simm32(disp), "must be 32bit offset (call2)");
   // Technically, should use call32_operand, but this format is
   // implied by the fact that we're emitting a call instruction.

   int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
   emit_data((int) disp, rspec, operand);
 }

 void Assembler::cdql() {
   emit_byte(0x99);
+}
+
+void Assembler::cld() {
+  emit_byte(0xfc);
 }

 void Assembler::cmovl(Condition cc, Register dst, Register src) {
   NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
   int encode = prefix_and_encode(dst->encoding(), src->encoding());
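A minimal standalone sketch (demo code, not HotSpot's) of the displacement arithmetic call_literal performs above: CALL rel32 (opcode 0xE8) encodes its target relative to the end of the 5-byte instruction, which is why the code subtracts pc() + sizeof(int32_t) once the opcode byte has been emitted.

#include <cassert>
#include <cstdint>

// Assumed helper for illustration: opcode_pos points at the 0xE8 byte.
static int32_t rel32_for_call(const uint8_t* opcode_pos, const uint8_t* target) {
  const uint8_t* next_insn = opcode_pos + 1 + sizeof(int32_t); // end of CALL
  intptr_t disp = target - next_insn;
  assert(disp == (int32_t)disp && "must be a 32-bit offset");
  return (int32_t)disp;
}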
@@ -1256 +1261 @@
 }

 void Assembler::comiss(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse(), ""));
   emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE);
+}
+
+void Assembler::cpuid() {
+  emit_byte(0x0F);
+  emit_byte(0xA2);
 }

 void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   emit_simd_arith_nonds(0xE6, dst, src, VEX_SIMD_F3);
@@ -1415 +1425 @@
   address dst = target(L);
   assert(dst != NULL, "jcc most probably wrong");

   const int short_size = 2;
   const int long_size = 6;
-  intptr_t offs = (intptr_t)dst - (intptr_t)_code_pos;
+  intptr_t offs = (intptr_t)dst - (intptr_t)pc();
   if (maybe_short && is8bit(offs - short_size)) {
     // 0111 tttn #8-bit disp
     emit_byte(0x70 | cc);
     emit_byte((offs - short_size) & 0xFF);
   } else {
@@ -1445 +1455 @@
 void Assembler::jccb(Condition cc, Label& L) {
   if (L.is_bound()) {
     const int short_size = 2;
     address entry = target(L);
 #ifdef ASSERT
-    intptr_t dist = (intptr_t)entry - ((intptr_t)_code_pos + short_size);
+    intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
     intptr_t delta = short_branch_delta();
     if (delta != 0) {
       dist += (dist < 0 ? (-delta) : delta);
     }
     assert(is8bit(dist), "Displacement too large for a short jmp");
 #endif
-    intptr_t offs = (intptr_t)entry - (intptr_t)_code_pos;
+    intptr_t offs = (intptr_t)entry - (intptr_t)pc();
     // 0111 tttn #8-bit disp
     emit_byte(0x70 | cc);
     emit_byte((offs - short_size) & 0xFF);
   } else {
     InstructionMark im(this);
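A hedged sketch of the encoding choice jcc/jccb make above: the short form of a conditional jump is two bytes (0x70|cc, disp8), the near form six bytes (0x0F, 0x80|cc, disp32), and each displacement is measured from the end of its own instruction, hence offs - short_size versus offs - long_size. Standalone demo, not the HotSpot emitter:

#include <cstdint>
#include <vector>

// offs is target minus the start of the Jcc being emitted.
static void emit_jcc_demo(std::vector<uint8_t>& code, int cc, intptr_t offs) {
  const int short_size = 2, long_size = 6;
  intptr_t d8 = offs - short_size;
  if (d8 == (int8_t)d8) {                        // fits a signed byte: short form
    code.push_back(0x70 | cc);                   // 0111 tttn
    code.push_back((uint8_t)(d8 & 0xFF));        // disp8
  } else {
    int32_t d32 = (int32_t)(offs - long_size);   // near form
    code.push_back(0x0F);
    code.push_back(0x80 | cc);                   // 0000 1111 1000 tttn
    for (int i = 0; i < 4; i++) code.push_back((uint8_t)(d32 >> (8 * i)));
  }
}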
@@ -1478 +1488 @@
   address entry = target(L);
   assert(entry != NULL, "jmp most probably wrong");
   InstructionMark im(this);
   const int short_size = 2;
   const int long_size = 5;
-  intptr_t offs = entry - _code_pos;
+  intptr_t offs = entry - pc();
   if (maybe_short && is8bit(offs - short_size)) {
     emit_byte(0xEB);
     emit_byte((offs - short_size) & 0xFF);
   } else {
     emit_byte(0xE9);
@@ -1508 +1518 @@

 void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
   InstructionMark im(this);
   emit_byte(0xE9);
   assert(dest != NULL, "must have a target");
-  intptr_t disp = dest - (_code_pos + sizeof(int32_t));
+  intptr_t disp = dest - (pc() + sizeof(int32_t));
   assert(is_simm32(disp), "must be 32bit offset (jmp)");
   emit_data(disp, rspec.reloc(), call32_operand);
 }

 void Assembler::jmpb(Label& L) {
   if (L.is_bound()) {
     const int short_size = 2;
     address entry = target(L);
     assert(entry != NULL, "jmp most probably wrong");
 #ifdef ASSERT
-    intptr_t dist = (intptr_t)entry - ((intptr_t)_code_pos + short_size);
+    intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
     intptr_t delta = short_branch_delta();
     if (delta != 0) {
       dist += (dist < 0 ? (-delta) : delta);
     }
     assert(is8bit(dist), "Displacement too large for a short jmp");
 #endif
-    intptr_t offs = entry - _code_pos;
+    intptr_t offs = entry - pc();
     emit_byte(0xEB);
     emit_byte((offs - short_size) & 0xFF);
   } else {
     InstructionMark im(this);
     L.add_patch_at(code(), locator());
@@ -1554 +1564 @@
   emit_byte(0x67); // addr32
   prefix(src, dst);
 #endif // LP64
   emit_byte(0x8D);
   emit_operand(dst, src);
+}
+
+void Assembler::lfence() {
+  emit_byte(0x0F);
+  emit_byte(0xAE);
+  emit_byte(0xE8);
 }

 void Assembler::lock() {
   emit_byte(0xF0);
 }
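The three bytes the new lfence() emits above are the fixed encoding of LFENCE (0F AE /5, with E8 as the modrm byte). For comparison, a sketch of reaching the same fence from C++, assuming an SSE2-capable toolchain:

#include <emmintrin.h>

static void load_fence_demo() {
  _mm_lfence();  // compiles to the same 0F AE E8 byte sequence
}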
@@ -2669 +2685 @@
 void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse(), ""));
   emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
 }

+void Assembler::std() {
+  emit_byte(0xfd);
+}
+
 void Assembler::sqrtss(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse(), ""));
   emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
 }

@@ -2814 +2834 @@
   int encode = prefix_and_encode(dst->encoding(), src->encoding());
   emit_byte(0x87);
   emit_byte(0xc0 | encode);
 }

+void Assembler::xgetbv() {
+  emit_byte(0x0F);
+  emit_byte(0x01);
+  emit_byte(0xD0);
+}
+
 void Assembler::xorl(Register dst, int32_t imm32) {
   prefix(dst);
   emit_arith(0x81, 0xF0, dst, imm32);
 }

@@ -4359 +4385 @@
   disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
   if (!is_simm32(disp)) return false;
   disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
   if (!is_simm32(disp)) return false;

-  disp = (int64_t)adr._target - ((int64_t)_code_pos + sizeof(int));
+  disp = (int64_t)adr._target - ((int64_t)pc() + sizeof(int));

   // Because rip relative is a disp + address_of_next_instruction and we
   // don't know the value of address_of_next_instruction we apply a fudge factor
   // to make sure we will be ok no matter the size of the instruction we get placed into.
   // We don't have to fudge the checks above here because they are already worst case.
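A standalone sketch of the worst-case idea used above: if the target is simm32-reachable from both ends of the code cache, it is reachable from any instruction placed inside it, so those two checks need no per-instruction fudge. Names here (cache_lo/cache_hi) stand in for CodeCache::low_bound()/high_bound():

#include <cstdint>

static bool fits_simm32(int64_t v) { return v == (int32_t)v; }

static bool reachable_from_anywhere(int64_t target, int64_t cache_lo, int64_t cache_hi) {
  // disp is measured past the 4-byte displacement field itself.
  return fits_simm32(target - (cache_lo + (int64_t)sizeof(int32_t))) &&
         fits_simm32(target - (cache_hi + (int64_t)sizeof(int32_t)));
}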
@@ -4390 +4416 @@

 void Assembler::emit_data64(jlong data,
                             relocInfo::relocType rtype,
                             int format) {
   if (rtype == relocInfo::none) {
-    emit_long64(data);
+    emit_int64(data);
   } else {
     emit_data64(data, Relocation::spec_simple(rtype), format);
   }
 }

@@ -4408 +4434 @@
   // embedded words. Instead, relocate to the enclosing instruction.
   code_section()->relocate(inst_mark(), rspec, format);
 #ifdef ASSERT
   check_relocation(rspec, format);
 #endif
-  emit_long64(data);
+  emit_int64(data);
 }

 int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
   if (reg_enc >= 8) {
     prefix(REX_B);
@@ -4941 +4967 @@

 void Assembler::mov64(Register dst, int64_t imm64) {
   InstructionMark im(this);
   int encode = prefixq_and_encode(dst->encoding());
   emit_byte(0xB8 | encode);
-  emit_long64(imm64);
+  emit_int64(imm64);
 }

 void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
   InstructionMark im(this);
   int encode = prefixq_and_encode(dst->encoding());
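For reference, a sketch of the byte layout mov64 above produces for rax: REX.W plus B8+rd, followed by the raw 8-byte immediate (the movabs form). Standalone demo code, little-endian as on x86:

#include <cstdint>
#include <cstring>
#include <vector>

static void emit_mov64_rax_demo(std::vector<uint8_t>& code, int64_t imm) {
  code.push_back(0x48);                  // REX.W; rax (rd = 0) needs no REX.B
  code.push_back(0xB8);                  // B8+rd
  uint8_t bytes[sizeof imm];
  std::memcpy(bytes, &imm, sizeof imm);  // imm64, little-endian
  code.insert(code.end(), bytes, bytes + sizeof imm);
}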
@@ -5415 +5441 @@
   emit_byte(0x33);
   emit_operand(dst, src);
 }

 #endif // !LP64
5420
5421 static Assembler::Condition reverse[] = {
5422 Assembler::noOverflow /* overflow = 0x0 */ ,
5423 Assembler::overflow /* noOverflow = 0x1 */ ,
5424 Assembler::aboveEqual /* carrySet = 0x2, below = 0x2 */ ,
5425 Assembler::below /* aboveEqual = 0x3, carryClear = 0x3 */ ,
5426 Assembler::notZero /* zero = 0x4, equal = 0x4 */ ,
5427 Assembler::zero /* notZero = 0x5, notEqual = 0x5 */ ,
5428 Assembler::above /* belowEqual = 0x6 */ ,
5429 Assembler::belowEqual /* above = 0x7 */ ,
5430 Assembler::positive /* negative = 0x8 */ ,
5431 Assembler::negative /* positive = 0x9 */ ,
5432 Assembler::noParity /* parity = 0xa */ ,
5433 Assembler::parity /* noParity = 0xb */ ,
5434 Assembler::greaterEqual /* less = 0xc */ ,
5435 Assembler::less /* greaterEqual = 0xd */ ,
5436 Assembler::greater /* lessEqual = 0xe */ ,
5437 Assembler::lessEqual /* greater = 0xf, */
5438
5439 };
5440
5441
5442 // Implementation of MacroAssembler
5443
5444 // First all the versions that have distinct versions depending on 32/64 bit
5445 // Unless the difference is trivial (1 line or so).
5446
5447 #ifndef _LP64
5448
5449 // 32bit versions
5450
5451 Address MacroAssembler::as_Address(AddressLiteral adr) {
5452 return Address(adr.target(), adr.rspec());
5453 }
5454
5455 Address MacroAssembler::as_Address(ArrayAddress adr) {
5456 return Address::make_array(adr);
5457 }
5458
5459 int MacroAssembler::biased_locking_enter(Register lock_reg,
5460 Register obj_reg,
5461 Register swap_reg,
5462 Register tmp_reg,
5463 bool swap_reg_contains_mark,
5464 Label& done,
5465 Label* slow_case,
5466 BiasedLockingCounters* counters) {
5467 assert(UseBiasedLocking, "why call this otherwise?");
5468 assert(swap_reg == rax, "swap_reg must be rax, for cmpxchg");
5469 assert_different_registers(lock_reg, obj_reg, swap_reg);
5470
5471 if (PrintBiasedLockingStatistics && counters == NULL)
5472 counters = BiasedLocking::counters();
5473
5474 bool need_tmp_reg = false;
5475 if (tmp_reg == noreg) {
5476 need_tmp_reg = true;
5477 tmp_reg = lock_reg;
5478 } else {
5479 assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
5480 }
5481 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
5482 Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
5483 Address klass_addr (obj_reg, oopDesc::klass_offset_in_bytes());
5484 Address saved_mark_addr(lock_reg, 0);
5485
5486 // Biased locking
5487 // See whether the lock is currently biased toward our thread and
5488 // whether the epoch is still valid
5489 // Note that the runtime guarantees sufficient alignment of JavaThread
5490 // pointers to allow age to be placed into low bits
5491 // First check to see whether biasing is even enabled for this object
5492 Label cas_label;
5493 int null_check_offset = -1;
5494 if (!swap_reg_contains_mark) {
5495 null_check_offset = offset();
5496 movl(swap_reg, mark_addr);
5497 }
5498 if (need_tmp_reg) {
5499 push(tmp_reg);
5500 }
5501 movl(tmp_reg, swap_reg);
5502 andl(tmp_reg, markOopDesc::biased_lock_mask_in_place);
5503 cmpl(tmp_reg, markOopDesc::biased_lock_pattern);
5504 if (need_tmp_reg) {
5505 pop(tmp_reg);
5506 }
5507 jcc(Assembler::notEqual, cas_label);
5508 // The bias pattern is present in the object's header. Need to check
5509 // whether the bias owner and the epoch are both still current.
5510 // Note that because there is no current thread register on x86 we
5511 // need to store off the mark word we read out of the object to
5512 // avoid reloading it and needing to recheck invariants below. This
5513 // store is unfortunate but it makes the overall code shorter and
5514 // simpler.
5515 movl(saved_mark_addr, swap_reg);
5516 if (need_tmp_reg) {
5517 push(tmp_reg);
5518 }
5519 get_thread(tmp_reg);
5520 xorl(swap_reg, tmp_reg);
5521 if (swap_reg_contains_mark) {
5522 null_check_offset = offset();
5523 }
5524 movl(tmp_reg, klass_addr);
5525 xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset()));
5526 andl(swap_reg, ~((int) markOopDesc::age_mask_in_place));
5527 if (need_tmp_reg) {
5528 pop(tmp_reg);
5529 }
5530 if (counters != NULL) {
5531 cond_inc32(Assembler::zero,
5532 ExternalAddress((address)counters->biased_lock_entry_count_addr()));
5533 }
5534 jcc(Assembler::equal, done);
5535
5536 Label try_revoke_bias;
5537 Label try_rebias;
5538
5539 // At this point we know that the header has the bias pattern and
5540 // that we are not the bias owner in the current epoch. We need to
5541 // figure out more details about the state of the header in order to
5542 // know what operations can be legally performed on the object's
5543 // header.
5544
5545 // If the low three bits in the xor result aren't clear, that means
5546 // the prototype header is no longer biased and we have to revoke
5547 // the bias on this object.
5548 testl(swap_reg, markOopDesc::biased_lock_mask_in_place);
5549 jcc(Assembler::notZero, try_revoke_bias);
5550
5551 // Biasing is still enabled for this data type. See whether the
5552 // epoch of the current bias is still valid, meaning that the epoch
5553 // bits of the mark word are equal to the epoch bits of the
5554 // prototype header. (Note that the prototype header's epoch bits
5555 // only change at a safepoint.) If not, attempt to rebias the object
5556 // toward the current thread. Note that we must be absolutely sure
5557 // that the current epoch is invalid in order to do this because
5558 // otherwise the manipulations it performs on the mark word are
5559 // illegal.
5560 testl(swap_reg, markOopDesc::epoch_mask_in_place);
5561 jcc(Assembler::notZero, try_rebias);
5562
5563 // The epoch of the current bias is still valid but we know nothing
5564 // about the owner; it might be set or it might be clear. Try to
5565 // acquire the bias of the object using an atomic operation. If this
5566 // fails we will go in to the runtime to revoke the object's bias.
5567 // Note that we first construct the presumed unbiased header so we
5568 // don't accidentally blow away another thread's valid bias.
5569 movl(swap_reg, saved_mark_addr);
5570 andl(swap_reg,
5571 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
5572 if (need_tmp_reg) {
5573 push(tmp_reg);
5574 }
5575 get_thread(tmp_reg);
5576 orl(tmp_reg, swap_reg);
5577 if (os::is_MP()) {
5578 lock();
5579 }
5580 cmpxchgptr(tmp_reg, Address(obj_reg, 0));
5581 if (need_tmp_reg) {
5582 pop(tmp_reg);
5583 }
5584 // If the biasing toward our thread failed, this means that
5585 // another thread succeeded in biasing it toward itself and we
5586 // need to revoke that bias. The revocation will occur in the
5587 // interpreter runtime in the slow case.
5588 if (counters != NULL) {
5589 cond_inc32(Assembler::zero,
5590 ExternalAddress((address)counters->anonymously_biased_lock_entry_count_addr()));
5591 }
5592 if (slow_case != NULL) {
5593 jcc(Assembler::notZero, *slow_case);
5594 }
5595 jmp(done);
5596
5597 bind(try_rebias);
5598 // At this point we know the epoch has expired, meaning that the
5599 // current "bias owner", if any, is actually invalid. Under these
5600 // circumstances _only_, we are allowed to use the current header's
5601 // value as the comparison value when doing the cas to acquire the
5602 // bias in the current epoch. In other words, we allow transfer of
5603 // the bias from one thread to another directly in this situation.
5604 //
5605 // FIXME: due to a lack of registers we currently blow away the age
5606 // bits in this situation. Should attempt to preserve them.
5607 if (need_tmp_reg) {
5608 push(tmp_reg);
5609 }
5610 get_thread(tmp_reg);
5611 movl(swap_reg, klass_addr);
5612 orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset()));
5613 movl(swap_reg, saved_mark_addr);
5614 if (os::is_MP()) {
5615 lock();
5616 }
5617 cmpxchgptr(tmp_reg, Address(obj_reg, 0));
5618 if (need_tmp_reg) {
5619 pop(tmp_reg);
5620 }
5621 // If the biasing toward our thread failed, then another thread
5622 // succeeded in biasing it toward itself and we need to revoke that
5623 // bias. The revocation will occur in the runtime in the slow case.
5624 if (counters != NULL) {
5625 cond_inc32(Assembler::zero,
5626 ExternalAddress((address)counters->rebiased_lock_entry_count_addr()));
5627 }
5628 if (slow_case != NULL) {
5629 jcc(Assembler::notZero, *slow_case);
5630 }
5631 jmp(done);
5632
5633 bind(try_revoke_bias);
5634 // The prototype mark in the klass doesn't have the bias bit set any
5635 // more, indicating that objects of this data type are not supposed
5636 // to be biased any more. We are going to try to reset the mark of
5637 // this object to the prototype value and fall through to the
5638 // CAS-based locking scheme. Note that if our CAS fails, it means
5639 // that another thread raced us for the privilege of revoking the
5640 // bias of this particular object, so it's okay to continue in the
5641 // normal locking code.
5642 //
5643 // FIXME: due to a lack of registers we currently blow away the age
5644 // bits in this situation. Should attempt to preserve them.
5645 movl(swap_reg, saved_mark_addr);
5646 if (need_tmp_reg) {
5647 push(tmp_reg);
5648 }
5649 movl(tmp_reg, klass_addr);
5650 movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset()));
5651 if (os::is_MP()) {
5652 lock();
5653 }
5654 cmpxchgptr(tmp_reg, Address(obj_reg, 0));
5655 if (need_tmp_reg) {
5656 pop(tmp_reg);
5657 }
5658 // Fall through to the normal CAS-based lock, because no matter what
5659 // the result of the above CAS, some thread must have succeeded in
5660 // removing the bias bit from the object's header.
5661 if (counters != NULL) {
5662 cond_inc32(Assembler::zero,
5663 ExternalAddress((address)counters->revoked_lock_entry_count_addr()));
5664 }
5665
5666 bind(cas_label);
5667
5668 return null_check_offset;
5669 }
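A bit-level view of the header test at the top of biased_locking_enter above: the low three bits of the mark word hold the lock bits plus the biased bit, and the pattern 101 means "biased, unlocked". The constants below restate the layout the comments describe; they are illustrative stand-ins, not pulled from markOop.hpp:

#include <cstdint>

static const uintptr_t biased_lock_mask    = 0x7; // low 3 bits: lock + biased
static const uintptr_t biased_lock_pattern = 0x5; // 101 = biased, unlocked

static bool has_bias_pattern(uintptr_t mark_word) {
  // the andl(tmp_reg, ...); cmpl(tmp_reg, ...) pair in the code above
  return (mark_word & biased_lock_mask) == biased_lock_pattern;
}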
5670 void MacroAssembler::call_VM_leaf_base(address entry_point,
5671 int number_of_arguments) {
5672 call(RuntimeAddress(entry_point));
5673 increment(rsp, number_of_arguments * wordSize);
5674 }
5675
5676 void MacroAssembler::cmpklass(Address src1, Metadata* obj) {
5677 cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
5678 }
5679
5680 void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
5681 cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
5682 }
5683
5684 void MacroAssembler::cmpoop(Address src1, jobject obj) {
5685 cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
5686 }
5687
5688 void MacroAssembler::cmpoop(Register src1, jobject obj) {
5689 cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
5690 }
5691
5692 void MacroAssembler::extend_sign(Register hi, Register lo) {
5693 // According to Intel Doc. AP-526, "Integer Divide", p.18.
5694 if (VM_Version::is_P6() && hi == rdx && lo == rax) {
5695 cdql();
5696 } else {
5697 movl(hi, lo);
5698 sarl(hi, 31);
5699 }
5700 }
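Reference semantics for extend_sign above: hi:lo becomes the 64-bit sign extension of lo, so hi is all copies of lo's sign bit. That is what CDQ produces for edx:eax on P6+, and what the movl/sarl fallback computes. Sketch (assumes the usual arithmetic right shift of signed values):

#include <cstdint>

static int32_t extend_sign_hi(int32_t lo) {
  return lo >> 31;  // 0 or -1, matching sarl(hi, 31)
}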
5701
5702 void MacroAssembler::jC2(Register tmp, Label& L) {
5703 // set parity bit if FPU flag C2 is set (via rax)
5704 save_rax(tmp);
5705 fwait(); fnstsw_ax();
5706 sahf();
5707 restore_rax(tmp);
5708 // branch
5709 jcc(Assembler::parity, L);
5710 }
5711
5712 void MacroAssembler::jnC2(Register tmp, Label& L) {
5713 // set parity bit if FPU flag C2 is set (via rax)
5714 save_rax(tmp);
5715 fwait(); fnstsw_ax();
5716 sahf();
5717 restore_rax(tmp);
5718 // branch
5719 jcc(Assembler::noParity, L);
5720 }
5721
5722 // 32bit can do a case table jump in one instruction but we no longer allow the base
5723 // to be installed in the Address class
5724 void MacroAssembler::jump(ArrayAddress entry) {
5725 jmp(as_Address(entry));
5726 }
5727
5728 // Note: y_lo will be destroyed
5729 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
5730 // Long compare for Java (semantics as described in JVM spec.)
5731 Label high, low, done;
5732
5733 cmpl(x_hi, y_hi);
5734 jcc(Assembler::less, low);
5735 jcc(Assembler::greater, high);
5736 // x_hi is the return register
5737 xorl(x_hi, x_hi);
5738 cmpl(x_lo, y_lo);
5739 jcc(Assembler::below, low);
5740 jcc(Assembler::equal, done);
5741
5742 bind(high);
5743 xorl(x_hi, x_hi);
5744 increment(x_hi);
5745 jmp(done);
5746
5747 bind(low);
5748 xorl(x_hi, x_hi);
5749 decrementl(x_hi);
5750
5751 bind(done);
5752 }
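Reference semantics for the lcmp2int sequence above (the JVM's lcmp): compare two longs held as {hi, lo} 32-bit halves and leave -1, 0, or +1 in x_hi. Note the high halves compare signed (jcc less/greater) while the low halves compare unsigned (jcc below). Plain C++ sketch, not generated code:

#include <cstdint>

static int32_t lcmp2int_ref(int32_t x_hi, uint32_t x_lo,
                            int32_t y_hi, uint32_t y_lo) {
  if (x_hi != y_hi) return x_hi < y_hi ? -1 : 1; // signed on high half
  if (x_lo != y_lo) return x_lo < y_lo ? -1 : 1; // unsigned on low half
  return 0;
}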
5753
5754 void MacroAssembler::lea(Register dst, AddressLiteral src) {
5755 mov_literal32(dst, (int32_t)src.target(), src.rspec());
5756 }
5757
5758 void MacroAssembler::lea(Address dst, AddressLiteral adr) {
5759 // leal(dst, as_Address(adr));
5760 // see note in movl as to why we must use a move
5761 mov_literal32(dst, (int32_t) adr.target(), adr.rspec());
5762 }
5763
5764 void MacroAssembler::leave() {
5765 mov(rsp, rbp);
5766 pop(rbp);
5767 }
5768
5769 void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
5770 // Multiplication of two Java long values stored on the stack
5771 // as illustrated below. Result is in rdx:rax.
5772 //
5773 // rsp ---> [ ?? ] \ \
5774 // .... | y_rsp_offset |
5775 // [ y_lo ] / (in bytes) | x_rsp_offset
5776 // [ y_hi ] | (in bytes)
5777 // .... |
5778 // [ x_lo ] /
5779 // [ x_hi ]
5780 // ....
5781 //
5782 // Basic idea: lo(result) = lo(x_lo * y_lo)
5783 // hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
5784 Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
5785 Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
5786 Label quick;
5787 // load x_hi, y_hi and check if quick
5788 // multiplication is possible
5789 movl(rbx, x_hi);
5790 movl(rcx, y_hi);
5791 movl(rax, rbx);
5792 orl(rbx, rcx); // rbx, = 0 <=> x_hi = 0 and y_hi = 0
5793 jcc(Assembler::zero, quick); // if rbx, = 0 do quick multiply
5794 // do full multiplication
5795 // 1st step
5796 mull(y_lo); // x_hi * y_lo
5797 movl(rbx, rax); // save lo(x_hi * y_lo) in rbx,
5798 // 2nd step
5799 movl(rax, x_lo);
5800 mull(rcx); // x_lo * y_hi
5801 addl(rbx, rax); // add lo(x_lo * y_hi) to rbx,
5802 // 3rd step
5803 bind(quick); // note: rbx, = 0 if quick multiply!
5804 movl(rax, x_lo);
5805 mull(y_lo); // x_lo * y_lo
5806 addl(rdx, rbx); // correct hi(x_lo * y_lo)
5807 }
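Reference arithmetic for lmul above: with x = x_hi:x_lo and y = y_hi:y_lo, only the low 64 bits of the product are kept, so hi(x_lo * y_lo) just needs the low halves of the two cross terms added in; the quick path applies when both high halves are zero. Standalone sketch:

#include <cstdint>

static int64_t lmul_ref(uint32_t x_hi, uint32_t x_lo,
                        uint32_t y_hi, uint32_t y_lo) {
  uint64_t lo_prod = (uint64_t)x_lo * y_lo;      // mull(y_lo): full 64-bit product
  uint32_t hi_fix  = x_hi * y_lo + x_lo * y_hi;  // cross terms, low 32 bits only
  return (int64_t)(lo_prod + ((uint64_t)hi_fix << 32)); // addl(rdx, rbx)
}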
5808
5809 void MacroAssembler::lneg(Register hi, Register lo) {
5810 negl(lo);
5811 adcl(hi, 0);
5812 negl(hi);
5813 }
5814
5815 void MacroAssembler::lshl(Register hi, Register lo) {
5816 // Java shift left long support (semantics as described in JVM spec., p.305)
5817 // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
5818 // shift value is in rcx !
5819 assert(hi != rcx, "must not use rcx");
5820 assert(lo != rcx, "must not use rcx");
5821 const Register s = rcx; // shift count
5822 const int n = BitsPerWord;
5823 Label L;
5824 andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
5825 cmpl(s, n); // if (s < n)
5826 jcc(Assembler::less, L); // else (s >= n)
5827 movl(hi, lo); // x := x << n
5828 xorl(lo, lo);
5829 // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
5830 bind(L); // s (mod n) < n
5831 shldl(hi, lo); // x := x << s
5832 shll(lo);
5833 }
5834
5835
5836 void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
5837 // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
5838 // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
5839 assert(hi != rcx, "must not use rcx");
5840 assert(lo != rcx, "must not use rcx");
5841 const Register s = rcx; // shift count
5842 const int n = BitsPerWord;
5843 Label L;
5844 andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
5845 cmpl(s, n); // if (s < n)
5846 jcc(Assembler::less, L); // else (s >= n)
5847 movl(lo, hi); // x := x >> n
5848 if (sign_extension) sarl(hi, 31);
5849 else xorl(hi, hi);
5850 // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
5851 bind(L); // s (mod n) < n
5852 shrdl(lo, hi); // x := x >> s
5853 if (sign_extension) sarl(hi);
5854 else shrl(hi);
5855 }
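Reference semantics for lshl/lshr above: the count is taken mod 64; for counts of 32 and up the halves are first moved across, and SHLD/SHRD then shift the pair by the remaining amount. The explicit s -= 32 below is needed only in C++ (shifting a 32-bit value by 32 or more is undefined); the hardware sequence relies on SHL/SHLD masking the count mod 32, as the comments note. Sketch for the left shift:

#include <cstdint>

static int64_t lshl_ref(uint32_t hi, uint32_t lo, unsigned s) {
  s &= 0x3f;                                  // andl(s, 0x3f)
  if (s >= 32) { hi = lo; lo = 0; s -= 32; }  // movl(hi, lo); xorl(lo, lo)
  if (s != 0) {
    hi = (hi << s) | (lo >> (32 - s));        // shldl(hi, lo)
    lo <<= s;                                 // shll(lo)
  }
  return ((int64_t)hi << 32) | lo;
}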
5856
5857 void MacroAssembler::movoop(Register dst, jobject obj) {
5858 mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
5859 }
5860
5861 void MacroAssembler::movoop(Address dst, jobject obj) {
5862 mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
5863 }
5864
5865 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
5866 mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
5867 }
5868
5869 void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
5870 mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
5871 }
5872
5873 void MacroAssembler::movptr(Register dst, AddressLiteral src) {
5874 if (src.is_lval()) {
5875 mov_literal32(dst, (intptr_t)src.target(), src.rspec());
5876 } else {
5877 movl(dst, as_Address(src));
5878 }
5879 }
5880
5881 void MacroAssembler::movptr(ArrayAddress dst, Register src) {
5882 movl(as_Address(dst), src);
5883 }
5884
5885 void MacroAssembler::movptr(Register dst, ArrayAddress src) {
5886 movl(dst, as_Address(src));
5887 }
5888
5889 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
5890 void MacroAssembler::movptr(Address dst, intptr_t src) {
5891 movl(dst, src);
5892 }
5893
5894
5895 void MacroAssembler::pop_callee_saved_registers() {
5896 pop(rcx);
5897 pop(rdx);
5898 pop(rdi);
5899 pop(rsi);
5900 }
5901
5902 void MacroAssembler::pop_fTOS() {
5903 fld_d(Address(rsp, 0));
5904 addl(rsp, 2 * wordSize);
5905 }
5906
5907 void MacroAssembler::push_callee_saved_registers() {
5908 push(rsi);
5909 push(rdi);
5910 push(rdx);
5911 push(rcx);
5912 }
5913
5914 void MacroAssembler::push_fTOS() {
5915 subl(rsp, 2 * wordSize);
5916 fstp_d(Address(rsp, 0));
5917 }
5918
5919
5920 void MacroAssembler::pushoop(jobject obj) {
5921 push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
5922 }
5923
5924 void MacroAssembler::pushklass(Metadata* obj) {
5925 push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate());
5926 }
5927
5928 void MacroAssembler::pushptr(AddressLiteral src) {
5929 if (src.is_lval()) {
5930 push_literal32((int32_t)src.target(), src.rspec());
5931 } else {
5932 pushl(as_Address(src));
5933 }
5934 }
5935
5936 void MacroAssembler::set_word_if_not_zero(Register dst) {
5937 xorl(dst, dst);
5938 set_byte_if_not_zero(dst);
5939 }
5940
5941 static void pass_arg0(MacroAssembler* masm, Register arg) {
5942 masm->push(arg);
5943 }
5944
5945 static void pass_arg1(MacroAssembler* masm, Register arg) {
5946 masm->push(arg);
5947 }
5948
5949 static void pass_arg2(MacroAssembler* masm, Register arg) {
5950 masm->push(arg);
5951 }
5952
5953 static void pass_arg3(MacroAssembler* masm, Register arg) {
5954 masm->push(arg);
5955 }
5956
5957 #ifndef PRODUCT
5958 extern "C" void findpc(intptr_t x);
5959 #endif
5960
5961 void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
5962 // In order to get locks to work, we need to fake an in_VM state
5963 JavaThread* thread = JavaThread::current();
5964 JavaThreadState saved_state = thread->thread_state();
5965 thread->set_thread_state(_thread_in_vm);
5966 if (ShowMessageBoxOnError) {
5967 JavaThread* thread = JavaThread::current();
5968 JavaThreadState saved_state = thread->thread_state();
5969 thread->set_thread_state(_thread_in_vm);
5970 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
5971 ttyLocker ttyl;
5972 BytecodeCounter::print();
5973 }
5974 // To see where a verify_oop failed, get $ebx+40/X for this frame.
5975 // This is the value of eip which points to where verify_oop will return.
5976 if (os::message_box(msg, "Execution stopped, print registers?")) {
5977 print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
5978 BREAKPOINT;
5979 }
5980 } else {
5981 ttyLocker ttyl;
5982 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
5983 }
5984 // Don't assert holding the ttyLock
5985 assert(false, err_msg("DEBUG MESSAGE: %s", msg));
5986 ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
5987 }
5988
5989 void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
5990 ttyLocker ttyl;
5991 FlagSetting fs(Debugging, true);
5992 tty->print_cr("eip = 0x%08x", eip);
5993 #ifndef PRODUCT
5994 if ((WizardMode || Verbose) && PrintMiscellaneous) {
5995 tty->cr();
5996 findpc(eip);
5997 tty->cr();
5998 }
5999 #endif
6000 #define PRINT_REG(rax) \
6001 { tty->print("%s = ", #rax); os::print_location(tty, rax); }
6002 PRINT_REG(rax);
6003 PRINT_REG(rbx);
6004 PRINT_REG(rcx);
6005 PRINT_REG(rdx);
6006 PRINT_REG(rdi);
6007 PRINT_REG(rsi);
6008 PRINT_REG(rbp);
6009 PRINT_REG(rsp);
6010 #undef PRINT_REG
6011 // Print some words near top of stack.
6012 int* dump_sp = (int*) rsp;
6013 for (int col1 = 0; col1 < 8; col1++) {
6014 tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
6015 os::print_location(tty, *dump_sp++);
6016 }
6017 for (int row = 0; row < 16; row++) {
6018 tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
6019 for (int col = 0; col < 8; col++) {
6020 tty->print(" 0x%08x", *dump_sp++);
6021 }
6022 tty->cr();
6023 }
6024 // Print some instructions around pc:
6025 Disassembler::decode((address)eip-64, (address)eip);
6026 tty->print_cr("--------");
6027 Disassembler::decode((address)eip, (address)eip+32);
6028 }
6029
6030 void MacroAssembler::stop(const char* msg) {
6031 ExternalAddress message((address)msg);
6032 // push address of message
6033 pushptr(message.addr());
6034 { Label L; call(L, relocInfo::none); bind(L); } // push eip
6035 pusha(); // push registers
6036 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
6037 hlt();
6038 }
6039
6040 void MacroAssembler::warn(const char* msg) {
6041 push_CPU_state();
6042
6043 ExternalAddress message((address) msg);
6044 // push address of message
6045 pushptr(message.addr());
6046
6047 call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
6048 addl(rsp, wordSize); // discard argument
6049 pop_CPU_state();
6050 }
6051
6052 void MacroAssembler::print_state() {
6053 { Label L; call(L, relocInfo::none); bind(L); } // push eip
6054 pusha(); // push registers
6055
6056 push_CPU_state();
6057 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
6058 pop_CPU_state();
6059
6060 popa();
6061 addl(rsp, wordSize);
6062 }
6063
6064 #else // _LP64
6065
6066 // 64 bit versions
6067
6068 Address MacroAssembler::as_Address(AddressLiteral adr) {
6069 // amd64 always does this as a pc-rel
6070 // we can be absolute or disp based on the instruction type
6071 // jmp/call are displacements others are absolute
6072 assert(!adr.is_lval(), "must be rval");
6073 assert(reachable(adr), "must be");
6074 return Address((int32_t)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc());
6075
6076 }
6077
6078 Address MacroAssembler::as_Address(ArrayAddress adr) {
6079 AddressLiteral base = adr.base();
6080 lea(rscratch1, base);
6081 Address index = adr.index();
6082 assert(index._disp == 0, "must not have disp"); // maybe it can?
6083 Address array(rscratch1, index._index, index._scale, index._disp);
6084 return array;
6085 }
6086
6087 int MacroAssembler::biased_locking_enter(Register lock_reg,
6088 Register obj_reg,
6089 Register swap_reg,
6090 Register tmp_reg,
6091 bool swap_reg_contains_mark,
6092 Label& done,
6093 Label* slow_case,
6094 BiasedLockingCounters* counters) {
6095 assert(UseBiasedLocking, "why call this otherwise?");
6096 assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
6097 assert(tmp_reg != noreg, "tmp_reg must be supplied");
6098 assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
6099 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
6100 Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
6101 Address saved_mark_addr(lock_reg, 0);
6102
6103 if (PrintBiasedLockingStatistics && counters == NULL)
6104 counters = BiasedLocking::counters();
6105
6106 // Biased locking
6107 // See whether the lock is currently biased toward our thread and
6108 // whether the epoch is still valid
6109 // Note that the runtime guarantees sufficient alignment of JavaThread
6110 // pointers to allow age to be placed into low bits
6111 // First check to see whether biasing is even enabled for this object
6112 Label cas_label;
6113 int null_check_offset = -1;
6114 if (!swap_reg_contains_mark) {
6115 null_check_offset = offset();
6116 movq(swap_reg, mark_addr);
6117 }
6118 movq(tmp_reg, swap_reg);
6119 andq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
6120 cmpq(tmp_reg, markOopDesc::biased_lock_pattern);
6121 jcc(Assembler::notEqual, cas_label);
6122 // The bias pattern is present in the object's header. Need to check
6123 // whether the bias owner and the epoch are both still current.
6124 load_prototype_header(tmp_reg, obj_reg);
6125 orq(tmp_reg, r15_thread);
6126 xorq(tmp_reg, swap_reg);
6127 andq(tmp_reg, ~((int) markOopDesc::age_mask_in_place));
6128 if (counters != NULL) {
6129 cond_inc32(Assembler::zero,
6130 ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
6131 }
6132 jcc(Assembler::equal, done);
6133
6134 Label try_revoke_bias;
6135 Label try_rebias;
6136
6137 // At this point we know that the header has the bias pattern and
6138 // that we are not the bias owner in the current epoch. We need to
6139 // figure out more details about the state of the header in order to
6140 // know what operations can be legally performed on the object's
6141 // header.
6142
6143 // If the low three bits in the xor result aren't clear, that means
6144 // the prototype header is no longer biased and we have to revoke
6145 // the bias on this object.
6146 testq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
6147 jcc(Assembler::notZero, try_revoke_bias);
6148
6149 // Biasing is still enabled for this data type. See whether the
6150 // epoch of the current bias is still valid, meaning that the epoch
6151 // bits of the mark word are equal to the epoch bits of the
6152 // prototype header. (Note that the prototype header's epoch bits
6153 // only change at a safepoint.) If not, attempt to rebias the object
6154 // toward the current thread. Note that we must be absolutely sure
6155 // that the current epoch is invalid in order to do this because
6156 // otherwise the manipulations it performs on the mark word are
6157 // illegal.
6158 testq(tmp_reg, markOopDesc::epoch_mask_in_place);
6159 jcc(Assembler::notZero, try_rebias);
6160
6161 // The epoch of the current bias is still valid but we know nothing
6162 // about the owner; it might be set or it might be clear. Try to
6163 // acquire the bias of the object using an atomic operation. If this
6164 // fails we will go in to the runtime to revoke the object's bias.
6165 // Note that we first construct the presumed unbiased header so we
6166 // don't accidentally blow away another thread's valid bias.
6167 andq(swap_reg,
6168 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
6169 movq(tmp_reg, swap_reg);
6170 orq(tmp_reg, r15_thread);
6171 if (os::is_MP()) {
6172 lock();
6173 }
6174 cmpxchgq(tmp_reg, Address(obj_reg, 0));
6175 // If the biasing toward our thread failed, this means that
6176 // another thread succeeded in biasing it toward itself and we
6177 // need to revoke that bias. The revocation will occur in the
6178 // interpreter runtime in the slow case.
6179 if (counters != NULL) {
6180 cond_inc32(Assembler::zero,
6181 ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
6182 }
6183 if (slow_case != NULL) {
6184 jcc(Assembler::notZero, *slow_case);
6185 }
6186 jmp(done);
6187
6188 bind(try_rebias);
6189 // At this point we know the epoch has expired, meaning that the
6190 // current "bias owner", if any, is actually invalid. Under these
6191 // circumstances _only_, we are allowed to use the current header's
6192 // value as the comparison value when doing the cas to acquire the
6193 // bias in the current epoch. In other words, we allow transfer of
6194 // the bias from one thread to another directly in this situation.
6195 //
6196 // FIXME: due to a lack of registers we currently blow away the age
6197 // bits in this situation. Should attempt to preserve them.
6198 load_prototype_header(tmp_reg, obj_reg);
6199 orq(tmp_reg, r15_thread);
6200 if (os::is_MP()) {
6201 lock();
6202 }
6203 cmpxchgq(tmp_reg, Address(obj_reg, 0));
6204 // If the biasing toward our thread failed, then another thread
6205 // succeeded in biasing it toward itself and we need to revoke that
6206 // bias. The revocation will occur in the runtime in the slow case.
6207 if (counters != NULL) {
6208 cond_inc32(Assembler::zero,
6209 ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
6210 }
6211 if (slow_case != NULL) {
6212 jcc(Assembler::notZero, *slow_case);
6213 }
6214 jmp(done);
6215
6216 bind(try_revoke_bias);
6217 // The prototype mark in the klass doesn't have the bias bit set any
6218 // more, indicating that objects of this data type are not supposed
6219 // to be biased any more. We are going to try to reset the mark of
6220 // this object to the prototype value and fall through to the
6221 // CAS-based locking scheme. Note that if our CAS fails, it means
6222 // that another thread raced us for the privilege of revoking the
6223 // bias of this particular object, so it's okay to continue in the
6224 // normal locking code.
6225 //
6226 // FIXME: due to a lack of registers we currently blow away the age
6227 // bits in this situation. Should attempt to preserve them.
6228 load_prototype_header(tmp_reg, obj_reg);
6229 if (os::is_MP()) {
6230 lock();
6231 }
6232 cmpxchgq(tmp_reg, Address(obj_reg, 0));
6233 // Fall through to the normal CAS-based lock, because no matter what
6234 // the result of the above CAS, some thread must have succeeded in
6235 // removing the bias bit from the object's header.
6236 if (counters != NULL) {
6237 cond_inc32(Assembler::zero,
6238 ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
6239 }
6240
6241 bind(cas_label);
6242
6243 return null_check_offset;
6244 }
6245
6246 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
6247 Label L, E;
6248
6249 #ifdef _WIN64
6250 // Windows always allocates space for its register args
6251 assert(num_args <= 4, "only register arguments supported");
6252 subq(rsp, frame::arg_reg_save_area_bytes);
6253 #endif
6254
6255 // Align stack if necessary
6256 testl(rsp, 15);
6257 jcc(Assembler::zero, L);
6258
6259 subq(rsp, 8);
6260 {
6261 call(RuntimeAddress(entry_point));
6262 }
6263 addq(rsp, 8);
6264 jmp(E);
6265
6266 bind(L);
6267 {
6268 call(RuntimeAddress(entry_point));
6269 }
6270
6271 bind(E);
6272
6273 #ifdef _WIN64
6274 // restore stack pointer
6275 addq(rsp, frame::arg_reg_save_area_bytes);
6276 #endif
6277
6278 }
6279
6280 void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
6281 assert(!src2.is_lval(), "should use cmpptr");
6282
6283 if (reachable(src2)) {
6284 cmpq(src1, as_Address(src2));
6285 } else {
6286 lea(rscratch1, src2);
6287 Assembler::cmpq(src1, Address(rscratch1, 0));
6288 }
6289 }
6290
6291 int MacroAssembler::corrected_idivq(Register reg) {
6292 // Full implementation of Java ldiv and lrem; checks for special
6293 // case as described in JVM spec., p.243 & p.271. The function
6294 // returns the (pc) offset of the idivl instruction - may be needed
6295 // for implicit exceptions.
6296 //
6297 // normal case special case
6298 //
6299 // input : rax: dividend min_long
6300 // reg: divisor (may not be eax/edx) -1
6301 //
6302 // output: rax: quotient (= rax idiv reg) min_long
6303 // rdx: remainder (= rax irem reg) 0
6304 assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
6305 static const int64_t min_long = 0x8000000000000000;
6306 Label normal_case, special_case;
6307
6308 // check for special case
6309 cmp64(rax, ExternalAddress((address) &min_long));
6310 jcc(Assembler::notEqual, normal_case);
6311 xorl(rdx, rdx); // prepare rdx for possible special case (where
6312 // remainder = 0)
6313 cmpq(reg, -1);
6314 jcc(Assembler::equal, special_case);
6315
6316 // handle normal case
6317 bind(normal_case);
6318 cdqq();
6319 int idivq_offset = offset();
6320 idivq(reg);
6321
6322 // normal and special case exit
6323 bind(special_case);
6324
6325 return idivq_offset;
6326 }
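The corner case corrected_idivq guards above: IDIV of min_long by -1 raises a divide error (#DE) because the true quotient overflows 64 bits, while Java defines min_long / -1 == min_long with remainder 0. Reference semantics, as a standalone sketch:

#include <cstdint>

static void ldiv_lrem_ref(int64_t dividend, int64_t divisor,
                          int64_t* quot, int64_t* rem) {
  if (dividend == INT64_MIN && divisor == -1) { // the special case
    *quot = INT64_MIN;                          // rax: min_long
    *rem  = 0;                                  // rdx: 0 (xorl(rdx, rdx))
  } else {
    *quot = dividend / divisor;                 // cdqq; idivq(reg)
    *rem  = dividend % divisor;
  }
}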
6327
6328 void MacroAssembler::decrementq(Register reg, int value) {
6329 if (value == min_jint) { subq(reg, value); return; }
6330 if (value < 0) { incrementq(reg, -value); return; }
6331 if (value == 0) { ; return; }
6332 if (value == 1 && UseIncDec) { decq(reg) ; return; }
6333 /* else */ { subq(reg, value) ; return; }
6334 }
6335
6336 void MacroAssembler::decrementq(Address dst, int value) {
6337 if (value == min_jint) { subq(dst, value); return; }
6338 if (value < 0) { incrementq(dst, -value); return; }
6339 if (value == 0) { ; return; }
6340 if (value == 1 && UseIncDec) { decq(dst) ; return; }
6341 /* else */ { subq(dst, value) ; return; }
6342 }
6343
6344 void MacroAssembler::incrementq(Register reg, int value) {
6345 if (value == min_jint) { addq(reg, value); return; }
6346 if (value < 0) { decrementq(reg, -value); return; }
6347 if (value == 0) { ; return; }
6348 if (value == 1 && UseIncDec) { incq(reg) ; return; }
6349 /* else */ { addq(reg, value) ; return; }
6350 }
6351
6352 void MacroAssembler::incrementq(Address dst, int value) {
6353 if (value == min_jint) { addq(dst, value); return; }
6354 if (value < 0) { decrementq(dst, -value); return; }
6355 if (value == 0) { ; return; }
6356 if (value == 1 && UseIncDec) { incq(dst) ; return; }
6357 /* else */ { addq(dst, value) ; return; }
6358 }
6359
6360 // 32bit can do a case table jump in one instruction but we no longer allow the base
6361 // to be installed in the Address class
6362 void MacroAssembler::jump(ArrayAddress entry) {
6363 lea(rscratch1, entry.base());
6364 Address dispatch = entry.index();
6365 assert(dispatch._base == noreg, "must be");
6366 dispatch._base = rscratch1;
6367 jmp(dispatch);
6368 }
6369
6370 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
6371 ShouldNotReachHere(); // 64bit doesn't use two regs
6372 cmpq(x_lo, y_lo);
6373 }
6374
6375 void MacroAssembler::lea(Register dst, AddressLiteral src) {
6376 mov_literal64(dst, (intptr_t)src.target(), src.rspec());
6377 }
6378
6379 void MacroAssembler::lea(Address dst, AddressLiteral adr) {
6380 mov_literal64(rscratch1, (intptr_t)adr.target(), adr.rspec());
6381 movptr(dst, rscratch1);
6382 }
6383
6384 void MacroAssembler::leave() {
6385 // %%% is this really better? Why not on 32bit too?
6386 emit_byte(0xC9); // LEAVE
6387 }
6388
6389 void MacroAssembler::lneg(Register hi, Register lo) {
6390 ShouldNotReachHere(); // 64bit doesn't use two regs
6391 negq(lo);
6392 }
6393
6394 void MacroAssembler::movoop(Register dst, jobject obj) {
6395 mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
6396 }
6397
6398 void MacroAssembler::movoop(Address dst, jobject obj) {
6399 mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate());
6400 movq(dst, rscratch1);
6401 }
6402
6403 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
6404 mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
6405 }
6406
6407 void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
6408 mov_literal64(rscratch1, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
6409 movq(dst, rscratch1);
6410 }
6411
6412 void MacroAssembler::movptr(Register dst, AddressLiteral src) {
6413 if (src.is_lval()) {
6414 mov_literal64(dst, (intptr_t)src.target(), src.rspec());
6415 } else {
6416 if (reachable(src)) {
6417 movq(dst, as_Address(src));
6418 } else {
6419 lea(rscratch1, src);
6420 movq(dst, Address(rscratch1,0));
6421 }
6422 }
6423 }
6424
6425 void MacroAssembler::movptr(ArrayAddress dst, Register src) {
6426 movq(as_Address(dst), src);
6427 }
6428
6429 void MacroAssembler::movptr(Register dst, ArrayAddress src) {
6430 movq(dst, as_Address(src));
6431 }
6432
6433 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
6434 void MacroAssembler::movptr(Address dst, intptr_t src) {
6435 mov64(rscratch1, src);
6436 movq(dst, rscratch1);
6437 }
6438
6439 // These are mostly for initializing NULL
6440 void MacroAssembler::movptr(Address dst, int32_t src) {
6441 movslq(dst, src);
6442 }
6443
6444 void MacroAssembler::movptr(Register dst, int32_t src) {
6445 mov64(dst, (intptr_t)src);
6446 }
6447
6448 void MacroAssembler::pushoop(jobject obj) {
6449 movoop(rscratch1, obj);
6450 push(rscratch1);
6451 }
6452
6453 void MacroAssembler::pushklass(Metadata* obj) {
6454 mov_metadata(rscratch1, obj);
6455 push(rscratch1);
6456 }
6457
6458 void MacroAssembler::pushptr(AddressLiteral src) {
6459 lea(rscratch1, src);
6460 if (src.is_lval()) {
6461 push(rscratch1);
6462 } else {
6463 pushq(Address(rscratch1, 0));
6464 }
6465 }
6466
6467 void MacroAssembler::reset_last_Java_frame(bool clear_fp,
6468 bool clear_pc) {
6469 // we must set sp to zero to clear frame
6470 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
6471 // must clear fp, so that compiled frames are not confused; it is
6472 // possible that we need it only for debugging
6473 if (clear_fp) {
6474 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
6475 }
6476
6477 if (clear_pc) {
6478 movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
6479 }
6480 }
6481
6482 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
6483 Register last_java_fp,
6484 address last_java_pc) {
6485 // determine last_java_sp register
6486 if (!last_java_sp->is_valid()) {
6487 last_java_sp = rsp;
6488 }
6489
6490 // last_java_fp is optional
6491 if (last_java_fp->is_valid()) {
6492 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
6493 last_java_fp);
6494 }
6495
6496 // last_java_pc is optional
6497 if (last_java_pc != NULL) {
6498 Address java_pc(r15_thread,
6499 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
6500 lea(rscratch1, InternalAddress(last_java_pc));
6501 movptr(java_pc, rscratch1);
6502 }
6503
6504 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
6505 }
6506
6507 static void pass_arg0(MacroAssembler* masm, Register arg) {
6508 if (c_rarg0 != arg ) {
6509 masm->mov(c_rarg0, arg);
6510 }
6511 }
6512
6513 static void pass_arg1(MacroAssembler* masm, Register arg) {
6514 if (c_rarg1 != arg ) {
6515 masm->mov(c_rarg1, arg);
6516 }
6517 }
6518
6519 static void pass_arg2(MacroAssembler* masm, Register arg) {
6520 if (c_rarg2 != arg ) {
6521 masm->mov(c_rarg2, arg);
6522 }
6523 }
6524
6525 static void pass_arg3(MacroAssembler* masm, Register arg) {
6526 if (c_rarg3 != arg ) {
6527 masm->mov(c_rarg3, arg);
6528 }
6529 }
6530
6531 void MacroAssembler::stop(const char* msg) {
6532 address rip = pc();
6533 pusha(); // get regs on stack
6534 lea(c_rarg0, ExternalAddress((address) msg));
6535 lea(c_rarg1, InternalAddress(rip));
6536 movq(c_rarg2, rsp); // pass pointer to regs array
6537 andq(rsp, -16); // align stack as required by ABI
6538 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
6539 hlt();
6540 }
6541
6542 void MacroAssembler::warn(const char* msg) {
6543 push(rbp);
6544 movq(rbp, rsp);
6545 andq(rsp, -16); // align stack as required by push_CPU_state and call
6546 push_CPU_state(); // keeps alignment at 16 bytes
6547 lea(c_rarg0, ExternalAddress((address) msg));
6548 call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
6549 pop_CPU_state();
6550 mov(rsp, rbp);
6551 pop(rbp);
6552 }
6553
6554 void MacroAssembler::print_state() {
6555 address rip = pc();
6556 pusha(); // get regs on stack
6557 push(rbp);
6558 movq(rbp, rsp);
6559 andq(rsp, -16); // align stack as required by push_CPU_state and call
6560 push_CPU_state(); // keeps alignment at 16 bytes
6561
6562 lea(c_rarg0, InternalAddress(rip));
6563 lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
6564 call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);
6565
6566 pop_CPU_state();
6567 mov(rsp, rbp);
6568 pop(rbp);
6569 popa();
6570 }
6571
6572 #ifndef PRODUCT
6573 extern "C" void findpc(intptr_t x);
6574 #endif
6575
6576 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
6577 // In order to get locks to work, we need to fake an in_VM state
6578 if (ShowMessageBoxOnError) {
6579 JavaThread* thread = JavaThread::current();
6580 JavaThreadState saved_state = thread->thread_state();
6581 thread->set_thread_state(_thread_in_vm);
6582 #ifndef PRODUCT
6583 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
6584 ttyLocker ttyl;
6585 BytecodeCounter::print();
6586 }
6587 #endif
6588 // To see where a verify_oop failed, get $ebx+40/X for this frame.
6589 // XXX correct this offset for amd64
6590 // This is the value of eip which points to where verify_oop will return.
6591 if (os::message_box(msg, "Execution stopped, print registers?")) {
6592 print_state64(pc, regs);
6593 BREAKPOINT;
6594 assert(false, "start up GDB");
6595 }
6596 ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
6597 } else {
6598 ttyLocker ttyl;
6599 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
6600 msg);
6601 assert(false, err_msg("DEBUG MESSAGE: %s", msg));
6602 }
6603 }
6604
6605 void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
6606 ttyLocker ttyl;
6607 FlagSetting fs(Debugging, true);
6608 tty->print_cr("rip = 0x%016lx", pc);
6609 #ifndef PRODUCT
6610 tty->cr();
6611 findpc(pc);
6612 tty->cr();
6613 #endif
6614 #define PRINT_REG(rax, value) \
6615 { tty->print("%s = ", #rax); os::print_location(tty, value); }
6616 PRINT_REG(rax, regs[15]);
6617 PRINT_REG(rbx, regs[12]);
6618 PRINT_REG(rcx, regs[14]);
6619 PRINT_REG(rdx, regs[13]);
6620 PRINT_REG(rdi, regs[8]);
6621 PRINT_REG(rsi, regs[9]);
6622 PRINT_REG(rbp, regs[10]);
6623 PRINT_REG(rsp, regs[11]);
6624 PRINT_REG(r8 , regs[7]);
6625 PRINT_REG(r9 , regs[6]);
6626 PRINT_REG(r10, regs[5]);
6627 PRINT_REG(r11, regs[4]);
6628 PRINT_REG(r12, regs[3]);
6629 PRINT_REG(r13, regs[2]);
6630 PRINT_REG(r14, regs[1]);
6631 PRINT_REG(r15, regs[0]);
6632 #undef PRINT_REG
6633 // Print some words near top of stack.
6634 int64_t* rsp = (int64_t*) regs[11];
6635 int64_t* dump_sp = rsp;
6636 for (int col1 = 0; col1 < 8; col1++) {
6637 tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
6638 os::print_location(tty, *dump_sp++);
6639 }
6640 for (int row = 0; row < 25; row++) {
6641 tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
6642 for (int col = 0; col < 4; col++) {
6643 tty->print(" 0x%016lx", *dump_sp++);
6644 }
6645 tty->cr();
6646 }
6647 // Print some instructions around pc:
6648 Disassembler::decode((address)pc-64, (address)pc);
6649 tty->print_cr("--------");
6650 Disassembler::decode((address)pc, (address)pc+32);
6651 }
6652
6653 #endif // _LP64
6654
6655 // Now versions that are common to 32/64 bit
6656
6657 void MacroAssembler::addptr(Register dst, int32_t imm32) {
6658 LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
6659 }
6660
6661 void MacroAssembler::addptr(Register dst, Register src) {
6662 LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
6663 }
6664
6665 void MacroAssembler::addptr(Address dst, Register src) {
6666 LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
6667 }
6668
6669 void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src) {
6670 if (reachable(src)) {
6671 Assembler::addsd(dst, as_Address(src));
6672 } else {
6673 lea(rscratch1, src);
6674 Assembler::addsd(dst, Address(rscratch1, 0));
6675 }
6676 }
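
// Aside: these AddressLiteral wrappers all follow the same reachable()/lea()
// pattern because x86-64 memory operands carry only a signed 32-bit
// displacement. A minimal sketch of that test (illustrative only; the real
// check is Assembler::reachable and is more involved):
#include <stdint.h>
static bool fits_in_rip_disp32(const void* target, const void* next_insn_pc) {
  intptr_t disp = (intptr_t)target - (intptr_t)next_insn_pc;
  return disp == (intptr_t)(int32_t)disp;  // representable as simm32?
}
// If the target is out of range, the full 64-bit address is materialized into
// rscratch1 first and the operand becomes Address(rscratch1, 0).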
6677
6678 void MacroAssembler::addss(XMMRegister dst, AddressLiteral src) {
6679 if (reachable(src)) {
6680 addss(dst, as_Address(src));
6681 } else {
6682 lea(rscratch1, src);
6683 addss(dst, Address(rscratch1, 0));
6684 }
6685 }
6686
6687 void MacroAssembler::align(int modulus) {
6688 if (offset() % modulus != 0) {
6689 nop(modulus - (offset() % modulus));
6690 }
6691 }
6692
6693 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
6694 // Used in sign-masking with aligned address.
6695 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
6696 if (reachable(src)) {
6697 Assembler::andpd(dst, as_Address(src));
6698 } else {
6699 lea(rscratch1, src);
6700 Assembler::andpd(dst, Address(rscratch1, 0));
6701 }
6702 }
6703
6704 void MacroAssembler::andps(XMMRegister dst, AddressLiteral src) {
6705 // Used in sign-masking with aligned address.
6706 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
6707 if (reachable(src)) {
6708 Assembler::andps(dst, as_Address(src));
6709 } else {
6710 lea(rscratch1, src);
6711 Assembler::andps(dst, Address(rscratch1, 0));
6712 }
6713 }
6714
6715 void MacroAssembler::andptr(Register dst, int32_t imm32) {
6716 LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
6717 }
6718
6719 void MacroAssembler::atomic_incl(AddressLiteral counter_addr) {
6720 pushf();
6721 if (os::is_MP())
6722 lock();
6723 incrementl(counter_addr);
6724 popf();
6725 }
6726
6727 // Writes to successive stack pages until the given offset is reached, to
6728 // check for stack overflow + shadow pages. This clobbers tmp.
6729 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
6730 movptr(tmp, rsp);
6731 // Bang stack for total size given plus shadow page size.
6732 // Bang one page at a time because large size can bang beyond yellow and
6733 // red zones.
6734 Label loop;
6735 bind(loop);
6736 movl(Address(tmp, (-os::vm_page_size())), size );
6737 subptr(tmp, os::vm_page_size());
6738 subl(size, os::vm_page_size());
6739 jcc(Assembler::greater, loop);
6740
6741 // Bang down shadow pages too.
6742 // The -1 because we already subtracted 1 page.
6743 for (int i = 0; i< StackShadowPages-1; i++) {
6744 // This could be any sized move, but since it can serve as a debugging
6745 // crumb, the bigger the better.
6746 movptr(Address(tmp, (-i*os::vm_page_size())), size );
6747 }
6748 }
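
// Aside: a minimal C++ sketch of the probing loop above (illustrative only;
// page_size stands in for os::vm_page_size()). One word is written in every
// page below sp so the OS commits each page, or the guard page traps, before
// the frame is actually used:
#include <stddef.h>
static void bang_stack_sketch(char* sp, size_t total_size, size_t page_size) {
  for (size_t off = page_size; off <= total_size; off += page_size) {
    *(volatile int*)(sp - off) = 0;  // the write is what provokes the fault
  }
}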
6749
6750 void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
6751 assert(UseBiasedLocking, "why call this otherwise?");
6752
6753 // Check for biased locking unlock case, which is a no-op
6754 // Note: we do not have to check the thread ID for two reasons.
6755 // First, the interpreter checks for IllegalMonitorStateException at
6756 // a higher level. Second, if the bias was revoked while we held the
6757 // lock, the object could not be rebiased toward another thread, so
6758 // the bias bit would be clear.
6759 movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
6760 andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
6761 cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
6762 jcc(Assembler::equal, done);
6763 }
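
// Aside: the unlock fast path above is just a masked pattern test on the mark
// word. In plain C++ terms (a sketch; 0x7 and 0x5 are markOopDesc's
// biased_lock_mask_in_place and biased_lock_pattern values):
#include <stdint.h>
static bool has_biased_pattern(intptr_t mark_word) {
  const intptr_t biased_lock_mask    = 0x7;  // low lock bits of the header
  const intptr_t biased_lock_pattern = 0x5;  // "biased" encoding (0b101)
  return (mark_word & biased_lock_mask) == biased_lock_pattern;
}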
6764
6765 void MacroAssembler::c2bool(Register x) {
6766 // implements x == 0 ? 0 : 1
6767 // note: must only look at the least-significant byte of x
6768 // since C-style booleans are stored in one byte
6769 // only! (was bug)
6770 andl(x, 0xFF);
6771 setb(Assembler::notZero, x);
6772 }
6773
6774 // Wouldn't be needed if the AddressLiteral version had a new name
6775 void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
6776 Assembler::call(L, rtype);
6777 }
6778
6779 void MacroAssembler::call(Register entry) {
6780 Assembler::call(entry);
6781 }
6782
6783 void MacroAssembler::call(AddressLiteral entry) {
6784 if (reachable(entry)) {
6785 Assembler::call_literal(entry.target(), entry.rspec());
6786 } else {
6787 lea(rscratch1, entry);
6788 Assembler::call(rscratch1);
6789 }
6790 }
6791
6792 void MacroAssembler::ic_call(address entry) {
6793 RelocationHolder rh = virtual_call_Relocation::spec(pc());
6794 movptr(rax, (intptr_t)Universe::non_oop_word());
6795 call(AddressLiteral(entry, rh));
6796 }
6797
6798 // Implementation of call_VM versions
6799
6800 void MacroAssembler::call_VM(Register oop_result,
6801 address entry_point,
6802 bool check_exceptions) {
6803 Label C, E;
6804 call(C, relocInfo::none);
6805 jmp(E);
6806
6807 bind(C);
6808 call_VM_helper(oop_result, entry_point, 0, check_exceptions);
6809 ret(0);
6810
6811 bind(E);
6812 }
6813
6814 void MacroAssembler::call_VM(Register oop_result,
6815 address entry_point,
6816 Register arg_1,
6817 bool check_exceptions) {
6818 Label C, E;
6819 call(C, relocInfo::none);
6820 jmp(E);
6821
6822 bind(C);
6823 pass_arg1(this, arg_1);
6824 call_VM_helper(oop_result, entry_point, 1, check_exceptions);
6825 ret(0);
6826
6827 bind(E);
6828 }
6829
6830 void MacroAssembler::call_VM(Register oop_result,
6831 address entry_point,
6832 Register arg_1,
6833 Register arg_2,
6834 bool check_exceptions) {
6835 Label C, E;
6836 call(C, relocInfo::none);
6837 jmp(E);
6838
6839 bind(C);
6840
6841 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
6842
6843 pass_arg2(this, arg_2);
6844 pass_arg1(this, arg_1);
6845 call_VM_helper(oop_result, entry_point, 2, check_exceptions);
6846 ret(0);
6847
6848 bind(E);
6849 }
6850
6851 void MacroAssembler::call_VM(Register oop_result,
6852 address entry_point,
6853 Register arg_1,
6854 Register arg_2,
6855 Register arg_3,
6856 bool check_exceptions) {
6857 Label C, E;
6858 call(C, relocInfo::none);
6859 jmp(E);
6860
6861 bind(C);
6862
6863 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
6864 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
6865 pass_arg3(this, arg_3);
6866
6867 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
6868 pass_arg2(this, arg_2);
6869
6870 pass_arg1(this, arg_1);
6871 call_VM_helper(oop_result, entry_point, 3, check_exceptions);
6872 ret(0);
6873
6874 bind(E);
6875 }
6876
6877 void MacroAssembler::call_VM(Register oop_result,
6878 Register last_java_sp,
6879 address entry_point,
6880 int number_of_arguments,
6881 bool check_exceptions) {
6882 Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
6883 call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
6884 }
6885
6886 void MacroAssembler::call_VM(Register oop_result,
6887 Register last_java_sp,
6888 address entry_point,
6889 Register arg_1,
6890 bool check_exceptions) {
6891 pass_arg1(this, arg_1);
6892 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
6893 }
6894
6895 void MacroAssembler::call_VM(Register oop_result,
6896 Register last_java_sp,
6897 address entry_point,
6898 Register arg_1,
6899 Register arg_2,
6900 bool check_exceptions) {
6901
6902 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
6903 pass_arg2(this, arg_2);
6904 pass_arg1(this, arg_1);
6905 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
6906 }
6907
6908 void MacroAssembler::call_VM(Register oop_result,
6909 Register last_java_sp,
6910 address entry_point,
6911 Register arg_1,
6912 Register arg_2,
6913 Register arg_3,
6914 bool check_exceptions) {
6915 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
6916 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
6917 pass_arg3(this, arg_3);
6918 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
6919 pass_arg2(this, arg_2);
6920 pass_arg1(this, arg_1);
6921 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
6922 }
6923
6924 void MacroAssembler::super_call_VM(Register oop_result,
6925 Register last_java_sp,
6926 address entry_point,
6927 int number_of_arguments,
6928 bool check_exceptions) {
6929 Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
6930 MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
6931 }
6932
6933 void MacroAssembler::super_call_VM(Register oop_result,
6934 Register last_java_sp,
6935 address entry_point,
6936 Register arg_1,
6937 bool check_exceptions) {
6938 pass_arg1(this, arg_1);
6939 super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
6940 }
6941
6942 void MacroAssembler::super_call_VM(Register oop_result,
6943 Register last_java_sp,
6944 address entry_point,
6945 Register arg_1,
6946 Register arg_2,
6947 bool check_exceptions) {
6948
6949 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
6950 pass_arg2(this, arg_2);
6951 pass_arg1(this, arg_1);
6952 super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
6953 }
6954
6955 void MacroAssembler::super_call_VM(Register oop_result,
6956 Register last_java_sp,
6957 address entry_point,
6958 Register arg_1,
6959 Register arg_2,
6960 Register arg_3,
6961 bool check_exceptions) {
6962 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
6963 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
6964 pass_arg3(this, arg_3);
6965 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
6966 pass_arg2(this, arg_2);
6967 pass_arg1(this, arg_1);
6968 super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
6969 }
6970
6971 void MacroAssembler::call_VM_base(Register oop_result,
6972 Register java_thread,
6973 Register last_java_sp,
6974 address entry_point,
6975 int number_of_arguments,
6976 bool check_exceptions) {
6977 // determine java_thread register
6978 if (!java_thread->is_valid()) {
6979 #ifdef _LP64
6980 java_thread = r15_thread;
6981 #else
6982 java_thread = rdi;
6983 get_thread(java_thread);
6984 #endif // LP64
6985 }
6986 // determine last_java_sp register
6987 if (!last_java_sp->is_valid()) {
6988 last_java_sp = rsp;
6989 }
6990 // debugging support
6991 assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
6992 LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
6993 #ifdef ASSERT
6994 // TraceBytecodes does not use r12 but saves it over the call, so don't verify
6995 // r12 is the heapbase.
6996 LP64_ONLY(if ((UseCompressedOops || UseCompressedKlassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
6997 #endif // ASSERT
6998
6999 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
7000 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
7001
7002 // push java thread (becomes first argument of C function)
7003
7004 NOT_LP64(push(java_thread); number_of_arguments++);
7005 LP64_ONLY(mov(c_rarg0, r15_thread));
7006
7007 // set last Java frame before call
7008 assert(last_java_sp != rbp, "can't use ebp/rbp");
7009
7010 // Only interpreter should have to set fp
7011 set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);
7012
7013 // do the call, remove parameters
7014 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
7015
7016 // restore the thread (cannot use the pushed argument since arguments
7017 // may be overwritten by C code generated by an optimizing compiler);
7018 // however, we can use the register value directly if it is callee saved.
7019 if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
7020 // rdi & rsi (also r15) are callee saved -> nothing to do
7021 #ifdef ASSERT
7022 guarantee(java_thread != rax, "change this code");
7023 push(rax);
7024 { Label L;
7025 get_thread(rax);
7026 cmpptr(java_thread, rax);
7027 jcc(Assembler::equal, L);
7028 STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
7029 bind(L);
7030 }
7031 pop(rax);
7032 #endif
7033 } else {
7034 get_thread(java_thread);
7035 }
7036 // reset last Java frame
7037 // Only interpreter should have to clear fp
7038 reset_last_Java_frame(java_thread, true, false);
7039
7040 #ifndef CC_INTERP
7041 // C++ interp handles this in the interpreter
7042 check_and_handle_popframe(java_thread);
7043 check_and_handle_earlyret(java_thread);
7044 #endif /* CC_INTERP */
7045
7046 if (check_exceptions) {
7047 // check for pending exceptions (java_thread is set upon return)
7048 cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
7049 #ifndef _LP64
7050 jump_cc(Assembler::notEqual,
7051 RuntimeAddress(StubRoutines::forward_exception_entry()));
7052 #else
7053 // This used to conditionally jump to forward_exception; however, if the
7054 // code is relocated the branch may no longer reach, so we must jump
7055 // around it so that we can always reach the target.
7056
7057 Label ok;
7058 jcc(Assembler::equal, ok);
7059 jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
7060 bind(ok);
7061 #endif // LP64
7062 }
7063
7064 // get oop result if there is one and reset the value in the thread
7065 if (oop_result->is_valid()) {
7066 get_vm_result(oop_result, java_thread);
7067 }
7068 }
7069
7070 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
7071
7072 // Calculate the value for last_Java_sp. This is
7073 // somewhat subtle: call_VM does an intermediate call
7074 // which places a return address on the stack just under the
7075 // stack pointer as the user finished with it. This allows
7076 // us to retrieve last_Java_pc from last_Java_sp[-1].
7077 // On 32-bit we then have to push additional args on the stack to accomplish
7078 // the actual requested call. On 64-bit call_VM can only use register args,
7079 // so the only extra space is the return address that call_VM created.
7080 // This hopefully explains the calculations here.
7081
7082 #ifdef _LP64
7083 // We've pushed one address, correct last_Java_sp
7084 lea(rax, Address(rsp, wordSize));
7085 #else
7086 lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
7087 #endif // LP64
7088
7089 call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
7090
7091 }
7092
7093 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
7094 call_VM_leaf_base(entry_point, number_of_arguments);
7095 }
7096
7097 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
7098 pass_arg0(this, arg_0);
7099 call_VM_leaf(entry_point, 1);
7100 }
7101
7102 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
7103
7104 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
7105 pass_arg1(this, arg_1);
7106 pass_arg0(this, arg_0);
7107 call_VM_leaf(entry_point, 2);
7108 }
7109
7110 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
7111 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
7112 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
7113 pass_arg2(this, arg_2);
7114 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
7115 pass_arg1(this, arg_1);
7116 pass_arg0(this, arg_0);
7117 call_VM_leaf(entry_point, 3);
7118 }
7119
7120 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
7121 pass_arg0(this, arg_0);
7122 MacroAssembler::call_VM_leaf_base(entry_point, 1);
7123 }
7124
7125 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
7126
7127 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
7128 pass_arg1(this, arg_1);
7129 pass_arg0(this, arg_0);
7130 MacroAssembler::call_VM_leaf_base(entry_point, 2);
7131 }
7132
7133 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
7134 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
7135 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
7136 pass_arg2(this, arg_2);
7137 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
7138 pass_arg1(this, arg_1);
7139 pass_arg0(this, arg_0);
7140 MacroAssembler::call_VM_leaf_base(entry_point, 3);
7141 }
7142
7143 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
7144 LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg"));
7145 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
7146 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
7147 pass_arg3(this, arg_3);
7148 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
7149 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
7150 pass_arg2(this, arg_2);
7151 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
7152 pass_arg1(this, arg_1);
7153 pass_arg0(this, arg_0);
7154 MacroAssembler::call_VM_leaf_base(entry_point, 4);
7155 }
7156
7157 void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
7158 movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
7159 movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
7160 verify_oop(oop_result, "broken oop in call_VM_base");
7161 }
7162
7163 void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
7164 movptr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
7165 movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD);
7166 }
7167
7168 void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
7169 }
7170
7171 void MacroAssembler::check_and_handle_popframe(Register java_thread) {
7172 }
7173
7174 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) {
7175 if (reachable(src1)) {
7176 cmpl(as_Address(src1), imm);
7177 } else {
7178 lea(rscratch1, src1);
7179 cmpl(Address(rscratch1, 0), imm);
7180 }
7181 }
7182
7183 void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
7184 assert(!src2.is_lval(), "use cmpptr");
7185 if (reachable(src2)) {
7186 cmpl(src1, as_Address(src2));
7187 } else {
7188 lea(rscratch1, src2);
7189 cmpl(src1, Address(rscratch1, 0));
7190 }
7191 }
7192
7193 void MacroAssembler::cmp32(Register src1, int32_t imm) {
7194 Assembler::cmpl(src1, imm);
7195 }
7196
7197 void MacroAssembler::cmp32(Register src1, Address src2) {
7198 Assembler::cmpl(src1, src2);
7199 }
7200
7201 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
7202 ucomisd(opr1, opr2);
7203
7204 Label L;
7205 if (unordered_is_less) {
7206 movl(dst, -1);
7207 jcc(Assembler::parity, L);
7208 jcc(Assembler::below , L);
7209 movl(dst, 0);
7210 jcc(Assembler::equal , L);
7211 increment(dst);
7212 } else { // unordered is greater
7213 movl(dst, 1);
7214 jcc(Assembler::parity, L);
7215 jcc(Assembler::above , L);
7216 movl(dst, 0);
7217 jcc(Assembler::equal , L);
7218 decrementl(dst);
7219 }
7220 bind(L);
7221 }
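
// Aside: a minimal sketch of the -1/0/+1 mapping implemented above
// (illustrative only). ucomisd sets PF when the result is unordered (a NaN
// operand), which the parity jumps route to the caller-chosen extreme:
static int fp_compare2int_sketch(double a, double b, bool unordered_is_less) {
  if (a != a || b != b) return unordered_is_less ? -1 : 1;  // unordered
  if (a < b) return -1;
  if (a > b) return  1;
  return 0;
}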
7222
7223 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
7224 ucomiss(opr1, opr2);
7225
7226 Label L;
7227 if (unordered_is_less) {
7228 movl(dst, -1);
7229 jcc(Assembler::parity, L);
7230 jcc(Assembler::below , L);
7231 movl(dst, 0);
7232 jcc(Assembler::equal , L);
7233 increment(dst);
7234 } else { // unordered is greater
7235 movl(dst, 1);
7236 jcc(Assembler::parity, L);
7237 jcc(Assembler::above , L);
7238 movl(dst, 0);
7239 jcc(Assembler::equal , L);
7240 decrementl(dst);
7241 }
7242 bind(L);
7243 }
7244
7245
7246 void MacroAssembler::cmp8(AddressLiteral src1, int imm) {
7247 if (reachable(src1)) {
7248 cmpb(as_Address(src1), imm);
7249 } else {
7250 lea(rscratch1, src1);
7251 cmpb(Address(rscratch1, 0), imm);
7252 }
7253 }
7254
7255 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
7256 #ifdef _LP64
7257 if (src2.is_lval()) {
7258 movptr(rscratch1, src2);
7259 Assembler::cmpq(src1, rscratch1);
7260 } else if (reachable(src2)) {
7261 cmpq(src1, as_Address(src2));
7262 } else {
7263 lea(rscratch1, src2);
7264 Assembler::cmpq(src1, Address(rscratch1, 0));
7265 }
7266 #else
7267 if (src2.is_lval()) {
7268 cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
7269 } else {
7270 cmpl(src1, as_Address(src2));
7271 }
7272 #endif // _LP64
7273 }
7274
7275 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
7276 assert(src2.is_lval(), "not a mem-mem compare");
7277 #ifdef _LP64
7278 // moves src2's literal address
7279 movptr(rscratch1, src2);
7280 Assembler::cmpq(src1, rscratch1);
7281 #else
7282 cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
7283 #endif // _LP64
7284 }
7285
7286 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
7287 if (reachable(adr)) {
7288 if (os::is_MP())
7289 lock();
7290 cmpxchgptr(reg, as_Address(adr));
7291 } else {
7292 lea(rscratch1, adr);
7293 if (os::is_MP())
7294 lock();
7295 cmpxchgptr(reg, Address(rscratch1, 0));
7296 }
7297 }
7298
7299 void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
7300 LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr));
7301 }
7302
7303 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
7304 if (reachable(src)) {
7305 Assembler::comisd(dst, as_Address(src));
7306 } else {
7307 lea(rscratch1, src);
7308 Assembler::comisd(dst, Address(rscratch1, 0));
7309 }
7310 }
7311
7312 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
7313 if (reachable(src)) {
7314 Assembler::comiss(dst, as_Address(src));
7315 } else {
7316 lea(rscratch1, src);
7317 Assembler::comiss(dst, Address(rscratch1, 0));
7318 }
7319 }
7320
7321
7322 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
7323 Condition negated_cond = negate_condition(cond);
7324 Label L;
7325 jcc(negated_cond, L);
7326 atomic_incl(counter_addr);
7327 bind(L);
7328 }
7329
7330 int MacroAssembler::corrected_idivl(Register reg) {
7331 // Full implementation of Java idiv and irem; checks for
7332 // special case as described in JVM spec., p.243 & p.271.
7333 // The function returns the (pc) offset of the idivl
7334 // instruction - may be needed for implicit exceptions.
7335 //
7336 // normal case special case
7337 //
7338 // input : rax: dividend min_int
7339 // reg: divisor (may not be rax/rdx) -1
7340 //
7341 // output: rax: quotient (= rax idiv reg) min_int
7342 // rdx: remainder (= rax irem reg) 0
7343 assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
7344 const int min_int = 0x80000000;
7345 Label normal_case, special_case;
7346
7347 // check for special case
7348 cmpl(rax, min_int);
7349 jcc(Assembler::notEqual, normal_case);
7350 xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
7351 cmpl(reg, -1);
7352 jcc(Assembler::equal, special_case);
7353
7354 // handle normal case
7355 bind(normal_case);
7356 cdql();
7357 int idivl_offset = offset();
7358 idivl(reg);
7359
7360 // normal and special case exit
7361 bind(special_case);
7362
7363 return idivl_offset;
7364 }
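
// Aside: a minimal C++ sketch of the special case guarded above (illustrative
// only). On x86, min_int / -1 overflows the quotient and raises #DE, so the
// JVM spec defines the result pair as (min_int, 0) instead:
#include <stdint.h>
static void java_idiv_irem(int32_t dividend, int32_t divisor,
                           int32_t* quotient, int32_t* remainder) {
  if (dividend == INT32_MIN && divisor == -1) {
    *quotient  = INT32_MIN;  // quotient wraps back to min_int
    *remainder = 0;          // remainder is defined to be 0
  } else {
    *quotient  = dividend / divisor;
    *remainder = dividend % divisor;
  }
}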
7365
7366
7367
7368 void MacroAssembler::decrementl(Register reg, int value) {
7369 if (value == min_jint) {subl(reg, value) ; return; }
7370 if (value < 0) { incrementl(reg, -value); return; }
7371 if (value == 0) { ; return; }
7372 if (value == 1 && UseIncDec) { decl(reg) ; return; }
7373 /* else */ { subl(reg, value) ; return; }
7374 }
7375
7376 void MacroAssembler::decrementl(Address dst, int value) {
7377 if (value == min_jint) {subl(dst, value) ; return; }
7378 if (value < 0) { incrementl(dst, -value); return; }
7379 if (value == 0) { ; return; }
7380 if (value == 1 && UseIncDec) { decl(dst) ; return; }
7381 /* else */ { subl(dst, value) ; return; }
7382 }
7383
7384 void MacroAssembler::division_with_shift (Register reg, int shift_value) {
7385 assert (shift_value > 0, "illegal shift value");
7386 Label _is_positive;
7387 testl (reg, reg);
7388 jcc (Assembler::positive, _is_positive);
7389 int offset = (1 << shift_value) - 1 ;
7390
7391 if (offset == 1) {
7392 incrementl(reg);
7393 } else {
7394 addl(reg, offset);
7395 }
7396
7397 bind (_is_positive);
7398 sarl(reg, shift_value);
7399 }
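
// Aside: a minimal sketch of why the bias is added above (illustrative only).
// An arithmetic right shift of a negative value rounds toward -infinity, but
// Java division rounds toward zero, so (2^shift)-1 is added to negative
// dividends first:
#include <stdint.h>
static int32_t div_by_pow2_sketch(int32_t x, int shift) {
  if (x < 0) x += (1 << shift) - 1;  // bias so the shift rounds toward zero
  return x >> shift;                 // sarl; arithmetic shift assumed, as on x86
}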
7400
7401 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src) {
7402 if (reachable(src)) {
7403 Assembler::divsd(dst, as_Address(src));
7404 } else {
7405 lea(rscratch1, src);
7406 Assembler::divsd(dst, Address(rscratch1, 0));
7407 }
7408 }
7409
7410 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src) {
7411 if (reachable(src)) {
7412 Assembler::divss(dst, as_Address(src));
7413 } else {
7414 lea(rscratch1, src);
7415 Assembler::divss(dst, Address(rscratch1, 0));
7416 }
7417 }
7418
7419 // !defined(COMPILER2) is because of stupid core builds
7420 #if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
7421 void MacroAssembler::empty_FPU_stack() {
7422 if (VM_Version::supports_mmx()) {
7423 emms();
7424 } else {
7425 for (int i = 8; i-- > 0; ) ffree(i);
7426 }
7427 }
7428 #endif // !LP64 || C1 || !C2
7429
7430
7431 // Defines obj, preserves var_size_in_bytes
7432 void MacroAssembler::eden_allocate(Register obj,
7433 Register var_size_in_bytes,
7434 int con_size_in_bytes,
7435 Register t1,
7436 Label& slow_case) {
7437 assert(obj == rax, "obj must be in rax, for cmpxchg");
7438 assert_different_registers(obj, var_size_in_bytes, t1);
7439 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
7440 jmp(slow_case);
7441 } else {
7442 Register end = t1;
7443 Label retry;
7444 bind(retry);
7445 ExternalAddress heap_top((address) Universe::heap()->top_addr());
7446 movptr(obj, heap_top);
7447 if (var_size_in_bytes == noreg) {
7448 lea(end, Address(obj, con_size_in_bytes));
7449 } else {
7450 lea(end, Address(obj, var_size_in_bytes, Address::times_1));
7451 }
7452 // if end < obj then we wrapped around => object too long => slow case
7453 cmpptr(end, obj);
7454 jcc(Assembler::below, slow_case);
7455 cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
7456 jcc(Assembler::above, slow_case);
7457 // Compare obj with the top addr, and if still equal, store the new top addr in
7458 // end at the address of the top addr pointer. Sets ZF if it was equal, and clears
7459 // it otherwise. Use lock prefix for atomicity on MPs.
7460 locked_cmpxchgptr(end, heap_top);
7461 jcc(Assembler::notEqual, retry);
7462 }
7463 }
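
// Aside: a minimal sketch of the lock-free bump-pointer allocation above,
// written with std::atomic in place of lock cmpxchg (illustrative only; the
// names are made up):
#include <atomic>
#include <stddef.h>
#include <stdint.h>
static void* bump_allocate_sketch(std::atomic<uintptr_t>* top,
                                  uintptr_t heap_end, size_t size) {
  uintptr_t obj = top->load(std::memory_order_relaxed);
  for (;;) {
    uintptr_t new_top = obj + size;
    if (new_top < obj)      return NULL;  // wrapped around: object too long
    if (new_top > heap_end) return NULL;  // heap exhausted: take the slow case
    // On CAS failure obj is reloaded with the current top and we retry,
    // exactly like the jcc(notEqual, retry) loop above.
    if (top->compare_exchange_weak(obj, new_top)) return (void*)obj;
  }
}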
7464
7465 void MacroAssembler::enter() {
7466 push(rbp);
7467 mov(rbp, rsp);
7468 }
7469
7470 // A 5 byte nop that is safe for patching (see patch_verified_entry)
7471 void MacroAssembler::fat_nop() {
7472 if (UseAddressNop) {
7473 addr_nop_5();
7474 } else {
7475 emit_byte(0x26); // es:
7476 emit_byte(0x2e); // cs:
7477 emit_byte(0x64); // fs:
7478 emit_byte(0x65); // gs:
7479 emit_byte(0x90);
7480 }
7481 }
7482
7483 void MacroAssembler::fcmp(Register tmp) {
7484 fcmp(tmp, 1, true, true);
7485 }
7486
7487 void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
7488 assert(!pop_right || pop_left, "usage error");
7489 if (VM_Version::supports_cmov()) {
7490 assert(tmp == noreg, "unneeded temp");
7491 if (pop_left) {
7492 fucomip(index);
7493 } else {
7494 fucomi(index);
7495 }
7496 if (pop_right) {
7497 fpop();
7498 }
7499 } else {
7500 assert(tmp != noreg, "need temp");
7501 if (pop_left) {
7502 if (pop_right) {
7503 fcompp();
7504 } else {
7505 fcomp(index);
7506 }
7507 } else {
7508 fcom(index);
7509 }
7510 // convert FPU condition into eflags condition via rax
7511 save_rax(tmp);
7512 fwait(); fnstsw_ax();
7513 sahf();
7514 restore_rax(tmp);
7515 }
7516 // condition codes set as follows:
7517 //
7518 // CF (corresponds to C0) if x < y
7519 // PF (corresponds to C2) if unordered
7520 // ZF (corresponds to C3) if x = y
7521 }
7522
7523 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) {
7524 fcmp2int(dst, unordered_is_less, 1, true, true);
7525 }
7526
7527 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
7528 fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right);
7529 Label L;
7530 if (unordered_is_less) {
7531 movl(dst, -1);
7532 jcc(Assembler::parity, L);
7533 jcc(Assembler::below , L);
7534 movl(dst, 0);
7535 jcc(Assembler::equal , L);
7536 increment(dst);
7537 } else { // unordered is greater
7538 movl(dst, 1);
7539 jcc(Assembler::parity, L);
7540 jcc(Assembler::above , L);
7541 movl(dst, 0);
7542 jcc(Assembler::equal , L);
7543 decrementl(dst);
7544 }
7545 bind(L);
7546 }
7547
7548 void MacroAssembler::fld_d(AddressLiteral src) {
7549 fld_d(as_Address(src));
7550 }
7551
7552 void MacroAssembler::fld_s(AddressLiteral src) {
7553 fld_s(as_Address(src));
7554 }
7555
7556 void MacroAssembler::fld_x(AddressLiteral src) {
7557 Assembler::fld_x(as_Address(src));
7558 }
7559
7560 void MacroAssembler::fldcw(AddressLiteral src) {
7561 Assembler::fldcw(as_Address(src));
7562 }
7563
7564 void MacroAssembler::pow_exp_core_encoding() {
7565 // kills rax, rcx, rdx
7566 subptr(rsp,sizeof(jdouble));
7567 // computes 2^X. Stack: X ...
7568 // f2xm1 computes 2^X-1 but only operates on -1<=X<=1. Get int(X) and
7569 // keep it on the thread's stack to compute 2^int(X) later
7570 // then compute 2^(X-int(X)) as (2^(X-int(X))-1)+1
7571 // final result is obtained with: 2^X = 2^int(X) * 2^(X-int(X))
7572 fld_s(0); // Stack: X X ...
7573 frndint(); // Stack: int(X) X ...
7574 fsuba(1); // Stack: int(X) X-int(X) ...
7575 fistp_s(Address(rsp,0)); // move int(X) as integer to thread's stack. Stack: X-int(X) ...
7576 f2xm1(); // Stack: 2^(X-int(X))-1 ...
7577 fld1(); // Stack: 1 2^(X-int(X))-1 ...
7578 faddp(1); // Stack: 2^(X-int(X))
7579 // computes 2^(int(X)): add exponent bias (1023) to int(X), then
7580 // shift int(X)+1023 to exponent position.
7581 // The exponent is limited to 11 bits; if int(X)+1023 does not fit in 11
7582 // bits, set the result to NaN. 0x000 and 0x7FF are reserved exponent
7583 // values, so detect them and set the result to NaN.
7584 movl(rax,Address(rsp,0));
7585 movl(rcx, -2048); // 11 bit mask and valid NaN binary encoding
7586 addl(rax, 1023);
7587 movl(rdx,rax);
7588 shll(rax,20);
7589 // Check that 0 < int(X)+1023 < 2047. Otherwise set rax to NaN.
7590 addl(rdx,1);
7591 // Check that 1 < int(X)+1023+1 < 2048
7592 // in 3 steps:
7593 // 1- (int(X)+1023+1)&-2048 == 0 => 0 <= int(X)+1023+1 < 2048
7594 // 2- (int(X)+1023+1)&-2048 != 0
7595 // 3- (int(X)+1023+1)&-2048 != 1
7596 // Do 2- first because addl just updated the flags.
7597 cmov32(Assembler::equal,rax,rcx);
7598 cmpl(rdx,1);
7599 cmov32(Assembler::equal,rax,rcx);
7600 testl(rdx,rcx);
7601 cmov32(Assembler::notEqual,rax,rcx);
7602 movl(Address(rsp,4),rax);
7603 movl(Address(rsp,0),0);
7604 fmul_d(Address(rsp,0)); // Stack: 2^X ...
7605 addptr(rsp,sizeof(jdouble));
7606 }
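
// Aside: a minimal sketch of the 2^int(X) construction above (illustrative
// only). The two 32-bit stores at rsp build an IEEE-754 double directly:
// low word 0, high word (int(X)+1023) << 20, i.e. a biased exponent with a
// zero mantissa:
#include <stdint.h>
#include <string.h>
static double pow2_int_sketch(int k) {
  // valid for normal results only: -1022 <= k <= 1023 (the code above
  // substitutes NaN when the biased exponent would be 0x000 or 0x7FF)
  uint64_t bits = (uint64_t)(uint32_t)(k + 1023) << 52;
  double d;
  memcpy(&d, &bits, sizeof(d));
  return d;
}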
7607
7608 void MacroAssembler::increase_precision() {
7609 subptr(rsp, BytesPerWord);
7610 fnstcw(Address(rsp, 0));
7611 movl(rax, Address(rsp, 0));
7612 orl(rax, 0x300);
7613 push(rax);
7614 fldcw(Address(rsp, 0));
7615 pop(rax);
7616 }
7617
7618 void MacroAssembler::restore_precision() {
7619 fldcw(Address(rsp, 0));
7620 addptr(rsp, BytesPerWord);
7621 }
7622
7623 void MacroAssembler::fast_pow() {
7624 // computes X^Y = 2^(Y * log2(X))
7625 // if fast computation is not possible, result is NaN. Requires
7626 // fallback from user of this macro.
7627 // increase precision for intermediate steps of the computation
7628 increase_precision();
7629 fyl2x(); // Stack: (Y*log2(X)) ...
7630 pow_exp_core_encoding(); // Stack: X^Y ...
7631 restore_precision();
7632 }
7633
7634 void MacroAssembler::fast_exp() {
7635 // computes exp(X) = 2^(X * log2(e))
7636 // if fast computation is not possible, result is NaN. Requires
7637 // fallback from user of this macro.
7638 // increase precision for intermediate steps of the computation
7639 increase_precision();
7640 fldl2e(); // Stack: log2(e) X ...
7641 fmulp(1); // Stack: (X*log2(e)) ...
7642 pow_exp_core_encoding(); // Stack: exp(X) ...
7643 restore_precision();
7644 }
7645
7646 void MacroAssembler::pow_or_exp(bool is_exp, int num_fpu_regs_in_use) {
7647 // kills rax, rcx, rdx
7648 // pow and exp need 2 extra registers on the fpu stack.
7649 Label slow_case, done;
7650 Register tmp = noreg;
7651 if (!VM_Version::supports_cmov()) {
7652 // fcmp needs a temporary, so preserve rdx
7653 tmp = rdx;
7654 }
7655 Register tmp2 = rax;
7656 Register tmp3 = rcx;
7657
7658 if (is_exp) {
7659 // Stack: X
7660 fld_s(0); // duplicate argument for runtime call. Stack: X X
7661 fast_exp(); // Stack: exp(X) X
7662 fcmp(tmp, 0, false, false); // Stack: exp(X) X
7663 // exp(X) not equal to itself: exp(X) is NaN, so go to the slow case.
7664 jcc(Assembler::parity, slow_case);
7665 // get rid of duplicate argument. Stack: exp(X)
7666 if (num_fpu_regs_in_use > 0) {
7667 fxch();
7668 fpop();
7669 } else {
7670 ffree(1);
7671 }
7672 jmp(done);
7673 } else {
7674 // Stack: X Y
7675 Label x_negative, y_odd;
7676
7677 fldz(); // Stack: 0 X Y
7678 fcmp(tmp, 1, true, false); // Stack: X Y
7679 jcc(Assembler::above, x_negative);
7680
7681 // X >= 0
7682
7683 fld_s(1); // duplicate arguments for runtime call. Stack: Y X Y
7684 fld_s(1); // Stack: X Y X Y
7685 fast_pow(); // Stack: X^Y X Y
7686 fcmp(tmp, 0, false, false); // Stack: X^Y X Y
7687 // X^Y not equal to itself: X^Y is NaN, so go to the slow case.
7688 jcc(Assembler::parity, slow_case);
7689 // get rid of duplicate arguments. Stack: X^Y
7690 if (num_fpu_regs_in_use > 0) {
7691 fxch(); fpop();
7692 fxch(); fpop();
7693 } else {
7694 ffree(2);
7695 ffree(1);
7696 }
7697 jmp(done);
7698
7699 // X <= 0
7700 bind(x_negative);
7701
7702 fld_s(1); // Stack: Y X Y
7703 frndint(); // Stack: int(Y) X Y
7704 fcmp(tmp, 2, false, false); // Stack: int(Y) X Y
7705 jcc(Assembler::notEqual, slow_case);
7706
7707 subptr(rsp, 8);
7708
7709 // For X^Y, when X < 0, Y has to be an integer and the final
7710 // result depends on whether it's odd or even. We just checked
7711 // that int(Y) == Y. We move int(Y) to gp registers as a 64 bit
7712 // integer to test its parity. If int(Y) is huge and doesn't fit
7713 // in the 64 bit integer range, the integer indefinite value will
7714 // end up in the gp registers. Huge numbers are all even, and the
7715 // integer indefinite value is even, so it's fine.
7716
7717 #ifdef ASSERT
7718 // Let's check we don't end up with an integer indefinite number
7719 // when not expected. First test for huge numbers: check whether
7720 // int(Y)+1 == int(Y) which is true for very large numbers and
7721 // those are all even. A 64 bit integer is guaranteed to not
7722 // overflow for numbers where y+1 != y (when precision is set to
7723 // double precision).
7724 Label y_not_huge;
7725
7726 fld1(); // Stack: 1 int(Y) X Y
7727 fadd(1); // Stack: 1+int(Y) int(Y) X Y
7728
7729 #ifdef _LP64
7730 // trip to memory to force the precision down from double extended
7731 // precision
7732 fstp_d(Address(rsp, 0));
7733 fld_d(Address(rsp, 0));
7734 #endif
7735
7736 fcmp(tmp, 1, true, false); // Stack: int(Y) X Y
7737 #endif
7738
7739 // move int(Y) as 64 bit integer to thread's stack
7740 fistp_d(Address(rsp,0)); // Stack: X Y
7741
7742 #ifdef ASSERT
7743 jcc(Assembler::notEqual, y_not_huge);
7744
7745 // Y is huge so we know it's even. It may not fit in a 64 bit
7746 // integer and we don't want the debug code below to see the
7747 // integer indefinite value so overwrite int(Y) on the thread's
7748 // stack with 0.
7749 movl(Address(rsp, 0), 0);
7750 movl(Address(rsp, 4), 0);
7751
7752 bind(y_not_huge);
7753 #endif
7754
7755 fld_s(1); // duplicate arguments for runtime call. Stack: Y X Y
7756 fld_s(1); // Stack: X Y X Y
7757 fabs(); // Stack: abs(X) Y X Y
7758 fast_pow(); // Stack: abs(X)^Y X Y
7759 fcmp(tmp, 0, false, false); // Stack: abs(X)^Y X Y
7760 // abs(X)^Y not equal to itself: abs(X)^Y is NaN, so go to the slow case.
7761
7762 pop(tmp2);
7763 NOT_LP64(pop(tmp3));
7764 jcc(Assembler::parity, slow_case);
7765
7766 #ifdef ASSERT
7767 // Check that int(Y) is not integer indefinite value (int
7768 // overflow). Shouldn't happen because for values that would
7769 // overflow, 1+int(Y) == int(Y), which was tested earlier.
7770 #ifndef _LP64
7771 {
7772 Label integer;
7773 testl(tmp2, tmp2);
7774 jcc(Assembler::notZero, integer);
7775 cmpl(tmp3, 0x80000000);
7776 jcc(Assembler::notZero, integer);
7777 STOP("integer indefinite value shouldn't be seen here");
7778 bind(integer);
7779 }
7780 #else
7781 {
7782 Label integer;
7783 mov(tmp3, tmp2); // preserve tmp2 for parity check below
7784 shlq(tmp3, 1);
7785 jcc(Assembler::carryClear, integer);
7786 jcc(Assembler::notZero, integer);
7787 STOP("integer indefinite value shouldn't be seen here");
7788 bind(integer);
7789 }
7790 #endif
7791 #endif
7792
7793 // get rid of duplicate arguments. Stack: X^Y
7794 if (num_fpu_regs_in_use > 0) {
7795 fxch(); fpop();
7796 fxch(); fpop();
7797 } else {
7798 ffree(2);
7799 ffree(1);
7800 }
7801
7802 testl(tmp2, 1);
7803 jcc(Assembler::zero, done); // X <= 0, Y even: X^Y = abs(X)^Y
7804 // X <= 0, Y odd: X^Y = -abs(X)^Y
7805
7806 fchs(); // Stack: -abs(X)^Y Y
7807 jmp(done);
7808 }
7809
7810 // slow case: runtime call
7811 bind(slow_case);
7812
7813 fpop(); // pop incorrect result or int(Y)
7814
7815 fp_runtime_fallback(is_exp ? CAST_FROM_FN_PTR(address, SharedRuntime::dexp) : CAST_FROM_FN_PTR(address, SharedRuntime::dpow),
7816 is_exp ? 1 : 2, num_fpu_regs_in_use);
7817
7818 // Come here with result in F-TOS
7819 bind(done);
7820 }
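
// Aside: a minimal C++ sketch of the negative-base handling above
// (illustrative only; assumes <math.h> semantics rather than the FPU stack):
#include <math.h>
static double pow_negative_base_sketch(double x, double y) {
  // assumes x < 0: the result is defined only for integral y
  double iy = nearbyint(y);              // frndint above
  if (iy != y) return NAN;               // non-integral y: slow case/NaN
  double r = pow(-x, y);                 // abs(X)^Y
  if (fabs(iy) >= 9007199254740992.0)    // >= 2^53: such doubles are all even
    return r;
  long long n = (long long)iy;           // safe: |iy| < 2^53 fits exactly
  return (n & 1) ? -r : r;               // odd int(Y) flips the sign
}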
7821
7822 void MacroAssembler::fpop() {
7823 ffree();
7824 fincstp();
7825 }
7826
7827 void MacroAssembler::fremr(Register tmp) {
7828 save_rax(tmp);
7829 { Label L;
7830 bind(L);
7831 fprem();
7832 fwait(); fnstsw_ax();
7833 #ifdef _LP64
7834 testl(rax, 0x400);
7835 jcc(Assembler::notEqual, L);
7836 #else
7837 sahf();
7838 jcc(Assembler::parity, L);
7839 #endif // _LP64
7840 }
7841 restore_rax(tmp);
7842 // Result is in ST0.
7843 // Note: fxch & fpop to get rid of ST1
7844 // (otherwise FPU stack could overflow eventually)
7845 fxch(1);
7846 fpop();
7847 }
7848
7849
7850 void MacroAssembler::incrementl(AddressLiteral dst) {
7851 if (reachable(dst)) {
7852 incrementl(as_Address(dst));
7853 } else {
7854 lea(rscratch1, dst);
7855 incrementl(Address(rscratch1, 0));
7856 }
7857 }
7858
7859 void MacroAssembler::incrementl(ArrayAddress dst) {
7860 incrementl(as_Address(dst));
7861 }
7862
7863 void MacroAssembler::incrementl(Register reg, int value) {
7864 if (value == min_jint) {addl(reg, value) ; return; }
7865 if (value < 0) { decrementl(reg, -value); return; }
7866 if (value == 0) { ; return; }
7867 if (value == 1 && UseIncDec) { incl(reg) ; return; }
7868 /* else */ { addl(reg, value) ; return; }
7869 }
7870
7871 void MacroAssembler::incrementl(Address dst, int value) {
7872 if (value == min_jint) {addl(dst, value) ; return; }
7873 if (value < 0) { decrementl(dst, -value); return; }
7874 if (value == 0) { ; return; }
7875 if (value == 1 && UseIncDec) { incl(dst) ; return; }
7876 /* else */ { addl(dst, value) ; return; }
7877 }
7878
7879 void MacroAssembler::jump(AddressLiteral dst) {
7880 if (reachable(dst)) {
7881 jmp_literal(dst.target(), dst.rspec());
7882 } else {
7883 lea(rscratch1, dst);
7884 jmp(rscratch1);
7885 }
7886 }
7887
7888 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
7889 if (reachable(dst)) {
7890 InstructionMark im(this);
7891 relocate(dst.reloc());
7892 const int short_size = 2;
7893 const int long_size = 6;
7894 int offs = (intptr_t)dst.target() - ((intptr_t)_code_pos);
7895 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
7896 // 0111 tttn #8-bit disp
7897 emit_byte(0x70 | cc);
7898 emit_byte((offs - short_size) & 0xFF);
7899 } else {
7900 // 0000 1111 1000 tttn #32-bit disp
7901 emit_byte(0x0F);
7902 emit_byte(0x80 | cc);
7903 emit_long(offs - long_size);
7904 }
7905 } else {
7906 #ifdef ASSERT
7907 warning("reversing conditional branch");
7908 #endif /* ASSERT */
7909 Label skip;
7910 jccb(reverse[cc], skip);
7911 lea(rscratch1, dst);
7912 Assembler::jmp(rscratch1);
7913 bind(skip);
7914 }
7915 }
7916
7917 void MacroAssembler::ldmxcsr(AddressLiteral src) {
7918 if (reachable(src)) {
7919 Assembler::ldmxcsr(as_Address(src));
7920 } else {
7921 lea(rscratch1, src);
7922 Assembler::ldmxcsr(Address(rscratch1, 0));
7923 }
7924 }
7925
7926 int MacroAssembler::load_signed_byte(Register dst, Address src) {
7927 int off;
7928 if (LP64_ONLY(true ||) VM_Version::is_P6()) {
7929 off = offset();
7930 movsbl(dst, src); // movsxb
7931 } else {
7932 off = load_unsigned_byte(dst, src);
7933 shll(dst, 24);
7934 sarl(dst, 24);
7935 }
7936 return off;
7937 }
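
// Aside: a minimal sketch of the shll/sarl fallback above (illustrative only).
// Shifting left then arithmetic-shifting right replicates the sign bit, which
// is what sarl guarantees in the generated code:
#include <stdint.h>
static int32_t sign_extend8_sketch(int32_t v) {
  return (int32_t)((uint32_t)v << 24) >> 24;  // shll 24 then sarl 24
}
static int32_t sign_extend16_sketch(int32_t v) {
  return (int32_t)((uint32_t)v << 16) >> 16;  // shll 16 then sarl 16
}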
7938
7939 // Note: load_signed_short used to be called load_signed_word.
7940 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler
7941 // manual, which means 16 bits, that usage is found nowhere in HotSpot code.
7942 // The term "word" in HotSpot means a 32- or 64-bit machine word.
7943 int MacroAssembler::load_signed_short(Register dst, Address src) {
7944 int off;
7945 if (LP64_ONLY(true ||) VM_Version::is_P6()) {
7946 // This is dubious to me since it seems safe to do a signed 16 => 64 bit
7947 // version, but this is what 64-bit has always done. This seems to imply
7948 // that users are only using 32 bits' worth.
7949 off = offset();
7950 movswl(dst, src); // movsxw
7951 } else {
7952 off = load_unsigned_short(dst, src);
7953 shll(dst, 16);
7954 sarl(dst, 16);
7955 }
7956 return off;
7957 }
7958
7959 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
7960 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
7961 // and "3.9 Partial Register Penalties", p. 22.
7962 int off;
7963 if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) {
7964 off = offset();
7965 movzbl(dst, src); // movzxb
7966 } else {
7967 xorl(dst, dst);
7968 off = offset();
7969 movb(dst, src);
7970 }
7971 return off;
7972 }
7973
7974 // Note: load_unsigned_short used to be called load_unsigned_word.
7975 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
7976 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
7977 // and "3.9 Partial Register Penalties", p. 22.
7978 int off;
7979 if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
7980 off = offset();
7981 movzwl(dst, src); // movzxw
7982 } else {
7983 xorl(dst, dst);
7984 off = offset();
7985 movw(dst, src);
7986 }
7987 return off;
7988 }
7989
7990 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
7991 switch (size_in_bytes) {
7992 #ifndef _LP64
7993 case 8:
7994 assert(dst2 != noreg, "second dest register required");
7995 movl(dst, src);
7996 movl(dst2, src.plus_disp(BytesPerInt));
7997 break;
7998 #else
7999 case 8: movq(dst, src); break;
8000 #endif
8001 case 4: movl(dst, src); break;
8002 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
8003 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
8004 default: ShouldNotReachHere();
8005 }
8006 }
8007
8008 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
8009 switch (size_in_bytes) {
8010 #ifndef _LP64
8011 case 8:
8012 assert(src2 != noreg, "second source register required");
8013 movl(dst, src);
8014 movl(dst.plus_disp(BytesPerInt), src2);
8015 break;
8016 #else
8017 case 8: movq(dst, src); break;
8018 #endif
8019 case 4: movl(dst, src); break;
8020 case 2: movw(dst, src); break;
8021 case 1: movb(dst, src); break;
8022 default: ShouldNotReachHere();
8023 }
8024 }
8025
8026 void MacroAssembler::mov32(AddressLiteral dst, Register src) {
8027 if (reachable(dst)) {
8028 movl(as_Address(dst), src);
8029 } else {
8030 lea(rscratch1, dst);
8031 movl(Address(rscratch1, 0), src);
8032 }
8033 }
8034
8035 void MacroAssembler::mov32(Register dst, AddressLiteral src) {
8036 if (reachable(src)) {
8037 movl(dst, as_Address(src));
8038 } else {
8039 lea(rscratch1, src);
8040 movl(dst, Address(rscratch1, 0));
8041 }
8042 }
8043
8044 // C++ bool manipulation
8045
8046 void MacroAssembler::movbool(Register dst, Address src) {
8047 if(sizeof(bool) == 1)
8048 movb(dst, src);
8049 else if(sizeof(bool) == 2)
8050 movw(dst, src);
8051 else if(sizeof(bool) == 4)
8052 movl(dst, src);
8053 else
8054 // unsupported
8055 ShouldNotReachHere();
8056 }
8057
8058 void MacroAssembler::movbool(Address dst, bool boolconst) {
8059 if(sizeof(bool) == 1)
8060 movb(dst, (int) boolconst);
8061 else if(sizeof(bool) == 2)
8062 movw(dst, (int) boolconst);
8063 else if(sizeof(bool) == 4)
8064 movl(dst, (int) boolconst);
8065 else
8066 // unsupported
8067 ShouldNotReachHere();
8068 }
8069
8070 void MacroAssembler::movbool(Address dst, Register src) {
8071 if(sizeof(bool) == 1)
8072 movb(dst, src);
8073 else if(sizeof(bool) == 2)
8074 movw(dst, src);
8075 else if(sizeof(bool) == 4)
8076 movl(dst, src);
8077 else
8078 // unsupported
8079 ShouldNotReachHere();
8080 }
8081
8082 void MacroAssembler::movbyte(ArrayAddress dst, int src) {
8083 movb(as_Address(dst), src);
8084 }
8085
8086 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src) {
8087 if (reachable(src)) {
8088 movdl(dst, as_Address(src));
8089 } else {
8090 lea(rscratch1, src);
8091 movdl(dst, Address(rscratch1, 0));
8092 }
8093 }
8094
8095 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src) {
8096 if (reachable(src)) {
8097 movq(dst, as_Address(src));
8098 } else {
8099 lea(rscratch1, src);
8100 movq(dst, Address(rscratch1, 0));
8101 }
8102 }
8103
8104 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
8105 if (reachable(src)) {
8106 if (UseXmmLoadAndClearUpper) {
8107 movsd (dst, as_Address(src));
8108 } else {
8109 movlpd(dst, as_Address(src));
8110 }
8111 } else {
8112 lea(rscratch1, src);
8113 if (UseXmmLoadAndClearUpper) {
8114 movsd (dst, Address(rscratch1, 0));
8115 } else {
8116 movlpd(dst, Address(rscratch1, 0));
8117 }
8118 }
8119 }
8120
8121 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
8122 if (reachable(src)) {
8123 movss(dst, as_Address(src));
8124 } else {
8125 lea(rscratch1, src);
8126 movss(dst, Address(rscratch1, 0));
8127 }
8128 }
8129
8130 void MacroAssembler::movptr(Register dst, Register src) {
8131 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
8132 }
8133
8134 void MacroAssembler::movptr(Register dst, Address src) {
8135 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
8136 }
8137
8138 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
8139 void MacroAssembler::movptr(Register dst, intptr_t src) {
8140 LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src));
8141 }
8142
8143 void MacroAssembler::movptr(Address dst, Register src) {
8144 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
8145 }
8146
8147 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src) {
8148 if (reachable(src)) {
8149 Assembler::movdqu(dst, as_Address(src));
8150 } else {
8151 lea(rscratch1, src);
8152 Assembler::movdqu(dst, Address(rscratch1, 0));
8153 }
8154 }
8155
8156 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
8157 if (reachable(src)) {
8158 Assembler::movsd(dst, as_Address(src));
8159 } else {
8160 lea(rscratch1, src);
8161 Assembler::movsd(dst, Address(rscratch1, 0));
8162 }
8163 }
8164
8165 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
8166 if (reachable(src)) {
8167 Assembler::movss(dst, as_Address(src));
8168 } else {
8169 lea(rscratch1, src);
8170 Assembler::movss(dst, Address(rscratch1, 0));
8171 }
8172 }
8173
8174 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src) {
8175 if (reachable(src)) {
8176 Assembler::mulsd(dst, as_Address(src));
8177 } else {
8178 lea(rscratch1, src);
8179 Assembler::mulsd(dst, Address(rscratch1, 0));
8180 }
8181 }
8182
8183 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src) {
8184 if (reachable(src)) {
8185 Assembler::mulss(dst, as_Address(src));
8186 } else {
8187 lea(rscratch1, src);
8188 Assembler::mulss(dst, Address(rscratch1, 0));
8189 }
8190 }
8191
8192 void MacroAssembler::null_check(Register reg, int offset) {
8193 if (needs_explicit_null_check(offset)) {
8194 // provoke OS NULL exception if reg = NULL by
8195 // accessing M[reg] w/o changing any (non-CC) registers
8196 // NOTE: cmpl is plenty here to provoke a segv
8197 cmpptr(rax, Address(reg, 0));
8198 // Note: should probably use testl(rax, Address(reg, 0));
8199 // may be shorter code (however, this version of
8200 // testl needs to be implemented first)
8201 } else {
8202 // nothing to do, (later) access of M[reg + offset]
8203 // will provoke OS NULL exception if reg = NULL
8204 }
8205 }
8206
8207 void MacroAssembler::os_breakpoint() {
8208 // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
8209 // (e.g., MSVC can't call ps() otherwise)
8210 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
8211 }
8212
8213 void MacroAssembler::pop_CPU_state() {
8214 pop_FPU_state();
8215 pop_IU_state();
8216 }
8217
8218 void MacroAssembler::pop_FPU_state() {
8219 NOT_LP64(frstor(Address(rsp, 0));)
8220 LP64_ONLY(fxrstor(Address(rsp, 0));)
8221 addptr(rsp, FPUStateSizeInWords * wordSize);
8222 }
8223
8224 void MacroAssembler::pop_IU_state() {
8225 popa();
8226 LP64_ONLY(addq(rsp, 8));
8227 popf();
8228 }
8229
8230 // Save Integer and Float state
8231 // Warning: stack must be 16-byte aligned (64-bit)
8232 void MacroAssembler::push_CPU_state() {
8233 push_IU_state();
8234 push_FPU_state();
8235 }
8236
8237 void MacroAssembler::push_FPU_state() {
8238 subptr(rsp, FPUStateSizeInWords * wordSize);
8239 #ifndef _LP64
8240 fnsave(Address(rsp, 0));
8241 fwait();
8242 #else
8243 fxsave(Address(rsp, 0));
8244 #endif // LP64
8245 }
8246
8247 void MacroAssembler::push_IU_state() {
8248 // Push flags first because pusha kills them
8249 pushf();
8250 // Make sure rsp stays 16-byte aligned
8251 LP64_ONLY(subq(rsp, 8));
8252 pusha();
8253 }
8254
8255 void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) {
8256 // determine java_thread register
8257 if (!java_thread->is_valid()) {
8258 java_thread = rdi;
8259 get_thread(java_thread);
8260 }
8261 // we must set sp to zero to clear frame
8262 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
8263 if (clear_fp) {
8264 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
8265 }
8266
8267 if (clear_pc)
8268 movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
8269
8270 }
8271
8272 void MacroAssembler::restore_rax(Register tmp) {
8273 if (tmp == noreg) pop(rax);
8274 else if (tmp != rax) mov(rax, tmp);
8275 }
8276
8277 void MacroAssembler::round_to(Register reg, int modulus) {
8278 addptr(reg, modulus - 1);
8279 andptr(reg, -modulus);
8280 }
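
// Aside: a minimal sketch of the round-up identity used above (illustrative
// only). For a power-of-two modulus m, -m is a mask with the low log2(m) bits
// clear, so (x + m - 1) & -m rounds x up, e.g. (13 + 7) & -8 == 16:
#include <stdint.h>
static uintptr_t round_to_sketch(uintptr_t x, uintptr_t m) {
  return (x + m - 1) & ~(m - 1);  // same as & -m for power-of-two m
}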
8281
8282 void MacroAssembler::save_rax(Register tmp) {
8283 if (tmp == noreg) push(rax);
8284 else if (tmp != rax) mov(tmp, rax);
8285 }
8286
8287 // Write serialization page so VM thread can do a pseudo remote membar.
8288 // We use the current thread pointer to calculate a thread specific
8289 // offset to write to within the page. This minimizes bus traffic
8290 // due to cache line collision.
8291 void MacroAssembler::serialize_memory(Register thread, Register tmp) {
8292 movl(tmp, thread);
8293 shrl(tmp, os::get_serialize_page_shift_count());
8294 andl(tmp, (os::vm_page_size() - sizeof(int)));
8295
8296 Address index(noreg, tmp, Address::times_1);
8297 ExternalAddress page(os::get_memory_serialize_page());
8298
8299 // Size of store must match masking code above
8300 movl(as_Address(ArrayAddress(page, index)), tmp);
8301 }
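
// Aside: a minimal sketch of the thread-specific offset computed above
// (illustrative only; shift and page_size stand in for the os:: queries):
#include <stdint.h>
static int serialize_offset_sketch(uintptr_t thread, int shift, int page_size) {
  // Distinct thread pointers tend to differ in these bits, so concurrent
  // threads land on different cache lines within the one serialization page.
  return (int)((thread >> (unsigned)shift) & (uintptr_t)(page_size - (int)sizeof(int)));
}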
8302
8303 // Calls to C land
8304 //
8305 // When entering C land, the rbp and rsp of the last Java frame have to be recorded
8306 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp
8307 // has to be reset to 0. This is required to allow proper stack traversal.
8308 void MacroAssembler::set_last_Java_frame(Register java_thread,
8309 Register last_java_sp,
8310 Register last_java_fp,
8311 address last_java_pc) {
8312 // determine java_thread register
8313 if (!java_thread->is_valid()) {
8314 java_thread = rdi;
8315 get_thread(java_thread);
8316 }
8317 // determine last_java_sp register
8318 if (!last_java_sp->is_valid()) {
8319 last_java_sp = rsp;
8320 }
8321
8322 // last_java_fp is optional
8323
8324 if (last_java_fp->is_valid()) {
8325 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
8326 }
8327
8328 // last_java_pc is optional
8329
8330 if (last_java_pc != NULL) {
8331 lea(Address(java_thread,
8332 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
8333 InternalAddress(last_java_pc));
8334
8335 }
8336 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
8337 }
8338
8339 void MacroAssembler::shlptr(Register dst, int imm8) {
8340 LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
8341 }
8342
8343 void MacroAssembler::shrptr(Register dst, int imm8) {
8344 LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8));
8345 }
8346
8347 void MacroAssembler::sign_extend_byte(Register reg) {
8348 if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) {
8349 movsbl(reg, reg); // movsxb
8350 } else {
8351 shll(reg, 24);
8352 sarl(reg, 24);
8353 }
8354 }
8355
8356 void MacroAssembler::sign_extend_short(Register reg) {
8357 if (LP64_ONLY(true ||) VM_Version::is_P6()) {
8358 movswl(reg, reg); // movsxw
8359 } else {
8360 shll(reg, 16);
8361 sarl(reg, 16);
8362 }
8363 }
8364
8365 void MacroAssembler::testl(Register dst, AddressLiteral src) {
8366 assert(reachable(src), "Address should be reachable");
8367 testl(dst, as_Address(src));
8368 }
8369
8370 void MacroAssembler::sqrtsd(XMMRegister dst, AddressLiteral src) {
8371 if (reachable(src)) {
8372 Assembler::sqrtsd(dst, as_Address(src));
8373 } else {
8374 lea(rscratch1, src);
8375 Assembler::sqrtsd(dst, Address(rscratch1, 0));
8376 }
8377 }
8378
8379 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src) {
8380 if (reachable(src)) {
8381 Assembler::sqrtss(dst, as_Address(src));
8382 } else {
8383 lea(rscratch1, src);
8384 Assembler::sqrtss(dst, Address(rscratch1, 0));
8385 }
8386 }
8387
8388 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src) {
8389 if (reachable(src)) {
8390 Assembler::subsd(dst, as_Address(src));
8391 } else {
8392 lea(rscratch1, src);
8393 Assembler::subsd(dst, Address(rscratch1, 0));
8394 }
8395 }
8396
8397 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src) {
8398 if (reachable(src)) {
8399 Assembler::subss(dst, as_Address(src));
8400 } else {
8401 lea(rscratch1, src);
8402 Assembler::subss(dst, Address(rscratch1, 0));
8403 }
8404 }
8405
8406 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
8407 if (reachable(src)) {
8408 Assembler::ucomisd(dst, as_Address(src));
8409 } else {
8410 lea(rscratch1, src);
8411 Assembler::ucomisd(dst, Address(rscratch1, 0));
8412 }
8413 }
8414
8415 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
8416 if (reachable(src)) {
8417 Assembler::ucomiss(dst, as_Address(src));
8418 } else {
8419 lea(rscratch1, src);
8420 Assembler::ucomiss(dst, Address(rscratch1, 0));
8421 }
8422 }
8423
8424 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
8425 // Used in sign-bit flipping with aligned address.
8426 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
8427 if (reachable(src)) {
8428 Assembler::xorpd(dst, as_Address(src));
8429 } else {
8430 lea(rscratch1, src);
8431 Assembler::xorpd(dst, Address(rscratch1, 0));
8432 }
8433 }
8434
8435 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
8436 // Used in sign-bit flipping with aligned address.
8437 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
8438 if (reachable(src)) {
8439 Assembler::xorps(dst, as_Address(src));
8440 } else {
8441 lea(rscratch1, src);
8442 Assembler::xorps(dst, Address(rscratch1, 0));
8443 }
8444 }
8445
8446 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
8447 // Used with an aligned constant address (e.g. a shuffle mask); pshufb does not flip sign bits.
8448 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
8449 if (reachable(src)) {
8450 Assembler::pshufb(dst, as_Address(src));
8451 } else {
8452 lea(rscratch1, src);
8453 Assembler::pshufb(dst, Address(rscratch1, 0));
8454 }
8455 }
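// A note on the AddressLiteral wrappers above and below: on LP64 a constant
// may live outside the +/-2GB range reachable by a RIP-relative operand, so
// each wrapper tests reachable() and otherwise materializes the full address
// in rscratch1 first. A minimal sketch of the idiom, with a hypothetical
// opcode 'op' standing in for sqrtsd, subss, pshufb, etc.:
//
//   void MacroAssembler::op(XMMRegister dst, AddressLiteral src) {
//     if (reachable(src)) {
//       Assembler::op(dst, as_Address(src));        // RIP-relative operand
//     } else {
//       lea(rscratch1, src);                        // 64-bit address in reg
//       Assembler::op(dst, Address(rscratch1, 0));  // indirect through it
//     }
//   }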
8456
8457 // AVX 3-operands instructions
8458
8459 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8460 if (reachable(src)) {
8461 vaddsd(dst, nds, as_Address(src));
8462 } else {
8463 lea(rscratch1, src);
8464 vaddsd(dst, nds, Address(rscratch1, 0));
8465 }
8466 }
8467
8468 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8469 if (reachable(src)) {
8470 vaddss(dst, nds, as_Address(src));
8471 } else {
8472 lea(rscratch1, src);
8473 vaddss(dst, nds, Address(rscratch1, 0));
8474 }
8475 }
8476
8477 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
8478 if (reachable(src)) {
8479 vandpd(dst, nds, as_Address(src), vector256);
8480 } else {
8481 lea(rscratch1, src);
8482 vandpd(dst, nds, Address(rscratch1, 0), vector256);
8483 }
8484 }
8485
8486 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
8487 if (reachable(src)) {
8488 vandps(dst, nds, as_Address(src), vector256);
8489 } else {
8490 lea(rscratch1, src);
8491 vandps(dst, nds, Address(rscratch1, 0), vector256);
8492 }
8493 }
8494
8495 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8496 if (reachable(src)) {
8497 vdivsd(dst, nds, as_Address(src));
8498 } else {
8499 lea(rscratch1, src);
8500 vdivsd(dst, nds, Address(rscratch1, 0));
8501 }
8502 }
8503
8504 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8505 if (reachable(src)) {
8506 vdivss(dst, nds, as_Address(src));
8507 } else {
8508 lea(rscratch1, src);
8509 vdivss(dst, nds, Address(rscratch1, 0));
8510 }
8511 }
8512
8513 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8514 if (reachable(src)) {
8515 vmulsd(dst, nds, as_Address(src));
8516 } else {
8517 lea(rscratch1, src);
8518 vmulsd(dst, nds, Address(rscratch1, 0));
8519 }
8520 }
8521
8522 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8523 if (reachable(src)) {
8524 vmulss(dst, nds, as_Address(src));
8525 } else {
8526 lea(rscratch1, src);
8527 vmulss(dst, nds, Address(rscratch1, 0));
8528 }
8529 }
8530
8531 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8532 if (reachable(src)) {
8533 vsubsd(dst, nds, as_Address(src));
8534 } else {
8535 lea(rscratch1, src);
8536 vsubsd(dst, nds, Address(rscratch1, 0));
8537 }
8538 }
8539
8540 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8541 if (reachable(src)) {
8542 vsubss(dst, nds, as_Address(src));
8543 } else {
8544 lea(rscratch1, src);
8545 vsubss(dst, nds, Address(rscratch1, 0));
8546 }
8547 }
8548
8549 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
8550 if (reachable(src)) {
8551 vxorpd(dst, nds, as_Address(src), vector256);
8552 } else {
8553 lea(rscratch1, src);
8554 vxorpd(dst, nds, Address(rscratch1, 0), vector256);
8555 }
8556 }
8557
8558 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
8559 if (reachable(src)) {
8560 vxorps(dst, nds, as_Address(src), vector256);
8561 } else {
8562 lea(rscratch1, src);
8563 vxorps(dst, nds, Address(rscratch1, 0), vector256);
8564 }
8565 }
8566
8567
8568 //////////////////////////////////////////////////////////////////////////////////
8569 #ifndef SERIALGC
8570
8571 void MacroAssembler::g1_write_barrier_pre(Register obj,
8572 Register pre_val,
8573 Register thread,
8574 Register tmp,
8575 bool tosca_live,
8576 bool expand_call) {
8577
8578 // If expand_call is true then we expand the call_VM_leaf macro
8579 // directly to skip generating the check by
8580 // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
8581
8582 #ifdef _LP64
8583 assert(thread == r15_thread, "must be");
8584 #endif // _LP64
8585
8586 Label done;
8587 Label runtime;
8588
8589 assert(pre_val != noreg, "check this code");
8590
8591 if (obj != noreg) {
8592 assert_different_registers(obj, pre_val, tmp);
8593 assert(pre_val != rax, "check this code");
8594 }
8595
8596 Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
8597 PtrQueue::byte_offset_of_active()));
8598 Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
8599 PtrQueue::byte_offset_of_index()));
8600 Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
8601 PtrQueue::byte_offset_of_buf()));
8602
8603
8604 // Is marking active?
8605 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
8606 cmpl(in_progress, 0);
8607 } else {
8608 assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
8609 cmpb(in_progress, 0);
8610 }
8611 jcc(Assembler::equal, done);
8612
8613 // Do we need to load the previous value?
8614 if (obj != noreg) {
8615 load_heap_oop(pre_val, Address(obj, 0));
8616 }
8617
8618 // Is the previous value null?
8619 cmpptr(pre_val, (int32_t) NULL_WORD);
8620 jcc(Assembler::equal, done);
8621
8622 // Can we store original value in the thread's buffer?
8623 // Is index == 0?
8624 // (The index field is typed as size_t.)
8625
8626 movptr(tmp, index); // tmp := *index_adr
8627 cmpptr(tmp, 0); // tmp == 0?
8628 jcc(Assembler::equal, runtime); // If yes, goto runtime
8629
8630 subptr(tmp, wordSize); // tmp := tmp - wordSize
8631 movptr(index, tmp); // *index_adr := tmp
8632 addptr(tmp, buffer); // tmp := tmp + *buffer_adr
8633
8634 // Record the previous value
8635 movptr(Address(tmp, 0), pre_val);
8636 jmp(done);
8637
8638 bind(runtime);
8639 // save the live input values
8640 if (tosca_live) push(rax);
8641
8642 if (obj != noreg && obj != rax)
8643 push(obj);
8644
8645 if (pre_val != rax)
8646 push(pre_val);
8647
8648 // Calling the runtime using the regular call_VM_leaf mechanism generates
8649 // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
8650 // that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL.
8651 //
8652 // If we are generating the pre-barrier without a frame (e.g. in the
8653 // intrinsified Reference.get() routine) then ebp might be pointing to
8654 // the caller frame and so this check will most likely fail at runtime.
8655 //
8656 // Expanding the call directly bypasses the generation of the check.
8657 // So when we do not have a full interpreter frame on the stack
8658 // expand_call should be passed true.
8659
8660 NOT_LP64( push(thread); )
8661
8662 if (expand_call) {
8663 LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
8664 pass_arg1(this, thread);
8665 pass_arg0(this, pre_val);
8666 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
8667 } else {
8668 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
8669 }
8670
8671 NOT_LP64( pop(thread); )
8672
8673 // restore the live input values
8674 if (pre_val != rax)
8675 pop(pre_val);
8676
8677 if (obj != noreg && obj != rax)
8678 pop(obj);
8679
8680 if (tosca_live) pop(rax);
8681
8682 bind(done);
8683 }
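// For reference, the control flow emitted above as a plain C++ model. This
// is an illustrative sketch only; SATBThreadModel and its fields are
// hypothetical stand-ins for the JavaThread/PtrQueue fields read above, not
// real VM declarations.
//
//   struct SATBThreadModel {
//     int    satb_active;   // PtrQueue "active" flag
//     size_t satb_index;    // byte index of next free slot (counts down)
//     char*  satb_buf;      // base of the thread-local SATB buffer
//   };
//
//   void g1_pre_barrier_model(SATBThreadModel* t, intptr_t pre_val,
//                             void (*slow_path)(intptr_t)) {
//     if (!t->satb_active) return;          // marking not active -> done
//     if (pre_val == 0)    return;          // previous value NULL -> done
//     if (t->satb_index == 0) {             // buffer full -> runtime call
//       slow_path(pre_val);
//       return;
//     }
//     t->satb_index -= sizeof(intptr_t);    // index counts down one word
//     *(intptr_t*)(t->satb_buf + t->satb_index) = pre_val;  // record it
//   }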
8684
8685 void MacroAssembler::g1_write_barrier_post(Register store_addr,
8686 Register new_val,
8687 Register thread,
8688 Register tmp,
8689 Register tmp2) {
8690 #ifdef _LP64
8691 assert(thread == r15_thread, "must be");
8692 #endif // _LP64
8693
8694 Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
8695 PtrQueue::byte_offset_of_index()));
8696 Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
8697 PtrQueue::byte_offset_of_buf()));
8698
8699 BarrierSet* bs = Universe::heap()->barrier_set();
8700 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
8701 Label done;
8702 Label runtime;
8703
8704 // Does store cross heap regions?
8705
8706 movptr(tmp, store_addr);
8707 xorptr(tmp, new_val);
8708 shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
8709 jcc(Assembler::equal, done);
8710
8711 // crosses regions, storing NULL?
8712
8713 cmpptr(new_val, (int32_t) NULL_WORD);
8714 jcc(Assembler::equal, done);
8715
8716 // Storing a region-crossing non-NULL oop: is the card already dirty?
8717
8718 ExternalAddress cardtable((address) ct->byte_map_base);
8719 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
8720 #ifdef _LP64
8721 const Register card_addr = tmp;
8722
8723 movq(card_addr, store_addr);
8724 shrq(card_addr, CardTableModRefBS::card_shift);
8725
8726 lea(tmp2, cardtable);
8727
8728 // get the address of the card
8729 addq(card_addr, tmp2);
8730 #else
8731 const Register card_index = tmp;
8732
8733 movl(card_index, store_addr);
8734 shrl(card_index, CardTableModRefBS::card_shift);
8735
8736 Address index(noreg, card_index, Address::times_1);
8737 const Register card_addr = tmp;
8738 lea(card_addr, as_Address(ArrayAddress(cardtable, index)));
8739 #endif
8740 cmpb(Address(card_addr, 0), 0);
8741 jcc(Assembler::equal, done);
8742
8743 // Storing a region-crossing, non-NULL oop and the card is clean:
8744 // dirty the card and log it.
8745
8746 movb(Address(card_addr, 0), 0);
8747
8748 cmpl(queue_index, 0);
8749 jcc(Assembler::equal, runtime);
8750 subl(queue_index, wordSize);
8751 movptr(tmp2, buffer);
8752 #ifdef _LP64
8753 movslq(rscratch1, queue_index);
8754 addq(tmp2, rscratch1);
8755 movq(Address(tmp2, 0), card_addr);
8756 #else
8757 addl(tmp2, queue_index);
8758 movl(Address(tmp2, 0), card_index);
8759 #endif
8760 jmp(done);
8761
8762 bind(runtime);
8763 // save the live input values
8764 push(store_addr);
8765 push(new_val);
8766 #ifdef _LP64
8767 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, r15_thread);
8768 #else
8769 push(thread);
8770 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
8771 pop(thread);
8772 #endif
8773 pop(new_val);
8774 pop(store_addr);
8775
8776 bind(done);
8777 }
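// Likewise, the post-barrier above as a C model (illustrative only; the
// names are hypothetical, with CARD_SHIFT and LOG_REGION standing in for
// CardTableModRefBS::card_shift and HeapRegion::LogOfHRGrainBytes):
//
//   void g1_post_barrier_model(intptr_t store_addr, intptr_t new_val,
//                              jbyte* byte_map_base,
//                              void (*enqueue)(jbyte*)) {
//     if (((store_addr ^ new_val) >> LOG_REGION) == 0) return; // same region
//     if (new_val == 0) return;                                // storing NULL
//     jbyte* card = byte_map_base + (store_addr >> CARD_SHIFT);
//     if (*card == 0) return;   // already dirty (0 == dirty card value)
//     *card = 0;                // dirty the card
//     enqueue(card);            // log it in the dirty card queue, or call
//   }                           //   SharedRuntime::g1_wb_post if queue full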
8778
8779 #endif // SERIALGC
8780 //////////////////////////////////////////////////////////////////////////////////
8781
8782
8783 void MacroAssembler::store_check(Register obj) {
8784 // Does a store check for the oop in register obj. The content of
8785 // register obj is destroyed afterwards.
8786 store_check_part_1(obj);
8787 store_check_part_2(obj);
8788 }
8789
8790 void MacroAssembler::store_check(Register obj, Address dst) {
8791 store_check(obj);
8792 }
8793
8794
8795 // Split the store check operation so that other instructions can be scheduled in between.
8796 void MacroAssembler::store_check_part_1(Register obj) {
8797 BarrierSet* bs = Universe::heap()->barrier_set();
8798 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
8799 shrptr(obj, CardTableModRefBS::card_shift);
8800 }
8801
8802 void MacroAssembler::store_check_part_2(Register obj) {
8803 BarrierSet* bs = Universe::heap()->barrier_set();
8804 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
8805 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
8806 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
8807
8808 // The calculation for byte_map_base is as follows:
8809 // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
8810 // So this essentially converts an address to a displacement and
8811 // it will never need to be relocated. On 64-bit, however, the value
8812 // may be too large for a 32-bit displacement.
8813
8814 intptr_t disp = (intptr_t) ct->byte_map_base;
8815 if (is_simm32(disp)) {
8816 Address cardtable(noreg, obj, Address::times_1, disp);
8817 movb(cardtable, 0);
8818 } else {
8819 // By emitting it as an ExternalAddress, disp could be converted to a
8820 // rip-relative displacement and done in a single instruction given a
8821 // favorable mapping and a smarter version of as_Address. Worst case it
8822 // is two instructions, which is no worse than loading disp into a
8823 // register and using a simple Address() as above.
8824 // We can't use ExternalAddress as the only style, since if disp == 0
8825 // we'd assert because NULL isn't acceptable in relocation info (see
8826 // 6644928). In any case, we'll sometimes get the single-instruction version.
8827
8828 ExternalAddress cardtable((address)disp);
8829 Address index(noreg, obj, Address::times_1);
8830 movb(as_Address(ArrayAddress(cardtable, index)), 0);
8831 }
8832 }
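// As arithmetic, the whole (two-part) store check reduces to one byte store
// (illustrative sketch; byte_map_base is pre-biased by
// -(low_bound >> card_shift), which is why no relocation is needed):
//
//   byte_map_base[(uintptr_t)obj >> CardTableModRefBS::card_shift] = 0;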
8833
8834 void MacroAssembler::subptr(Register dst, int32_t imm32) {
8835 LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
8836 }
8837
8838 // Force generation of a 4-byte immediate value even if it fits into 8 bits.
8839 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
8840 LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32));
8841 }
8842
8843 void MacroAssembler::subptr(Register dst, Register src) {
8844 LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
8845 }
8846
8847 // C++ bool manipulation
8848 void MacroAssembler::testbool(Register dst) {
8849 if (sizeof(bool) == 1) {
8850 testb(dst, 0xff);
8851 } else if (sizeof(bool) == 2) {
8852 // testw implementation needed for two-byte bools
8853 ShouldNotReachHere();
8854 } else if (sizeof(bool) == 4) {
8855 testl(dst, dst);
8856 } else {
8857 ShouldNotReachHere();  // unsupported
8858 }
8859 }
8860
8861 void MacroAssembler::testptr(Register dst, Register src) {
8862 LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
8863 }
8864
8865 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
8866 void MacroAssembler::tlab_allocate(Register obj,
8867 Register var_size_in_bytes,
8868 int con_size_in_bytes,
8869 Register t1,
8870 Register t2,
8871 Label& slow_case) {
8872 assert_different_registers(obj, t1, t2);
8873 assert_different_registers(obj, var_size_in_bytes, t1);
8874 Register end = t2;
8875 Register thread = NOT_LP64(t1) LP64_ONLY(r15_thread);
8876
8877 verify_tlab();
8878
8879 NOT_LP64(get_thread(thread));
8880
8881 movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
8882 if (var_size_in_bytes == noreg) {
8883 lea(end, Address(obj, con_size_in_bytes));
8884 } else {
8885 lea(end, Address(obj, var_size_in_bytes, Address::times_1));
8886 }
8887 cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
8888 jcc(Assembler::above, slow_case);
8889
8890 // update the tlab top pointer
8891 movptr(Address(thread, JavaThread::tlab_top_offset()), end);
8892
8893 // recover var_size_in_bytes if necessary
8894 if (var_size_in_bytes == end) {
8895 subptr(var_size_in_bytes, obj);
8896 }
8897 verify_tlab();
8898 }
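// The fast path above is a bump-pointer allocation. A C model of what gets
// emitted (illustrative; tlab_top/tlab_end stand for the thread fields
// loaded above):
//
//   char* tlab_allocate_model(char** tlab_top, char* tlab_end, size_t size) {
//     char* obj = *tlab_top;            // candidate object start
//     char* end = obj + size;           // candidate new top
//     if (end > tlab_end) return NULL;  // does not fit -> slow_case
//     *tlab_top = end;                  // publish the new top
//     return obj;                       // no fence needed: TLABs are
//   }                                   //   thread-local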
8899
8900 // Preserves rbx and rdx.
8901 Register MacroAssembler::tlab_refill(Label& retry,
8902 Label& try_eden,
8903 Label& slow_case) {
8904 Register top = rax;
8905 Register t1 = rcx;
8906 Register t2 = rsi;
8907 Register thread_reg = NOT_LP64(rdi) LP64_ONLY(r15_thread);
8908 assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
8909 Label do_refill, discard_tlab;
8910
8911 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
8912 // No allocation in the shared eden.
8913 jmp(slow_case);
8914 }
8915
8916 NOT_LP64(get_thread(thread_reg));
8917
8918 movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
8919 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
8920
8921 // calculate amount of free space
8922 subptr(t1, top);
8923 shrptr(t1, LogHeapWordSize);
8924
8925 // Retain tlab and allocate object in shared space if
8926 // the amount free in the tlab is too large to discard.
8927 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
8928 jcc(Assembler::lessEqual, discard_tlab);
8929
8930 // Retain
8931 // %%% yuck as movptr...
8932 movptr(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
8933 addptr(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2);
8934 if (TLABStats) {
8935 // increment number of slow_allocations
8936 addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1);
8937 }
8938 jmp(try_eden);
8939
8940 bind(discard_tlab);
8941 if (TLABStats) {
8942 // increment number of refills
8943 addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1);
8944 // accumulate wastage -- t1 is amount free in tlab
8945 addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1);
8946 }
8947
8948 // if tlab is currently allocated (top or end != null) then
8949 // fill [top, end + alignment_reserve) with array object
8950 testptr(top, top);
8951 jcc(Assembler::zero, do_refill);
8952
8953 // set up the mark word
8954 movptr(Address(top, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
8955 // set the length to the remaining space
8956 subptr(t1, typeArrayOopDesc::header_size(T_INT));
8957 addptr(t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
8958 shlptr(t1, log2_intptr(HeapWordSize/sizeof(jint)));
8959 movl(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
8960 // set klass to intArrayKlass
8961 // dubious reloc: why not an oop reloc?
8962 movptr(t1, ExternalAddress((address)Universe::intArrayKlassObj_addr()));
8963 // Store klass last: concurrent GCs assume the length is valid if the
8964 // klass field is not null.
8965 store_klass(top, t1);
8966
8967 movptr(t1, top);
8968 subptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
8969 incr_allocated_bytes(thread_reg, t1, 0);
8970
8971 // refill the tlab with an eden allocation
8972 bind(do_refill);
8973 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
8974 shlptr(t1, LogHeapWordSize);
8975 // allocate new tlab, address returned in top
8976 eden_allocate(top, t1, 0, t2, slow_case);
8977
8978 // Check that t1 was preserved in eden_allocate.
8979 #ifdef ASSERT
8980 if (UseTLAB) {
8981 Label ok;
8982 Register tsize = rsi;
8983 assert_different_registers(tsize, thread_reg, t1);
8984 push(tsize);
8985 movptr(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
8986 shlptr(tsize, LogHeapWordSize);
8987 cmpptr(t1, tsize);
8988 jcc(Assembler::equal, ok);
8989 STOP("assert(t1 == tlab size)");
8990 should_not_reach_here();
8991
8992 bind(ok);
8993 pop(tsize);
8994 }
8995 #endif
8996 movptr(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
8997 movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
8998 addptr(top, t1);
8999 subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
9000 movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
9001 verify_tlab();
9002 jmp(retry);
9003
9004 return thread_reg; // for use by caller
9005 }
9006
9007 void MacroAssembler::incr_allocated_bytes(Register thread,
9008 Register var_size_in_bytes,
9009 int con_size_in_bytes,
9010 Register t1) {
9011 if (!thread->is_valid()) {
9012 #ifdef _LP64
9013 thread = r15_thread;
9014 #else
9015 assert(t1->is_valid(), "need temp reg");
9016 thread = t1;
9017 get_thread(thread);
9018 #endif
9019 }
9020
9021 #ifdef _LP64
9022 if (var_size_in_bytes->is_valid()) {
9023 addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
9024 } else {
9025 addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
9026 }
9027 #else
9028 if (var_size_in_bytes->is_valid()) {
9029 addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
9030 } else {
9031 addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
9032 }
9033 adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0);
9034 #endif
9035 }
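// On 32-bit the 64-bit allocated_bytes counter is updated with an
// addl/adcl pair; in C terms (illustrative, little-endian 32-bit halves):
//
//   uint32_t old_lo = lo;
//   lo += amount;                 // addl: low word may wrap
//   hi += (lo < old_lo) ? 1 : 0;  // adcl ...+4, 0: propagate the carry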
9036
9037 void MacroAssembler::fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use) {
9038 pusha();
9039
9040 // if we are coming from c1, xmm registers may be live
9041 int off = 0;
9042 if (UseSSE == 1) {
9043 subptr(rsp, sizeof(jdouble)*8);
9044 movflt(Address(rsp,off++*sizeof(jdouble)),xmm0);
9045 movflt(Address(rsp,off++*sizeof(jdouble)),xmm1);
9046 movflt(Address(rsp,off++*sizeof(jdouble)),xmm2);
9047 movflt(Address(rsp,off++*sizeof(jdouble)),xmm3);
9048 movflt(Address(rsp,off++*sizeof(jdouble)),xmm4);
9049 movflt(Address(rsp,off++*sizeof(jdouble)),xmm5);
9050 movflt(Address(rsp,off++*sizeof(jdouble)),xmm6);
9051 movflt(Address(rsp,off++*sizeof(jdouble)),xmm7);
9052 } else if (UseSSE >= 2) {
9053 #ifdef COMPILER2
9054 if (MaxVectorSize > 16) {
9055 assert(UseAVX > 0, "256bit vectors are supported only with AVX");
9056 // Save upper half of YMM registers
9057 subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
9058 vextractf128h(Address(rsp, 0),xmm0);
9059 vextractf128h(Address(rsp, 16),xmm1);
9060 vextractf128h(Address(rsp, 32),xmm2);
9061 vextractf128h(Address(rsp, 48),xmm3);
9062 vextractf128h(Address(rsp, 64),xmm4);
9063 vextractf128h(Address(rsp, 80),xmm5);
9064 vextractf128h(Address(rsp, 96),xmm6);
9065 vextractf128h(Address(rsp,112),xmm7);
9066 #ifdef _LP64
9067 vextractf128h(Address(rsp,128),xmm8);
9068 vextractf128h(Address(rsp,144),xmm9);
9069 vextractf128h(Address(rsp,160),xmm10);
9070 vextractf128h(Address(rsp,176),xmm11);
9071 vextractf128h(Address(rsp,192),xmm12);
9072 vextractf128h(Address(rsp,208),xmm13);
9073 vextractf128h(Address(rsp,224),xmm14);
9074 vextractf128h(Address(rsp,240),xmm15);
9075 #endif
9076 }
9077 #endif
9078 // Save whole 128-bit (16 bytes) XMM registers
9079 subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
9080 movdqu(Address(rsp,off++*16),xmm0);
9081 movdqu(Address(rsp,off++*16),xmm1);
9082 movdqu(Address(rsp,off++*16),xmm2);
9083 movdqu(Address(rsp,off++*16),xmm3);
9084 movdqu(Address(rsp,off++*16),xmm4);
9085 movdqu(Address(rsp,off++*16),xmm5);
9086 movdqu(Address(rsp,off++*16),xmm6);
9087 movdqu(Address(rsp,off++*16),xmm7);
9088 #ifdef _LP64
9089 movdqu(Address(rsp,off++*16),xmm8);
9090 movdqu(Address(rsp,off++*16),xmm9);
9091 movdqu(Address(rsp,off++*16),xmm10);
9092 movdqu(Address(rsp,off++*16),xmm11);
9093 movdqu(Address(rsp,off++*16),xmm12);
9094 movdqu(Address(rsp,off++*16),xmm13);
9095 movdqu(Address(rsp,off++*16),xmm14);
9096 movdqu(Address(rsp,off++*16),xmm15);
9097 #endif
9098 }
9099
9100 // Preserve registers across runtime call
9101 int incoming_argument_and_return_value_offset = -1;
9102 if (num_fpu_regs_in_use > 1) {
9103 // Must preserve all other FPU regs (could alternatively convert
9104 // SharedRuntime::dsin, dcos etc. into assembly routines known not to
9105 // trash FPU state, but we cannot trust the C compiler)
9106 NEEDS_CLEANUP;
9107 // NOTE that in this case we also push the incoming argument(s) to
9108 // the stack and restore it later; we also use this stack slot to
9109 // hold the return value from dsin, dcos etc.
9110 for (int i = 0; i < num_fpu_regs_in_use; i++) {
9111 subptr(rsp, sizeof(jdouble));
9112 fstp_d(Address(rsp, 0));
9113 }
9114 incoming_argument_and_return_value_offset = sizeof(jdouble)*(num_fpu_regs_in_use-1);
9115 for (int i = nb_args-1; i >= 0; i--) {
9116 fld_d(Address(rsp, incoming_argument_and_return_value_offset-i*sizeof(jdouble)));
9117 }
9118 }
9119
9120 subptr(rsp, nb_args*sizeof(jdouble));
9121 for (int i = 0; i < nb_args; i++) {
9122 fstp_d(Address(rsp, i*sizeof(jdouble)));
9123 }
9124
9125 #ifdef _LP64
9126 if (nb_args > 0) {
9127 movdbl(xmm0, Address(rsp, 0));
9128 }
9129 if (nb_args > 1) {
9130 movdbl(xmm1, Address(rsp, sizeof(jdouble)));
9131 }
9132 assert(nb_args <= 2, "unsupported number of args");
9133 #endif // _LP64
9134
9135 // NOTE: we must not use call_VM_leaf here because that requires a
9136 // complete interpreter frame in debug mode -- same bug as 4387334.
9137 // MacroAssembler::call_VM_leaf_base is perfectly safe and will
9138 // honor the proper 64-bit ABI.
9139
9140 NEEDS_CLEANUP;
9141 // Stack banging would need to be added before this runtime call if it
9142 // can be taken; however, there is no generic stack-banging routine at
9143 // the MacroAssembler level.
9144
9145 MacroAssembler::call_VM_leaf_base(runtime_entry, 0);
9146
9147 #ifdef _LP64
9148 movsd(Address(rsp, 0), xmm0);
9149 fld_d(Address(rsp, 0));
9150 #endif // _LP64
9151 addptr(rsp, sizeof(jdouble) * nb_args);
9152 if (num_fpu_regs_in_use > 1) {
9153 // Must save return value to stack and then restore entire FPU
9154 // stack except incoming arguments
9155 fstp_d(Address(rsp, incoming_argument_and_return_value_offset));
9156 for (int i = 0; i < num_fpu_regs_in_use - nb_args; i++) {
9157 fld_d(Address(rsp, 0));
9158 addptr(rsp, sizeof(jdouble));
9159 }
9160 fld_d(Address(rsp, (nb_args-1)*sizeof(jdouble)));
9161 addptr(rsp, sizeof(jdouble) * nb_args);
9162 }
9163
9164 off = 0;
9165 if (UseSSE == 1) {
9166 movflt(xmm0, Address(rsp,off++*sizeof(jdouble)));
9167 movflt(xmm1, Address(rsp,off++*sizeof(jdouble)));
9168 movflt(xmm2, Address(rsp,off++*sizeof(jdouble)));
9169 movflt(xmm3, Address(rsp,off++*sizeof(jdouble)));
9170 movflt(xmm4, Address(rsp,off++*sizeof(jdouble)));
9171 movflt(xmm5, Address(rsp,off++*sizeof(jdouble)));
9172 movflt(xmm6, Address(rsp,off++*sizeof(jdouble)));
9173 movflt(xmm7, Address(rsp,off++*sizeof(jdouble)));
9174 addptr(rsp, sizeof(jdouble)*8);
9175 } else if (UseSSE >= 2) {
9176 // Restore whole 128-bit (16 bytes) XMM registers
9177 movdqu(xmm0, Address(rsp,off++*16));
9178 movdqu(xmm1, Address(rsp,off++*16));
9179 movdqu(xmm2, Address(rsp,off++*16));
9180 movdqu(xmm3, Address(rsp,off++*16));
9181 movdqu(xmm4, Address(rsp,off++*16));
9182 movdqu(xmm5, Address(rsp,off++*16));
9183 movdqu(xmm6, Address(rsp,off++*16));
9184 movdqu(xmm7, Address(rsp,off++*16));
9185 #ifdef _LP64
9186 movdqu(xmm8, Address(rsp,off++*16));
9187 movdqu(xmm9, Address(rsp,off++*16));
9188 movdqu(xmm10, Address(rsp,off++*16));
9189 movdqu(xmm11, Address(rsp,off++*16));
9190 movdqu(xmm12, Address(rsp,off++*16));
9191 movdqu(xmm13, Address(rsp,off++*16));
9192 movdqu(xmm14, Address(rsp,off++*16));
9193 movdqu(xmm15, Address(rsp,off++*16));
9194 #endif
9195 addptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
9196 #ifdef COMPILER2
9197 if (MaxVectorSize > 16) {
9198 // Restore upper half of YMM registers.
9199 vinsertf128h(xmm0, Address(rsp, 0));
9200 vinsertf128h(xmm1, Address(rsp, 16));
9201 vinsertf128h(xmm2, Address(rsp, 32));
9202 vinsertf128h(xmm3, Address(rsp, 48));
9203 vinsertf128h(xmm4, Address(rsp, 64));
9204 vinsertf128h(xmm5, Address(rsp, 80));
9205 vinsertf128h(xmm6, Address(rsp, 96));
9206 vinsertf128h(xmm7, Address(rsp,112));
9207 #ifdef _LP64
9208 vinsertf128h(xmm8, Address(rsp,128));
9209 vinsertf128h(xmm9, Address(rsp,144));
9210 vinsertf128h(xmm10, Address(rsp,160));
9211 vinsertf128h(xmm11, Address(rsp,176));
9212 vinsertf128h(xmm12, Address(rsp,192));
9213 vinsertf128h(xmm13, Address(rsp,208));
9214 vinsertf128h(xmm14, Address(rsp,224));
9215 vinsertf128h(xmm15, Address(rsp,240));
9216 #endif
9217 addptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
9218 }
9219 #endif
9220 }
9221 popa();
9222 }
9223
9224 static const double pi_4 = 0.7853981633974483;
9225
9226 void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) {
9227 // A hand-coded argument reduction for values in fabs(pi/4, pi/2)
9228 // was attempted in this code; unfortunately it appears that the
9229 // switch to 80-bit precision and back causes this to be
9230 // unprofitable compared with simply performing a runtime call if
9231 // the argument is out of the (-pi/4, pi/4) range.
9232
9233 Register tmp = noreg;
9234 if (!VM_Version::supports_cmov()) {
9235 // fcmp needs a temporary, so preserve rbx
9236 tmp = rbx;
9237 push(tmp);
9238 }
9239
9240 Label slow_case, done;
9241
9242 ExternalAddress pi4_adr = (address)&pi_4;
9243 if (reachable(pi4_adr)) {
9244 // x ?<= pi/4
9245 fld_d(pi4_adr);
9246 fld_s(1); // Stack: X PI/4 X
9247 fabs(); // Stack: |X| PI/4 X
9248 fcmp(tmp);
9249 jcc(Assembler::above, slow_case);
9250
9251 // fastest case: -pi/4 <= x <= pi/4
9252 switch(trig) {
9253 case 's':
9254 fsin();
9255 break;
9256 case 'c':
9257 fcos();
9258 break;
9259 case 't':
9260 ftan();
9261 break;
9262 default:
9263 assert(false, "bad intrinsic");
9264 break;
9265 }
9266 jmp(done);
9267 }
9268
9269 // slow case: runtime call
9270 bind(slow_case);
9271
9272 switch(trig) {
9273 case 's':
9274 {
9275 fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), 1, num_fpu_regs_in_use);
9276 }
9277 break;
9278 case 'c':
9279 {
9280 fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), 1, num_fpu_regs_in_use);
9281 }
9282 break;
9283 case 't':
9284 {
9285 fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), 1, num_fpu_regs_in_use);
9286 }
9287 break;
9288 default:
9289 assert(false, "bad intrinsic");
9290 break;
9291 }
9292
9293 // Come here with result in F-TOS
9294 bind(done);
9295
9296 if (tmp != noreg) {
9297 pop(tmp);
9298 }
9299 }
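// The range test above, spelled out: the hardware fsin/fcos/ftan are used
// only when |x| <= pi/4, where no x87 argument reduction is required
// (illustrative sketch):
//
//   bool trig_fast_path_ok(double x) {
//     double ax = (x < 0.0) ? -x : x;   // fabs, as emitted above
//     return ax <= pi_4;                // inside [-pi/4, pi/4]
//   }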
9300
9301
9302 // Look up the method for a megamorphic invokeinterface call.
9303 // The target method is determined by <intf_klass, itable_index>.
9304 // The receiver klass is in recv_klass.
9305 // On success, the result will be in method_result, and execution falls through.
9306 // On failure, execution transfers to the given label.
9307 void MacroAssembler::lookup_interface_method(Register recv_klass,
9308 Register intf_klass,
9309 RegisterOrConstant itable_index,
9310 Register method_result,
9311 Register scan_temp,
9312 Label& L_no_such_interface) {
9313 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
9314 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
9315 "caller must use same register for non-constant itable index as for method");
9316
9317 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
9318 int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
9319 int itentry_off = itableMethodEntry::method_offset_in_bytes();
9320 int scan_step = itableOffsetEntry::size() * wordSize;
9321 int vte_size = vtableEntry::size() * wordSize;
9322 Address::ScaleFactor times_vte_scale = Address::times_ptr;
9323 assert(vte_size == wordSize, "else adjust times_vte_scale");
9324
9325 movl(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize));
9326
9327 // %%% Could store the aligned, prescaled offset in the klassoop.
9328 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
9329 if (HeapWordsPerLong > 1) {
9330 // Round up to align_object_offset boundary
9331 // see code for InstanceKlass::start_of_itable!
9332 round_to(scan_temp, BytesPerLong);
9333 }
9334
9335 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
9336 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
9337 lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
9338
9339 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
9340 // if (scan->interface() == intf) {
9341 // result = (klass + scan->offset() + itable_index);
9342 // }
9343 // }
9344 Label search, found_method;
9345
9346 for (int peel = 1; peel >= 0; peel--) {
9347 movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
9348 cmpptr(intf_klass, method_result);
9349
9350 if (peel) {
9351 jccb(Assembler::equal, found_method);
9352 } else {
9353 jccb(Assembler::notEqual, search);
9354 // (invert the test to fall through to found_method...)
9355 }
9356
9357 if (!peel) break;
9358
9359 bind(search);
9360
9361 // Check that the previous entry is non-null. A null entry means that
9362 // the receiver class doesn't implement the interface, and wasn't the
9363 // same as when the caller was compiled.
9364 testptr(method_result, method_result);
9365 jcc(Assembler::zero, L_no_such_interface);
9366 addptr(scan_temp, scan_step);
9367 }
9368
9369 bind(found_method);
9370
9371 // Got a hit.
9372 movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
9373 movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
9374 }
9375
9376
9377 // virtual method calling
9378 void MacroAssembler::lookup_virtual_method(Register recv_klass,
9379 RegisterOrConstant vtable_index,
9380 Register method_result) {
9381 const int base = InstanceKlass::vtable_start_offset() * wordSize;
9382 assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
9383 Address vtable_entry_addr(recv_klass,
9384 vtable_index, Address::times_ptr,
9385 base + vtableEntry::method_offset_in_bytes());
9386 movptr(method_result, vtable_entry_addr);
9387 }
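// In effect the load above computes (illustrative; one vtableEntry is a
// single Method* word, as the assert checks):
//
//   Method** vtable = (Method**)((char*)recv_klass + base);
//   method_result   = vtable[vtable_index];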
9388
9389
9390 void MacroAssembler::check_klass_subtype(Register sub_klass,
9391 Register super_klass,
9392 Register temp_reg,
9393 Label& L_success) {
9394 Label L_failure;
9395 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL);
9396 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
9397 bind(L_failure);
9398 }
9399
9400
9401 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
9402 Register super_klass,
9403 Register temp_reg,
9404 Label* L_success,
9405 Label* L_failure,
9406 Label* L_slow_path,
9407 RegisterOrConstant super_check_offset) {
9408 assert_different_registers(sub_klass, super_klass, temp_reg);
9409 bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
9410 if (super_check_offset.is_register()) {
9411 assert_different_registers(sub_klass, super_klass,
9412 super_check_offset.as_register());
9413 } else if (must_load_sco) {
9414 assert(temp_reg != noreg, "supply either a temp or a register offset");
9415 }
9416
9417 Label L_fallthrough;
9418 int label_nulls = 0;
9419 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
9420 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
9421 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
9422 assert(label_nulls <= 1, "at most one NULL in the batch");
9423
9424 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
9425 int sco_offset = in_bytes(Klass::super_check_offset_offset());
9426 Address super_check_offset_addr(super_klass, sco_offset);
9427
9428 // Hacked jcc, which "knows" that L_fallthrough, at least, is in
9429 // range of a jccb. If this routine grows larger, reconsider at
9430 // least some of these.
9431 #define local_jcc(assembler_cond, label) \
9432 if (&(label) == &L_fallthrough) jccb(assembler_cond, label); \
9433 else jcc( assembler_cond, label) /*omit semi*/
9434
9435 // Hacked jmp, which may only be used just before L_fallthrough.
9436 #define final_jmp(label) \
9437 if (&(label) == &L_fallthrough) { /*do nothing*/ } \
9438 else jmp(label) /*omit semi*/
9439
9440 // If the pointers are equal, we are done (e.g., String[] elements).
9441 // This self-check enables sharing of secondary supertype arrays among
9442 // non-primary types such as array-of-interface. Otherwise, each such
9443 // type would need its own customized SSA (secondary supers array).
9444 // We move this check to the front of the fast path because many
9445 // type checks are in fact trivially successful in this manner,
9446 // so we get a nicely predicted branch right at the start of the check.
9447 cmpptr(sub_klass, super_klass);
9448 local_jcc(Assembler::equal, *L_success);
9449
9450 // Check the supertype display:
9451 if (must_load_sco) {
9452 // Positive movl does right thing on LP64.
9453 movl(temp_reg, super_check_offset_addr);
9454 super_check_offset = RegisterOrConstant(temp_reg);
9455 }
9456 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
9457 cmpptr(super_klass, super_check_addr); // load displayed supertype
9458
9459 // This check has worked decisively for primary supers.
9460 // Secondary supers are sought in the super_cache ('super_cache_addr').
9461 // (Secondary supers are interfaces and very deeply nested subtypes.)
9462 // This works in the same check above because of a tricky aliasing
9463 // between the super_cache and the primary super display elements.
9464 // (The 'super_check_addr' can address either, as the case requires.)
9465 // Note that the cache is updated below if it does not help us find
9466 // what we need immediately.
9467 // So if it was a primary super, we can just fail immediately.
9468 // Otherwise, it's the slow path for us (no success at this point).
9469
9470 if (super_check_offset.is_register()) {
9471 local_jcc(Assembler::equal, *L_success);
9472 cmpl(super_check_offset.as_register(), sc_offset);
9473 if (L_failure == &L_fallthrough) {
9474 local_jcc(Assembler::equal, *L_slow_path);
9475 } else {
9476 local_jcc(Assembler::notEqual, *L_failure);
9477 final_jmp(*L_slow_path);
9478 }
9479 } else if (super_check_offset.as_constant() == sc_offset) {
9480 // Need a slow path; fast failure is impossible.
9481 if (L_slow_path == &L_fallthrough) {
9482 local_jcc(Assembler::equal, *L_success);
9483 } else {
9484 local_jcc(Assembler::notEqual, *L_slow_path);
9485 final_jmp(*L_success);
9486 }
9487 } else {
9488 // No slow path; it's a fast decision.
9489 if (L_failure == &L_fallthrough) {
9490 local_jcc(Assembler::equal, *L_success);
9491 } else {
9492 local_jcc(Assembler::notEqual, *L_failure);
9493 final_jmp(*L_success);
9494 }
9495 }
9496
9497 bind(L_fallthrough);
9498
9499 #undef local_jcc
9500 #undef final_jmp
9501 }
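// The fast path above, summarized as pseudocode (illustrative; 'sco' is the
// super_check_offset and sc_offset is Klass::secondary_super_cache_offset()):
//
//   if (sub_klass == super_klass)                          goto success;
//   if (*(Klass**)((char*)sub_klass + sco) == super_klass) goto success;
//   if (sco != sc_offset)                                  goto failure;
//   goto slow_path;   // must scan the secondary supers (see below)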
9502
9503
9504 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
9505 Register super_klass,
9506 Register temp_reg,
9507 Register temp2_reg,
9508 Label* L_success,
9509 Label* L_failure,
9510 bool set_cond_codes) {
9511 assert_different_registers(sub_klass, super_klass, temp_reg);
9512 if (temp2_reg != noreg)
9513 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
9514 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
9515
9516 Label L_fallthrough;
9517 int label_nulls = 0;
9518 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
9519 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
9520 assert(label_nulls <= 1, "at most one NULL in the batch");
9521
9522 // a couple of useful fields in sub_klass:
9523 int ss_offset = in_bytes(Klass::secondary_supers_offset());
9524 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
9525 Address secondary_supers_addr(sub_klass, ss_offset);
9526 Address super_cache_addr( sub_klass, sc_offset);
9527
9528 // Do a linear scan of the secondary super-klass chain.
9529 // This code is rarely used, so simplicity is a virtue here.
9530 // The repne_scan instruction uses fixed registers, which we must spill.
9531 // Don't worry too much about pre-existing connections with the input regs.
9532
9533 assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
9534 assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)
9535
9536 // Get super_klass value into rax (even if it was in rdi or rcx).
9537 bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
9538 if (super_klass != rax || UseCompressedOops) {
9539 if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
9540 mov(rax, super_klass);
9541 }
9542 if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
9543 if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }
9544
9545 #ifndef PRODUCT
9546 int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
9547 ExternalAddress pst_counter_addr((address) pst_counter);
9548 NOT_LP64( incrementl(pst_counter_addr) );
9549 LP64_ONLY( lea(rcx, pst_counter_addr) );
9550 LP64_ONLY( incrementl(Address(rcx, 0)) );
9551 #endif //PRODUCT
9552
9553 // We will consult the secondary-super array.
9554 movptr(rdi, secondary_supers_addr);
9555 // Load the array length. (Positive movl does right thing on LP64.)
9556 movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
9557 // Skip to start of data.
9558 addptr(rdi, Array<Klass*>::base_offset_in_bytes());
9559
9560 // Scan RCX words at [RDI] for an occurrence of RAX.
9561 // Set NZ/Z based on last compare.
9562 // The Z flag will not be set by 'repne' if RCX == 0, since 'repne'
9563 // itself does not change flags (only the repeated scas instruction does).
9564 // Set Z = 0 (not equal) before 'repne' to indicate that the class was not found.
9565
9566 testptr(rax,rax); // Set Z = 0
9567 repne_scan();
9568
9569 // Unspill the temp. registers:
9570 if (pushed_rdi) pop(rdi);
9571 if (pushed_rcx) pop(rcx);
9572 if (pushed_rax) pop(rax);
9573
9574 if (set_cond_codes) {
9575 // Special hack for the AD files: rdi is guaranteed non-zero.
9576 assert(!pushed_rdi, "rdi must be left non-NULL");
9577 // Also, the condition codes are properly set Z/NZ on succeed/failure.
9578 }
9579
9580 if (L_failure == &L_fallthrough)
9581 jccb(Assembler::notEqual, *L_failure);
9582 else jcc(Assembler::notEqual, *L_failure);
9583
9584 // Success. Cache the super we found and proceed in triumph.
9585 movptr(super_cache_addr, super_klass);
9586
9587 if (L_success != &L_fallthrough) {
9588 jmp(*L_success);
9589 }
9590
9591 #undef IS_A_TEMP
9592
9593 bind(L_fallthrough);
9594 }
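// The repne_scan above is just a linear scan; as a C model (illustrative):
//
//   bool scan_secondary_supers_model(Klass** supers, int len, Klass* super) {
//     for (int i = 0; i < len; i++) {          // what 'repne scas' executes
//       if (supers[i] == super) return true;   // Z set -> hit
//     }
//     return false;                            // RCX exhausted, Z clear -> miss
//   }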
9595
9596
9597 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
9598 if (VM_Version::supports_cmov()) {
9599 cmovl(cc, dst, src);
9600 } else {
9601 Label L;
9602 jccb(negate_condition(cc), L);
9603 movl(dst, src);
9604 bind(L);
9605 }
9606 }
9607
9608 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
9609 if (VM_Version::supports_cmov()) {
9610 cmovl(cc, dst, src);
9611 } else {
9612 Label L;
9613 jccb(negate_condition(cc), L);
9614 movl(dst, src);
9615 bind(L);
9616 }
9617 }
9618
9619 void MacroAssembler::verify_oop(Register reg, const char* s) {
9620 if (!VerifyOops) return;
9621
9622 // Pass register number to verify_oop_subroutine
9623 char* b = new char[strlen(s) + 50];
9624 sprintf(b, "verify_oop: %s: %s", reg->name(), s);
9625 BLOCK_COMMENT("verify_oop {");
9626 #ifdef _LP64
9627 push(rscratch1); // save r10, trashed by movptr()
9628 #endif
9629 push(rax); // save rax
9630 push(reg); // pass register argument
9631 ExternalAddress buffer((address) b);
9632 // avoid using pushptr, as it modifies scratch registers
9633 // and our contract is not to modify anything
9634 movptr(rax, buffer.addr());
9635 push(rax);
9636 // call indirectly to solve generation ordering problem
9637 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
9638 call(rax);
9639 // Caller pops the arguments (oop, message) and restores rax, r10
9640 BLOCK_COMMENT("} verify_oop");
9641 }
9642
9643
9644 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
9645 Register tmp,
9646 int offset) {
9647 intptr_t value = *delayed_value_addr;
9648 if (value != 0)
9649 return RegisterOrConstant(value + offset);
9650
9651 // load indirectly to solve generation ordering problem
9652 movptr(tmp, ExternalAddress((address) delayed_value_addr));
9653
9654 #ifdef ASSERT
9655 { Label L;
9656 testptr(tmp, tmp);
9657 if (WizardMode) {
9658 jcc(Assembler::notZero, L);
9659 char* buf = new char[40];
9660 sprintf(buf, "DelayedValue="INTPTR_FORMAT, delayed_value_addr[1]);
9661 STOP(buf);
9662 } else {
9663 jccb(Assembler::notZero, L);
9664 hlt();
9665 }
9666 bind(L);
9667 }
9668 #endif
9669
9670 if (offset != 0)
9671 addptr(tmp, offset);
9672
9673 return RegisterOrConstant(tmp);
9674 }
9675
9676
9677 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
9678 int extra_slot_offset) {
9679 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
9680 int stackElementSize = Interpreter::stackElementSize;
9681 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
9682 #ifdef ASSERT
9683 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
9684 assert(offset1 - offset == stackElementSize, "correct arithmetic");
9685 #endif
9686 Register scale_reg = noreg;
9687 Address::ScaleFactor scale_factor = Address::no_scale;
9688 if (arg_slot.is_constant()) {
9689 offset += arg_slot.as_constant() * stackElementSize;
9690 } else {
9691 scale_reg = arg_slot.as_register();
9692 scale_factor = Address::times(stackElementSize);
9693 }
9694 offset += wordSize; // return PC is on stack
9695 return Address(rsp, scale_reg, scale_factor, offset);
9696 }
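// The operand built above addresses interpreter stack slot 'arg_slot' past
// the return PC; as arithmetic (illustrative):
//
//   base = expr_offset_in_bytes(extra_slot_offset) + wordSize;  // skip ret PC
//   constant slot:  [rsp + base + arg_slot * stackElementSize]
//   register slot:  [rsp + arg_slot_reg * stackElementSize + base]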
9697
9698
9699 void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
9700 if (!VerifyOops) return;
9701
9702 // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
9703 // Pass register number to verify_oop_subroutine
9704 char* b = new char[strlen(s) + 50];
9705 sprintf(b, "verify_oop_addr: %s", s);
9706
9707 #ifdef _LP64
9708 push(rscratch1); // save r10, trashed by movptr()
9709 #endif
9710 push(rax); // save rax
9711 // addr may contain rsp so we will have to adjust it based on the push
9712 // we just did (and on 64 bit we do two pushes)
9713 // NOTE: 64-bit seems to have had a bug in that it did movq(addr, rax),
9714 // which stores rax into addr, the reverse of what was intended.
9715 if (addr.uses(rsp)) {
9716 lea(rax, addr);
9717 pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
9718 } else {
9719 pushptr(addr);
9720 }
9721
9722 ExternalAddress buffer((address) b);
9723 // pass msg argument
9724 // avoid using pushptr, as it modifies scratch registers
9725 // and our contract is not to modify anything
9726 movptr(rax, buffer.addr());
9727 push(rax);
9728
9729 // call indirectly to solve generation ordering problem
9730 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
9731 call(rax);
9732 // Caller pops the arguments (addr, message) and restores rax, r10.
9733 }
9734
9735 void MacroAssembler::verify_tlab() {
9736 #ifdef ASSERT
9737 if (UseTLAB && VerifyOops) {
9738 Label next, ok;
9739 Register t1 = rsi;
9740 Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread);
9741
9742 push(t1);
9743 NOT_LP64(push(thread_reg));
9744 NOT_LP64(get_thread(thread_reg));
9745
9746 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
9747 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
9748 jcc(Assembler::aboveEqual, next);
9749 STOP("assert(top >= start)");
9750 should_not_reach_here();
9751
9752 bind(next);
9753 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
9754 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
9755 jcc(Assembler::aboveEqual, ok);
9756 STOP("assert(top <= end)");
9757 should_not_reach_here();
9758
9759 bind(ok);
9760 NOT_LP64(pop(thread_reg));
9761 pop(t1);
9762 }
9763 #endif
9764 }
9765
9766 class ControlWord {
9767 public:
9768 int32_t _value;
9769
9770 int rounding_control() const { return (_value >> 10) & 3 ; }
9771 int precision_control() const { return (_value >> 8) & 3 ; }
9772 bool precision() const { return ((_value >> 5) & 1) != 0; }
9773 bool underflow() const { return ((_value >> 4) & 1) != 0; }
9774 bool overflow() const { return ((_value >> 3) & 1) != 0; }
9775 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
9776 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
9777 bool invalid() const { return ((_value >> 0) & 1) != 0; }
9778
9779 void print() const {
9780 // rounding control
9781 const char* rc;
9782 switch (rounding_control()) {
9783 case 0: rc = "round near"; break;
9784 case 1: rc = "round down"; break;
9785 case 2: rc = "round up "; break;
9786 case 3: rc = "chop "; break;
9787 }
9788 // precision control
9789 const char* pc;
9790 switch (precision_control()) {
9791 case 0: pc = "24 bits "; break;
9792 case 1: pc = "reserved"; break;
9793 case 2: pc = "53 bits "; break;
9794 case 3: pc = "64 bits "; break;
9795 }
9796 // flags
9797 char f[9];
9798 f[0] = ' ';
9799 f[1] = ' ';
9800 f[2] = (precision ()) ? 'P' : 'p';
9801 f[3] = (underflow ()) ? 'U' : 'u';
9802 f[4] = (overflow ()) ? 'O' : 'o';
9803 f[5] = (zero_divide ()) ? 'Z' : 'z';
9804 f[6] = (denormalized()) ? 'D' : 'd';
9805 f[7] = (invalid ()) ? 'I' : 'i';
9806 f[8] = '\x0';
9807 // output
9808 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
9809 }
9810
9811 };
9812
9813 class StatusWord {
9814 public:
9815 int32_t _value;
9816
9817 bool busy() const { return ((_value >> 15) & 1) != 0; }
9818 bool C3() const { return ((_value >> 14) & 1) != 0; }
9819 bool C2() const { return ((_value >> 10) & 1) != 0; }
9820 bool C1() const { return ((_value >> 9) & 1) != 0; }
9821 bool C0() const { return ((_value >> 8) & 1) != 0; }
9822 int top() const { return (_value >> 11) & 7 ; }
9823 bool error_status() const { return ((_value >> 7) & 1) != 0; }
9824 bool stack_fault() const { return ((_value >> 6) & 1) != 0; }
9825 bool precision() const { return ((_value >> 5) & 1) != 0; }
9826 bool underflow() const { return ((_value >> 4) & 1) != 0; }
9827 bool overflow() const { return ((_value >> 3) & 1) != 0; }
9828 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
9829 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
9830 bool invalid() const { return ((_value >> 0) & 1) != 0; }
9831
9832 void print() const {
9833 // condition codes
9834 char c[5];
9835 c[0] = (C3()) ? '3' : '-';
9836 c[1] = (C2()) ? '2' : '-';
9837 c[2] = (C1()) ? '1' : '-';
9838 c[3] = (C0()) ? '0' : '-';
9839 c[4] = '\x0';
9840 // flags
9841 char f[9];
9842 f[0] = (error_status()) ? 'E' : '-';
9843 f[1] = (stack_fault ()) ? 'S' : '-';
9844 f[2] = (precision ()) ? 'P' : '-';
9845 f[3] = (underflow ()) ? 'U' : '-';
9846 f[4] = (overflow ()) ? 'O' : '-';
9847 f[5] = (zero_divide ()) ? 'Z' : '-';
9848 f[6] = (denormalized()) ? 'D' : '-';
9849 f[7] = (invalid ()) ? 'I' : '-';
9850 f[8] = '\x0';
9851 // output
9852 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top());
9853 }
9854
9855 };
9856
9857 class TagWord {
9858 public:
9859 int32_t _value;
9860
9861 int tag_at(int i) const { return (_value >> (i*2)) & 3; }
9862
9863 void print() const {
9864 printf("%04x", _value & 0xFFFF);
9865 }
9866
9867 };
9868
9869 class FPU_Register {
9870 public:
9871 int32_t _m0;
9872 int32_t _m1;
9873 int16_t _ex;
9874
9875 bool is_indefinite() const {
9876 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
9877 }
9878
9879 void print() const {
9880 char sign = (_ex < 0) ? '-' : '+';
9881 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " ";
9882 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind);
9883 }
9884
9885 };
9886
9887 class FPU_State {
9888 public:
9889 enum {
9890 register_size = 10,
9891 number_of_registers = 8,
9892 register_mask = 7
9893 };
9894
9895 ControlWord _control_word;
9896 StatusWord _status_word;
9897 TagWord _tag_word;
9898 int32_t _error_offset;
9899 int32_t _error_selector;
9900 int32_t _data_offset;
9901 int32_t _data_selector;
9902 int8_t _register[register_size * number_of_registers];
9903
9904 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
9905 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; }
9906
9907 const char* tag_as_string(int tag) const {
9908 switch (tag) {
9909 case 0: return "valid";
9910 case 1: return "zero";
9911 case 2: return "special";
9912 case 3: return "empty";
9913 }
9914 ShouldNotReachHere();
9915 return NULL;
9916 }
9917
9918 void print() const {
9919 // print computation registers
9920 { int t = _status_word.top();
9921 for (int i = 0; i < number_of_registers; i++) {
9922 int j = (i - t) & register_mask;
9923 printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
9924 st(j)->print();
9925 printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
9926 }
9927 }
9928 printf("\n");
9929 // print control registers
9930 printf("ctrl = "); _control_word.print(); printf("\n");
9931 printf("stat = "); _status_word .print(); printf("\n");
9932 printf("tags = "); _tag_word .print(); printf("\n");
9933 }
9934
9935 };
9936
9937 class Flag_Register {
9938 public:
9939 int32_t _value;
9940
9941 bool overflow() const { return ((_value >> 11) & 1) != 0; }
9942 bool direction() const { return ((_value >> 10) & 1) != 0; }
9943 bool sign() const { return ((_value >> 7) & 1) != 0; }
9944 bool zero() const { return ((_value >> 6) & 1) != 0; }
9945 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; }
9946 bool parity() const { return ((_value >> 2) & 1) != 0; }
9947 bool carry() const { return ((_value >> 0) & 1) != 0; }
9948
9949 void print() const {
9950 // flags
9951 char f[8];
9952 f[0] = (overflow ()) ? 'O' : '-';
9953 f[1] = (direction ()) ? 'D' : '-';
9954 f[2] = (sign ()) ? 'S' : '-';
9955 f[3] = (zero ()) ? 'Z' : '-';
9956 f[4] = (auxiliary_carry()) ? 'A' : '-';
9957 f[5] = (parity ()) ? 'P' : '-';
9958 f[6] = (carry ()) ? 'C' : '-';
9959 f[7] = '\x0';
9960 // output
9961 printf("%08x flags = %s", _value, f);
9962 }
9963
9964 };
9965
9966 class IU_Register {
9967 public:
9968 int32_t _value;
9969
9970 void print() const {
9971 printf("%08x %11d", _value, _value);
9972 }
9973
9974 };
9975
9976 class IU_State {
9977 public:
9978 Flag_Register _eflags;
9979 IU_Register _rdi;
9980 IU_Register _rsi;
9981 IU_Register _rbp;
9982 IU_Register _rsp;
9983 IU_Register _rbx;
9984 IU_Register _rdx;
9985 IU_Register _rcx;
9986 IU_Register _rax;
9987
9988 void print() const {
9989 // computation registers
9990 printf("rax, = "); _rax.print(); printf("\n");
9991 printf("rbx, = "); _rbx.print(); printf("\n");
9992 printf("rcx = "); _rcx.print(); printf("\n");
9993 printf("rdx = "); _rdx.print(); printf("\n");
9994 printf("rdi = "); _rdi.print(); printf("\n");
9995 printf("rsi = "); _rsi.print(); printf("\n");
9996 printf("rbp, = "); _rbp.print(); printf("\n");
9997 printf("rsp = "); _rsp.print(); printf("\n");
9998 printf("\n");
9999 // control registers
10000 printf("flgs = "); _eflags.print(); printf("\n");
10001 }
10002 };
10003
10004
10005 class CPU_State {
10006 public:
10007 FPU_State _fpu_state;
10008 IU_State _iu_state;
10009
10010 void print() const {
10011 printf("--------------------------------------------------\n");
10012 _iu_state .print();
10013 printf("\n");
10014 _fpu_state.print();
10015 printf("--------------------------------------------------\n");
10016 }
10017
10018 };
10019
10020
10021 static void _print_CPU_state(CPU_State* state) {
10022 state->print();
10023 }
10024
10025
10026 void MacroAssembler::print_CPU_state() {
10027 push_CPU_state();
10028 push(rsp); // pass CPU state
10029 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
10030 addptr(rsp, wordSize); // discard argument
10031 pop_CPU_state();
10032 }
10033
10034
10035 static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
10036 static int counter = 0;
10037 FPU_State* fs = &state->_fpu_state;
10038 counter++;
10039 // For leaf calls, only verify that the top few elements remain empty.
10040 // We only need 1 empty at the top for C2 code.
10041 if (stack_depth < 0) {
10042 if (fs->tag_for_st(7) != 3) {
10043 printf("FPR7 not empty\n");
10044 state->print();
10045 assert(false, "error");
10046 return false;
10047 }
10048 return true; // All other stack states do not matter
10049 }
10050
10051 assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std,
10052 "bad FPU control word");
10053
10054 // compute stack depth
10055 int i = 0;
10056 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++;
10057 int d = i;
10058 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
10059 // verify findings
10060 if (i != FPU_State::number_of_registers) {
10061 // stack not contiguous
10062 printf("%s: stack not contiguous at ST%d\n", s, i);
10063 state->print();
10064 assert(false, "error");
10065 return false;
10066 }
10067 // check if computed stack depth corresponds to expected stack depth
10068 if (stack_depth < 0) {
10069 // expected stack depth is -stack_depth or less (note: negative depths already returned above)
10070 if (d > -stack_depth) {
10071 // too many elements on the stack
10072 printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
10073 state->print();
10074 assert(false, "error");
10075 return false;
10076 }
10077 } else {
10078 // expected stack depth is stack_depth
10079 if (d != stack_depth) {
10080 // wrong stack depth
10081 printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
10082 state->print();
10083 assert(false, "error");
10084 return false;
10085 }
10086 }
10087 // everything is cool
10088 return true;
10089 }
10090
10091
10092 void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
10093 if (!VerifyFPU) return;
10094 push_CPU_state();
10095 push(rsp); // pass CPU state
10096 ExternalAddress msg((address) s);
10097 // pass message string s
10098 pushptr(msg.addr());
10099 push(stack_depth); // pass stack depth
10100 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
10101 addptr(rsp, 3 * wordSize); // discard arguments
10102 // check for error
10103 { Label L;
10104 testl(rax, rax);
10105 jcc(Assembler::notZero, L);
10106 int3(); // break if error condition
10107 bind(L);
10108 }
10109 pop_CPU_state();
10110 }
10111
10112 void MacroAssembler::load_klass(Register dst, Register src) {
10113 #ifdef _LP64
10114 if (UseCompressedKlassPointers) {
10115 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
10116 decode_klass_not_null(dst);
10117 } else
10118 #endif
10119 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
10120 }
10121
10122 void MacroAssembler::load_prototype_header(Register dst, Register src) {
10123 #ifdef _LP64
10124 if (UseCompressedKlassPointers) {
10125 assert (Universe::heap() != NULL, "java heap should be initialized");
10126 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
10127 if (Universe::narrow_klass_shift() != 0) {
10128 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
10129 assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
10130 movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset()));
10131 } else {
10132 movq(dst, Address(dst, Klass::prototype_header_offset()));
10133 }
10134 } else
10135 #endif
10136 {
10137 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
10138 movptr(dst, Address(dst, Klass::prototype_header_offset()));
10139 }
10140 }
10141
10142 void MacroAssembler::store_klass(Register dst, Register src) {
10143 #ifdef _LP64
10144 if (UseCompressedKlassPointers) {
10145 encode_klass_not_null(src);
10146 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
10147 } else
10148 #endif
10149 movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
10150 }
10151
10152 void MacroAssembler::load_heap_oop(Register dst, Address src) {
10153 #ifdef _LP64
10154 // FIXME: Must change all places where we try to load the klass.
10155 if (UseCompressedOops) {
10156 movl(dst, src);
10157 decode_heap_oop(dst);
10158 } else
10159 #endif
10160 movptr(dst, src);
10161 }
10162
10163 // Doesn't do verification; generates fixed-size code
10164 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) {
10165 #ifdef _LP64
10166 if (UseCompressedOops) {
10167 movl(dst, src);
10168 decode_heap_oop_not_null(dst);
10169 } else
10170 #endif
10171 movptr(dst, src);
10172 }
10173
10174 void MacroAssembler::store_heap_oop(Address dst, Register src) {
10175 #ifdef _LP64
10176 if (UseCompressedOops) {
10177 assert(!dst.uses(src), "not enough registers");
10178 encode_heap_oop(src);
10179 movl(dst, src);
10180 } else
10181 #endif
10182 movptr(dst, src);
10183 }
10184
10185 void MacroAssembler::cmp_heap_oop(Register src1, Address src2, Register tmp) {
10186 assert_different_registers(src1, tmp);
10187 #ifdef _LP64
10188 if (UseCompressedOops) {
10189 bool did_push = false;
10190 if (tmp == noreg) {
10191 tmp = rax;
10192 push(tmp);
10193 did_push = true;
10194 assert(!src2.uses(rsp), "can't push");
10195 }
10196 load_heap_oop(tmp, src2);
10197 cmpptr(src1, tmp);
10198 if (did_push) pop(tmp);
10199 } else
10200 #endif
10201 cmpptr(src1, src2);
10202 }
10203
10204 // Used for storing NULLs.
10205 void MacroAssembler::store_heap_oop_null(Address dst) {
10206 #ifdef _LP64
10207 if (UseCompressedOops) {
10208 movl(dst, (int32_t)NULL_WORD);
10209 } else {
10210 movslq(dst, (int32_t)NULL_WORD);
10211 }
10212 #else
10213 movl(dst, (int32_t)NULL_WORD);
10214 #endif
10215 }
10216
10217 #ifdef _LP64
10218 void MacroAssembler::store_klass_gap(Register dst, Register src) {
10219 if (UseCompressedKlassPointers) {
10220 // Store to klass gap in destination
10221 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
10222 }
10223 }
10224
10225 #ifdef ASSERT
10226 void MacroAssembler::verify_heapbase(const char* msg) {
10227 assert (UseCompressedOops || UseCompressedKlassPointers, "should be compressed");
10228 assert (Universe::heap() != NULL, "java heap should be initialized");
10229 if (CheckCompressedOops) {
10230 Label ok;
10231 push(rscratch1); // cmpptr trashes rscratch1
10232 cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
10233 jcc(Assembler::equal, ok);
10234 STOP(msg);
10235 bind(ok);
10236 pop(rscratch1);
10237 }
10238 }
10239 #endif
10240
10241 // Algorithm must match oop.inline.hpp encode_heap_oop.
10242 void MacroAssembler::encode_heap_oop(Register r) {
10243 #ifdef ASSERT
10244 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
10245 #endif
10246 verify_oop(r, "broken oop in encode_heap_oop");
10247 if (Universe::narrow_oop_base() == NULL) {
10248 if (Universe::narrow_oop_shift() != 0) {
10249 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
10250 shrq(r, LogMinObjAlignmentInBytes);
10251 }
10252 return;
10253 }
10254 testq(r, r);
10255 cmovq(Assembler::equal, r, r12_heapbase);
10256 subq(r, r12_heapbase);
10257 shrq(r, LogMinObjAlignmentInBytes);
10258 }
10259
10260 void MacroAssembler::encode_heap_oop_not_null(Register r) {
10261 #ifdef ASSERT
10262 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
10263 if (CheckCompressedOops) {
10264 Label ok;
10265 testq(r, r);
10266 jcc(Assembler::notEqual, ok);
10267 STOP("null oop passed to encode_heap_oop_not_null");
10268 bind(ok);
10269 }
10270 #endif
10271 verify_oop(r, "broken oop in encode_heap_oop_not_null");
10272 if (Universe::narrow_oop_base() != NULL) {
10273 subq(r, r12_heapbase);
10274 }
10275 if (Universe::narrow_oop_shift() != 0) {
10276 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
10277 shrq(r, LogMinObjAlignmentInBytes);
10278 }
10279 }
10280
10281 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
10282 #ifdef ASSERT
10283 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
10284 if (CheckCompressedOops) {
10285 Label ok;
10286 testq(src, src);
10287 jcc(Assembler::notEqual, ok);
10288 STOP("null oop passed to encode_heap_oop_not_null2");
10289 bind(ok);
10290 }
10291 #endif
10292 verify_oop(src, "broken oop in encode_heap_oop_not_null2");
10293 if (dst != src) {
10294 movq(dst, src);
10295 }
10296 if (Universe::narrow_oop_base() != NULL) {
10297 subq(dst, r12_heapbase);
10298 }
10299 if (Universe::narrow_oop_shift() != 0) {
10300 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
10301 shrq(dst, LogMinObjAlignmentInBytes);
10302 }
10303 }
10304
10305 void MacroAssembler::decode_heap_oop(Register r) {
10306 #ifdef ASSERT
10307 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
10308 #endif
10309 if (Universe::narrow_oop_base() == NULL) {
10310 if (Universe::narrow_oop_shift() != 0) {
10311 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
10312 shlq(r, LogMinObjAlignmentInBytes);
10313 }
10314 } else {
10315 Label done;
10316 shlq(r, LogMinObjAlignmentInBytes);
10317 jccb(Assembler::equal, done);
10318 addq(r, r12_heapbase);
10319 bind(done);
10320 }
10321 verify_oop(r, "broken oop in decode_heap_oop");
10322 }
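// Editor's note: a minimal sketch (not VM code) of the compressed-oop
// arithmetic the encode/decode routines above implement, in terms of the
// Universe::narrow_oop_base()/narrow_oop_shift() parameters. The helper
// names are hypothetical. NULL maps to 0 in both directions, exactly as
// the cmov/jcc handling above arranges.
//
//   static uint32_t encode_oop(uintptr_t oop, uintptr_t base, int shift) {
//     if (oop == 0) return 0;                      // NULL stays NULL
//     return (uint32_t)((oop - base) >> shift);    // compress
//   }
//   static uintptr_t decode_oop(uint32_t narrow, uintptr_t base, int shift) {
//     if (narrow == 0) return 0;                   // NULL stays NULL
//     return base + ((uintptr_t)narrow << shift);  // decompress
//   }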
10323
10324 void MacroAssembler::decode_heap_oop_not_null(Register r) {
10325 // Note: it will change flags
10326 assert (UseCompressedOops, "should only be used for compressed headers");
10327 assert (Universe::heap() != NULL, "java heap should be initialized");
10328 // Cannot assert, unverified entry point counts instructions (see .ad file)
10329 // vtableStubs also counts instructions in pd_code_size_limit.
10330 // Also do not verify_oop as this is called by verify_oop.
10331 if (Universe::narrow_oop_shift() != 0) {
10332 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
10333 shlq(r, LogMinObjAlignmentInBytes);
10334 if (Universe::narrow_oop_base() != NULL) {
10335 addq(r, r12_heapbase);
10336 }
10337 } else {
10338 assert (Universe::narrow_oop_base() == NULL, "sanity");
10339 }
10340 }
10341
10342 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
10343 // Note: it will change flags
10344 assert (UseCompressedOops, "should only be used for compressed headers");
10345 assert (Universe::heap() != NULL, "java heap should be initialized");
10346 // Cannot assert, unverified entry point counts instructions (see .ad file)
10347 // vtableStubs also counts instructions in pd_code_size_limit.
10348 // Also do not verify_oop as this is called by verify_oop.
10349 if (Universe::narrow_oop_shift() != 0) {
10350 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
10351 if (LogMinObjAlignmentInBytes == Address::times_8) {
10352 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
10353 } else {
10354 if (dst != src) {
10355 movq(dst, src);
10356 }
10357 shlq(dst, LogMinObjAlignmentInBytes);
10358 if (Universe::narrow_oop_base() != NULL) {
10359 addq(dst, r12_heapbase);
10360 }
10361 }
10362 } else {
10363 assert (Universe::narrow_oop_base() == NULL, "sanity");
10364 if (dst != src) {
10365 movq(dst, src);
10366 }
10367 }
10368 }
10369
10370 void MacroAssembler::encode_klass_not_null(Register r) {
10371 assert(Metaspace::is_initialized(), "metaspace should be initialized");
10372 #ifdef ASSERT
10373 verify_heapbase("MacroAssembler::encode_klass_not_null: heap base corrupted?");
10374 #endif
10375 if (Universe::narrow_klass_base() != NULL) {
10376 subq(r, r12_heapbase);
10377 }
10378 if (Universe::narrow_klass_shift() != 0) {
10379 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
10380 shrq(r, LogKlassAlignmentInBytes);
10381 }
10382 }
10383
10384 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
10385 assert(Metaspace::is_initialized(), "metaspace should be initialized");
10386 #ifdef ASSERT
10387 verify_heapbase("MacroAssembler::encode_klass_not_null2: heap base corrupted?");
10388 #endif
10389 if (dst != src) {
10390 movq(dst, src);
10391 }
10392 if (Universe::narrow_klass_base() != NULL) {
10393 subq(dst, r12_heapbase);
10394 }
10395 if (Universe::narrow_klass_shift() != 0) {
10396 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
10397 shrq(dst, LogKlassAlignmentInBytes);
10398 }
10399 }
10400
10401 void MacroAssembler::decode_klass_not_null(Register r) {
10402 assert(Metaspace::is_initialized(), "metaspace should be initialized");
10403 // Note: it will change flags
10404 assert (UseCompressedKlassPointers, "should only be used for compressed headers");
10405 // Cannot assert, unverified entry point counts instructions (see .ad file)
10406 // vtableStubs also counts instructions in pd_code_size_limit.
10407 // Also do not verify_oop as this is called by verify_oop.
10408 if (Universe::narrow_klass_shift() != 0) {
10409 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
10410 shlq(r, LogKlassAlignmentInBytes);
10411 if (Universe::narrow_klass_base() != NULL) {
10412 addq(r, r12_heapbase);
10413 }
10414 } else {
10415 assert (Universe::narrow_klass_base() == NULL, "sanity");
10416 }
10417 }
10418
10419 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
10420 assert(Metaspace::is_initialized(), "metaspace should be initialized");
10421 // Note: it will change flags
10422 assert (UseCompressedKlassPointers, "should only be used for compressed headers");
10423 // Cannot assert, unverified entry point counts instructions (see .ad file)
10424 // vtableStubs also counts instructions in pd_code_size_limit.
10425 // Also do not verify_oop as this is called by verify_oop.
10426 if (Universe::narrow_klass_shift() != 0) {
10427 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
10428 assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
10429 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
10430 } else {
10431 assert (Universe::narrow_klass_base() == NULL, "sanity");
10432 if (dst != src) {
10433 movq(dst, src);
10434 }
10435 }
10436 }
10437
10438 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
10439 assert (UseCompressedOops, "should only be used for compressed headers");
10440 assert (Universe::heap() != NULL, "java heap should be initialized");
10441 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
10442 int oop_index = oop_recorder()->find_index(obj);
10443 RelocationHolder rspec = oop_Relocation::spec(oop_index);
10444 mov_narrow_oop(dst, oop_index, rspec);
10445 }
10446
10447 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
10448 assert (UseCompressedOops, "should only be used for compressed headers");
10449 assert (Universe::heap() != NULL, "java heap should be initialized");
10450 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
10451 int oop_index = oop_recorder()->find_index(obj);
10452 RelocationHolder rspec = oop_Relocation::spec(oop_index);
10453 mov_narrow_oop(dst, oop_index, rspec);
10454 }
10455
10456 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
10457 assert (UseCompressedKlassPointers, "should only be used for compressed headers");
10458 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
10459 int klass_index = oop_recorder()->find_index(k);
10460 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
10461 mov_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
10462 }
10463
10464 void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
10465 assert (UseCompressedKlassPointers, "should only be used for compressed headers");
10466 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
10467 int klass_index = oop_recorder()->find_index(k);
10468 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
10469 mov_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
10470 }
10471
10472 void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
10473 assert (UseCompressedOops, "should only be used for compressed headers");
10474 assert (Universe::heap() != NULL, "java heap should be initialized");
10475 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
10476 int oop_index = oop_recorder()->find_index(obj);
10477 RelocationHolder rspec = oop_Relocation::spec(oop_index);
10478 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
10479 }
10480
10481 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
10482 assert (UseCompressedOops, "should only be used for compressed headers");
10483 assert (Universe::heap() != NULL, "java heap should be initialized");
10484 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
10485 int oop_index = oop_recorder()->find_index(obj);
10486 RelocationHolder rspec = oop_Relocation::spec(oop_index);
10487 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
10488 }
10489
10490 void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
10491 assert (UseCompressedKlassPointers, "should only be used for compressed headers");
10492 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
10493 int klass_index = oop_recorder()->find_index(k);
10494 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
10495 Assembler::cmp_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
10496 }
10497
10498 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
10499 assert (UseCompressedKlassPointers, "should only be used for compressed headers");
10500 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
10501 int klass_index = oop_recorder()->find_index(k);
10502 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
10503 Assembler::cmp_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
10504 }
10505
10506 void MacroAssembler::reinit_heapbase() {
10507 if (UseCompressedOops || UseCompressedKlassPointers) {
10508 movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
10509 }
10510 }
10511 #endif // _LP64
10512
10513
10514 // C2 compiled method's prolog code.
10515 void MacroAssembler::verified_entry(int framesize, bool stack_bang, bool fp_mode_24b) {
10516
10517 // WARNING: Initial instruction MUST be 5 bytes or longer so that
10518 // NativeJump::patch_verified_entry will be able to patch out the entry
10519 // code safely. The push to verify stack depth is ok at 5 bytes;
10520 // the frame allocation can be either 3 or 6 bytes. So if we don't do
10521 // a stack bang then we must use the 6-byte frame allocation even if
10522 // we have no frame. :-(
10523
10524 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
10525 // Remove word for return addr
10526 framesize -= wordSize;
10527
10528 // Calls to C2R adapters often do not accept exceptional returns.
10529 // We require that their callers bang the stack for them. But be careful:
10530 // some VM calls (such as call site linkage) can use several kilobytes of
10531 // stack, and the stack safety zone should account for that.
10532 // See bugs 4446381, 4468289, 4497237.
10533 if (stack_bang) {
10534 generate_stack_overflow_check(framesize);
10535
10536 // We always push rbp, so that on return to interpreter rbp, will be
10537 // restored correctly and we can correct the stack.
10538 push(rbp);
10539 // Remove word for ebp
10540 framesize -= wordSize;
10541
10542 // Create frame
10543 if (framesize) {
10544 subptr(rsp, framesize);
10545 }
10546 } else {
10547 // Create frame (force generation of a 4 byte immediate value)
10548 subptr_imm32(rsp, framesize);
10549
10550 // Save RBP register now.
10551 framesize -= wordSize;
10552 movptr(Address(rsp, framesize), rbp);
10553 }
10554
10555 if (VerifyStackAtCalls) { // Majik cookie to verify stack depth
10556 framesize -= wordSize;
10557 movptr(Address(rsp, framesize), (int32_t)0xbadb100d);
10558 }
10559
10560 #ifndef _LP64
10561 // If method sets FPU control word do it now
10562 if (fp_mode_24b) {
10563 fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
10564 }
10565 if (UseSSE >= 2 && VerifyFPU) {
10566 verify_FPU(0, "FPU stack must be clean on entry");
10567 }
10568 #endif
10569
10570 #ifdef ASSERT
10571 if (VerifyStackAtCalls) {
10572 Label L;
10573 push(rax);
10574 mov(rax, rsp);
10575 andptr(rax, StackAlignmentInBytes-1);
10576 cmpptr(rax, StackAlignmentInBytes-wordSize);
10577 pop(rax);
10578 jcc(Assembler::equal, L);
10579 STOP("Stack is not properly aligned!");
10580 bind(L);
10581 }
10582 #endif
10583
10584 }
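// Editor's note: a sketch of the frame the prolog above builds (higher
// addresses first; framesize has already had the return-address word
// removed):
//
//   [ return address ]            <- rsp on entry
//   [ saved rbp      ]
//   [ 0xbadb100d     ]            <- only if VerifyStackAtCalls
//   [ spill slots / locals ... ]
//                                 <- rsp after the prolog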
10585
10586
10587 // IndexOf for constant substrings with size >= 8 chars
10588 // which don't need to be loaded through the stack.
10589 void MacroAssembler::string_indexofC8(Register str1, Register str2,
10590 Register cnt1, Register cnt2,
10591 int int_cnt2, Register result,
10592 XMMRegister vec, Register tmp) {
10593 ShortBranchVerifier sbv(this);
10594 assert(UseSSE42Intrinsics, "SSE4.2 is required");
10595
10596 // This method uses the pcmpestri instruction with bound registers
10597 // inputs:
10598 // xmm - substring
10599 // rax - substring length (elements count)
10600 // mem - scanned string
10601 // rdx - string length (elements count)
10602 // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
10603 // outputs:
10604 // rcx - matched index in string
10605 assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
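// Editor's note: the 0x0d immediate below decodes per the SSE4.2
// PCMPESTRI control byte: bits[1:0] = 01 (unsigned 16-bit elements),
// bits[3:2] = 11 (equal-ordered aggregation, i.e. substring search),
// bits[5:4] = 00 (positive polarity), bit 6 = 0 (report the least
// significant matching index in rcx).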
10606
10607 Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR,
10608 RET_FOUND, RET_NOT_FOUND, EXIT, FOUND_SUBSTR,
10609 MATCH_SUBSTR_HEAD, RELOAD_STR, FOUND_CANDIDATE;
10610
10611 // Note, inline_string_indexOf() generates checks:
10612 // if (substr.count > string.count) return -1;
10613 // if (substr.count == 0) return 0;
10614 assert(int_cnt2 >= 8, "this code is used only for cnt2 >= 8 chars");
10615
10616 // Load substring.
10617 movdqu(vec, Address(str2, 0));
10618 movl(cnt2, int_cnt2);
10619 movptr(result, str1); // string addr
10620
10621 if (int_cnt2 > 8) {
10622 jmpb(SCAN_TO_SUBSTR);
10623
10624 // Reload substr for rescan; this code
10625 // is executed only for large substrings (> 8 chars)
10626 bind(RELOAD_SUBSTR);
10627 movdqu(vec, Address(str2, 0));
10628 negptr(cnt2); // Jumped here with negative cnt2, convert to positive
10629
10630 bind(RELOAD_STR);
10631 // We came here after the beginning of the substring was
10632 // matched but the rest of it was not, so we need to search
10633 // again. Start from the next element after the previous match.
10634
10635 // cnt2 is the number of remaining substring elements and
10636 // cnt1 is the number of remaining string elements when the cmp failed.
10637 // Restored cnt1 = cnt1 - cnt2 + int_cnt2
10638 subl(cnt1, cnt2);
10639 addl(cnt1, int_cnt2);
10640 movl(cnt2, int_cnt2); // Now restore cnt2
10641
10642 decrementl(cnt1); // Shift to next element
10643 cmpl(cnt1, cnt2);
10644 jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
10645
10646 addptr(result, 2);
10647
10648 } // (int_cnt2 > 8)
10649
10650 // Scan string for start of substr in 16-byte vectors
10651 bind(SCAN_TO_SUBSTR);
10652 pcmpestri(vec, Address(result, 0), 0x0d);
10653 jccb(Assembler::below, FOUND_CANDIDATE); // CF == 1
10654 subl(cnt1, 8);
10655 jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
10656 cmpl(cnt1, cnt2);
10657 jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
10658 addptr(result, 16);
10659 jmpb(SCAN_TO_SUBSTR);
10660
10661 // Found a potential substr
10662 bind(FOUND_CANDIDATE);
10663 // Matched whole vector if first element matched (tmp(rcx) == 0).
10664 if (int_cnt2 == 8) {
10665 jccb(Assembler::overflow, RET_FOUND); // OF == 1
10666 } else { // int_cnt2 > 8
10667 jccb(Assembler::overflow, FOUND_SUBSTR);
10668 }
10669 // After pcmpestri tmp(rcx) contains matched element index
10670 // Compute start addr of substr
10671 lea(result, Address(result, tmp, Address::times_2));
10672
10673 // Make sure string is still long enough
10674 subl(cnt1, tmp);
10675 cmpl(cnt1, cnt2);
10676 if (int_cnt2 == 8) {
10677 jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
10678 } else { // int_cnt2 > 8
10679 jccb(Assembler::greaterEqual, MATCH_SUBSTR_HEAD);
10680 }
10681 // Left less than substring.
10682
10683 bind(RET_NOT_FOUND);
10684 movl(result, -1);
10685 jmpb(EXIT);
10686
10687 if (int_cnt2 > 8) {
10688 // This code is optimized for the case when the whole substring
10689 // matches whenever its head matches.
10690 bind(MATCH_SUBSTR_HEAD);
10691 pcmpestri(vec, Address(result, 0), 0x0d);
10692 // Reload only the string if it does not match
10693 jccb(Assembler::noOverflow, RELOAD_STR); // OF == 0
10694
10695 Label CONT_SCAN_SUBSTR;
10696 // Compare the rest of substring (> 8 chars).
10697 bind(FOUND_SUBSTR);
10698 // First 8 chars are already matched.
10699 negptr(cnt2);
10700 addptr(cnt2, 8);
10701
10702 bind(SCAN_SUBSTR);
10703 subl(cnt1, 8);
10704 cmpl(cnt2, -8); // Do not read beyond substring
10705 jccb(Assembler::lessEqual, CONT_SCAN_SUBSTR);
10706 // Back-up strings to avoid reading beyond substring:
10707 // cnt1 = cnt1 - cnt2 + 8
10708 addl(cnt1, cnt2); // cnt2 is negative
10709 addl(cnt1, 8);
10710 movl(cnt2, 8); negptr(cnt2);
10711 bind(CONT_SCAN_SUBSTR);
10712 if (int_cnt2 < (int)G) {
10713 movdqu(vec, Address(str2, cnt2, Address::times_2, int_cnt2*2));
10714 pcmpestri(vec, Address(result, cnt2, Address::times_2, int_cnt2*2), 0x0d);
10715 } else {
10716 // calculate index in register to avoid integer overflow (int_cnt2*2)
10717 movl(tmp, int_cnt2);
10718 addptr(tmp, cnt2);
10719 movdqu(vec, Address(str2, tmp, Address::times_2, 0));
10720 pcmpestri(vec, Address(result, tmp, Address::times_2, 0), 0x0d);
10721 }
10722 // Need to reload the string pointers if we did not match the whole vector
10723 jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
10724 addptr(cnt2, 8);
10725 jcc(Assembler::negative, SCAN_SUBSTR);
10726 // Fall through if found full substring
10727
10728 } // (int_cnt2 > 8)
10729
10730 bind(RET_FOUND);
10731 // Found result if we matched full small substring.
10732 // Compute substr offset
10733 subptr(result, str1);
10734 shrl(result, 1); // index
10735 bind(EXIT);
10736
10737 } // string_indexofC8
10738
10739 // Small strings are loaded through the stack if they cross a page boundary.
10740 void MacroAssembler::string_indexof(Register str1, Register str2,
10741 Register cnt1, Register cnt2,
10742 int int_cnt2, Register result,
10743 XMMRegister vec, Register tmp) {
10744 ShortBranchVerifier sbv(this);
10745 assert(UseSSE42Intrinsics, "SSE4.2 is required");
10746 //
10747 // int_cnt2 is the length of a small (< 8 chars) constant substring,
10748 // or (-1) for a non-constant substring, in which case its length
10749 // is in the cnt2 register.
10750 //
10751 // Note, inline_string_indexOf() generates checks:
10752 // if (substr.count > string.count) return -1;
10753 // if (substr.count == 0) return 0;
10754 //
10755 assert(int_cnt2 == -1 || (0 < int_cnt2 && int_cnt2 < 8), "should be != 0");
10756
10757 // This method uses the pcmpestri instruction with bound registers
10758 // inputs:
10759 // xmm - substring
10760 // rax - substring length (elements count)
10761 // mem - scanned string
10762 // rdx - string length (elements count)
10763 // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
10764 // outputs:
10765 // rcx - matched index in string
10766 assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
10767
10768 Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR, ADJUST_STR,
10769 RET_FOUND, RET_NOT_FOUND, CLEANUP, FOUND_SUBSTR,
10770 FOUND_CANDIDATE;
10771
10772 { //========================================================
10773 // We don't know where these strings are located
10774 // and we can't read beyond them. Load them through the stack.
10775 Label BIG_STRINGS, CHECK_STR, COPY_SUBSTR, COPY_STR;
10776
10777 movptr(tmp, rsp); // save old SP
10778
10779 if (int_cnt2 > 0) { // small (< 8 chars) constant substring
10780 if (int_cnt2 == 1) { // One char
10781 load_unsigned_short(result, Address(str2, 0));
10782 movdl(vec, result); // move 32 bits
10783 } else if (int_cnt2 == 2) { // Two chars
10784 movdl(vec, Address(str2, 0)); // move 32 bits
10785 } else if (int_cnt2 == 4) { // Four chars
10786 movq(vec, Address(str2, 0)); // move 64 bits
10787 } else { // cnt2 = { 3, 5, 6, 7 }
10788 // Array header size is 12 bytes in 32-bit VM
10789 // + 6 bytes for 3 chars == 18 bytes,
10790 // enough space to load vec and shift.
10791 assert(HeapWordSize*TypeArrayKlass::header_size() >= 12,"sanity");
10792 movdqu(vec, Address(str2, (int_cnt2*2)-16));
10793 psrldq(vec, 16-(int_cnt2*2));
10794 }
10795 } else { // not constant substring
10796 cmpl(cnt2, 8);
10797 jccb(Assembler::aboveEqual, BIG_STRINGS); // Both strings are big enough
10798
10799 // We can read beyond the string if str+16 does not cross a page boundary
10800 // since heaps are aligned and mapped by pages.
10801 assert(os::vm_page_size() < (int)G, "default page should be small");
10802 movl(result, str2); // We need only low 32 bits
10803 andl(result, (os::vm_page_size()-1));
10804 cmpl(result, (os::vm_page_size()-16));
10805 jccb(Assembler::belowEqual, CHECK_STR);
10806
10807 // Move small strings to the stack to allow loading 16 bytes into vec.
10808 subptr(rsp, 16);
10809 int stk_offset = wordSize-2;
10810 push(cnt2);
10811
10812 bind(COPY_SUBSTR);
10813 load_unsigned_short(result, Address(str2, cnt2, Address::times_2, -2));
10814 movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
10815 decrement(cnt2);
10816 jccb(Assembler::notZero, COPY_SUBSTR);
10817
10818 pop(cnt2);
10819 movptr(str2, rsp); // New substring address
10820 } // non constant
10821
10822 bind(CHECK_STR);
10823 cmpl(cnt1, 8);
10824 jccb(Assembler::aboveEqual, BIG_STRINGS);
10825
10826 // Check cross page boundary.
10827 movl(result, str1); // We need only low 32 bits
10828 andl(result, (os::vm_page_size()-1));
10829 cmpl(result, (os::vm_page_size()-16));
10830 jccb(Assembler::belowEqual, BIG_STRINGS);
10831
10832 subptr(rsp, 16);
10833 int stk_offset = -2;
10834 if (int_cnt2 < 0) { // not constant
10835 push(cnt2);
10836 stk_offset += wordSize;
10837 }
10838 movl(cnt2, cnt1);
10839
10840 bind(COPY_STR);
10841 load_unsigned_short(result, Address(str1, cnt2, Address::times_2, -2));
10842 movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
10843 decrement(cnt2);
10844 jccb(Assembler::notZero, COPY_STR);
10845
10846 if (int_cnt2 < 0) { // not constant
10847 pop(cnt2);
10848 }
10849 movptr(str1, rsp); // New string address
10850
10851 bind(BIG_STRINGS);
10852 // Load substring.
10853 if (int_cnt2 < 0) { // -1
10854 movdqu(vec, Address(str2, 0));
10855 push(cnt2); // substr count
10856 push(str2); // substr addr
10857 push(str1); // string addr
10858 } else {
10859 // Small (< 8 chars) constant substrings are loaded already.
10860 movl(cnt2, int_cnt2);
10861 }
10862 push(tmp); // original SP
10863
10864 } // Finished loading
10865
10866 //========================================================
10867 // Start search
10868 //
10869
10870 movptr(result, str1); // string addr
10871
10872 if (int_cnt2 < 0) { // Only for non constant substring
10873 jmpb(SCAN_TO_SUBSTR);
10874
10875 // SP saved at sp+0
10876 // String saved at sp+1*wordSize
10877 // Substr saved at sp+2*wordSize
10878 // Substr count saved at sp+3*wordSize
10879
10880 // Reload substr for rescan; this code
10881 // is executed only for large substrings (> 8 chars)
10882 bind(RELOAD_SUBSTR);
10883 movptr(str2, Address(rsp, 2*wordSize));
10884 movl(cnt2, Address(rsp, 3*wordSize));
10885 movdqu(vec, Address(str2, 0));
10886 // We came here after the beginning of the substring was
10887 // matched but the rest of it was not, so we need to search
10888 // again. Start from the next element after the previous match.
10889 subptr(str1, result); // Restore counter
10890 shrl(str1, 1);
10891 addl(cnt1, str1);
10892 decrementl(cnt1); // Shift to next element
10893 cmpl(cnt1, cnt2);
10894 jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
10895
10896 addptr(result, 2);
10897 } // non constant
10898
10899 // Scan string for start of substr in 16-byte vectors
10900 bind(SCAN_TO_SUBSTR);
10901 assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
10902 pcmpestri(vec, Address(result, 0), 0x0d);
10903 jccb(Assembler::below, FOUND_CANDIDATE); // CF == 1
10904 subl(cnt1, 8);
10905 jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
10906 cmpl(cnt1, cnt2);
10907 jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
10908 addptr(result, 16);
10909
10910 bind(ADJUST_STR);
10911 cmpl(cnt1, 8); // Do not read beyond string
10912 jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
10913 // Back-up string to avoid reading beyond string.
10914 lea(result, Address(result, cnt1, Address::times_2, -16));
10915 movl(cnt1, 8);
10916 jmpb(SCAN_TO_SUBSTR);
10917
10918 // Found a potential substr
10919 bind(FOUND_CANDIDATE);
10920 // After pcmpestri tmp(rcx) contains matched element index
10921
10922 // Make sure string is still long enough
10923 subl(cnt1, tmp);
10924 cmpl(cnt1, cnt2);
10925 jccb(Assembler::greaterEqual, FOUND_SUBSTR);
10926 // Left less than substring.
10927
10928 bind(RET_NOT_FOUND);
10929 movl(result, -1);
10930 jmpb(CLEANUP);
10931
10932 bind(FOUND_SUBSTR);
10933 // Compute start addr of substr
10934 lea(result, Address(result, tmp, Address::times_2));
10935
10936 if (int_cnt2 > 0) { // Constant substring
10937 // Repeat search for small substring (< 8 chars)
10938 // from new point without reloading substring.
10939 // Have to check that we don't read beyond string.
10940 cmpl(tmp, 8-int_cnt2);
10941 jccb(Assembler::greater, ADJUST_STR);
10942 // Fall through if matched whole substring.
10943 } else { // non constant
10944 assert(int_cnt2 == -1, "should be != 0");
10945
10946 addl(tmp, cnt2);
10947 // Found result if we matched whole substring.
10948 cmpl(tmp, 8);
10949 jccb(Assembler::lessEqual, RET_FOUND);
10950
10951 // Repeat search for small substring (<= 8 chars)
10952 // from new point 'str1' without reloading substring.
10953 cmpl(cnt2, 8);
10954 // Have to check that we don't read beyond string.
10955 jccb(Assembler::lessEqual, ADJUST_STR);
10956
10957 Label CHECK_NEXT, CONT_SCAN_SUBSTR, RET_FOUND_LONG;
10958 // Compare the rest of substring (> 8 chars).
10959 movptr(str1, result);
10960
10961 cmpl(tmp, cnt2);
10962 // First 8 chars are already matched.
10963 jccb(Assembler::equal, CHECK_NEXT);
10964
10965 bind(SCAN_SUBSTR);
10966 pcmpestri(vec, Address(str1, 0), 0x0d);
10967 // Need to reload the string pointers if we did not match the whole vector
10968 jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
10969
10970 bind(CHECK_NEXT);
10971 subl(cnt2, 8);
10972 jccb(Assembler::lessEqual, RET_FOUND_LONG); // Found full substring
10973 addptr(str1, 16);
10974 addptr(str2, 16);
10975 subl(cnt1, 8);
10976 cmpl(cnt2, 8); // Do not read beyond substring
10977 jccb(Assembler::greaterEqual, CONT_SCAN_SUBSTR);
10978 // Back-up strings to avoid reading beyond substring.
10979 lea(str2, Address(str2, cnt2, Address::times_2, -16));
10980 lea(str1, Address(str1, cnt2, Address::times_2, -16));
10981 subl(cnt1, cnt2);
10982 movl(cnt2, 8);
10983 addl(cnt1, 8);
10984 bind(CONT_SCAN_SUBSTR);
10985 movdqu(vec, Address(str2, 0));
10986 jmpb(SCAN_SUBSTR);
10987
10988 bind(RET_FOUND_LONG);
10989 movptr(str1, Address(rsp, wordSize));
10990 } // non constant
10991
10992 bind(RET_FOUND);
10993 // Compute substr offset
10994 subptr(result, str1);
10995 shrl(result, 1); // index
10996
10997 bind(CLEANUP);
10998 pop(rsp); // restore SP
10999
11000 } // string_indexof
11001
11002 // Compare strings.
11003 void MacroAssembler::string_compare(Register str1, Register str2,
11004 Register cnt1, Register cnt2, Register result,
11005 XMMRegister vec1) {
11006 ShortBranchVerifier sbv(this);
11007 Label LENGTH_DIFF_LABEL, POP_LABEL, DONE_LABEL, WHILE_HEAD_LABEL;
11008
11009 // Compute the minimum of the string lengths, and push the
11010 // difference of the string lengths onto the stack.
11011 // Use a conditional move to select the minimum.
11012 movl(result, cnt1);
11013 subl(cnt1, cnt2);
11014 push(cnt1);
11015 cmov32(Assembler::lessEqual, cnt2, result);
11016
11017 // Is the minimum length zero?
11018 testl(cnt2, cnt2);
11019 jcc(Assembler::zero, LENGTH_DIFF_LABEL);
11020
11021 // Load first characters
11022 load_unsigned_short(result, Address(str1, 0));
11023 load_unsigned_short(cnt1, Address(str2, 0));
11024
11025 // Compare first characters
11026 subl(result, cnt1);
11027 jcc(Assembler::notZero, POP_LABEL);
11028 decrementl(cnt2);
11029 jcc(Assembler::zero, LENGTH_DIFF_LABEL);
11030
11031 {
11032 // Check after comparing first character to see if strings are equivalent
11033 Label LSkip2;
11034 // Check if the strings start at same location
11035 cmpptr(str1, str2);
11036 jccb(Assembler::notEqual, LSkip2);
11037
11038 // Check if the length difference is zero (from stack)
11039 cmpl(Address(rsp, 0), 0x0);
11040 jcc(Assembler::equal, LENGTH_DIFF_LABEL);
11041
11042 // Strings might not be equivalent
11043 bind(LSkip2);
11044 }
11045
11046 Address::ScaleFactor scale = Address::times_2;
11047 int stride = 8;
11048
11049 // Advance to next element
11050 addptr(str1, 16/stride);
11051 addptr(str2, 16/stride);
11052
11053 if (UseSSE42Intrinsics) {
11054 Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_TAIL;
11055 int pcmpmask = 0x19;
11056 // Setup to compare 16-byte vectors
11057 movl(result, cnt2);
11058 andl(cnt2, ~(stride - 1)); // cnt2 holds the vector count
11059 jccb(Assembler::zero, COMPARE_TAIL);
11060
11061 lea(str1, Address(str1, result, scale));
11062 lea(str2, Address(str2, result, scale));
11063 negptr(result);
11064
11065 // pcmpestri
11066 // inputs:
11067 // vec1- substring
11068 // rax - negative string length (elements count)
11069 // mem - scanned string
11070 // rdx - string length (elements count)
11071 // pcmpmask - cmp mode: 11000 (string compare with negated result)
11072 // + 00 (unsigned bytes) or + 01 (unsigned shorts)
11073 // outputs:
11074 // rcx - first mismatched element index
11075 assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");
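// Editor's note: 0x19 decodes per the SSE4.2 PCMPESTRI control byte as
// bits[1:0] = 01 (unsigned 16-bit elements), bits[3:2] = 10 (equal-each
// aggregation) and bits[5:4] = 01 (negated result), so rcx receives the
// index of the first *mismatched* element.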
11076
11077 bind(COMPARE_WIDE_VECTORS);
11078 movdqu(vec1, Address(str1, result, scale));
11079 pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
11080 // After pcmpestri cnt1(rcx) contains mismatched element index
11081
11082 jccb(Assembler::below, VECTOR_NOT_EQUAL); // CF==1
11083 addptr(result, stride);
11084 subptr(cnt2, stride);
11085 jccb(Assembler::notZero, COMPARE_WIDE_VECTORS);
11086
11087 // compare wide vectors tail
11088 testl(result, result);
11089 jccb(Assembler::zero, LENGTH_DIFF_LABEL);
11090
11091 movl(cnt2, stride);
11092 movl(result, stride);
11093 negptr(result);
11094 movdqu(vec1, Address(str1, result, scale));
11095 pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
11096 jccb(Assembler::aboveEqual, LENGTH_DIFF_LABEL);
11097
11098 // Mismatched characters in the vectors
11099 bind(VECTOR_NOT_EQUAL);
11100 addptr(result, cnt1);
11101 movptr(cnt2, result);
11102 load_unsigned_short(result, Address(str1, cnt2, scale));
11103 load_unsigned_short(cnt1, Address(str2, cnt2, scale));
11104 subl(result, cnt1);
11105 jmpb(POP_LABEL);
11106
11107 bind(COMPARE_TAIL); // limit is zero
11108 movl(cnt2, result);
11109 // Fallthru to tail compare
11110 }
11111
11112 // Shift str2 and str1 to the end of the arrays, negate min
11113 lea(str1, Address(str1, cnt2, scale, 0));
11114 lea(str2, Address(str2, cnt2, scale, 0));
11115 negptr(cnt2);
11116
11117 // Compare the rest of the elements
11118 bind(WHILE_HEAD_LABEL);
11119 load_unsigned_short(result, Address(str1, cnt2, scale, 0));
11120 load_unsigned_short(cnt1, Address(str2, cnt2, scale, 0));
11121 subl(result, cnt1);
11122 jccb(Assembler::notZero, POP_LABEL);
11123 increment(cnt2);
11124 jccb(Assembler::notZero, WHILE_HEAD_LABEL);
11125
11126 // Strings are equal up to min length. Return the length difference.
11127 bind(LENGTH_DIFF_LABEL);
11128 pop(result);
11129 jmpb(DONE_LABEL);
11130
11131 // Discard the stored length difference
11132 bind(POP_LABEL);
11133 pop(cnt1);
11134
11135 // That's it
11136 bind(DONE_LABEL);
11137 }
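// Editor's note: the return convention above matches String.compareTo:
// result is the difference of the first mismatched characters, or, when
// one string is a prefix of the other, the pushed length difference
// (cnt1 - cnt2).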
11138
11139 // Compare char[] arrays aligned to 4 bytes or substrings.
11140 void MacroAssembler::char_arrays_equals(bool is_array_equ, Register ary1, Register ary2,
11141 Register limit, Register result, Register chr,
11142 XMMRegister vec1, XMMRegister vec2) {
11143 ShortBranchVerifier sbv(this);
11144 Label TRUE_LABEL, FALSE_LABEL, DONE, COMPARE_VECTORS, COMPARE_CHAR;
11145
11146 int length_offset = arrayOopDesc::length_offset_in_bytes();
11147 int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR);
11148
11149 // Check the input args
11150 cmpptr(ary1, ary2);
11151 jcc(Assembler::equal, TRUE_LABEL);
11152
11153 if (is_array_equ) {
11154 // Need additional checks for arrays_equals.
11155 testptr(ary1, ary1);
11156 jcc(Assembler::zero, FALSE_LABEL);
11157 testptr(ary2, ary2);
11158 jcc(Assembler::zero, FALSE_LABEL);
11159
11160 // Check the lengths
11161 movl(limit, Address(ary1, length_offset));
11162 cmpl(limit, Address(ary2, length_offset));
11163 jcc(Assembler::notEqual, FALSE_LABEL);
11164 }
11165
11166 // count == 0
11167 testl(limit, limit);
11168 jcc(Assembler::zero, TRUE_LABEL);
11169
11170 if (is_array_equ) {
11171 // Load array address
11172 lea(ary1, Address(ary1, base_offset));
11173 lea(ary2, Address(ary2, base_offset));
11174 }
11175
11176 shll(limit, 1); // byte count != 0
11177 movl(result, limit); // copy
11178
11179 if (UseSSE42Intrinsics) {
11180 // With SSE4.2, use double quad vector compare
11181 Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
11182
11183 // Compare 16-byte vectors
11184 andl(result, 0x0000000e); // tail count (in bytes)
11185 andl(limit, 0xfffffff0); // vector count (in bytes)
11186 jccb(Assembler::zero, COMPARE_TAIL);
11187
11188 lea(ary1, Address(ary1, limit, Address::times_1));
11189 lea(ary2, Address(ary2, limit, Address::times_1));
11190 negptr(limit);
11191
11192 bind(COMPARE_WIDE_VECTORS);
11193 movdqu(vec1, Address(ary1, limit, Address::times_1));
11194 movdqu(vec2, Address(ary2, limit, Address::times_1));
11195 pxor(vec1, vec2);
11196
11197 ptest(vec1, vec1);
11198 jccb(Assembler::notZero, FALSE_LABEL);
11199 addptr(limit, 16);
11200 jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
11201
11202 testl(result, result);
11203 jccb(Assembler::zero, TRUE_LABEL);
11204
11205 movdqu(vec1, Address(ary1, result, Address::times_1, -16));
11206 movdqu(vec2, Address(ary2, result, Address::times_1, -16));
11207 pxor(vec1, vec2);
11208
11209 ptest(vec1, vec1);
11210 jccb(Assembler::notZero, FALSE_LABEL);
11211 jmpb(TRUE_LABEL);
11212
11213 bind(COMPARE_TAIL); // limit is zero
11214 movl(limit, result);
11215 // Fallthru to tail compare
11216 }
11217
11218 // Compare 4-byte vectors
11219 andl(limit, 0xfffffffc); // vector count (in bytes)
11220 jccb(Assembler::zero, COMPARE_CHAR);
11221
11222 lea(ary1, Address(ary1, limit, Address::times_1));
11223 lea(ary2, Address(ary2, limit, Address::times_1));
11224 negptr(limit);
11225
11226 bind(COMPARE_VECTORS);
11227 movl(chr, Address(ary1, limit, Address::times_1));
11228 cmpl(chr, Address(ary2, limit, Address::times_1));
11229 jccb(Assembler::notEqual, FALSE_LABEL);
11230 addptr(limit, 4);
11231 jcc(Assembler::notZero, COMPARE_VECTORS);
11232
11233 // Compare trailing char (final 2 bytes), if any
11234 bind(COMPARE_CHAR);
11235 testl(result, 0x2); // tail char
11236 jccb(Assembler::zero, TRUE_LABEL);
11237 load_unsigned_short(chr, Address(ary1, 0));
11238 load_unsigned_short(limit, Address(ary2, 0));
11239 cmpl(chr, limit);
11240 jccb(Assembler::notEqual, FALSE_LABEL);
11241
11242 bind(TRUE_LABEL);
11243 movl(result, 1); // return true
11244 jmpb(DONE);
11245
11246 bind(FALSE_LABEL);
11247 xorl(result, result); // return false
11248
11249 // That's it
11250 bind(DONE);
11251 }
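// Editor's note: the 16-byte equality test above relies on pxor/ptest:
// XOR of equal chunks is all-zero, and ptest of a value against itself
// sets ZF exactly when the value is zero. A scalar sketch of the idiom:
//
//   static bool chunks_equal(uint64_t a_lo, uint64_t a_hi,
//                            uint64_t b_lo, uint64_t b_hi) {
//     return ((a_lo ^ b_lo) | (a_hi ^ b_hi)) == 0;  // xor-and-test
//   }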
11252
11253 void MacroAssembler::generate_fill(BasicType t, bool aligned,
11254 Register to, Register value, Register count,
11255 Register rtmp, XMMRegister xtmp) {
11256 ShortBranchVerifier sbv(this);
11257 assert_different_registers(to, value, count, rtmp);
11258 Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
11259 Label L_fill_2_bytes, L_fill_4_bytes;
11260
11261 int shift = -1;
11262 switch (t) {
11263 case T_BYTE:
11264 shift = 2;
11265 break;
11266 case T_SHORT:
11267 shift = 1;
11268 break;
11269 case T_INT:
11270 shift = 0;
11271 break;
11272 default: ShouldNotReachHere();
11273 }
11274
11275 if (t == T_BYTE) {
11276 andl(value, 0xff);
11277 movl(rtmp, value);
11278 shll(rtmp, 8);
11279 orl(value, rtmp);
11280 }
11281 if (t == T_SHORT) {
11282 andl(value, 0xffff);
11283 }
11284 if (t == T_BYTE || t == T_SHORT) {
11285 movl(rtmp, value);
11286 shll(rtmp, 16);
11287 orl(value, rtmp);
11288 }
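// Editor's note: the shifts and ors above broadcast the fill value
// across 32 bits; for example, a byte value 0xAB becomes:
//   0x000000AB | (0x000000AB << 8)  == 0x0000ABAB
//   0x0000ABAB | (0x0000ABAB << 16) == 0xABABABAB
// and a short value 0xCDEF becomes 0xCDEFCDEF after the 16-bit step.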
11289
11290 cmpl(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
11291 jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
11292 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
11293 // align the destination address at a 4-byte boundary
11294 if (t == T_BYTE) {
11295 // One-byte misalignment happens only for byte arrays
11296 testptr(to, 1);
11297 jccb(Assembler::zero, L_skip_align1);
11298 movb(Address(to, 0), value);
11299 increment(to);
11300 decrement(count);
11301 BIND(L_skip_align1);
11302 }
11303 // Two-byte misalignment happens only for byte and short (char) arrays
11304 testptr(to, 2);
11305 jccb(Assembler::zero, L_skip_align2);
11306 movw(Address(to, 0), value);
11307 addptr(to, 2);
11308 subl(count, 1<<(shift-1));
11309 BIND(L_skip_align2);
11310 }
11311 if (UseSSE < 2) {
11312 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
11313 // Fill 32-byte chunks
11314 subl(count, 8 << shift);
11315 jcc(Assembler::less, L_check_fill_8_bytes);
11316 align(16);
11317
11318 BIND(L_fill_32_bytes_loop);
11319
11320 for (int i = 0; i < 32; i += 4) {
11321 movl(Address(to, i), value);
11322 }
11323
11324 addptr(to, 32);
11325 subl(count, 8 << shift);
11326 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
11327 BIND(L_check_fill_8_bytes);
11328 addl(count, 8 << shift);
11329 jccb(Assembler::zero, L_exit);
11330 jmpb(L_fill_8_bytes);
11331
11332 //
11333 // length is too short, just fill qwords
11334 //
11335 BIND(L_fill_8_bytes_loop);
11336 movl(Address(to, 0), value);
11337 movl(Address(to, 4), value);
11338 addptr(to, 8);
11339 BIND(L_fill_8_bytes);
11340 subl(count, 1 << (shift + 1));
11341 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
11342 // fall through to fill 4 bytes
11343 } else {
11344 Label L_fill_32_bytes;
11345 if (!UseUnalignedLoadStores) {
11346 // align to 8 bytes; we know we are 4-byte aligned to start
11347 testptr(to, 4);
11348 jccb(Assembler::zero, L_fill_32_bytes);
11349 movl(Address(to, 0), value);
11350 addptr(to, 4);
11351 subl(count, 1<<shift);
11352 }
11353 BIND(L_fill_32_bytes);
11354 {
11355 assert( UseSSE >= 2, "supported cpu only" );
11356 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
11357 // Fill 32-byte chunks
11358 movdl(xtmp, value);
11359 pshufd(xtmp, xtmp, 0);
11360
11361 subl(count, 8 << shift);
11362 jcc(Assembler::less, L_check_fill_8_bytes);
11363 align(16);
11364
11365 BIND(L_fill_32_bytes_loop);
11366
11367 if (UseUnalignedLoadStores) {
11368 movdqu(Address(to, 0), xtmp);
11369 movdqu(Address(to, 16), xtmp);
11370 } else {
11371 movq(Address(to, 0), xtmp);
11372 movq(Address(to, 8), xtmp);
11373 movq(Address(to, 16), xtmp);
11374 movq(Address(to, 24), xtmp);
11375 }
11376
11377 addptr(to, 32);
11378 subl(count, 8 << shift);
11379 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
11380 BIND(L_check_fill_8_bytes);
11381 addl(count, 8 << shift);
11382 jccb(Assembler::zero, L_exit);
11383 jmpb(L_fill_8_bytes);
11384
11385 //
11386 // length is too short, just fill qwords
11387 //
11388 BIND(L_fill_8_bytes_loop);
11389 movq(Address(to, 0), xtmp);
11390 addptr(to, 8);
11391 BIND(L_fill_8_bytes);
11392 subl(count, 1 << (shift + 1));
11393 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
11394 }
11395 }
11396 // fill trailing 4 bytes
11397 BIND(L_fill_4_bytes);
11398 testl(count, 1<<shift);
11399 jccb(Assembler::zero, L_fill_2_bytes);
11400 movl(Address(to, 0), value);
11401 if (t == T_BYTE || t == T_SHORT) {
11402 addptr(to, 4);
11403 BIND(L_fill_2_bytes);
11404 // fill trailing 2 bytes
11405 testl(count, 1<<(shift-1));
11406 jccb(Assembler::zero, L_fill_byte);
11407 movw(Address(to, 0), value);
11408 if (t == T_BYTE) {
11409 addptr(to, 2);
11410 BIND(L_fill_byte);
11411 // fill trailing byte
11412 testl(count, 1);
11413 jccb(Assembler::zero, L_exit);
11414 movb(Address(to, 0), value);
11415 } else {
11416 BIND(L_fill_byte);
11417 }
11418 } else {
11419 BIND(L_fill_2_bytes);
11420 }
11421 BIND(L_exit);
11422 }
11423 #undef BIND
11424 #undef BLOCK_COMMENT
11425
11426
11427 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
11428 switch (cond) {
11429 // Note some conditions are synonyms for others
11430 case Assembler::zero: return Assembler::notZero;
11431 case Assembler::notZero: return Assembler::zero;
11432 case Assembler::less: return Assembler::greaterEqual;
11433 case Assembler::lessEqual: return Assembler::greater;
11434 case Assembler::greater: return Assembler::lessEqual;
11435 case Assembler::greaterEqual: return Assembler::less;
11436 case Assembler::below: return Assembler::aboveEqual;
11437 case Assembler::belowEqual: return Assembler::above;
11438 case Assembler::above: return Assembler::belowEqual;
11439 case Assembler::aboveEqual: return Assembler::below;
11440 case Assembler::overflow: return Assembler::noOverflow;
11441 case Assembler::noOverflow: return Assembler::overflow;
11442 case Assembler::negative: return Assembler::positive;
11443 case Assembler::positive: return Assembler::negative;
11444 case Assembler::parity: return Assembler::noParity;
11445 case Assembler::noParity: return Assembler::parity;
11446 }
11447 ShouldNotReachHere(); return Assembler::overflow;
11448 }
11449
11450 SkipIfEqual::SkipIfEqual(
11451 MacroAssembler* masm, const bool* flag_addr, bool value) {
11452 _masm = masm;
11453 _masm->cmp8(ExternalAddress((address)flag_addr), value);
11454 _masm->jcc(Assembler::equal, _label);
11455 }
11456
11457 SkipIfEqual::~SkipIfEqual() {
11458 _masm->bind(_label);
11459 }
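// Editor's note: SkipIfEqual is an RAII guard; a hedged usage sketch
// (the flag name is hypothetical):
//
//   {
//     SkipIfEqual skip(masm, &SomeDebugFlag, false);
//     // ...instructions emitted here are jumped over at runtime
//     // whenever SomeDebugFlag == false...
//   } // destructor binds the label, resuming normal code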