comparison src/cpu/x86/vm/x86_64.ad @ 12355:cefad50507d8

Merge with hs25-b53
author Gilles Duboscq <duboscq@ssw.jku.at>
date Fri, 11 Oct 2013 10:38:03 +0200
parents 268e7a2178d7
children 59e8ad757e19
comparison: 12058:ccb4f2af2319 vs 12355:cefad50507d8
@@ -527,11 +527,11 @@
 void emit_d32_reloc(CodeBuffer& cbuf, int d32, RelocationHolder const& rspec, int format) {
 #ifdef ASSERT
   if (rspec.reloc()->type() == relocInfo::oop_type &&
       d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
     assert(Universe::heap()->is_in_reserved((address)(intptr_t)d32), "should be real oop");
-    assert(oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
+    assert(cast_to_oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !cast_to_oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
   }
 #endif
   cbuf.relocate(cbuf.insts_mark(), rspec, format);
   cbuf.insts()->emit_int32(d32);
 }
@@ -554,11 +554,11 @@
 void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, RelocationHolder const& rspec, int format) {
 #ifdef ASSERT
   if (rspec.reloc()->type() == relocInfo::oop_type &&
       d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
     assert(Universe::heap()->is_in_reserved((address)d64), "should be real oop");
-    assert(oop(d64)->is_oop() && (ScavengeRootsInCode || !oop(d64)->is_scavengable()),
+    assert(cast_to_oop(d64)->is_oop() && (ScavengeRootsInCode || !cast_to_oop(d64)->is_scavengable()),
            "cannot embed scavengable oops in code");
   }
 #endif
   cbuf.relocate(cbuf.insts_mark(), rspec, format);
   cbuf.insts()->emit_int64(d64);
 }
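
Both relocation helpers switch from a raw oop(...) cast to cast_to_oop(...). The sketch below is an approximation of that helper's shape (it lives in oopsHierarchy.hpp; this is an illustration, not the verbatim HotSpot definition). The point of the change is that in CHECK_UNHANDLED_OOPS builds the oop type is a wrapper class rather than a plain pointer, so converting an integral value to an oop has to go through one dedicated conversion function instead of a C-style cast.

    // Approximate shape of the conversion helper (assumption: abridged from
    // oopsHierarchy.hpp of this era, not copied verbatim).
    template <class T>
    inline oop cast_to_oop(T value) {
      return (oop)(CHECK_UNHANDLED_OOPS_ONLY((void*))(value));
    }
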
@@ -1389,11 +1389,11 @@
 
 //=============================================================================
 #ifndef PRODUCT
 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
     st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
     st->print_cr("\tcmpq rax, rscratch1\t # Inline cache check");
   } else {
     st->print_cr("\tcmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t"
@@ -1406,11 +1406,11 @@
 
 void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
 {
   MacroAssembler masm(&cbuf);
   uint insts_size = cbuf.insts_size();
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     masm.load_klass(rscratch1, j_rarg0);
     masm.cmpptr(rax, rscratch1);
   } else {
     masm.cmpptr(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
   }
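
These two hunks only pick up the flag rename from UseCompressedKlassPointers to UseCompressedClassPointers; the unverified entry point logic is unchanged. For orientation, the sketch below shows roughly what masm.load_klass(rscratch1, j_rarg0) expands to when class pointers are compressed. The helper name load_klass_sketch is mine, and the body is a simplification of the x86_64 MacroAssembler, not the actual source: a 32-bit load of the narrow Klass* followed by a decode.

    // Rough, hand-written sketch (assumption: simplified from MacroAssembler
    // on x86_64) of load_klass under compressed class pointers.
    void load_klass_sketch(MacroAssembler* masm, Register dst, Register src) {
      masm->movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); // narrow Klass*
      masm->decode_klass_not_null(dst);  // rebase/shift back to a full Klass*
    }
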
@@ -1555,11 +1555,11 @@
   assert(UseCompressedOops, "only for compressed oops code");
   return (LogMinObjAlignmentInBytes <= 3);
 }
 
 bool Matcher::narrow_klass_use_complex_address() {
-  assert(UseCompressedKlassPointers, "only for compressed klass code");
+  assert(UseCompressedClassPointers, "only for compressed klass code");
   return (LogKlassAlignmentInBytes <= 3);
 }
 
 // Is it better to copy float constants, or load them directly from
 // memory? Intel can load a float constant from a direct address,
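
The "<= 3" checks in narrow_klass_use_complex_address() (and in the narrow-oop variant whose tail is visible just above) come from x86 addressing modes: an index register can only be scaled by 1, 2, 4 or 8, i.e. shifted by at most 3 bits, so a decode of the form base + (narrow << shift) can be folded into the address of the memory access itself. A minimal illustration of the decode (decode_narrow is my own name, not matcher code):

    // Decoding a 32-bit narrow oop/klass value against a base and shift.
    // When shift <= 3 the "<< shift" maps onto an x86 scale factor, so the
    // whole expression can become one addressing mode such as [base + idx*8].
    #include <cstdint>

    static inline uintptr_t decode_narrow(uint32_t narrow, uintptr_t base, unsigned shift) {
      return base + ((uintptr_t)narrow << shift);
    }
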
@@ -1645,10 +1645,18 @@
   return LONG_RDX_REG_mask();
 }
 
 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
   return PTR_RBP_REG_mask();
+}
+
+const RegMask Matcher::mathExactI_result_proj_mask() {
+  return INT_RAX_REG_mask();
+}
+
+const RegMask Matcher::mathExactI_flags_proj_mask() {
+  return INT_FLAGS_mask();
 }
 
 %}
 
 //----------ENCODING BLOCK-----------------------------------------------------
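
The two new Matcher masks describe where the outputs of the math-exact intrinsic nodes live: the integer result is pinned to RAX (matching the rax_RegI operand used by the addExactI rules further down) and the overflow indication sits in the condition-flags register. Semantically these nodes implement Math.addExact(int, int). A plain C++ rendering of that contract, written here purely as an illustration (add_exact_int is my own name; Java throws ArithmeticException where this sketch returns false):

    // Illustrative only: the int overflow check that AddExactI models,
    // done by widening the addition to 64 bits.
    #include <cstdint>

    static bool add_exact_int(int32_t a, int32_t b, int32_t* result) {
      int64_t wide = (int64_t)a + (int64_t)b;
      if (wide != (int64_t)(int32_t)wide) {
        return false;               // result overflowed 32 bits
      }
      *result = (int32_t)wide;
      return true;
    }
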
@@ -4131,10 +4139,12 @@
     not_equal(0x5, "ne");
     less(0xC, "l");
     greater_equal(0xD, "ge");
     less_equal(0xE, "le");
     greater(0xF, "g");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
 // Comparison Code, unsigned compare. Used by FP also, with
 // C2 (unordered) turned into GT or LT already. The other bits
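
The overflow(0x0, "o") and no_overflow(0x1, "no") entries added to each cmpOp operand use the standard x86 condition-code ("tttn") nibbles that Jcc/SETcc/CMOVcc encode, so the new conditions are emitted exactly like the existing ones. For reference, the mapping used in these interfaces looks roughly like this (assumption: it mirrors Assembler::Condition in assembler_x86.hpp):

    // x86 condition-code nibbles as used in the cmpOp interfaces above.
    enum Condition {
      overflow     = 0x0,  // jo
      noOverflow   = 0x1,  // jno
      below        = 0x2,  // jb   (unsigned <)
      aboveEqual   = 0x3,  // jae  (unsigned >=)
      equal        = 0x4,  // je
      notEqual     = 0x5,  // jne
      belowEqual   = 0x6,  // jbe  (unsigned <=)
      above        = 0x7,  // ja   (unsigned >)
      less         = 0xC,  // jl   (signed <)
      greaterEqual = 0xD,  // jge
      lessEqual    = 0xE,  // jle
      greater      = 0xF   // jg
    };
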
@@ -4149,10 +4159,12 @@
     not_equal(0x5, "ne");
     less(0x2, "b");
     greater_equal(0x3, "nb");
     less_equal(0x6, "be");
     greater(0x7, "nbe");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
 
 // Floating comparisons that don't require any fixup for the unordered case
@@ -4168,10 +4180,12 @@
     not_equal(0x5, "ne");
     less(0x2, "b");
     greater_equal(0x3, "nb");
     less_equal(0x6, "be");
     greater(0x7, "nbe");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
 
 // Floating comparisons that can be fixed up with extra conditional jumps
@@ -4185,10 +4199,12 @@
     not_equal(0x5, "ne");
     less(0x2, "b");
     greater_equal(0x3, "nb");
     less_equal(0x6, "be");
     greater(0x7, "nbe");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
 
 //----------OPERAND CLASSES----------------------------------------------------
@@ -6919,10 +6935,34 @@
 %}
 %}
 
 //----------Arithmetic Instructions--------------------------------------------
 //----------Addition Instructions----------------------------------------------
+
+instruct addExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr)
+%{
+  match(AddExactI dst src);
+  effect(DEF cr);
+
+  format %{ "addl $dst, $src\t# addExact int" %}
+  ins_encode %{
+    __ addl($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct addExactI_rReg_imm(rax_RegI dst, immI src, rFlagsReg cr)
+%{
+  match(AddExactI dst src);
+  effect(DEF cr);
+
+  format %{ "addl $dst, $src\t# addExact int" %}
+  ins_encode %{
+    __ addl($dst$$Register, $src$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
 
 instruct addI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
 %{
   match(Set dst (AddI dst src));
   effect(KILL cr);
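
Unlike the plain addI rules below, the new addExactI rules use effect(DEF cr) rather than KILL: the flags register is a real output of the rule, and a later branch matched on the new overflow condition consumes it, so at runtime the sequence is essentially an addl followed by a jo. The small standalone program below exercises the same behavior in plain C++ (assumption: GCC or Clang for the __builtin_add_overflow builtin; HotSpot itself emits the instructions directly rather than relying on any such builtin):

    // Behavioral sketch of addExactI plus a branch on overflow.
    #include <cstdio>

    int main() {
      int sum;
      if (__builtin_add_overflow(2000000000, 2000000000, &sum)) {
        std::puts("overflow -> Math.addExact would throw ArithmeticException");
      } else {
        std::printf("sum = %d\n", sum);
      }
      return 0;
    }
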