comparison src/cpu/x86/vm/assembler_x86.cpp @ 628:7bb995fbd3c0

Merge
author:    trims
date:      Thu, 12 Mar 2009 18:16:36 -0700
parents:   0fbdb4381b99 337400e7a5dd
children:  bd441136a5ce
comparing 580:ce2272390558 with 628:7bb995fbd3c0
@@ -127,17 +127,23 @@
 
 
 // Convert the raw encoding form into the form expected by the constructor for
 // Address. An index of 4 (rsp) corresponds to having no index, so convert
 // that to noreg for the Address constructor.
-Address Address::make_raw(int base, int index, int scale, int disp) {
+Address Address::make_raw(int base, int index, int scale, int disp, bool disp_is_oop) {
+  RelocationHolder rspec;
+  if (disp_is_oop) {
+    rspec = Relocation::spec_simple(relocInfo::oop_type);
+  }
   bool valid_index = index != rsp->encoding();
   if (valid_index) {
     Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
+    madr._rspec = rspec;
     return madr;
   } else {
     Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
+    madr._rspec = rspec;
     return madr;
   }
 }
 
 // Implementation of Assembler
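
Note on the change above: the new disp_is_oop flag only attaches an oop relocation to the displacement; the base/index decoding itself is unchanged, and an index encoding of 4 (rsp) still means "no index register". A minimal standalone sketch of that decoding convention, using plain ints in place of HotSpot's Register and Address types (all names below are illustrative, not HotSpot API):

    #include <cstdio>

    // Illustrative stand-ins only; not HotSpot's Register/Address classes.
    struct RawAddr { int base; int index; int scale; int disp; bool has_index; };

    // Mirrors the convention in Address::make_raw: an index encoding of 4 (rsp)
    // in a SIB byte means "no index register", so it maps to "no index" here.
    RawAddr decode_raw(int base, int index, int scale, int disp) {
      RawAddr a;
      a.base = base;
      a.disp = disp;
      if (index != 4 /* rsp encoding */) {
        a.index = index;  a.scale = scale;  a.has_index = true;
      } else {
        a.index = -1;     a.scale = 0;      a.has_index = false;
      }
      return a;
    }

    int main() {
      RawAddr a = decode_raw(/*base=*/3 /*rbx*/, /*index=*/4 /*rsp*/, /*scale=*/0, /*disp=*/16);
      printf("has_index=%d disp=%d\n", a.has_index, a.disp);  // has_index=0 disp=16
      return 0;
    }
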
@@ -3890,10 +3896,25 @@
   prefixq(dst, src);
   emit_byte(0x89);
   emit_operand(src, dst);
 }
 
+void Assembler::movsbq(Register dst, Address src) {
+  InstructionMark im(this);
+  prefixq(src, dst);
+  emit_byte(0x0F);
+  emit_byte(0xBE);
+  emit_operand(dst, src);
+}
+
+void Assembler::movsbq(Register dst, Register src) {
+  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
+  emit_byte(0x0F);
+  emit_byte(0xBE);
+  emit_byte(0xC0 | encode);
+}
+
 void Assembler::movslq(Register dst, int32_t imm32) {
   // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx)
   // and movslq(r8, 3); as movl $0x0000000048000000,(%rbx)
   // as a result we shouldn't use until tested at runtime...
   ShouldNotReachHere();
@@ -3920,10 +3941,55 @@
 }
 
 void Assembler::movslq(Register dst, Register src) {
   int encode = prefixq_and_encode(dst->encoding(), src->encoding());
   emit_byte(0x63);
+  emit_byte(0xC0 | encode);
+}
+
+void Assembler::movswq(Register dst, Address src) {
+  InstructionMark im(this);
+  prefixq(src, dst);
+  emit_byte(0x0F);
+  emit_byte(0xBF);
+  emit_operand(dst, src);
+}
+
+void Assembler::movswq(Register dst, Register src) {
+  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
+  emit_byte(0x0F);
+  emit_byte(0xBF);
+  emit_byte(0xC0 | encode);
+}
+
+void Assembler::movzbq(Register dst, Address src) {
+  InstructionMark im(this);
+  prefixq(src, dst);
+  emit_byte(0x0F);
+  emit_byte(0xB6);
+  emit_operand(dst, src);
+}
+
+void Assembler::movzbq(Register dst, Register src) {
+  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
+  emit_byte(0x0F);
+  emit_byte(0xB6);
+  emit_byte(0xC0 | encode);
+}
+
+void Assembler::movzwq(Register dst, Address src) {
+  InstructionMark im(this);
+  prefixq(src, dst);
+  emit_byte(0x0F);
+  emit_byte(0xB7);
+  emit_operand(dst, src);
+}
+
+void Assembler::movzwq(Register dst, Register src) {
+  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
+  emit_byte(0x0F);
+  emit_byte(0xB7);
   emit_byte(0xC0 | encode);
 }
 
 void Assembler::negq(Register dst) {
   int encode = prefixq_and_encode(dst->encoding());
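
A note on the register-to-register encoders added above: prefixq_and_encode emits the REX.W prefix and packs the destination and source register numbers, and emit_byte(0xC0 | encode) then forms a ModRM byte with mod=11. A worked byte-level example, checked with a small standalone program (encode_reg_rm is a simplified stand-in for the real helper and ignores the REX.R/REX.B bits needed for r8-r15):

    #include <cstdio>

    // Simplified model of the reg/rm packing done before OR-ing with 0xC0.
    // The real prefixq_and_encode also sets REX.R/REX.B for extended registers;
    // this sketch only covers rax..rdi.
    int encode_reg_rm(int reg_enc, int rm_enc) { return (reg_enc << 3) | rm_enc; }

    int main() {
      // movsbq(rax, rbx), i.e. movsx rax, bl
      int rax = 0, rbx = 3;
      unsigned char bytes[4] = {
        0x48,                                              // REX.W (64-bit operand size)
        0x0F, 0xBE,                                        // MOVSX r64, r/m8
        (unsigned char)(0xC0 | encode_reg_rm(rax, rbx))    // ModRM: mod=11, reg=rax, rm=rbx
      };
      for (int i = 0; i < 4; i++) printf("%02X ", bytes[i]);  // 48 0F BE C3
      printf("\n");
      return 0;
    }
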
@@ -6195,21 +6261,24 @@
     sarl(dst, 24);
   }
   return off;
 }
 
-// word => int32 which seems bad for 64bit
-int MacroAssembler::load_signed_word(Register dst, Address src) {
+// Note: load_signed_short used to be called load_signed_word.
+// Although the 'w' in x86 opcodes refers to the term "word" in the assembler
+// manual, which means 16 bits, that usage is found nowhere in HotSpot code.
+// The term "word" in HotSpot means a 32- or 64-bit machine word.
+int MacroAssembler::load_signed_short(Register dst, Address src) {
   int off;
   if (LP64_ONLY(true ||) VM_Version::is_P6()) {
     // This is dubious to me since it seems safe to do a signed 16 => 64 bit
     // version but this is what 64bit has always done. This seems to imply
     // that users are only using 32bits worth.
     off = offset();
     movswl(dst, src); // movsxw
   } else {
-    off = load_unsigned_word(dst, src);
+    off = load_unsigned_short(dst, src);
     shll(dst, 16);
     sarl(dst, 16);
   }
   return off;
 }
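
On the pre-P6 fallback path above, shll(dst, 16) followed by sarl(dst, 16) sign-extends the zero-extended halfword: the left shift moves the halfword's sign bit into bit 31, and the arithmetic right shift replicates it back down. A standalone check of that arithmetic (this sketch assumes an arithmetic right shift for negative values, as on x86):

    #include <cassert>
    #include <cstdint>

    // Sign-extend a zero-extended 16-bit value the way the pre-P6 fallback does.
    int32_t sign_extend_16(uint16_t zero_extended) {
      uint32_t shifted = (uint32_t)zero_extended << 16;  // 0x0000FFFF -> 0xFFFF0000
      return (int32_t)shifted >> 16;                     // arithmetic shift -> 0xFFFFFFFF == -1
    }

    int main() {
      assert(sign_extend_16(0xFFFF) == -1);
      assert(sign_extend_16(0x7FFF) ==  32767);
      assert(sign_extend_16(0x8000) == -32768);
      return 0;
    }
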
@@ -6227,11 +6296,12 @@
     movb(dst, src);
   }
   return off;
 }
 
-int MacroAssembler::load_unsigned_word(Register dst, Address src) {
+// Note: load_unsigned_short used to be called load_unsigned_word.
+int MacroAssembler::load_unsigned_short(Register dst, Address src) {
   // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
   // and "3.9 Partial Register Penalties", p. 22).
   int off;
   if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
     off = offset();
@@ -6240,10 +6310,32 @@
     xorl(dst, dst);
     off = offset();
     movw(dst, src);
   }
   return off;
+}
+
+void MacroAssembler::load_sized_value(Register dst, Address src,
+                                      int size_in_bytes, bool is_signed) {
+  switch (size_in_bytes ^ (is_signed ? -1 : 0)) {
+#ifndef _LP64
+  // For case 8, caller is responsible for manually loading
+  // the second word into another register.
+  case ~8:  // fall through:
+  case  8:  movl( dst, src ); break;
+#else
+  case ~8:  // fall through:
+  case  8:  movq( dst, src ); break;
+#endif
+  case ~4:  // fall through:
+  case  4:  movl( dst, src ); break;
+  case ~2:  load_signed_short( dst, src ); break;
+  case  2:  load_unsigned_short( dst, src ); break;
+  case ~1:  load_signed_byte( dst, src ); break;
+  case  1:  load_unsigned_byte( dst, src ); break;
+  default:  ShouldNotReachHere();
+  }
 }
 
 void MacroAssembler::mov32(AddressLiteral dst, Register src) {
   if (reachable(dst)) {
     movl(as_Address(dst), src);
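
The new load_sized_value folds size and signedness into a single switch key: XOR with -1 is bitwise NOT, so a signed load of n bytes selects case ~n while an unsigned load selects case n (for example, 2 ^ -1 == -3 == ~2). A standalone illustration of that dispatch, with plain functions standing in for the MacroAssembler loads (names are illustrative only):

    #include <cstdio>

    // Mimics load_sized_value's key: size_in_bytes ^ (is_signed ? -1 : 0).
    // XOR with -1 is bitwise NOT, so signed sizes land on the ~n case labels.
    const char* pick_load(int size_in_bytes, bool is_signed) {
      switch (size_in_bytes ^ (is_signed ? -1 : 0)) {
      case ~8: case 8: return "64-bit load";
      case ~4: case 4: return "32-bit load";
      case ~2:         return "load_signed_short";
      case  2:         return "load_unsigned_short";
      case ~1:         return "load_signed_byte";
      case  1:         return "load_unsigned_byte";
      default:         return "ShouldNotReachHere";
      }
    }

    int main() {
      printf("%s\n", pick_load(2, true));   // load_signed_short  (2 ^ -1 == ~2)
      printf("%s\n", pick_load(1, false));  // load_unsigned_byte
      return 0;
    }

Keeping one entry point that dispatches on (size, signedness) lets a caller describe a field once instead of choosing among six separate load helpers.
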
@@ -6461,11 +6553,12 @@
   andl(tmp, (os::vm_page_size() - sizeof(int)));
 
   Address index(noreg, tmp, Address::times_1);
   ExternalAddress page(os::get_memory_serialize_page());
 
-  movptr(ArrayAddress(page, index), tmp);
+  // Size of store must match masking code above
+  movl(as_Address(ArrayAddress(page, index)), tmp);
 }
 
 // Calls to C land
 //
 // When entering C land, the rbp, & rsp of the last Java frame have to be recorded
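
The movl above is deliberately a 4-byte store: the earlier andl(tmp, os::vm_page_size() - sizeof(int)) keeps only int-aligned offsets no larger than page_size - 4, so a 4-byte store can never run past the serialize page, while the old movptr would have been an 8-byte store on 64-bit. A quick standalone check of that mask arithmetic, assuming a 4096-byte page and a 4-byte int:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t page_size = 4096;                 // assumed os::vm_page_size()
      const uint32_t mask = page_size - sizeof(int);   // 4092 == 0xFFC
      // Every masked offset is a multiple of 4 and at most page_size - 4,
      // so a 4-byte store at that offset stays inside the page.
      for (uint32_t raw = 0; raw < 3 * page_size; raw += 7) {
        uint32_t off = raw & mask;
        assert(off % sizeof(int) == 0);
        assert(off + sizeof(int) <= page_size);
      }
      return 0;
    }
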
@@ -7047,10 +7140,85 @@
     pop(tmp);
   }
 }
 
 
+// Look up the method for a megamorphic invokeinterface call.
+// The target method is determined by <intf_klass, itable_index>.
+// The receiver klass is in recv_klass.
+// On success, the result will be in method_result, and execution falls through.
+// On failure, execution transfers to the given label.
+void MacroAssembler::lookup_interface_method(Register recv_klass,
+                                             Register intf_klass,
+                                             RegisterConstant itable_index,
+                                             Register method_result,
+                                             Register scan_temp,
+                                             Label& L_no_such_interface) {
+  assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
+  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
+         "caller must use same register for non-constant itable index as for method");
+
+  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
+  int vtable_base = instanceKlass::vtable_start_offset() * wordSize;
+  int itentry_off = itableMethodEntry::method_offset_in_bytes();
+  int scan_step   = itableOffsetEntry::size() * wordSize;
+  int vte_size    = vtableEntry::size() * wordSize;
+  Address::ScaleFactor times_vte_scale = Address::times_ptr;
+  assert(vte_size == wordSize, "else adjust times_vte_scale");
+
+  movl(scan_temp, Address(recv_klass, instanceKlass::vtable_length_offset() * wordSize));
+
+  // %%% Could store the aligned, prescaled offset in the klassoop.
+  lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
+  if (HeapWordsPerLong > 1) {
+    // Round up to align_object_offset boundary
+    // see code for instanceKlass::start_of_itable!
+    round_to(scan_temp, BytesPerLong);
+  }
+
+  // Adjust recv_klass by scaled itable_index, so we can free itable_index.
+  assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
+  lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
+
+  // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
+  //   if (scan->interface() == intf) {
+  //     result = (klass + scan->offset() + itable_index);
+  //   }
+  // }
+  Label search, found_method;
+
+  for (int peel = 1; peel >= 0; peel--) {
+    movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
+    cmpptr(intf_klass, method_result);
+
+    if (peel) {
+      jccb(Assembler::equal, found_method);
+    } else {
+      jccb(Assembler::notEqual, search);
+      // (invert the test to fall through to found_method...)
+    }
+
+    if (!peel) break;
+
+    bind(search);
+
+    // Check that the previous entry is non-null. A null entry means that
+    // the receiver class doesn't implement the interface, and wasn't the
+    // same as when the caller was compiled.
+    testptr(method_result, method_result);
+    jcc(Assembler::zero, L_no_such_interface);
+    addptr(scan_temp, scan_step);
+  }
+
+  bind(found_method);
+
+  // Got a hit.
+  movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
+  movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
+}
+
+
 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
   ucomisd(dst, as_Address(src));
 }
 
 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
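
The code generated above is a peeled form of the linear itable scan sketched in the comment: the first (peeled) iteration branches to found_method on a hit, later iterations loop back through the search label, and a null interface entry terminates the table. Peeling lets the common case, a hit on the first entry, fall straight through without an extra jump. A plain-C++ model of the same search over a simplified itable (the row layout and types are illustrative stand-ins, not HotSpot's itableOffsetEntry):

    #include <cstdio>

    // Illustrative stand-ins: each itable row pairs an interface id with that
    // interface's block of method pointers in the receiver klass.
    struct Method    { const char* name; };
    struct ItableRow { int interface_id; const Method* const* methods; };

    // Equivalent of the generated scan: walk rows until the interface matches,
    // then index into its method block; interface_id == 0 terminates the table.
    const Method* lookup_interface_method(const ItableRow* scan, int intf_id, int itable_index) {
      for (; scan->interface_id != 0; scan++) {
        if (scan->interface_id == intf_id)
          return scan->methods[itable_index];   // "got a hit"
      }
      return nullptr;                           // L_no_such_interface
    }

    int main() {
      static const Method m0 = { "hashCode" };
      static const Method m1 = { "equals" };
      static const Method* const blockA[] = { &m0, &m1 };
      static const ItableRow itable[]     = { { 7, blockA }, { 0, nullptr } };
      const Method* m = lookup_interface_method(itable, /*intf_id=*/7, /*itable_index=*/1);
      printf("%s\n", m ? m->name : "no such interface");  // prints "equals"
      return 0;
    }
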
@@ -7089,10 +7257,35 @@
   movptr(rax, buffer.addr());
   push(rax);
   // call indirectly to solve generation ordering problem
   movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
   call(rax);
+}
+
+
+RegisterConstant MacroAssembler::delayed_value(intptr_t* delayed_value_addr,
+                                               Register tmp,
+                                               int offset) {
+  intptr_t value = *delayed_value_addr;
+  if (value != 0)
+    return RegisterConstant(value + offset);
+
+  // load indirectly to solve generation ordering problem
+  movptr(tmp, ExternalAddress((address) delayed_value_addr));
+
+#ifdef ASSERT
+  Label L;
+  testl(tmp, tmp);
+  jccb(Assembler::notZero, L);
+  hlt();
+  bind(L);
+#endif
+
+  if (offset != 0)
+    addptr(tmp, offset);
+
+  return RegisterConstant(tmp);
 }
 
 
 void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
   if (!VerifyOops) return;
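
One more note, on the delayed_value helper added above: it returns either an immediate or a register. If the slot at delayed_value_addr is already non-zero at code-generation time, the value (plus offset) is folded into a constant RegisterConstant; otherwise code is emitted to load it indirectly into tmp at run time, which sidesteps the generation-ordering problem the comment mentions. A standalone sketch of that either-constant-or-register idea (ConstOrReg is an illustrative model, not HotSpot's RegisterConstant):

    #include <cstdio>

    // Illustrative model: a value that is either a compile-time constant or
    // "whatever register reg holds at run time".
    struct ConstOrReg {
      bool is_constant;
      long constant;   // meaningful only when is_constant
      int  reg;        // register number, meaningful only when !is_constant
    };

    // Mirrors delayed_value's idea: fold the value (plus offset) into a constant
    // if it is already known, otherwise defer to an indirect load into tmp_reg.
    ConstOrReg delayed_value(const long* delayed_value_addr, int tmp_reg, int offset) {
      long value = *delayed_value_addr;
      if (value != 0)
        return ConstOrReg{ true, value + offset, -1 };
      // The real code emits movptr(tmp, ExternalAddress(delayed_value_addr)) here,
      // plus addptr(tmp, offset) when offset != 0.
      return ConstOrReg{ false, 0, tmp_reg };
    }

    int main() {
      long slot = 0;                                      // not yet known at "generation time"
      ConstOrReg a = delayed_value(&slot, /*tmp_reg=*/11, /*offset=*/8);
      slot = 1000;                                        // later, the runtime fills the slot
      ConstOrReg b = delayed_value(&slot, /*tmp_reg=*/11, /*offset=*/8);
      printf("a: %s, b: constant %ld\n",
             a.is_constant ? "constant" : "register", b.constant);
      return 0;
    }
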