Mercurial > hg > truffle
comparison src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp @ 727:6b2273dd6fa9
6822110: Add AddressLiteral class on SPARC
Summary: The Address class on SPARC currently handles both addresses and address literals, which makes the Address class more complicated than it has to be.
Reviewed-by: never, kvn
author | twisti |
---|---|
date | Tue, 21 Apr 2009 11:16:30 -0700 |
parents | c89f86385056 |
children | c96bf21b756f |
comparison
equal
deleted
inserted
replaced
725:928912ce8438 | 727:6b2273dd6fa9 |
---|---|
194 int slot_offset = monitor_offset - ((i * BasicObjectLock::size()) * BytesPerWord); | 194 int slot_offset = monitor_offset - ((i * BasicObjectLock::size()) * BytesPerWord); |
195 #ifdef ASSERT | 195 #ifdef ASSERT |
196 // verify the interpreter's monitor has a non-null object | 196 // verify the interpreter's monitor has a non-null object |
197 { | 197 { |
198 Label L; | 198 Label L; |
199 __ ld_ptr(Address(OSR_buf, 0, slot_offset + BasicObjectLock::obj_offset_in_bytes()), O7); | 199 __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7); |
200 __ cmp(G0, O7); | 200 __ cmp(G0, O7); |
201 __ br(Assembler::notEqual, false, Assembler::pt, L); | 201 __ br(Assembler::notEqual, false, Assembler::pt, L); |
202 __ delayed()->nop(); | 202 __ delayed()->nop(); |
203 __ stop("locked object is NULL"); | 203 __ stop("locked object is NULL"); |
204 __ bind(L); | 204 __ bind(L); |
205 } | 205 } |
206 #endif // ASSERT | 206 #endif // ASSERT |
207 // Copy the lock field into the compiled activation. | 207 // Copy the lock field into the compiled activation. |
208 __ ld_ptr(Address(OSR_buf, 0, slot_offset + BasicObjectLock::lock_offset_in_bytes()), O7); | 208 __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes(), O7); |
209 __ st_ptr(O7, frame_map()->address_for_monitor_lock(i)); | 209 __ st_ptr(O7, frame_map()->address_for_monitor_lock(i)); |
210 __ ld_ptr(Address(OSR_buf, 0, slot_offset + BasicObjectLock::obj_offset_in_bytes()), O7); | 210 __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7); |
211 __ st_ptr(O7, frame_map()->address_for_monitor_object(i)); | 211 __ st_ptr(O7, frame_map()->address_for_monitor_object(i)); |
212 } | 212 } |
213 } | 213 } |
214 } | 214 } |
215 | 215 |
236 | 236 |
237 int value_offset = java_lang_String:: value_offset_in_bytes(); // char array | 237 int value_offset = java_lang_String:: value_offset_in_bytes(); // char array |
238 int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position | 238 int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position |
239 int count_offset = java_lang_String:: count_offset_in_bytes(); | 239 int count_offset = java_lang_String:: count_offset_in_bytes(); |
240 | 240 |
241 __ ld_ptr(Address(str0, 0, value_offset), tmp0); | 241 __ ld_ptr(str0, value_offset, tmp0); |
242 __ ld(Address(str0, 0, offset_offset), tmp2); | 242 __ ld(str0, offset_offset, tmp2); |
243 __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0); | 243 __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0); |
244 __ ld(Address(str0, 0, count_offset), str0); | 244 __ ld(str0, count_offset, str0); |
245 __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2); | 245 __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2); |
246 | 246 |
247 // str1 may be null | 247 // str1 may be null |
248 add_debug_info_for_null_check_here(info); | 248 add_debug_info_for_null_check_here(info); |
249 | 249 |
250 __ ld_ptr(Address(str1, 0, value_offset), tmp1); | 250 __ ld_ptr(str1, value_offset, tmp1); |
251 __ add(tmp0, tmp2, tmp0); | 251 __ add(tmp0, tmp2, tmp0); |
252 | 252 |
253 __ ld(Address(str1, 0, offset_offset), tmp2); | 253 __ ld(str1, offset_offset, tmp2); |
254 __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1); | 254 __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1); |
255 __ ld(Address(str1, 0, count_offset), str1); | 255 __ ld(str1, count_offset, str1); |
256 __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2); | 256 __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2); |
257 __ subcc(str0, str1, O7); | 257 __ subcc(str0, str1, O7); |
258 __ add(tmp1, tmp2, tmp1); | 258 __ add(tmp1, tmp2, tmp1); |
259 } | 259 } |
260 | 260 |
410 #ifdef ASSERT | 410 #ifdef ASSERT |
411 int offset = code_offset(); | 411 int offset = code_offset(); |
412 #endif // ASSERT | 412 #endif // ASSERT |
413 compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset()); | 413 compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset()); |
414 | 414 |
415 Address deopt_blob(G3_scratch, SharedRuntime::deopt_blob()->unpack()); | 415 AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack()); |
416 | 416 |
417 __ JUMP(deopt_blob, 0); // sethi;jmp | 417 __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp |
418 __ delayed()->nop(); | 418 __ delayed()->nop(); |
419 | 419 |
420 assert(code_offset() - offset <= deopt_handler_size, "overflow"); | 420 assert(code_offset() - offset <= deopt_handler_size, "overflow"); |
421 | 421 |
422 debug_only(__ stop("should have gone to the caller");) | 422 debug_only(__ stop("should have gone to the caller");) |
439 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) { | 439 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) { |
440 // Allocate a new index in oop table to hold the oop once it's been patched | 440 // Allocate a new index in oop table to hold the oop once it's been patched |
441 int oop_index = __ oop_recorder()->allocate_index((jobject)NULL); | 441 int oop_index = __ oop_recorder()->allocate_index((jobject)NULL); |
442 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, oop_index); | 442 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, oop_index); |
443 | 443 |
444 Address addr = Address(reg, address(NULL), oop_Relocation::spec(oop_index)); | 444 AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index)); |
445 assert(addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc"); | 445 assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc"); |
446 // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the | 446 // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the |
447 // NULL will be dynamically patched later and the patched value may be large. We must | 447 // NULL will be dynamically patched later and the patched value may be large. We must |
448 // therefore generate the sethi/add as a placeholders | 448 // therefore generate the sethi/add as a placeholders |
449 __ sethi(addr, true); | 449 __ patchable_set(addrlit, reg); |
450 __ add(addr, reg, 0); | |
451 | 450 |
452 patching_epilog(patch, lir_patch_normal, reg, info); | 451 patching_epilog(patch, lir_patch_normal, reg, info); |
453 } | 452 } |
454 | 453 |
455 | 454 |
704 } | 703 } |
705 | 704 |
706 | 705 |
707 void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) { | 706 void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) { |
708 add_debug_info_for_null_check_here(info); | 707 add_debug_info_for_null_check_here(info); |
709 __ ld_ptr(Address(O0, 0, oopDesc::klass_offset_in_bytes()), G3_scratch); | 708 __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch); |
710 if (__ is_simm13(vtable_offset) ) { | 709 if (__ is_simm13(vtable_offset) ) { |
711 __ ld_ptr(G3_scratch, vtable_offset, G5_method); | 710 __ ld_ptr(G3_scratch, vtable_offset, G5_method); |
712 } else { | 711 } else { |
713 // This will generate 2 instructions | 712 // This will generate 2 instructions |
714 __ set(vtable_offset, G5_method); | 713 __ set(vtable_offset, G5_method); |
715 // ld_ptr, set_hi, set | 714 // ld_ptr, set_hi, set |
716 __ ld_ptr(G3_scratch, G5_method, G5_method); | 715 __ ld_ptr(G3_scratch, G5_method, G5_method); |
717 } | 716 } |
718 __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3_scratch); | 717 __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3_scratch); |
719 __ callr(G3_scratch, G0); | 718 __ callr(G3_scratch, G0); |
720 // the peephole pass fills the delay slot | 719 // the peephole pass fills the delay slot |
721 } | 720 } |
722 | 721 |
723 | 722 |
736 case T_ARRAY : // fall through | 735 case T_ARRAY : // fall through |
737 case T_OBJECT: __ ld_ptr(s, disp, d); break; | 736 case T_OBJECT: __ ld_ptr(s, disp, d); break; |
738 default : ShouldNotReachHere(); | 737 default : ShouldNotReachHere(); |
739 } | 738 } |
740 } else { | 739 } else { |
741 __ sethi(disp & ~0x3ff, O7, true); | 740 __ set(disp, O7); |
742 __ add(O7, disp & 0x3ff, O7); | |
743 if (info != NULL) add_debug_info_for_null_check_here(info); | 741 if (info != NULL) add_debug_info_for_null_check_here(info); |
744 load_offset = code_offset(); | 742 load_offset = code_offset(); |
745 switch(ld_type) { | 743 switch(ld_type) { |
746 case T_BOOLEAN: // fall through | 744 case T_BOOLEAN: // fall through |
747 case T_BYTE : __ ldsb(s, O7, d); break; | 745 case T_BYTE : __ ldsb(s, O7, d); break; |
773 case T_ARRAY : // fall through | 771 case T_ARRAY : // fall through |
774 case T_OBJECT: __ st_ptr(value, base, offset); break; | 772 case T_OBJECT: __ st_ptr(value, base, offset); break; |
775 default : ShouldNotReachHere(); | 773 default : ShouldNotReachHere(); |
776 } | 774 } |
777 } else { | 775 } else { |
778 __ sethi(offset & ~0x3ff, O7, true); | 776 __ set(offset, O7); |
779 __ add(O7, offset & 0x3ff, O7); | |
780 if (info != NULL) add_debug_info_for_null_check_here(info); | 777 if (info != NULL) add_debug_info_for_null_check_here(info); |
781 switch (type) { | 778 switch (type) { |
782 case T_BOOLEAN: // fall through | 779 case T_BOOLEAN: // fall through |
783 case T_BYTE : __ stb(value, base, O7); break; | 780 case T_BYTE : __ stb(value, base, O7); break; |
784 case T_CHAR : __ sth(value, base, O7); break; | 781 case T_CHAR : __ sth(value, base, O7); break; |
811 __ ldf(FloatRegisterImpl::S, s, disp , d); | 808 __ ldf(FloatRegisterImpl::S, s, disp , d); |
812 } else { | 809 } else { |
813 __ ldf(w, s, disp, d); | 810 __ ldf(w, s, disp, d); |
814 } | 811 } |
815 } else { | 812 } else { |
816 __ sethi(disp & ~0x3ff, O7, true); | 813 __ set(disp, O7); |
817 __ add(O7, disp & 0x3ff, O7); | |
818 if (info != NULL) add_debug_info_for_null_check_here(info); | 814 if (info != NULL) add_debug_info_for_null_check_here(info); |
819 __ ldf(w, s, O7, d); | 815 __ ldf(w, s, O7, d); |
820 } | 816 } |
821 } | 817 } |
822 | 818 |
837 __ stf(FloatRegisterImpl::S, value , base, offset); | 833 __ stf(FloatRegisterImpl::S, value , base, offset); |
838 } else { | 834 } else { |
839 __ stf(w, value, base, offset); | 835 __ stf(w, value, base, offset); |
840 } | 836 } |
841 } else { | 837 } else { |
842 __ sethi(offset & ~0x3ff, O7, true); | 838 __ set(offset, O7); |
843 __ add(O7, offset & 0x3ff, O7); | |
844 if (info != NULL) add_debug_info_for_null_check_here(info); | 839 if (info != NULL) add_debug_info_for_null_check_here(info); |
845 __ stf(w, value, O7, base); | 840 __ stf(w, value, O7, base); |
846 } | 841 } |
847 } | 842 } |
848 | 843 |
850 int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool unaligned) { | 845 int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool unaligned) { |
851 int store_offset; | 846 int store_offset; |
852 if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) { | 847 if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) { |
853 assert(!unaligned, "can't handle this"); | 848 assert(!unaligned, "can't handle this"); |
854 // for offsets larger than a simm13 we setup the offset in O7 | 849 // for offsets larger than a simm13 we setup the offset in O7 |
855 __ sethi(offset & ~0x3ff, O7, true); | 850 __ set(offset, O7); |
856 __ add(O7, offset & 0x3ff, O7); | |
857 store_offset = store(from_reg, base, O7, type); | 851 store_offset = store(from_reg, base, O7, type); |
858 } else { | 852 } else { |
859 if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register()); | 853 if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register()); |
860 store_offset = code_offset(); | 854 store_offset = code_offset(); |
861 switch (type) { | 855 switch (type) { |
935 int load_offset; | 929 int load_offset; |
936 if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) { | 930 if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) { |
937 assert(base != O7, "destroying register"); | 931 assert(base != O7, "destroying register"); |
938 assert(!unaligned, "can't handle this"); | 932 assert(!unaligned, "can't handle this"); |
939 // for offsets larger than a simm13 we setup the offset in O7 | 933 // for offsets larger than a simm13 we setup the offset in O7 |
940 __ sethi(offset & ~0x3ff, O7, true); | 934 __ set(offset, O7); |
941 __ add(O7, offset & 0x3ff, O7); | |
942 load_offset = load(base, O7, to_reg, type); | 935 load_offset = load(base, O7, to_reg, type); |
943 } else { | 936 } else { |
944 load_offset = code_offset(); | 937 load_offset = code_offset(); |
945 switch(type) { | 938 switch(type) { |
946 case T_BOOLEAN: // fall through | 939 case T_BOOLEAN: // fall through |
1211 } else { | 1204 } else { |
1212 ShouldNotReachHere(); | 1205 ShouldNotReachHere(); |
1213 assert(to_reg->is_single_fpu(), "wrong register kind"); | 1206 assert(to_reg->is_single_fpu(), "wrong register kind"); |
1214 | 1207 |
1215 __ set(con, O7); | 1208 __ set(con, O7); |
1216 Address temp_slot(SP, 0, (frame::register_save_words * wordSize) + STACK_BIAS); | 1209 Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS); |
1217 __ st(O7, temp_slot); | 1210 __ st(O7, temp_slot); |
1218 __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg()); | 1211 __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg()); |
1219 } | 1212 } |
1220 } | 1213 } |
1221 break; | 1214 break; |
1236 __ set(con, to_reg->as_register()); | 1229 __ set(con, to_reg->as_register()); |
1237 #endif | 1230 #endif |
1238 } else { | 1231 } else { |
1239 ShouldNotReachHere(); | 1232 ShouldNotReachHere(); |
1240 assert(to_reg->is_double_fpu(), "wrong register kind"); | 1233 assert(to_reg->is_double_fpu(), "wrong register kind"); |
1241 Address temp_slot_lo(SP, 0, ((frame::register_save_words ) * wordSize) + STACK_BIAS); | 1234 Address temp_slot_lo(SP, ((frame::register_save_words ) * wordSize) + STACK_BIAS); |
1242 Address temp_slot_hi(SP, 0, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS); | 1235 Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS); |
1243 __ set(low(con), O7); | 1236 __ set(low(con), O7); |
1244 __ st(O7, temp_slot_lo); | 1237 __ st(O7, temp_slot_lo); |
1245 __ set(high(con), O7); | 1238 __ set(high(con), O7); |
1246 __ st(O7, temp_slot_hi); | 1239 __ st(O7, temp_slot_hi); |
1247 __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg()); | 1240 __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg()); |
1265 if (const_addr == NULL) { | 1258 if (const_addr == NULL) { |
1266 bailout("const section overflow"); | 1259 bailout("const section overflow"); |
1267 break; | 1260 break; |
1268 } | 1261 } |
1269 RelocationHolder rspec = internal_word_Relocation::spec(const_addr); | 1262 RelocationHolder rspec = internal_word_Relocation::spec(const_addr); |
1263 AddressLiteral const_addrlit(const_addr, rspec); | |
1270 if (to_reg->is_single_fpu()) { | 1264 if (to_reg->is_single_fpu()) { |
1271 __ sethi( (intx)const_addr & ~0x3ff, O7, true, rspec); | 1265 __ patchable_sethi(const_addrlit, O7); |
1272 __ relocate(rspec); | 1266 __ relocate(rspec); |
1273 | 1267 __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg()); |
1274 int offset = (intx)const_addr & 0x3ff; | |
1275 __ ldf (FloatRegisterImpl::S, O7, offset, to_reg->as_float_reg()); | |
1276 | 1268 |
1277 } else { | 1269 } else { |
1278 assert(to_reg->is_single_cpu(), "Must be a cpu register."); | 1270 assert(to_reg->is_single_cpu(), "Must be a cpu register."); |
1279 | 1271 |
1280 __ set((intx)const_addr, O7, rspec); | 1272 __ set(const_addrlit, O7); |
1281 load(O7, 0, to_reg->as_register(), T_INT); | 1273 load(O7, 0, to_reg->as_register(), T_INT); |
1282 } | 1274 } |
1283 } | 1275 } |
1284 break; | 1276 break; |
1285 | 1277 |
1291 break; | 1283 break; |
1292 } | 1284 } |
1293 RelocationHolder rspec = internal_word_Relocation::spec(const_addr); | 1285 RelocationHolder rspec = internal_word_Relocation::spec(const_addr); |
1294 | 1286 |
1295 if (to_reg->is_double_fpu()) { | 1287 if (to_reg->is_double_fpu()) { |
1296 __ sethi( (intx)const_addr & ~0x3ff, O7, true, rspec); | 1288 AddressLiteral const_addrlit(const_addr, rspec); |
1297 int offset = (intx)const_addr & 0x3ff; | 1289 __ patchable_sethi(const_addrlit, O7); |
1298 __ relocate(rspec); | 1290 __ relocate(rspec); |
1299 __ ldf (FloatRegisterImpl::D, O7, offset, to_reg->as_double_reg()); | 1291 __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg()); |
1300 } else { | 1292 } else { |
1301 assert(to_reg->is_double_cpu(), "Must be a long register."); | 1293 assert(to_reg->is_double_cpu(), "Must be a long register."); |
1302 #ifdef _LP64 | 1294 #ifdef _LP64 |
1303 __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo()); | 1295 __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo()); |
1304 #else | 1296 #else |
1315 } | 1307 } |
1316 } | 1308 } |
1317 | 1309 |
1318 Address LIR_Assembler::as_Address(LIR_Address* addr) { | 1310 Address LIR_Assembler::as_Address(LIR_Address* addr) { |
1319 Register reg = addr->base()->as_register(); | 1311 Register reg = addr->base()->as_register(); |
1320 return Address(reg, 0, addr->disp()); | 1312 return Address(reg, addr->disp()); |
1321 } | 1313 } |
1322 | 1314 |
1323 | 1315 |
1324 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) { | 1316 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) { |
1325 switch (type) { | 1317 switch (type) { |
1358 } | 1350 } |
1359 | 1351 |
1360 | 1352 |
1361 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) { | 1353 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) { |
1362 Address base = as_Address(addr); | 1354 Address base = as_Address(addr); |
1363 return Address(base.base(), 0, base.disp() + hi_word_offset_in_bytes); | 1355 return Address(base.base(), base.disp() + hi_word_offset_in_bytes); |
1364 } | 1356 } |
1365 | 1357 |
1366 | 1358 |
1367 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) { | 1359 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) { |
1368 Address base = as_Address(addr); | 1360 Address base = as_Address(addr); |
1369 return Address(base.base(), 0, base.disp() + lo_word_offset_in_bytes); | 1361 return Address(base.base(), base.disp() + lo_word_offset_in_bytes); |
1370 } | 1362 } |
1371 | 1363 |
1372 | 1364 |
1373 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type, | 1365 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type, |
1374 LIR_PatchCode patch_code, CodeEmitInfo* info, bool unaligned) { | 1366 LIR_PatchCode patch_code, CodeEmitInfo* info, bool unaligned) { |
1394 } | 1386 } |
1395 | 1387 |
1396 if (addr->index()->is_illegal()) { | 1388 if (addr->index()->is_illegal()) { |
1397 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) { | 1389 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) { |
1398 if (needs_patching) { | 1390 if (needs_patching) { |
1399 __ sethi(0, O7, true); | 1391 __ patchable_set(0, O7); |
1400 __ add(O7, 0, O7); | |
1401 } else { | 1392 } else { |
1402 __ set(disp_value, O7); | 1393 __ set(disp_value, O7); |
1403 } | 1394 } |
1404 disp_reg = O7; | 1395 disp_reg = O7; |
1405 } | 1396 } |
1542 } | 1533 } |
1543 | 1534 |
1544 if (addr->index()->is_illegal()) { | 1535 if (addr->index()->is_illegal()) { |
1545 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) { | 1536 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) { |
1546 if (needs_patching) { | 1537 if (needs_patching) { |
1547 __ sethi(0, O7, true); | 1538 __ patchable_set(0, O7); |
1548 __ add(O7, 0, O7); | |
1549 } else { | 1539 } else { |
1550 __ set(disp_value, O7); | 1540 __ set(disp_value, O7); |
1551 } | 1541 } |
1552 disp_reg = O7; | 1542 disp_reg = O7; |
1553 } | 1543 } |
1625 int start = __ offset(); | 1615 int start = __ offset(); |
1626 __ relocate(static_stub_Relocation::spec(call_pc)); | 1616 __ relocate(static_stub_Relocation::spec(call_pc)); |
1627 | 1617 |
1628 __ set_oop(NULL, G5); | 1618 __ set_oop(NULL, G5); |
1629 // must be set to -1 at code generation time | 1619 // must be set to -1 at code generation time |
1630 Address a(G3, (address)-1); | 1620 AddressLiteral addrlit(-1); |
1631 __ jump_to(a, 0); | 1621 __ jump_to(addrlit, G3); |
1632 __ delayed()->nop(); | 1622 __ delayed()->nop(); |
1633 | 1623 |
1634 assert(__ offset() - start <= call_stub_size, "stub too big"); | 1624 assert(__ offset() - start <= call_stub_size, "stub too big"); |
1635 __ end_a_stub(); | 1625 __ end_a_stub(); |
1636 } | 1626 } |
2061 } else { | 2051 } else { |
2062 // reuse the debug info from the safepoint poll for the throw op itself | 2052 // reuse the debug info from the safepoint poll for the throw op itself |
2063 address pc_for_athrow = __ pc(); | 2053 address pc_for_athrow = __ pc(); |
2064 int pc_for_athrow_offset = __ offset(); | 2054 int pc_for_athrow_offset = __ offset(); |
2065 RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow); | 2055 RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow); |
2066 __ set((intptr_t)pc_for_athrow, Oissuing_pc, rspec); | 2056 __ set(pc_for_athrow, Oissuing_pc, rspec); |
2067 add_call_info(pc_for_athrow_offset, info); // for exception handler | 2057 add_call_info(pc_for_athrow_offset, info); // for exception handler |
2068 | 2058 |
2069 __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type); | 2059 __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type); |
2070 __ delayed()->nop(); | 2060 __ delayed()->nop(); |
2071 } | 2061 } |
2449 __ set(mdo_offset_bias, data_val); | 2439 __ set(mdo_offset_bias, data_val); |
2450 __ add(mdo, data_val, mdo); | 2440 __ add(mdo, data_val, mdo); |
2451 } | 2441 } |
2452 | 2442 |
2453 | 2443 |
2454 Address flags_addr(mdo, 0, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias); | 2444 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias); |
2455 __ ldub(flags_addr, data_val); | 2445 __ ldub(flags_addr, data_val); |
2456 __ or3(data_val, BitData::null_seen_byte_constant(), data_val); | 2446 __ or3(data_val, BitData::null_seen_byte_constant(), data_val); |
2457 __ stb(data_val, flags_addr); | 2447 __ stb(data_val, flags_addr); |
2458 __ bind(profile_done); | 2448 __ bind(profile_done); |
2459 } | 2449 } |
2736 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset()); | 2726 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset()); |
2737 __ set(mdo_offset_bias, O7); | 2727 __ set(mdo_offset_bias, O7); |
2738 __ add(mdo, O7, mdo); | 2728 __ add(mdo, O7, mdo); |
2739 } | 2729 } |
2740 | 2730 |
2741 Address counter_addr(mdo, 0, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); | 2731 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); |
2742 __ lduw(counter_addr, tmp1); | 2732 __ lduw(counter_addr, tmp1); |
2743 __ add(tmp1, DataLayout::counter_increment, tmp1); | 2733 __ add(tmp1, DataLayout::counter_increment, tmp1); |
2744 __ stw(tmp1, counter_addr); | 2734 __ stw(tmp1, counter_addr); |
2745 Bytecodes::Code bc = method->java_code_at_bci(bci); | 2735 Bytecodes::Code bc = method->java_code_at_bci(bci); |
2746 // Perform additional virtual call profiling for invokevirtual and | 2736 // Perform additional virtual call profiling for invokevirtual and |
2762 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; | 2752 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; |
2763 uint i; | 2753 uint i; |
2764 for (i = 0; i < VirtualCallData::row_limit(); i++) { | 2754 for (i = 0; i < VirtualCallData::row_limit(); i++) { |
2765 ciKlass* receiver = vc_data->receiver(i); | 2755 ciKlass* receiver = vc_data->receiver(i); |
2766 if (known_klass->equals(receiver)) { | 2756 if (known_klass->equals(receiver)) { |
2767 Address data_addr(mdo, 0, md->byte_offset_of_slot(data, | 2757 Address data_addr(mdo, md->byte_offset_of_slot(data, |
2768 VirtualCallData::receiver_count_offset(i)) - | 2758 VirtualCallData::receiver_count_offset(i)) - |
2769 mdo_offset_bias); | 2759 mdo_offset_bias); |
2770 __ lduw(data_addr, tmp1); | 2760 __ lduw(data_addr, tmp1); |
2771 __ add(tmp1, DataLayout::counter_increment, tmp1); | 2761 __ add(tmp1, DataLayout::counter_increment, tmp1); |
2772 __ stw(tmp1, data_addr); | 2762 __ stw(tmp1, data_addr); |
2773 return; | 2763 return; |
2780 // always does a write to the receiver part of the | 2770 // always does a write to the receiver part of the |
2781 // VirtualCallData rather than just the first time | 2771 // VirtualCallData rather than just the first time |
2782 for (i = 0; i < VirtualCallData::row_limit(); i++) { | 2772 for (i = 0; i < VirtualCallData::row_limit(); i++) { |
2783 ciKlass* receiver = vc_data->receiver(i); | 2773 ciKlass* receiver = vc_data->receiver(i); |
2784 if (receiver == NULL) { | 2774 if (receiver == NULL) { |
2785 Address recv_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - | 2775 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - |
2786 mdo_offset_bias); | 2776 mdo_offset_bias); |
2787 jobject2reg(known_klass->encoding(), tmp1); | 2777 jobject2reg(known_klass->encoding(), tmp1); |
2788 __ st_ptr(tmp1, recv_addr); | 2778 __ st_ptr(tmp1, recv_addr); |
2789 Address data_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - | 2779 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - |
2790 mdo_offset_bias); | 2780 mdo_offset_bias); |
2791 __ lduw(data_addr, tmp1); | 2781 __ lduw(data_addr, tmp1); |
2792 __ add(tmp1, DataLayout::counter_increment, tmp1); | 2782 __ add(tmp1, DataLayout::counter_increment, tmp1); |
2793 __ stw(tmp1, data_addr); | 2783 __ stw(tmp1, data_addr); |
2794 return; | 2784 return; |
2795 } | 2785 } |
2796 } | 2786 } |
2797 } else { | 2787 } else { |
2798 load(Address(recv, 0, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT); | 2788 load(Address(recv, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT); |
2799 Label update_done; | 2789 Label update_done; |
2800 uint i; | 2790 uint i; |
2801 for (i = 0; i < VirtualCallData::row_limit(); i++) { | 2791 for (i = 0; i < VirtualCallData::row_limit(); i++) { |
2802 Label next_test; | 2792 Label next_test; |
2803 // See if the receiver is receiver[n]. | 2793 // See if the receiver is receiver[n]. |
2804 Address receiver_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - | 2794 Address receiver_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - |
2805 mdo_offset_bias); | 2795 mdo_offset_bias); |
2806 __ ld_ptr(receiver_addr, tmp1); | 2796 __ ld_ptr(receiver_addr, tmp1); |
2807 __ verify_oop(tmp1); | 2797 __ verify_oop(tmp1); |
2808 __ cmp(recv, tmp1); | 2798 __ cmp(recv, tmp1); |
2809 __ brx(Assembler::notEqual, false, Assembler::pt, next_test); | 2799 __ brx(Assembler::notEqual, false, Assembler::pt, next_test); |
2810 __ delayed()->nop(); | 2800 __ delayed()->nop(); |
2811 Address data_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - | 2801 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - |
2812 mdo_offset_bias); | 2802 mdo_offset_bias); |
2813 __ lduw(data_addr, tmp1); | 2803 __ lduw(data_addr, tmp1); |
2814 __ add(tmp1, DataLayout::counter_increment, tmp1); | 2804 __ add(tmp1, DataLayout::counter_increment, tmp1); |
2815 __ stw(tmp1, data_addr); | 2805 __ stw(tmp1, data_addr); |
2816 __ br(Assembler::always, false, Assembler::pt, update_done); | 2806 __ br(Assembler::always, false, Assembler::pt, update_done); |
2819 } | 2809 } |
2820 | 2810 |
2821 // Didn't find receiver; find next empty slot and fill it in | 2811 // Didn't find receiver; find next empty slot and fill it in |
2822 for (i = 0; i < VirtualCallData::row_limit(); i++) { | 2812 for (i = 0; i < VirtualCallData::row_limit(); i++) { |
2823 Label next_test; | 2813 Label next_test; |
2824 Address recv_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - | 2814 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - |
2825 mdo_offset_bias); | 2815 mdo_offset_bias); |
2826 load(recv_addr, tmp1, T_OBJECT); | 2816 load(recv_addr, tmp1, T_OBJECT); |
2827 __ tst(tmp1); | 2817 __ tst(tmp1); |
2828 __ brx(Assembler::notEqual, false, Assembler::pt, next_test); | 2818 __ brx(Assembler::notEqual, false, Assembler::pt, next_test); |
2829 __ delayed()->nop(); | 2819 __ delayed()->nop(); |
2830 __ st_ptr(recv, recv_addr); | 2820 __ st_ptr(recv, recv_addr); |
2831 __ set(DataLayout::counter_increment, tmp1); | 2821 __ set(DataLayout::counter_increment, tmp1); |
2832 __ st_ptr(tmp1, Address(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - | 2822 __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - |
2833 mdo_offset_bias)); | 2823 mdo_offset_bias); |
2834 if (i < (VirtualCallData::row_limit() - 1)) { | 2824 if (i < (VirtualCallData::row_limit() - 1)) { |
2835 __ br(Assembler::always, false, Assembler::pt, update_done); | 2825 __ br(Assembler::always, false, Assembler::pt, update_done); |
2836 __ delayed()->nop(); | 2826 __ delayed()->nop(); |
2837 } | 2827 } |
2838 __ bind(next_test); | 2828 __ bind(next_test); |