comparison src/share/vm/c1/c1_LIRGenerator.cpp @ 1681:126ea7725993

6953477: Increase portability and flexibility of building Hotspot
Summary: A collection of portability improvements including shared code support for PPC, ARM platforms, software floating point, cross compilation support and improvements in error crash detail.
Reviewed-by: phh, never, coleenp, dholmes
author bobv
date Tue, 03 Aug 2010 08:13:38 -0400
parents b812ff5abc73
children d5d065957597
comparison
equal deleted inserted replaced
1680:a64438a2b7e8 1681:126ea7725993
29 #define __ gen()->lir(__FILE__, __LINE__)-> 29 #define __ gen()->lir(__FILE__, __LINE__)->
30 #else 30 #else
31 #define __ gen()->lir()-> 31 #define __ gen()->lir()->
32 #endif 32 #endif
33 33
34 // TODO: ARM - Use some recognizable constant which still fits architectural constraints
35 #ifdef ARM
36 #define PATCHED_ADDR (204)
37 #else
38 #define PATCHED_ADDR (max_jint)
39 #endif
34 40
35 void PhiResolverState::reset(int max_vregs) { 41 void PhiResolverState::reset(int max_vregs) {
36 // Initialize array sizes 42 // Initialize array sizes
37 _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL); 43 _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
38 _virtual_operands.trunc_to(0); 44 _virtual_operands.trunc_to(0);
223 } 229 }
224 230
225 void LIRItem::load_item_force(LIR_Opr reg) { 231 void LIRItem::load_item_force(LIR_Opr reg) {
226 LIR_Opr r = result(); 232 LIR_Opr r = result();
227 if (r != reg) { 233 if (r != reg) {
234 #if !defined(ARM) && !defined(E500V2)
228 if (r->type() != reg->type()) { 235 if (r->type() != reg->type()) {
229 // moves between different types need an intervening spill slot 236 // moves between different types need an intervening spill slot
230 LIR_Opr tmp = _gen->force_to_spill(r, reg->type()); 237 r = _gen->force_to_spill(r, reg->type());
231 __ move(tmp, reg); 238 }
232 } else { 239 #endif
233 __ move(r, reg); 240 __ move(r, reg);
234 }
235 _result = reg; 241 _result = reg;
236 } 242 }
237 } 243 }
238 244
239 ciObject* LIRItem::get_jobject_constant() const { 245 ciObject* LIRItem::get_jobject_constant() const {
626 // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter 632 // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
627 __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception); 633 __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
628 } 634 }
629 635
630 636
631 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, int monitor_no) { 637 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
632 if (!GenerateSynchronizationCode) return; 638 if (!GenerateSynchronizationCode) return;
633 // setup registers 639 // setup registers
634 LIR_Opr hdr = lock; 640 LIR_Opr hdr = lock;
635 lock = new_hdr; 641 lock = new_hdr;
636 CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no); 642 CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
637 __ load_stack_address_monitor(monitor_no, lock); 643 __ load_stack_address_monitor(monitor_no, lock);
638 __ unlock_object(hdr, object, lock, slow_path); 644 __ unlock_object(hdr, object, lock, scratch, slow_path);
639 } 645 }
640 646
641 647
642 void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) { 648 void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
643 jobject2reg_with_patching(klass_reg, klass, info); 649 jobject2reg_with_patching(klass_reg, klass, info);
1398 } 1404 }
1399 addr = ptr; 1405 addr = ptr;
1400 } 1406 }
1401 assert(addr->is_register(), "must be a register at this point"); 1407 assert(addr->is_register(), "must be a register at this point");
1402 1408
1409 #ifdef ARM
1410 // TODO: ARM - move to platform-dependent code
1411 LIR_Opr tmp = FrameMap::R14_opr;
1412 if (VM_Version::supports_movw()) {
1413 __ move((LIR_Opr)card_table_base, tmp);
1414 } else {
1415 __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
1416 }
1417
1418 CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
1419 LIR_Address *card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
1420 if(((int)ct->byte_map_base & 0xff) == 0) {
1421 __ move(tmp, card_addr);
1422 } else {
1423 LIR_Opr tmp_zero = new_register(T_INT);
1424 __ move(LIR_OprFact::intConst(0), tmp_zero);
1425 __ move(tmp_zero, card_addr);
1426 }
1427 #else // ARM
1403 LIR_Opr tmp = new_pointer_register(); 1428 LIR_Opr tmp = new_pointer_register();
1404 if (TwoOperandLIRForm) { 1429 if (TwoOperandLIRForm) {
1405 __ move(addr, tmp); 1430 __ move(addr, tmp);
1406 __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp); 1431 __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
1407 } else { 1432 } else {
1413 } else { 1438 } else {
1414 __ move(LIR_OprFact::intConst(0), 1439 __ move(LIR_OprFact::intConst(0),
1415 new LIR_Address(tmp, load_constant(card_table_base), 1440 new LIR_Address(tmp, load_constant(card_table_base),
1416 T_BYTE)); 1441 T_BYTE));
1417 } 1442 }
1443 #endif // ARM
1418 } 1444 }
1419 1445
1420 1446
1421 //------------------------field access-------------------------------------- 1447 //------------------------field access--------------------------------------
1422 1448
1505 if (needs_patching) { 1531 if (needs_patching) {
1506 // we need to patch the offset in the instruction so don't allow 1532 // we need to patch the offset in the instruction so don't allow
1507 // generate_address to try to be smart about emitting the -1. 1533 // generate_address to try to be smart about emitting the -1.
1508 // Otherwise the patching code won't know how to find the 1534 // Otherwise the patching code won't know how to find the
1509 // instruction to patch. 1535 // instruction to patch.
1510 address = new LIR_Address(object.result(), max_jint, field_type); 1536 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1511 } else { 1537 } else {
1512 address = generate_address(object.result(), x->offset(), field_type); 1538 address = generate_address(object.result(), x->offset(), field_type);
1513 } 1539 }
1514 1540
1515 if (is_volatile && os::is_MP()) { 1541 if (is_volatile && os::is_MP()) {
1582 if (needs_patching) { 1608 if (needs_patching) {
1583 // we need to patch the offset in the instruction so don't allow 1609 // we need to patch the offset in the instruction so don't allow
1584 // generate_address to try to be smart about emitting the -1. 1610 // generate_address to try to be smart about emitting the -1.
1585 // Otherwise the patching code won't know how to find the 1611 // Otherwise the patching code won't know how to find the
1586 // instruction to patch. 1612 // instruction to patch.
1587 address = new LIR_Address(object.result(), max_jint, field_type); 1613 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1588 } else { 1614 } else {
1589 address = generate_address(object.result(), x->offset(), field_type); 1615 address = generate_address(object.result(), x->offset(), field_type);
1590 } 1616 }
1591 1617
1592 if (is_volatile) { 1618 if (is_volatile) {
1842 __ convert(Bytecodes::_i2l, index_op, tmp); 1868 __ convert(Bytecodes::_i2l, index_op, tmp);
1843 index_op = tmp; 1869 index_op = tmp;
1844 } 1870 }
1845 #endif 1871 #endif
1846 addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type); 1872 addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
1873 #elif defined(ARM)
1874 addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
1847 #else 1875 #else
1848 if (index_op->is_illegal() || log2_scale == 0) { 1876 if (index_op->is_illegal() || log2_scale == 0) {
1849 #ifdef _LP64 1877 #ifdef _LP64
1850 if (!index_op->is_illegal() && index_op->type() == T_INT) { 1878 if (!index_op->is_illegal() && index_op->type() == T_INT) {
1851 LIR_Opr tmp = new_pointer_register(); 1879 LIR_Opr tmp = new_pointer_register();
1914 #ifdef _LP64 1942 #ifdef _LP64
1915 if(idx.result()->type() == T_INT) { 1943 if(idx.result()->type() == T_INT) {
1916 __ convert(Bytecodes::_i2l, idx.result(), index_op); 1944 __ convert(Bytecodes::_i2l, idx.result(), index_op);
1917 } else { 1945 } else {
1918 #endif 1946 #endif
1947 // TODO: ARM also allows embedded shift in the address
1919 __ move(idx.result(), index_op); 1948 __ move(idx.result(), index_op);
1920 #ifdef _LP64 1949 #ifdef _LP64
1921 } 1950 }
1922 #endif 1951 #endif
1923 __ shift_left(index_op, log2_scale, index_op); 1952 __ shift_left(index_op, log2_scale, index_op);
2202 __ move(src, dest); 2231 __ move(src, dest);
2203 2232
2204 // Assign new location to Local instruction for this local 2233 // Assign new location to Local instruction for this local
2205 Local* local = x->state()->local_at(java_index)->as_Local(); 2234 Local* local = x->state()->local_at(java_index)->as_Local();
2206 assert(local != NULL, "Locals for incoming arguments must have been created"); 2235 assert(local != NULL, "Locals for incoming arguments must have been created");
2236 #ifndef __SOFTFP__
2237 // The java calling convention passes double as long and float as int.
2207 assert(as_ValueType(t)->tag() == local->type()->tag(), "check"); 2238 assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2239 #endif // __SOFTFP__
2208 local->set_operand(dest); 2240 local->set_operand(dest);
2209 _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL); 2241 _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
2210 java_index += type2size[t]; 2242 java_index += type2size[t];
2211 } 2243 }
2212 2244