Mercurial > hg > truffle
comparison src/share/vm/opto/memnode.cpp @ 113:ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Summary: Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold
author | coleenp |
---|---|
date | Sun, 13 Apr 2008 17:43:42 -0400 |
parents | de93acbb64fc |
children | d1a5218d7eaf |
comparison
equal
deleted
inserted
replaced
110:a49a647afe9a | 113:ba764ed4b6f2 |
---|---|
547 | 547 |
548 case Op_AddP: // No change to NULL-ness, so peek thru AddP's | 548 case Op_AddP: // No change to NULL-ness, so peek thru AddP's |
549 adr = adr->in(AddPNode::Base); | 549 adr = adr->in(AddPNode::Base); |
550 continue; | 550 continue; |
551 | 551 |
552 case Op_DecodeN: // No change to NULL-ness, so peek thru | |
553 adr = adr->in(1); | |
554 continue; | |
555 | |
552 case Op_CastPP: | 556 case Op_CastPP: |
553 // If the CastPP is useless, just peek on through it. | 557 // If the CastPP is useless, just peek on through it. |
554 if( ccp->type(adr) == ccp->type(adr->in(1)) ) { | 558 if( ccp->type(adr) == ccp->type(adr->in(1)) ) { |
555 // Remember the cast that we've peeked through. If we peek | 559 // Remember the cast that we've peeked through. If we peek |
556 // through more than one, then we end up remembering the highest | 560 // through more than one, then we end up remembering the highest |
603 // List of "safe" opcodes; those that implicitly block the memory | 607 // List of "safe" opcodes; those that implicitly block the memory |
604 // op below any null check. | 608 // op below any null check. |
605 case Op_CastX2P: // no null checks on native pointers | 609 case Op_CastX2P: // no null checks on native pointers |
606 case Op_Parm: // 'this' pointer is not null | 610 case Op_Parm: // 'this' pointer is not null |
607 case Op_LoadP: // Loading from within a klass | 611 case Op_LoadP: // Loading from within a klass |
612 case Op_LoadN: // Loading from within a klass | |
608 case Op_LoadKlass: // Loading from within a klass | 613 case Op_LoadKlass: // Loading from within a klass |
609 case Op_ConP: // Loading from a klass | 614 case Op_ConP: // Loading from a klass |
610 case Op_CreateEx: // Sucking up the guts of an exception oop | 615 case Op_CreateEx: // Sucking up the guts of an exception oop |
611 case Op_Con: // Reading from TLS | 616 case Op_Con: // Reading from TLS |
612 case Op_CMoveP: // CMoveP is pinned | 617 case Op_CMoveP: // CMoveP is pinned |
667 #endif | 672 #endif |
668 | 673 |
669 | 674 |
670 //----------------------------LoadNode::make----------------------------------- | 675 //----------------------------LoadNode::make----------------------------------- |
671 // Polymorphic factory method: | 676 // Polymorphic factory method: |
672 LoadNode *LoadNode::make( Compile *C, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt ) { | 677 Node *LoadNode::make( PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt ) { |
678 Compile* C = gvn.C; | |
679 | |
673 // sanity check the alias category against the created node type | 680 // sanity check the alias category against the created node type |
674 assert(!(adr_type->isa_oopptr() && | 681 assert(!(adr_type->isa_oopptr() && |
675 adr_type->offset() == oopDesc::klass_offset_in_bytes()), | 682 adr_type->offset() == oopDesc::klass_offset_in_bytes()), |
676 "use LoadKlassNode instead"); | 683 "use LoadKlassNode instead"); |
677 assert(!(adr_type->isa_aryptr() && | 684 assert(!(adr_type->isa_aryptr() && |
685 case T_SHORT: return new (C, 3) LoadSNode(ctl, mem, adr, adr_type, rt->is_int() ); | 692 case T_SHORT: return new (C, 3) LoadSNode(ctl, mem, adr, adr_type, rt->is_int() ); |
686 case T_LONG: return new (C, 3) LoadLNode(ctl, mem, adr, adr_type, rt->is_long() ); | 693 case T_LONG: return new (C, 3) LoadLNode(ctl, mem, adr, adr_type, rt->is_long() ); |
687 case T_FLOAT: return new (C, 3) LoadFNode(ctl, mem, adr, adr_type, rt ); | 694 case T_FLOAT: return new (C, 3) LoadFNode(ctl, mem, adr, adr_type, rt ); |
688 case T_DOUBLE: return new (C, 3) LoadDNode(ctl, mem, adr, adr_type, rt ); | 695 case T_DOUBLE: return new (C, 3) LoadDNode(ctl, mem, adr, adr_type, rt ); |
689 case T_ADDRESS: return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr() ); | 696 case T_ADDRESS: return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr() ); |
690 case T_OBJECT: return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr()); | 697 case T_OBJECT: |
698 #ifdef _LP64 | |
699 if (adr->bottom_type()->is_narrow()) { | |
700 const TypeNarrowOop* narrowtype; | |
701 if (rt->isa_narrowoop()) { | |
702 narrowtype = rt->is_narrowoop(); | |
703 rt = narrowtype->make_oopptr(); | |
704 } else { | |
705 narrowtype = rt->is_oopptr()->make_narrowoop(); | |
706 } | |
707 Node* load = gvn.transform(new (C, 3) LoadNNode(ctl, mem, adr, adr_type, narrowtype)); | |
708 | |
709 return new (C, 2) DecodeNNode(load, rt); | |
710 } else | |
711 #endif | |
712 { | |
713 assert(!adr->bottom_type()->is_narrow(), "should have got back a narrow oop"); | |
714 return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr()); | |
715 } | |
691 } | 716 } |
692 ShouldNotReachHere(); | 717 ShouldNotReachHere(); |
693 return (LoadNode*)NULL; | 718 return (LoadNode*)NULL; |
694 } | 719 } |
695 | 720 |
1741 | 1766 |
1742 } | 1767 } |
1743 //============================================================================= | 1768 //============================================================================= |
1744 //---------------------------StoreNode::make----------------------------------- | 1769 //---------------------------StoreNode::make----------------------------------- |
1745 // Polymorphic factory method: | 1770 // Polymorphic factory method: |
1746 StoreNode* StoreNode::make( Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt ) { | 1771 StoreNode* StoreNode::make( PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt ) { |
1772 Compile* C = gvn.C; | |
1773 | |
1747 switch (bt) { | 1774 switch (bt) { |
1748 case T_BOOLEAN: | 1775 case T_BOOLEAN: |
1749 case T_BYTE: return new (C, 4) StoreBNode(ctl, mem, adr, adr_type, val); | 1776 case T_BYTE: return new (C, 4) StoreBNode(ctl, mem, adr, adr_type, val); |
1750 case T_INT: return new (C, 4) StoreINode(ctl, mem, adr, adr_type, val); | 1777 case T_INT: return new (C, 4) StoreINode(ctl, mem, adr, adr_type, val); |
1751 case T_CHAR: | 1778 case T_CHAR: |
1752 case T_SHORT: return new (C, 4) StoreCNode(ctl, mem, adr, adr_type, val); | 1779 case T_SHORT: return new (C, 4) StoreCNode(ctl, mem, adr, adr_type, val); |
1753 case T_LONG: return new (C, 4) StoreLNode(ctl, mem, adr, adr_type, val); | 1780 case T_LONG: return new (C, 4) StoreLNode(ctl, mem, adr, adr_type, val); |
1754 case T_FLOAT: return new (C, 4) StoreFNode(ctl, mem, adr, adr_type, val); | 1781 case T_FLOAT: return new (C, 4) StoreFNode(ctl, mem, adr, adr_type, val); |
1755 case T_DOUBLE: return new (C, 4) StoreDNode(ctl, mem, adr, adr_type, val); | 1782 case T_DOUBLE: return new (C, 4) StoreDNode(ctl, mem, adr, adr_type, val); |
1756 case T_ADDRESS: | 1783 case T_ADDRESS: |
1757 case T_OBJECT: return new (C, 4) StorePNode(ctl, mem, adr, adr_type, val); | 1784 case T_OBJECT: |
1785 #ifdef _LP64 | |
1786 if (adr->bottom_type()->is_narrow() || | |
1787 (UseCompressedOops && val->bottom_type()->isa_klassptr() && | |
1788 adr->bottom_type()->isa_rawptr())) { | |
1789 const TypePtr* type = val->bottom_type()->is_ptr(); | |
1790 Node* cp; | |
1791 if (type->isa_oopptr()) { | |
1792 const TypeNarrowOop* etype = type->is_oopptr()->make_narrowoop(); | |
1793 cp = gvn.transform(new (C, 2) EncodePNode(val, etype)); | |
1794 } else if (type == TypePtr::NULL_PTR) { | |
1795 cp = gvn.transform(new (C, 1) ConNNode(TypeNarrowOop::NULL_PTR)); | |
1796 } else { | |
1797 ShouldNotReachHere(); | |
1798 } | |
1799 return new (C, 4) StoreNNode(ctl, mem, adr, adr_type, cp); | |
1800 } else | |
1801 #endif | |
1802 { | |
1803 return new (C, 4) StorePNode(ctl, mem, adr, adr_type, val); | |
1804 } | |
1758 } | 1805 } |
1759 ShouldNotReachHere(); | 1806 ShouldNotReachHere(); |
1760 return (StoreNode*)NULL; | 1807 return (StoreNode*)NULL; |
1761 } | 1808 } |
1762 | 1809 |
2134 int unit = BytesPerLong; | 2181 int unit = BytesPerLong; |
2135 if ((offset % unit) != 0) { | 2182 if ((offset % unit) != 0) { |
2136 Node* adr = new (C, 4) AddPNode(dest, dest, phase->MakeConX(offset)); | 2183 Node* adr = new (C, 4) AddPNode(dest, dest, phase->MakeConX(offset)); |
2137 adr = phase->transform(adr); | 2184 adr = phase->transform(adr); |
2138 const TypePtr* atp = TypeRawPtr::BOTTOM; | 2185 const TypePtr* atp = TypeRawPtr::BOTTOM; |
2139 mem = StoreNode::make(C, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT); | 2186 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT); |
2140 mem = phase->transform(mem); | 2187 mem = phase->transform(mem); |
2141 offset += BytesPerInt; | 2188 offset += BytesPerInt; |
2142 } | 2189 } |
2143 assert((offset % unit) == 0, ""); | 2190 assert((offset % unit) == 0, ""); |
2144 | 2191 |
2197 } | 2244 } |
2198 if (done_offset < end_offset) { // emit the final 32-bit store | 2245 if (done_offset < end_offset) { // emit the final 32-bit store |
2199 Node* adr = new (C, 4) AddPNode(dest, dest, phase->MakeConX(done_offset)); | 2246 Node* adr = new (C, 4) AddPNode(dest, dest, phase->MakeConX(done_offset)); |
2200 adr = phase->transform(adr); | 2247 adr = phase->transform(adr); |
2201 const TypePtr* atp = TypeRawPtr::BOTTOM; | 2248 const TypePtr* atp = TypeRawPtr::BOTTOM; |
2202 mem = StoreNode::make(C, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT); | 2249 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT); |
2203 mem = phase->transform(mem); | 2250 mem = phase->transform(mem); |
2204 done_offset += BytesPerInt; | 2251 done_offset += BytesPerInt; |
2205 } | 2252 } |
2206 assert(done_offset == end_offset, ""); | 2253 assert(done_offset == end_offset, ""); |
2207 return mem; | 2254 return mem; |
2554 return FAIL; // arraycopy got here first; punt | 2601 return FAIL; // arraycopy got here first; punt |
2555 | 2602 |
2556 assert(allocation() != NULL, "must be present"); | 2603 assert(allocation() != NULL, "must be present"); |
2557 | 2604 |
2558 // no negatives, no header fields: | 2605 // no negatives, no header fields: |
2559 if (start < (intptr_t) sizeof(oopDesc)) return FAIL; | 2606 if (start < (intptr_t) allocation()->minimum_header_size()) return FAIL; |
2560 if (start < (intptr_t) sizeof(arrayOopDesc) && | |
2561 start < (intptr_t) allocation()->minimum_header_size()) return FAIL; | |
2562 | 2607 |
2563 // after a certain size, we bail out on tracking all the stores: | 2608 // after a certain size, we bail out on tracking all the stores: |
2564 intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize); | 2609 intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize); |
2565 if (start >= ti_limit) return FAIL; | 2610 if (start >= ti_limit) return FAIL; |
2566 | 2611 |
2893 intptr_t off[2]; | 2938 intptr_t off[2]; |
2894 int nst = 0; | 2939 int nst = 0; |
2895 if (!split) { | 2940 if (!split) { |
2896 ++new_long; | 2941 ++new_long; |
2897 off[nst] = offset; | 2942 off[nst] = offset; |
2898 st[nst++] = StoreNode::make(C, ctl, zmem, adr, atp, | 2943 st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp, |
2899 phase->longcon(con), T_LONG); | 2944 phase->longcon(con), T_LONG); |
2900 } else { | 2945 } else { |
2901 // Omit either if it is a zero. | 2946 // Omit either if it is a zero. |
2902 if (con0 != 0) { | 2947 if (con0 != 0) { |
2903 ++new_int; | 2948 ++new_int; |
2904 off[nst] = offset; | 2949 off[nst] = offset; |
2905 st[nst++] = StoreNode::make(C, ctl, zmem, adr, atp, | 2950 st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp, |
2906 phase->intcon(con0), T_INT); | 2951 phase->intcon(con0), T_INT); |
2907 } | 2952 } |
2908 if (con1 != 0) { | 2953 if (con1 != 0) { |
2909 ++new_int; | 2954 ++new_int; |
2910 offset += BytesPerInt; | 2955 offset += BytesPerInt; |
2911 adr = make_raw_address(offset, phase); | 2956 adr = make_raw_address(offset, phase); |
2912 off[nst] = offset; | 2957 off[nst] = offset; |
2913 st[nst++] = StoreNode::make(C, ctl, zmem, adr, atp, | 2958 st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp, |
2914 phase->intcon(con1), T_INT); | 2959 phase->intcon(con1), T_INT); |
2915 } | 2960 } |
2916 } | 2961 } |
2917 | 2962 |
2918 // Insert second store first, then the first before the second. | 2963 // Insert second store first, then the first before the second. |
3016 coalesce_subword_stores(header_size, size_in_bytes, phase); | 3061 coalesce_subword_stores(header_size, size_in_bytes, phase); |
3017 | 3062 |
3018 Node* zmem = zero_memory(); // initially zero memory state | 3063 Node* zmem = zero_memory(); // initially zero memory state |
3019 Node* inits = zmem; // accumulating a linearized chain of inits | 3064 Node* inits = zmem; // accumulating a linearized chain of inits |
3020 #ifdef ASSERT | 3065 #ifdef ASSERT |
3021 intptr_t last_init_off = sizeof(oopDesc); // previous init offset | 3066 intptr_t first_offset = allocation()->minimum_header_size(); |
3022 intptr_t last_init_end = sizeof(oopDesc); // previous init offset+size | 3067 intptr_t last_init_off = first_offset; // previous init offset |
3023 intptr_t last_tile_end = sizeof(oopDesc); // previous tile offset+size | 3068 intptr_t last_init_end = first_offset; // previous init offset+size |
3069 intptr_t last_tile_end = first_offset; // previous tile offset+size | |
3024 #endif | 3070 #endif |
3025 intptr_t zeroes_done = header_size; | 3071 intptr_t zeroes_done = header_size; |
3026 | 3072 |
3027 bool do_zeroing = true; // we might give up if inits are very sparse | 3073 bool do_zeroing = true; // we might give up if inits are very sparse |
3028 int big_init_gaps = 0; // how many large gaps have we seen? | 3074 int big_init_gaps = 0; // how many large gaps have we seen? |
3153 | 3199 |
3154 #ifdef ASSERT | 3200 #ifdef ASSERT |
3155 bool InitializeNode::stores_are_sane(PhaseTransform* phase) { | 3201 bool InitializeNode::stores_are_sane(PhaseTransform* phase) { |
3156 if (is_complete()) | 3202 if (is_complete()) |
3157 return true; // stores could be anything at this point | 3203 return true; // stores could be anything at this point |
3158 intptr_t last_off = sizeof(oopDesc); | 3204 assert(allocation() != NULL, "must be present"); |
3205 intptr_t last_off = allocation()->minimum_header_size(); | |
3159 for (uint i = InitializeNode::RawStores; i < req(); i++) { | 3206 for (uint i = InitializeNode::RawStores; i < req(); i++) { |
3160 Node* st = in(i); | 3207 Node* st = in(i); |
3161 intptr_t st_off = get_store_offset(st, phase); | 3208 intptr_t st_off = get_store_offset(st, phase); |
3162 if (st_off < 0) continue; // ignore dead garbage | 3209 if (st_off < 0) continue; // ignore dead garbage |
3163 if (last_off > st_off) { | 3210 if (last_off > st_off) { |