comparison src/share/vm/opto/memnode.cpp @ 235:9c2ecc2ffb12 jdk7-b31

Merge
author trims
date Fri, 11 Jul 2008 01:14:44 -0700
parents d1605aabd0a1 1dd146f17531
children 02a35ad4adf8
comparing 197:de141433919f with 235:9c2ecc2ffb12
@@ -89,11 +89,11 @@
 
 #endif
 
 Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase) {
   const TypeOopPtr *tinst = t_adr->isa_oopptr();
-  if (tinst == NULL || !tinst->is_instance_field())
+  if (tinst == NULL || !tinst->is_known_instance_field())
     return mchain;  // don't try to optimize non-instance types
   uint instance_id = tinst->instance_id();
   Node *prev = NULL;
   Node *result = mchain;
   while (prev != result) {
@@ -123,21 +123,21 @@
   return result;
 }
 
 Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase) {
   const TypeOopPtr *t_oop = t_adr->isa_oopptr();
-  bool is_instance = (t_oop != NULL) && t_oop->is_instance_field();
+  bool is_instance = (t_oop != NULL) && t_oop->is_known_instance_field();
   PhaseIterGVN *igvn = phase->is_IterGVN();
   Node *result = mchain;
   result = optimize_simple_memory_chain(result, t_adr, phase);
   if (is_instance && igvn != NULL && result->is_Phi()) {
     PhiNode *mphi = result->as_Phi();
     assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
     const TypePtr *t = mphi->adr_type();
     if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ||
-        t->isa_oopptr() && !t->is_oopptr()->is_instance() &&
-        t->is_oopptr()->cast_to_instance(t_oop->instance_id()) == t_oop) {
+        t->isa_oopptr() && !t->is_oopptr()->is_known_instance() &&
+        t->is_oopptr()->cast_to_instance_id(t_oop->instance_id()) == t_oop) {
       // clone the Phi with our address type
       result = mphi->split_out_instance(t_adr, igvn);
     } else {
       assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
     }
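
The recurring rename in this merge, is_instance_field() to is_known_instance_field() (with cast_to_instance() becoming cast_to_instance_id()), stresses that only oop types pinned by escape analysis to one known allocation may be optimized this way. A minimal sketch of the guard the new names express, assuming a HotSpot opto context where phase (PhaseGVN*) and adr (Node*) are in scope:

    const TypeOopPtr* t_oop = phase->type(adr)->isa_oopptr();
    if (t_oop != NULL && t_oop->is_known_instance_field()) {
      // instance_id() identifies the unique allocation; memory states keyed
      // to a different id cannot alias this address, so the chain above
      // them can be walked past safely.
      uint instance_id = t_oop->instance_id();
    }
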
@@ -468,11 +468,11 @@
     if (known_identical) {
       // From caller, can_see_stored_value will consult find_captured_store.
       return mem;         // let caller handle steps (c), (d)
     }
 
-  } else if (addr_t != NULL && addr_t->is_instance_field()) {
+  } else if (addr_t != NULL && addr_t->is_known_instance_field()) {
     // Can't use optimize_simple_memory_chain() since it needs PhaseGVN.
     if (mem->is_Proj() && mem->in(0)->is_Call()) {
       CallNode *call = mem->in(0)->as_Call();
       if (!call->may_modify(addr_t, phase)) {
         mem = call->in(TypeFunc::Memory);
@@ -767,19 +767,12 @@
   case T_DOUBLE:  return new (C, 3) LoadDNode(ctl, mem, adr, adr_type, rt );
   case T_ADDRESS: return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr() );
   case T_OBJECT:
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
-      const TypeNarrowOop* narrowtype;
-      if (rt->isa_narrowoop()) {
-        narrowtype = rt->is_narrowoop();
-      } else {
-        narrowtype = rt->is_oopptr()->make_narrowoop();
-      }
-      Node* load = gvn.transform(new (C, 3) LoadNNode(ctl, mem, adr, adr_type, narrowtype));
-
-      return DecodeNNode::decode(&gvn, load);
+      Node* load = gvn.transform(new (C, 3) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop()));
+      return new (C, 2) DecodeNNode(load, load->bottom_type()->make_ptr());
     } else
 #endif
     {
       assert(!adr->bottom_type()->is_ptr_to_narrowoop(), "should have got back a narrow oop");
       return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr());
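
The merged code keeps the two-node shape for compressed oops: LoadN reads the 32-bit narrow oop, DecodeN widens it to a full pointer, and rt->make_narrowoop() subsumes the removed narrowtype if/else. For intuition, a minimal standalone sketch of the arithmetic a DecodeN node performs, where heap_base and shift stand in for the VM's narrow-oop base and LogMinObjAlignmentInBytes (both names here are illustrative assumptions):

    #include <cstdint>

    // Widen a 32-bit narrow oop into a full 64-bit pointer.
    static inline void* decode_narrow_oop(uint32_t n, uintptr_t heap_base, int shift) {
      return reinterpret_cast<void*>(heap_base + (static_cast<uintptr_t>(n) << shift));
    }
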
@@ -921,11 +914,11 @@
 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
   if( in(MemNode::Memory)->is_Phi() && in(MemNode::Memory)->in(0) == ctrl &&
       in(MemNode::Address)->is_AddP() ) {
     const TypeOopPtr* t_oop = in(MemNode::Address)->bottom_type()->isa_oopptr();
     // Only instances.
-    if( t_oop != NULL && t_oop->is_instance_field() &&
+    if( t_oop != NULL && t_oop->is_known_instance_field() &&
         t_oop->offset() != Type::OffsetBot &&
         t_oop->offset() != Type::OffsetTop) {
       return true;
     }
   }
@@ -1144,11 +1137,11 @@
   Node* address = in(MemNode::Address);
   const TypePtr *addr_t = phase->type(address)->isa_ptr();
   const TypeOopPtr *t_oop = addr_t->isa_oopptr();
 
   assert(mem->is_Phi() && (t_oop != NULL) &&
-         t_oop->is_instance_field(), "invalid conditions");
+         t_oop->is_known_instance_field(), "invalid conditions");
 
   Node *region = mem->in(0);
   if (region == NULL) {
     return NULL;  // Wait stable graph
   }
@@ -1312,11 +1305,11 @@
       set_req(MemNode::Memory, opt_mem);
       return this;
     }
     const TypeOopPtr *t_oop = addr_t->isa_oopptr();
     if (can_reshape && opt_mem->is_Phi() &&
-        (t_oop != NULL) && t_oop->is_instance_field()) {
+        (t_oop != NULL) && t_oop->is_known_instance_field()) {
       // Split instance field load through Phi.
       Node* result = split_through_phi(phase);
       if (result != NULL) return result;
     }
   }
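
split_through_phi() is where the known-instance bookkeeping pays off: a load whose memory input is a Phi over that instance's memory slice can be hoisted above the merge. A conceptual before/after for a two-way region (an illustration of the transformation, not the routine's code):

    // before:  mem = Phi(region, M1, M2)
    //          x   = LoadI(mem, adr)        // adr is a known-instance field
    // after:   x1  = LoadI(M1, adr)
    //          x2  = LoadI(M2, adr)
    //          x   = Phi(region, x1, x2)    // each load can now fold against
    //                                       // the store on its own path
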
@@ -1547,11 +1540,11 @@
     if (value != NULL && value->is_Con())
       return value->bottom_type();
   }
 
   const TypeOopPtr *tinst = tp->isa_oopptr();
-  if (tinst != NULL && tinst->is_instance_field()) {
+  if (tinst != NULL && tinst->is_known_instance_field()) {
     // If we have an instance type and our memory input is the
     // program's initial memory state, there is no matching store,
     // so just return a zero of the appropriate type
     Node *mem = in(MemNode::Memory);
     if (mem->is_Parm() && mem->in(0)->is_Start()) {
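
The Parm/Start check catches reads of a known-instance field straight from the method's initial memory state: no store to that instance can precede it, so the load must observe the default value. Roughly (illustrative shape only):

    // p = Allocate(...)                 // p has a known instance_id
    // x = LoadI(initial_memory, p.f)    // no store to p.f anywhere above
    // => Value() returns the zero of the field's type (0, 0L, NULL, ...)
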
@@ -1629,13 +1622,12 @@
   // sanity check the alias category against the created node type
   const TypeOopPtr *adr_type = adr->bottom_type()->isa_oopptr();
   assert(adr_type != NULL, "expecting TypeOopPtr");
 #ifdef _LP64
   if (adr_type->is_ptr_to_narrowoop()) {
-    const TypeNarrowOop* narrowtype = tk->is_oopptr()->make_narrowoop();
-    Node* load_klass = gvn.transform(new (C, 3) LoadNKlassNode(ctl, mem, adr, at, narrowtype));
-    return DecodeNNode::decode(&gvn, load_klass);
+    Node* load_klass = gvn.transform(new (C, 3) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowoop()));
+    return new (C, 2) DecodeNNode(load_klass, load_klass->bottom_type()->make_ptr());
   }
 #endif
   assert(!adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
   return new (C, 3) LoadKlassNode(ctl, mem, adr, at, tk);
 }
@@ -1841,19 +1833,14 @@
 
 
 //------------------------------Value------------------------------------------
 const Type *LoadNKlassNode::Value( PhaseTransform *phase ) const {
   const Type *t = klass_value_common(phase);
-
-  if (t == TypePtr::NULL_PTR) {
-    return TypeNarrowOop::NULL_PTR;
-  }
-  if (t != Type::TOP && !t->isa_narrowoop()) {
-    assert(t->is_oopptr(), "sanity");
-    t = t->is_oopptr()->make_narrowoop();
-  }
-  return t;
+  if (t == Type::TOP)
+    return t;
+
+  return t->make_narrowoop();
 }
 
 //------------------------------Identity---------------------------------------
 // To clean up reflective code, simplify k.java_mirror.as_klass to narrow k.
 // Also feed through the klass in Allocate(...klass...)._klass.
@@ -1862,11 +1849,11 @@
 
   const Type *t = phase->type( x );
   if( t == Type::TOP ) return x;
   if( t->isa_narrowoop()) return x;
 
-  return EncodePNode::encode(phase, x);
+  return phase->transform(new (phase->C, 2) EncodePNode(x, t->make_narrowoop()));
 }
 
 //------------------------------Value-----------------------------------------
 const Type *LoadRangeNode::Value( PhaseTransform *phase ) const {
   // Either input is TOP ==> the result is TOP
@@ -1928,18 +1915,17 @@
   case T_OBJECT:
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop() ||
         (UseCompressedOops && val->bottom_type()->isa_klassptr() &&
          adr->bottom_type()->isa_rawptr())) {
-      const TypePtr* type = val->bottom_type()->is_ptr();
-      Node* cp = EncodePNode::encode(&gvn, val);
-      return new (C, 4) StoreNNode(ctl, mem, adr, adr_type, cp);
+      val = gvn.transform(new (C, 2) EncodePNode(val, val->bottom_type()->make_narrowoop()));
+      return new (C, 4) StoreNNode(ctl, mem, adr, adr_type, val);
     } else
 #endif
     {
       return new (C, 4) StorePNode(ctl, mem, adr, adr_type, val);
     }
   }
   ShouldNotReachHere();
   return (StoreNode*)NULL;
 }
 
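EncodePNode is DecodeNNode's mirror image, used here on the store side and in the Identity hunk above: the full pointer is narrowed before a StoreN writes it. The standalone counterpart of the earlier decode sketch, under the same illustrative heap_base/shift assumptions:

    #include <cstdint>

    // Narrow a full 64-bit pointer into a 32-bit compressed oop.
    static inline uint32_t encode_narrow_oop(void* p, uintptr_t heap_base, int shift) {
      return static_cast<uint32_t>((reinterpret_cast<uintptr_t>(p) - heap_base) >> shift);
    }
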
@@ -2149,11 +2135,11 @@
 bool StoreNode::value_never_loaded( PhaseTransform *phase) const {
   Node *adr = in(Address);
   const TypeOopPtr *adr_oop = phase->type(adr)->isa_oopptr();
   if (adr_oop == NULL)
     return false;
-  if (!adr_oop->is_instance_field())
+  if (!adr_oop->is_known_instance_field())
     return false;  // if not a distinct instance, there may be aliases of the address
   for (DUIterator_Fast imax, i = adr->fast_outs(imax); i < imax; i++) {
     Node *use = adr->fast_out(i);
     int opc = use->Opcode();
     if (use->is_Load() || use->is_LoadStore()) {