comparison src/share/vm/opto/memnode.cpp @ 4970:33df1aeaebbf

Merge with http://hg.openjdk.java.net/hsx/hsx24/hotspot/
author Thomas Wuerthinger <thomas.wuerthinger@oracle.com>
date Mon, 27 Feb 2012 13:10:13 +0100
parents 52474ec73861
children 0919b2e7895d
comparison: 4703:2cfb7fb2dce7 vs 4970:33df1aeaebbf
@@ -1471,23 +1471,23 @@
 // Helper to recognize certain Klass fields which are invariant across
 // some group of array types (e.g., int[] or all T[] where T < Object).
 const Type*
 LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
                                  ciKlass* klass) const {
-  if (tkls->offset() == Klass::modifier_flags_offset_in_bytes() + (int)sizeof(oopDesc)) {
+  if (tkls->offset() == in_bytes(Klass::modifier_flags_offset())) {
     // The field is Klass::_modifier_flags. Return its (constant) value.
     // (Folds up the 2nd indirection in aClassConstant.getModifiers().)
     assert(this->Opcode() == Op_LoadI, "must load an int from _modifier_flags");
     return TypeInt::make(klass->modifier_flags());
   }
-  if (tkls->offset() == Klass::access_flags_offset_in_bytes() + (int)sizeof(oopDesc)) {
+  if (tkls->offset() == in_bytes(Klass::access_flags_offset())) {
     // The field is Klass::_access_flags. Return its (constant) value.
     // (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).)
     assert(this->Opcode() == Op_LoadI, "must load an int from _access_flags");
     return TypeInt::make(klass->access_flags());
   }
-  if (tkls->offset() == Klass::layout_helper_offset_in_bytes() + (int)sizeof(oopDesc)) {
+  if (tkls->offset() == in_bytes(Klass::layout_helper_offset())) {
     // The field is Klass::_layout_helper. Return its constant value if known.
     assert(this->Opcode() == Op_LoadI, "must load an int from _layout_helper");
     return TypeInt::make(klass->layout_helper());
   }
 
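
Note on the recurring change in this and several later hunks: the old call sites computed Klass field offsets as Klass::*_offset_in_bytes() + (int)sizeof(oopDesc), adding the metaobject header by hand; the new accessors return a ByteSize that already folds the header in, unwrapped with in_bytes(). A minimal sketch of the idiom, assuming HotSpot's ByteSize/in_bytes pair from globalDefinitions.hpp (the wrapper shown here is illustrative and the offset values are made up):

    // Illustrative stand-in for HotSpot's ByteSize: an opaque byte offset
    // that must be unwrapped explicitly, preventing accidental arithmetic.
    class ByteSize {
      int _v;
     public:
      explicit ByteSize(int v) : _v(v) {}
      friend int in_bytes(ByteSize b) { return b._v; }
    };

    // Hypothetical new-style accessor: the header size is baked into the
    // result, so callers compare tkls->offset() == in_bytes(..._offset())
    // with no "+ (int)sizeof(oopDesc)" correction at the call site.
    static ByteSize modifier_flags_offset() {
      const int header = 16;  // made-up header size for the sketch
      const int field  = 8;   // made-up field offset within Klass
      return ByteSize(header + field);
    }
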
@@ -1634,36 +1634,36 @@
     ciKlass* klass = tkls->klass();
     if (klass->is_loaded() && tkls->klass_is_exact()) {
       // We are loading a field from a Klass metaobject whose identity
       // is known at compile time (the type is "exact" or "precise").
       // Check for fields we know are maintained as constants by the VM.
-      if (tkls->offset() == Klass::super_check_offset_offset_in_bytes() + (int)sizeof(oopDesc)) {
+      if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
         // The field is Klass::_super_check_offset. Return its (constant) value.
         // (Folds up type checking code.)
         assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
         return TypeInt::make(klass->super_check_offset());
       }
       // Compute index into primary_supers array
-      juint depth = (tkls->offset() - (Klass::primary_supers_offset_in_bytes() + (int)sizeof(oopDesc))) / sizeof(klassOop);
+      juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(klassOop);
       // Check for overflowing; use unsigned compare to handle the negative case.
       if( depth < ciKlass::primary_super_limit() ) {
         // The field is an element of Klass::_primary_supers. Return its (constant) value.
         // (Folds up type checking code.)
         assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
         ciKlass *ss = klass->super_of_depth(depth);
         return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
       }
       const Type* aift = load_array_final_field(tkls, klass);
       if (aift != NULL) return aift;
-      if (tkls->offset() == in_bytes(arrayKlass::component_mirror_offset()) + (int)sizeof(oopDesc)
+      if (tkls->offset() == in_bytes(arrayKlass::component_mirror_offset())
          && klass->is_array_klass()) {
         // The field is arrayKlass::_component_mirror. Return its (constant) value.
         // (Folds up aClassConstant.getComponentType, common in Arrays.copyOf.)
         assert(Opcode() == Op_LoadP, "must load an oop from _component_mirror");
         return TypeInstPtr::make(klass->as_array_klass()->component_mirror());
       }
-      if (tkls->offset() == Klass::java_mirror_offset_in_bytes() + (int)sizeof(oopDesc)) {
+      if (tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
         // The field is Klass::_java_mirror. Return its (constant) value.
         // (Folds up the 2nd indirection in anObjConstant.getClass().)
         assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
         return TypeInstPtr::make(klass->java_mirror());
       }
@@ -1677,11 +1677,11 @@
       while( inner->is_obj_array_klass() )
         inner = inner->as_obj_array_klass()->base_element_type();
       if( inner->is_instance_klass() &&
           !inner->as_instance_klass()->flags().is_interface() ) {
         // Compute index into primary_supers array
-        juint depth = (tkls->offset() - (Klass::primary_supers_offset_in_bytes() + (int)sizeof(oopDesc))) / sizeof(klassOop);
+        juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(klassOop);
         // Check for overflowing; use unsigned compare to handle the negative case.
         if( depth < ciKlass::primary_super_limit() &&
             depth <= klass->super_depth() ) { // allow self-depth checks to handle self-check case
           // The field is an element of Klass::_primary_supers. Return its (constant) value.
           // (Folds up type checking code.)
@@ -1693,11 +1693,11 @@
     }
 
     // If the type is enough to determine that the thing is not an array,
     // we can give the layout_helper a positive interval type.
     // This will help short-circuit some reflective code.
-    if (tkls->offset() == Klass::layout_helper_offset_in_bytes() + (int)sizeof(oopDesc)
+    if (tkls->offset() == in_bytes(Klass::layout_helper_offset())
        && !klass->is_array_klass()      // not directly typed as an array
        && !klass->is_interface()        // specifically not Serializable & Cloneable
        && !klass->is_java_lang_Object() // not the supertype of all T[]
        ) {
      // Note: When interfaces are reliable, we can narrow the interface
@@ -1716,12 +1716,14 @@
   // (Also allow a variable load from a fresh array to produce zero.)
   const TypeOopPtr *tinst = tp->isa_oopptr();
   bool is_instance = (tinst != NULL) && tinst->is_known_instance_field();
   if (ReduceFieldZeroing || is_instance) {
     Node* value = can_see_stored_value(mem,phase);
-    if (value != NULL && value->is_Con())
+    if (value != NULL && value->is_Con()) {
+      assert(value->bottom_type()->higher_equal(_type),"sanity");
       return value->bottom_type();
+    }
   }
 
   if (is_instance) {
     // If we have an instance type and our memory input is the
     // programs's initial memory state, there is no matching store,
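
The added braces and assert tighten constant folding in LoadNode::Value(): a constant forwarded by can_see_stored_value() may be wider than the load's declared result type, and returning its bottom_type() unchanged would widen the load. A hedged illustration of the shape the assert rules out (hypothetical IR, not graph-construction code):

    //   StoreB mem, adr, ConI(0x180)   // StoreB narrows the value on its way to memory
    //   LoadB  mem, adr                // _type is TypeInt::BYTE, i.e. [-128..127]
    // can_see_stored_value() forwards ConI(0x180), whose bottom_type() is the
    // singleton [0x180..0x180] -- not higher_equal TypeInt::BYTE.  The narrow-load
    // Value() overrides added in the hunks below truncate such constants before
    // returning them, keeping this assert happy.
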
@@ -1757,10 +1759,24 @@
   }
   // Identity call will handle the case where truncation is not needed.
   return LoadNode::Ideal(phase, can_reshape);
 }
 
+const Type* LoadBNode::Value(PhaseTransform *phase) const {
+  Node* mem = in(MemNode::Memory);
+  Node* value = can_see_stored_value(mem,phase);
+  if (value != NULL && value->is_Con() &&
+      !value->bottom_type()->higher_equal(_type)) {
+    // If the input to the store does not fit with the load's result type,
+    // it must be truncated. We can't delay until Ideal call since
+    // a singleton Value is needed for split_thru_phi optimization.
+    int con = value->get_int();
+    return TypeInt::make((con << 24) >> 24);
+  }
+  return LoadNode::Value(phase);
+}
+
 //--------------------------LoadUBNode::Ideal-------------------------------------
 //
 // If the previous store is to the same address as this load,
 // and the value stored was larger than a byte, replace this load
 // with the value stored truncated to a byte. If no truncation is
@@ -1773,10 +1789,24 @@
     return new (phase->C, 3) AndINode(value, phase->intcon(0xFF));
   // Identity call will handle the case where truncation is not needed.
   return LoadNode::Ideal(phase, can_reshape);
 }
 
+const Type* LoadUBNode::Value(PhaseTransform *phase) const {
+  Node* mem = in(MemNode::Memory);
+  Node* value = can_see_stored_value(mem,phase);
+  if (value != NULL && value->is_Con() &&
+      !value->bottom_type()->higher_equal(_type)) {
+    // If the input to the store does not fit with the load's result type,
+    // it must be truncated. We can't delay until Ideal call since
+    // a singleton Value is needed for split_thru_phi optimization.
+    int con = value->get_int();
+    return TypeInt::make(con & 0xFF);
+  }
+  return LoadNode::Value(phase);
+}
+
 //--------------------------LoadUSNode::Ideal-------------------------------------
 //
 // If the previous store is to the same address as this load,
 // and the value stored was larger than a char, replace this load
 // with the value stored truncated to a char. If no truncation is
@@ -1789,10 +1819,24 @@
     return new (phase->C, 3) AndINode(value,phase->intcon(0xFFFF));
   // Identity call will handle the case where truncation is not needed.
   return LoadNode::Ideal(phase, can_reshape);
 }
 
+const Type* LoadUSNode::Value(PhaseTransform *phase) const {
+  Node* mem = in(MemNode::Memory);
+  Node* value = can_see_stored_value(mem,phase);
+  if (value != NULL && value->is_Con() &&
+      !value->bottom_type()->higher_equal(_type)) {
+    // If the input to the store does not fit with the load's result type,
+    // it must be truncated. We can't delay until Ideal call since
+    // a singleton Value is needed for split_thru_phi optimization.
+    int con = value->get_int();
+    return TypeInt::make(con & 0xFFFF);
+  }
+  return LoadNode::Value(phase);
+}
+
 //--------------------------LoadSNode::Ideal--------------------------------------
 //
 // If the previous store is to the same address as this load,
 // and the value stored was larger than a short, replace this load
 // with the value stored truncated to a short. If no truncation is
@@ -1805,10 +1849,24 @@
     Node *result = phase->transform( new (phase->C, 3) LShiftINode(value, phase->intcon(16)) );
     return new (phase->C, 3) RShiftINode(result, phase->intcon(16));
   }
   // Identity call will handle the case where truncation is not needed.
   return LoadNode::Ideal(phase, can_reshape);
+}
+
+const Type* LoadSNode::Value(PhaseTransform *phase) const {
+  Node* mem = in(MemNode::Memory);
+  Node* value = can_see_stored_value(mem,phase);
+  if (value != NULL && value->is_Con() &&
+      !value->bottom_type()->higher_equal(_type)) {
+    // If the input to the store does not fit with the load's result type,
+    // it must be truncated. We can't delay until Ideal call since
+    // a singleton Value is needed for split_thru_phi optimization.
+    int con = value->get_int();
+    return TypeInt::make((con << 16) >> 16);
+  }
+  return LoadNode::Value(phase);
 }
 
 //=============================================================================
 //----------------------------LoadKlassNode::make------------------------------
 // Polymorphic factory method:
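
The four Value() overrides added above (LoadB, LoadUB, LoadUS, LoadS) share one pattern: if the forwarded constant does not fit the load's result type, truncate it immediately rather than waiting for Ideal, because split_thru_phi needs a singleton type. The four expressions are ordinary two's-complement narrowing; a self-contained demonstration (helper names are made up, and like the HotSpot code it assumes arithmetic right shift of negative ints, which every supported platform provides):

    #include <cstdio>

    // Each body mirrors the argument of the corresponding TypeInt::make(...).
    static int trunc_b (int c) { return (c << 24) >> 24; }  // LoadB:  sign-extend low 8 bits
    static int trunc_ub(int c) { return c & 0xFF; }         // LoadUB: zero-extend low 8 bits
    static int trunc_us(int c) { return c & 0xFFFF; }       // LoadUS: zero-extend low 16 bits
    static int trunc_s (int c) { return (c << 16) >> 16; }  // LoadS:  sign-extend low 16 bits

    int main() {
      printf("%d %d\n", trunc_b(0x180),   trunc_ub(0x180));    // -128 128
      printf("%d %d\n", trunc_s(0x18000), trunc_us(0x18000));  // -32768 32768
      return 0;
    }
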
@@ -1936,22 +1994,22 @@
   if (tkls != NULL && !StressReflectiveCode) {
     ciKlass* klass = tkls->klass();
     if( !klass->is_loaded() )
       return _type;             // Bail out if not loaded
     if( klass->is_obj_array_klass() &&
-        (uint)tkls->offset() == objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)) {
+        tkls->offset() == in_bytes(objArrayKlass::element_klass_offset())) {
       ciKlass* elem = klass->as_obj_array_klass()->element_klass();
       // // Always returning precise element type is incorrect,
       // // e.g., element type could be object and array may contain strings
       // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);
 
       // The array's TypeKlassPtr was declared 'precise' or 'not precise'
       // according to the element type's subclassing.
       return TypeKlassPtr::make(tkls->ptr(), elem, 0/*offset*/);
     }
     if( klass->is_instance_klass() && tkls->klass_is_exact() &&
-        (uint)tkls->offset() == Klass::super_offset_in_bytes() + sizeof(oopDesc)) {
+        tkls->offset() == in_bytes(Klass::super_offset())) {
       ciKlass* sup = klass->as_instance_klass()->super();
       // The field is Klass::_super. Return its (constant) value.
       // (Folds up the 2nd indirection in aClassConstant.getSuperClass().)
       return sup ? TypeKlassPtr::make(sup) : TypePtr::NULL_PTR;
     }
@@ -2011,15 +2069,15 @@
     if (tkls != NULL && !tkls->empty()
         && (tkls->klass()->is_instance_klass() ||
             tkls->klass()->is_array_klass())
         && adr2->is_AddP()
        ) {
-      int mirror_field = Klass::java_mirror_offset_in_bytes();
+      int mirror_field = in_bytes(Klass::java_mirror_offset());
       if (offset == java_lang_Class::array_klass_offset_in_bytes()) {
         mirror_field = in_bytes(arrayKlass::component_mirror_offset());
       }
-      if (tkls->offset() == mirror_field + (int)sizeof(oopDesc)) {
+      if (tkls->offset() == mirror_field) {
         return adr2->in(AddPNode::Base);
       }
     }
   }
 }
@@ -2199,11 +2257,11 @@
 
   // Back-to-back stores to same address? Fold em up. Generally
   // unsafe if I have intervening uses... Also disallowed for StoreCM
   // since they must follow each StoreP operation. Redundant StoreCMs
   // are eliminated just before matching in final_graph_reshape.
-  if (mem->is_Store() && phase->eqv_uncast(mem->in(MemNode::Address), address) &&
+  if (mem->is_Store() && mem->in(MemNode::Address)->eqv_uncast(address) &&
       mem->Opcode() != Op_StoreCM) {
     // Looking at a dead closed cycle of memory?
     assert(mem != mem->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
 
     assert(Opcode() == mem->Opcode() ||
@@ -2272,20 +2330,20 @@
   Node* adr = in(MemNode::Address);
   Node* val = in(MemNode::ValueIn);
 
   // Load then Store? Then the Store is useless
   if (val->is_Load() &&
-      phase->eqv_uncast( val->in(MemNode::Address), adr ) &&
-      phase->eqv_uncast( val->in(MemNode::Memory ), mem ) &&
+      val->in(MemNode::Address)->eqv_uncast(adr) &&
+      val->in(MemNode::Memory )->eqv_uncast(mem) &&
       val->as_Load()->store_Opcode() == Opcode()) {
     return mem;
   }
 
   // Two stores in a row of the same value?
   if (mem->is_Store() &&
-      phase->eqv_uncast( mem->in(MemNode::Address), adr ) &&
-      phase->eqv_uncast( mem->in(MemNode::ValueIn), val ) &&
+      mem->in(MemNode::Address)->eqv_uncast(adr) &&
+      mem->in(MemNode::ValueIn)->eqv_uncast(val) &&
       mem->Opcode() == Opcode()) {
     return mem;
   }
 
   // Store of zero anywhere into a freshly-allocated object?
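
The StoreNode hunks above track an API move: equality-modulo-casts goes from a PhaseTransform helper, phase->eqv_uncast(a, b), to a method on the node itself, a->eqv_uncast(b). A plausible sketch of the node-side helper, assuming Node::uncast() strips no-op cast chains (ConstraintCast and friends) as it does elsewhere in C2; this is a sketch, not a copy of node.hpp:

    // Sketch: two nodes are equal modulo casts when stripping no-op casts
    // from both sides reaches the same underlying node.
    bool Node::eqv_uncast(const Node* n) const {
      return this->uncast() == n->uncast();
    }
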
@@ -2719,10 +2777,11 @@
   case Op_MemBarAcquireLock: return new(C, len) MemBarAcquireLockNode(C, atp, pn);
   case Op_MemBarReleaseLock: return new(C, len) MemBarReleaseLockNode(C, atp, pn);
   case Op_MemBarVolatile:    return new(C, len) MemBarVolatileNode(C, atp, pn);
   case Op_MemBarCPUOrder:    return new(C, len) MemBarCPUOrderNode(C, atp, pn);
   case Op_Initialize:        return new(C, len) InitializeNode(C, atp, pn);
+  case Op_MemBarStoreStore:  return new(C, len) MemBarStoreStoreNode(C, atp, pn);
   default: ShouldNotReachHere(); return NULL;
   }
 }
 
 //------------------------------Ideal------------------------------------------
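
The factory picks up a case for the new MemBarStoreStoreNode, a store-store barrier that orders the stores initializing a fresh object ahead of the store that publishes it. A hedged usage sketch, assuming MemBarNode::make keeps the (compile, opcode, alias index) shape implied by the other barriers in this switch:

    // Sketch: request a store-store barrier over raw memory; the switch above
    // dispatches on the opcode and constructs the matching subclass.
    MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxRaw);
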
@@ -2868,11 +2927,11 @@
 // will be considered for capture by an InitializeNode. This puts a
 // reasonable limit on the complexity of optimized initializations.
 
 //---------------------------InitializeNode------------------------------------
 InitializeNode::InitializeNode(Compile* C, int adr_type, Node* rawoop)
-  : _is_complete(Incomplete),
+  : _is_complete(Incomplete), _does_not_escape(false),
     MemBarNode(C, adr_type, rawoop)
 {
   init_class_id(Class_Initialize);
 
   assert(adr_type == Compile::AliasIdxRaw, "only valid atp");
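
The constructor change seeds a new _does_not_escape flag on InitializeNode, letting escape analysis mark allocations whose object provably never escapes so that the associated MemBarStoreStore can be weakened or removed. Presumably the header pairs the field with trivial accessors along these lines (a sketch; the names simply mirror the field):

    // Sketch: flag plumbing on the InitializeNode declaration (memnode.hpp).
    bool does_not_escape() const { return _does_not_escape; }
    void set_does_not_escape()   { _does_not_escape = true; }
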