Mercurial > hg > graal-jvmci-8
comparison src/share/vm/opto/memnode.cpp @ 18041:52b4284cb496
Merge with jdk8u20-b26
author | Gilles Duboscq <duboscq@ssw.jku.at> |
---|---|
date | Wed, 15 Oct 2014 16:02:50 +0200 |
parents | 89152779163c 78bbf4d43a14 |
children | 7848fc12602b |
comparison
equal
deleted
inserted
replaced
17606:45d7b2c7029d | 18041:52b4284cb496 |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. | 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | 4 * |
5 * This code is free software; you can redistribute it and/or modify it | 5 * This code is free software; you can redistribute it and/or modify it |
6 * under the terms of the GNU General Public License version 2 only, as | 6 * under the terms of the GNU General Public License version 2 only, as |
7 * published by the Free Software Foundation. | 7 * published by the Free Software Foundation. |
304 // Note: C++ will not remove it since the call has side effect. | 304 // Note: C++ will not remove it since the call has side effect. |
305 if (t_adr->isa_oopptr()) { | 305 if (t_adr->isa_oopptr()) { |
306 int alias_idx = phase->C->get_alias_index(t_adr->is_ptr()); | 306 int alias_idx = phase->C->get_alias_index(t_adr->is_ptr()); |
307 } | 307 } |
308 | 308 |
309 #ifdef ASSERT | |
310 Node* base = NULL; | 309 Node* base = NULL; |
311 if (address->is_AddP()) | 310 if (address->is_AddP()) { |
312 base = address->in(AddPNode::Base); | 311 base = address->in(AddPNode::Base); |
312 } | |
313 if (base != NULL && phase->type(base)->higher_equal(TypePtr::NULL_PTR) && | 313 if (base != NULL && phase->type(base)->higher_equal(TypePtr::NULL_PTR) && |
314 !t_adr->isa_rawptr()) { | 314 !t_adr->isa_rawptr()) { |
315 // Note: raw address has TOP base and top->higher_equal(TypePtr::NULL_PTR) is true. | 315 // Note: raw address has TOP base and top->higher_equal(TypePtr::NULL_PTR) is true. |
316 Compile* C = phase->C; | 316 // Skip this node optimization if its address has TOP base. |
317 tty->cr(); | 317 return NodeSentinel; // caller will return NULL |
318 tty->print_cr("===== NULL+offs not RAW address ====="); | 318 } |
319 if (C->is_dead_node(this->_idx)) tty->print_cr("'this' is dead"); | |
320 if ((ctl != NULL) && C->is_dead_node(ctl->_idx)) tty->print_cr("'ctl' is dead"); | |
321 if (C->is_dead_node(mem->_idx)) tty->print_cr("'mem' is dead"); | |
322 if (C->is_dead_node(address->_idx)) tty->print_cr("'address' is dead"); | |
323 if (C->is_dead_node(base->_idx)) tty->print_cr("'base' is dead"); | |
324 tty->cr(); | |
325 base->dump(1); | |
326 tty->cr(); | |
327 this->dump(2); | |
328 tty->print("this->adr_type(): "); adr_type()->dump(); tty->cr(); | |
329 tty->print("phase->type(address): "); t_adr->dump(); tty->cr(); | |
330 tty->print("phase->type(base): "); phase->type(address)->dump(); tty->cr(); | |
331 tty->cr(); | |
332 } | |
333 assert(base == NULL || t_adr->isa_rawptr() || | |
334 !phase->type(base)->higher_equal(TypePtr::NULL_PTR), "NULL+offs not RAW address?"); | |
335 #endif | |
336 | 319 |
337 // Avoid independent memory operations | 320 // Avoid independent memory operations |
338 Node* old_mem = mem; | 321 Node* old_mem = mem; |
339 | 322 |
340 // The code which unhooks non-raw memories from complete (macro-expanded) | 323 // The code which unhooks non-raw memories from complete (macro-expanded) |
655 "must stay in the original alias category"); | 638 "must stay in the original alias category"); |
656 // The type of the address must be contained in the adr_type, | 639 // The type of the address must be contained in the adr_type, |
657 // disregarding "null"-ness. | 640 // disregarding "null"-ness. |
658 // (We make an exception for TypeRawPtr::BOTTOM, which is a bit bucket.) | 641 // (We make an exception for TypeRawPtr::BOTTOM, which is a bit bucket.) |
659 const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr(); | 642 const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr(); |
660 assert(cross_check->meet(tp_notnull) == cross_check, | 643 assert(cross_check->meet(tp_notnull) == cross_check->remove_speculative(), |
661 "real address must not escape from expected memory type"); | 644 "real address must not escape from expected memory type"); |
662 } | 645 } |
663 #endif | 646 #endif |
664 return tp; | 647 return tp; |
665 } | 648 } |
905 } | 888 } |
906 #endif | 889 #endif |
907 | 890 |
908 //----------------------------LoadNode::make----------------------------------- | 891 //----------------------------LoadNode::make----------------------------------- |
909 // Polymorphic factory method: | 892 // Polymorphic factory method: |
910 Node *LoadNode::make( PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt ) { | 893 Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo) { |
911 Compile* C = gvn.C; | 894 Compile* C = gvn.C; |
912 | 895 |
913 // sanity check the alias category against the created node type | 896 // sanity check the alias category against the created node type |
914 assert(!(adr_type->isa_oopptr() && | 897 assert(!(adr_type->isa_oopptr() && |
915 adr_type->offset() == oopDesc::klass_offset_in_bytes()), | 898 adr_type->offset() == oopDesc::klass_offset_in_bytes()), |
921 assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw || | 904 assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw || |
922 // oop will be recorded in oop map if load crosses safepoint | 905 // oop will be recorded in oop map if load crosses safepoint |
923 rt->isa_oopptr() || is_immutable_value(adr), | 906 rt->isa_oopptr() || is_immutable_value(adr), |
924 "raw memory operations should have control edge"); | 907 "raw memory operations should have control edge"); |
925 switch (bt) { | 908 switch (bt) { |
926 case T_BOOLEAN: return new (C) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int() ); | 909 case T_BOOLEAN: return new (C) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(), mo); |
927 case T_BYTE: return new (C) LoadBNode (ctl, mem, adr, adr_type, rt->is_int() ); | 910 case T_BYTE: return new (C) LoadBNode (ctl, mem, adr, adr_type, rt->is_int(), mo); |
928 case T_INT: return new (C) LoadINode (ctl, mem, adr, adr_type, rt->is_int() ); | 911 case T_INT: return new (C) LoadINode (ctl, mem, adr, adr_type, rt->is_int(), mo); |
929 case T_CHAR: return new (C) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int() ); | 912 case T_CHAR: return new (C) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(), mo); |
930 case T_SHORT: return new (C) LoadSNode (ctl, mem, adr, adr_type, rt->is_int() ); | 913 case T_SHORT: return new (C) LoadSNode (ctl, mem, adr, adr_type, rt->is_int(), mo); |
931 case T_LONG: return new (C) LoadLNode (ctl, mem, adr, adr_type, rt->is_long() ); | 914 case T_LONG: return new (C) LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo); |
932 case T_FLOAT: return new (C) LoadFNode (ctl, mem, adr, adr_type, rt ); | 915 case T_FLOAT: return new (C) LoadFNode (ctl, mem, adr, adr_type, rt, mo); |
933 case T_DOUBLE: return new (C) LoadDNode (ctl, mem, adr, adr_type, rt ); | 916 case T_DOUBLE: return new (C) LoadDNode (ctl, mem, adr, adr_type, rt, mo); |
934 case T_ADDRESS: return new (C) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr() ); | 917 case T_ADDRESS: return new (C) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(), mo); |
935 case T_OBJECT: | 918 case T_OBJECT: |
936 #ifdef _LP64 | 919 #ifdef _LP64 |
937 if (adr->bottom_type()->is_ptr_to_narrowoop()) { | 920 if (adr->bottom_type()->is_ptr_to_narrowoop()) { |
938 Node* load = gvn.transform(new (C) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop())); | 921 Node* load = gvn.transform(new (C) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo)); |
939 return new (C) DecodeNNode(load, load->bottom_type()->make_ptr()); | 922 return new (C) DecodeNNode(load, load->bottom_type()->make_ptr()); |
940 } else | 923 } else |
941 #endif | 924 #endif |
942 { | 925 { |
943 assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop"); | 926 assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop"); |
944 return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr()); | 927 return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr(), mo); |
945 } | 928 } |
946 } | 929 } |
947 ShouldNotReachHere(); | 930 ShouldNotReachHere(); |
948 return (LoadNode*)NULL; | 931 return (LoadNode*)NULL; |
949 } | 932 } |
950 | 933 |
951 LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt) { | 934 LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) { |
952 bool require_atomic = true; | 935 bool require_atomic = true; |
953 return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), require_atomic); | 936 return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, require_atomic); |
954 } | 937 } |
955 | 938 |
956 | 939 |
957 | 940 |
958 | 941 |
1000 // through any kind of MemBar but normal loads shouldn't skip | 983 // through any kind of MemBar but normal loads shouldn't skip |
1001 // through MemBarAcquire since they could allow them to move out of | 984 // through MemBarAcquire since they could allow them to move out of |
1002 // a synchronized region. | 985 // a synchronized region. |
1003 while (current->is_Proj()) { | 986 while (current->is_Proj()) { |
1004 int opc = current->in(0)->Opcode(); | 987 int opc = current->in(0)->Opcode(); |
1005 if ((final && (opc == Op_MemBarAcquire || opc == Op_MemBarAcquireLock)) || | 988 if ((final && (opc == Op_MemBarAcquire || |
1006 opc == Op_MemBarRelease || opc == Op_MemBarCPUOrder || | 989 opc == Op_MemBarAcquireLock || |
1007 opc == Op_MemBarReleaseLock) { | 990 opc == Op_LoadFence)) || |
991 opc == Op_MemBarRelease || | |
992 opc == Op_StoreFence || | |
993 opc == Op_MemBarReleaseLock || | |
994 opc == Op_MemBarCPUOrder) { | |
1008 Node* mem = current->in(0)->in(TypeFunc::Memory); | 995 Node* mem = current->in(0)->in(TypeFunc::Memory); |
1009 if (mem->is_MergeMem()) { | 996 if (mem->is_MergeMem()) { |
1010 MergeMemNode* merge = mem->as_MergeMem(); | 997 MergeMemNode* merge = mem->as_MergeMem(); |
1011 Node* new_st = merge->memory_at(alias_idx); | 998 Node* new_st = merge->memory_at(alias_idx); |
1012 if (new_st == merge->base_memory()) { | 999 if (new_st == merge->base_memory()) { |
1587 return NULL; | 1574 return NULL; |
1588 } | 1575 } |
1589 | 1576 |
1590 // Try to constant-fold a stable array element. | 1577 // Try to constant-fold a stable array element. |
1591 static const Type* fold_stable_ary_elem(const TypeAryPtr* ary, int off, BasicType loadbt) { | 1578 static const Type* fold_stable_ary_elem(const TypeAryPtr* ary, int off, BasicType loadbt) { |
1579 assert(ary->const_oop(), "array should be constant"); | |
1592 assert(ary->is_stable(), "array should be stable"); | 1580 assert(ary->is_stable(), "array should be stable"); |
1593 | 1581 |
1594 if (ary->const_oop() != NULL) { | 1582 // Decode the results of GraphKit::array_element_address. |
1595 // Decode the results of GraphKit::array_element_address. | 1583 ciArray* aobj = ary->const_oop()->as_array(); |
1596 ciArray* aobj = ary->const_oop()->as_array(); | 1584 ciConstant con = aobj->element_value_by_offset(off); |
1597 ciConstant con = aobj->element_value_by_offset(off); | 1585 |
1598 | 1586 if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) { |
1599 if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) { | 1587 const Type* con_type = Type::make_from_constant(con); |
1600 const Type* con_type = Type::make_from_constant(con); | 1588 if (con_type != NULL) { |
1601 if (con_type != NULL) { | 1589 if (con_type->isa_aryptr()) { |
1602 if (con_type->isa_aryptr()) { | 1590 // Join with the array element type, in case it is also stable. |
1603 // Join with the array element type, in case it is also stable. | 1591 int dim = ary->stable_dimension(); |
1604 int dim = ary->stable_dimension(); | 1592 con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1); |
1605 con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1); | 1593 } |
1606 } | 1594 if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) { |
1607 if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) { | 1595 con_type = con_type->make_narrowoop(); |
1608 con_type = con_type->make_narrowoop(); | 1596 } |
1609 } | |
1610 #ifndef PRODUCT | 1597 #ifndef PRODUCT |
1611 if (TraceIterativeGVN) { | 1598 if (TraceIterativeGVN) { |
1612 tty->print("FoldStableValues: array element [off=%d]: con_type=", off); | 1599 tty->print("FoldStableValues: array element [off=%d]: con_type=", off); |
1613 con_type->dump(); tty->cr(); | 1600 con_type->dump(); tty->cr(); |
1614 } | 1601 } |
1615 #endif //PRODUCT | 1602 #endif //PRODUCT |
1616 return con_type; | 1603 return con_type; |
1617 } | 1604 } |
1618 } | 1605 } |
1619 } | |
1620 | |
1621 return NULL; | 1606 return NULL; |
1622 } | 1607 } |
1623 | 1608 |
1624 //------------------------------Value----------------------------------------- | 1609 //------------------------------Value----------------------------------------- |
1625 const Type *LoadNode::Value( PhaseTransform *phase ) const { | 1610 const Type *LoadNode::Value( PhaseTransform *phase ) const { |
1635 Compile* C = phase->C; | 1620 Compile* C = phase->C; |
1636 | 1621 |
1637 // Try to guess loaded type from pointer type | 1622 // Try to guess loaded type from pointer type |
1638 if (tp->isa_aryptr()) { | 1623 if (tp->isa_aryptr()) { |
1639 const TypeAryPtr* ary = tp->is_aryptr(); | 1624 const TypeAryPtr* ary = tp->is_aryptr(); |
1640 const Type *t = ary->elem(); | 1625 const Type* t = ary->elem(); |
1641 | 1626 |
1642 // Determine whether the reference is beyond the header or not, by comparing | 1627 // Determine whether the reference is beyond the header or not, by comparing |
1643 // the offset against the offset of the start of the array's data. | 1628 // the offset against the offset of the start of the array's data. |
1644 // Different array types begin at slightly different offsets (12 vs. 16). | 1629 // Different array types begin at slightly different offsets (12 vs. 16). |
1645 // We choose T_BYTE as an example base type that is least restrictive | 1630 // We choose T_BYTE as an example base type that is least restrictive |
1647 // possible base offset. | 1632 // possible base offset. |
1648 const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE); | 1633 const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE); |
1649 const bool off_beyond_header = ((uint)off >= (uint)min_base_off); | 1634 const bool off_beyond_header = ((uint)off >= (uint)min_base_off); |
1650 | 1635 |
1651 // Try to constant-fold a stable array element. | 1636 // Try to constant-fold a stable array element. |
1652 if (FoldStableValues && ary->is_stable()) { | 1637 if (FoldStableValues && ary->is_stable() && ary->const_oop() != NULL) { |
1653 // Make sure the reference is not into the header | 1638 // Make sure the reference is not into the header and the offset is constant |
1654 if (off_beyond_header && off != Type::OffsetBot) { | 1639 if (off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) { |
1655 assert(adr->is_AddP() && adr->in(AddPNode::Offset)->is_Con(), "offset is a constant"); | |
1656 const Type* con_type = fold_stable_ary_elem(ary, off, memory_type()); | 1640 const Type* con_type = fold_stable_ary_elem(ary, off, memory_type()); |
1657 if (con_type != NULL) { | 1641 if (con_type != NULL) { |
1658 return con_type; | 1642 return con_type; |
1659 } | 1643 } |
1660 } | 1644 } |
1679 && (_type->isa_vect() == NULL) | 1663 && (_type->isa_vect() == NULL) |
1680 && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) { | 1664 && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) { |
1681 // t might actually be lower than _type, if _type is a unique | 1665 // t might actually be lower than _type, if _type is a unique |
1682 // concrete subclass of abstract class t. | 1666 // concrete subclass of abstract class t. |
1683 if (off_beyond_header) { // is the offset beyond the header? | 1667 if (off_beyond_header) { // is the offset beyond the header? |
1684 const Type* jt = t->join(_type); | 1668 const Type* jt = t->join_speculative(_type); |
1685 // In any case, do not allow the join, per se, to empty out the type. | 1669 // In any case, do not allow the join, per se, to empty out the type. |
1686 if (jt->empty() && !t->empty()) { | 1670 if (jt->empty() && !t->empty()) { |
1687 // This can happen if an interface-typed array narrows to a class type. | 1671 // This can happen if an interface-typed array narrows to a class type. |
1688 jt = _type; | 1672 jt = _type; |
1689 } | 1673 } |
2030 const TypePtr *adr_type = adr->bottom_type()->isa_ptr(); | 2014 const TypePtr *adr_type = adr->bottom_type()->isa_ptr(); |
2031 assert(adr_type != NULL, "expecting TypeKlassPtr"); | 2015 assert(adr_type != NULL, "expecting TypeKlassPtr"); |
2032 #ifdef _LP64 | 2016 #ifdef _LP64 |
2033 if (adr_type->is_ptr_to_narrowklass()) { | 2017 if (adr_type->is_ptr_to_narrowklass()) { |
2034 assert(UseCompressedClassPointers, "no compressed klasses"); | 2018 assert(UseCompressedClassPointers, "no compressed klasses"); |
2035 Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass())); | 2019 Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), MemNode::unordered)); |
2036 return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr()); | 2020 return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr()); |
2037 } | 2021 } |
2038 #endif | 2022 #endif |
2039 assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop"); | 2023 assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop"); |
2040 return new (C) LoadKlassNode(ctl, mem, adr, at, tk); | 2024 return new (C) LoadKlassNode(ctl, mem, adr, at, tk, MemNode::unordered); |
2041 } | 2025 } |
2042 | 2026 |
2043 //------------------------------Value------------------------------------------ | 2027 //------------------------------Value------------------------------------------ |
2044 const Type *LoadKlassNode::Value( PhaseTransform *phase ) const { | 2028 const Type *LoadKlassNode::Value( PhaseTransform *phase ) const { |
2045 return klass_value_common(phase); | 2029 return klass_value_common(phase); |
2350 } | 2334 } |
2351 | 2335 |
2352 //============================================================================= | 2336 //============================================================================= |
2353 //---------------------------StoreNode::make----------------------------------- | 2337 //---------------------------StoreNode::make----------------------------------- |
2354 // Polymorphic factory method: | 2338 // Polymorphic factory method: |
2355 StoreNode* StoreNode::make( PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt ) { | 2339 StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo) { |
2340 assert((mo == unordered || mo == release), "unexpected"); | |
2356 Compile* C = gvn.C; | 2341 Compile* C = gvn.C; |
2357 assert( C->get_alias_index(adr_type) != Compile::AliasIdxRaw || | 2342 assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw || |
2358 ctl != NULL, "raw memory operations should have control edge"); | 2343 ctl != NULL, "raw memory operations should have control edge"); |
2359 | 2344 |
2360 switch (bt) { | 2345 switch (bt) { |
2361 case T_BOOLEAN: | 2346 case T_BOOLEAN: |
2362 case T_BYTE: return new (C) StoreBNode(ctl, mem, adr, adr_type, val); | 2347 case T_BYTE: return new (C) StoreBNode(ctl, mem, adr, adr_type, val, mo); |
2363 case T_INT: return new (C) StoreINode(ctl, mem, adr, adr_type, val); | 2348 case T_INT: return new (C) StoreINode(ctl, mem, adr, adr_type, val, mo); |
2364 case T_CHAR: | 2349 case T_CHAR: |
2365 case T_SHORT: return new (C) StoreCNode(ctl, mem, adr, adr_type, val); | 2350 case T_SHORT: return new (C) StoreCNode(ctl, mem, adr, adr_type, val, mo); |
2366 case T_LONG: return new (C) StoreLNode(ctl, mem, adr, adr_type, val); | 2351 case T_LONG: return new (C) StoreLNode(ctl, mem, adr, adr_type, val, mo); |
2367 case T_FLOAT: return new (C) StoreFNode(ctl, mem, adr, adr_type, val); | 2352 case T_FLOAT: return new (C) StoreFNode(ctl, mem, adr, adr_type, val, mo); |
2368 case T_DOUBLE: return new (C) StoreDNode(ctl, mem, adr, adr_type, val); | 2353 case T_DOUBLE: return new (C) StoreDNode(ctl, mem, adr, adr_type, val, mo); |
2369 case T_METADATA: | 2354 case T_METADATA: |
2370 case T_ADDRESS: | 2355 case T_ADDRESS: |
2371 case T_OBJECT: | 2356 case T_OBJECT: |
2372 #ifdef _LP64 | 2357 #ifdef _LP64 |
2373 if (adr->bottom_type()->is_ptr_to_narrowoop()) { | 2358 if (adr->bottom_type()->is_ptr_to_narrowoop()) { |
2374 val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop())); | 2359 val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop())); |
2375 return new (C) StoreNNode(ctl, mem, adr, adr_type, val); | 2360 return new (C) StoreNNode(ctl, mem, adr, adr_type, val, mo); |
2376 } else if (adr->bottom_type()->is_ptr_to_narrowklass() || | 2361 } else if (adr->bottom_type()->is_ptr_to_narrowklass() || |
2377 (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() && | 2362 (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() && |
2378 adr->bottom_type()->isa_rawptr())) { | 2363 adr->bottom_type()->isa_rawptr())) { |
2379 val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass())); | 2364 val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass())); |
2380 return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val); | 2365 return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val, mo); |
2381 } | 2366 } |
2382 #endif | 2367 #endif |
2383 { | 2368 { |
2384 return new (C) StorePNode(ctl, mem, adr, adr_type, val); | 2369 return new (C) StorePNode(ctl, mem, adr, adr_type, val, mo); |
2385 } | 2370 } |
2386 } | 2371 } |
2387 ShouldNotReachHere(); | 2372 ShouldNotReachHere(); |
2388 return (StoreNode*)NULL; | 2373 return (StoreNode*)NULL; |
2389 } | 2374 } |
2390 | 2375 |
2391 StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val) { | 2376 StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) { |
2392 bool require_atomic = true; | 2377 bool require_atomic = true; |
2393 return new (C) StoreLNode(ctl, mem, adr, adr_type, val, require_atomic); | 2378 return new (C) StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic); |
2394 } | 2379 } |
2395 | 2380 |
2396 | 2381 |
2397 //--------------------------bottom_type---------------------------------------- | 2382 //--------------------------bottom_type---------------------------------------- |
2398 const Type *StoreNode::bottom_type() const { | 2383 const Type *StoreNode::bottom_type() const { |
2781 if( adr->Opcode() != Op_AddP ) Unimplemented(); | 2766 if( adr->Opcode() != Op_AddP ) Unimplemented(); |
2782 Node *base = adr->in(1); | 2767 Node *base = adr->in(1); |
2783 | 2768 |
2784 Node *zero = phase->makecon(TypeLong::ZERO); | 2769 Node *zero = phase->makecon(TypeLong::ZERO); |
2785 Node *off = phase->MakeConX(BytesPerLong); | 2770 Node *off = phase->MakeConX(BytesPerLong); |
2786 mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero); | 2771 mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false); |
2787 count--; | 2772 count--; |
2788 while( count-- ) { | 2773 while( count-- ) { |
2789 mem = phase->transform(mem); | 2774 mem = phase->transform(mem); |
2790 adr = phase->transform(new (phase->C) AddPNode(base,adr,off)); | 2775 adr = phase->transform(new (phase->C) AddPNode(base,adr,off)); |
2791 mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero); | 2776 mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false); |
2792 } | 2777 } |
2793 return mem; | 2778 return mem; |
2794 } | 2779 } |
2795 | 2780 |
2796 //----------------------------step_through---------------------------------- | 2781 //----------------------------step_through---------------------------------- |
2830 int unit = BytesPerLong; | 2815 int unit = BytesPerLong; |
2831 if ((offset % unit) != 0) { | 2816 if ((offset % unit) != 0) { |
2832 Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(offset)); | 2817 Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(offset)); |
2833 adr = phase->transform(adr); | 2818 adr = phase->transform(adr); |
2834 const TypePtr* atp = TypeRawPtr::BOTTOM; | 2819 const TypePtr* atp = TypeRawPtr::BOTTOM; |
2835 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT); | 2820 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered); |
2836 mem = phase->transform(mem); | 2821 mem = phase->transform(mem); |
2837 offset += BytesPerInt; | 2822 offset += BytesPerInt; |
2838 } | 2823 } |
2839 assert((offset % unit) == 0, ""); | 2824 assert((offset % unit) == 0, ""); |
2840 | 2825 |
2891 } | 2876 } |
2892 if (done_offset < end_offset) { // emit the final 32-bit store | 2877 if (done_offset < end_offset) { // emit the final 32-bit store |
2893 Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(done_offset)); | 2878 Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(done_offset)); |
2894 adr = phase->transform(adr); | 2879 adr = phase->transform(adr); |
2895 const TypePtr* atp = TypeRawPtr::BOTTOM; | 2880 const TypePtr* atp = TypeRawPtr::BOTTOM; |
2896 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT); | 2881 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered); |
2897 mem = phase->transform(mem); | 2882 mem = phase->transform(mem); |
2898 done_offset += BytesPerInt; | 2883 done_offset += BytesPerInt; |
2899 } | 2884 } |
2900 assert(done_offset == end_offset, ""); | 2885 assert(done_offset == end_offset, ""); |
2901 return mem; | 2886 return mem; |
2975 } | 2960 } |
2976 | 2961 |
2977 //------------------------------make------------------------------------------- | 2962 //------------------------------make------------------------------------------- |
2978 MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) { | 2963 MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) { |
2979 switch (opcode) { | 2964 switch (opcode) { |
2980 case Op_MemBarAcquire: return new(C) MemBarAcquireNode(C, atp, pn); | 2965 case Op_MemBarAcquire: return new(C) MemBarAcquireNode(C, atp, pn); |
2981 case Op_MemBarRelease: return new(C) MemBarReleaseNode(C, atp, pn); | 2966 case Op_LoadFence: return new(C) LoadFenceNode(C, atp, pn); |
2982 case Op_MemBarAcquireLock: return new(C) MemBarAcquireLockNode(C, atp, pn); | 2967 case Op_MemBarRelease: return new(C) MemBarReleaseNode(C, atp, pn); |
2983 case Op_MemBarReleaseLock: return new(C) MemBarReleaseLockNode(C, atp, pn); | 2968 case Op_StoreFence: return new(C) StoreFenceNode(C, atp, pn); |
2984 case Op_MemBarVolatile: return new(C) MemBarVolatileNode(C, atp, pn); | 2969 case Op_MemBarAcquireLock: return new(C) MemBarAcquireLockNode(C, atp, pn); |
2985 case Op_MemBarCPUOrder: return new(C) MemBarCPUOrderNode(C, atp, pn); | 2970 case Op_MemBarReleaseLock: return new(C) MemBarReleaseLockNode(C, atp, pn); |
2986 case Op_Initialize: return new(C) InitializeNode(C, atp, pn); | 2971 case Op_MemBarVolatile: return new(C) MemBarVolatileNode(C, atp, pn); |
2987 case Op_MemBarStoreStore: return new(C) MemBarStoreStoreNode(C, atp, pn); | 2972 case Op_MemBarCPUOrder: return new(C) MemBarCPUOrderNode(C, atp, pn); |
2988 default: ShouldNotReachHere(); return NULL; | 2973 case Op_Initialize: return new(C) InitializeNode(C, atp, pn); |
2974 case Op_MemBarStoreStore: return new(C) MemBarStoreStoreNode(C, atp, pn); | |
2975 default: ShouldNotReachHere(); return NULL; | |
2989 } | 2976 } |
2990 } | 2977 } |
2991 | 2978 |
2992 //------------------------------Ideal------------------------------------------ | 2979 //------------------------------Ideal------------------------------------------ |
2993 // Return a node which is more "ideal" than the current node. Strip out | 2980 // Return a node which is more "ideal" than the current node. Strip out |
3765 int nst = 0; | 3752 int nst = 0; |
3766 if (!split) { | 3753 if (!split) { |
3767 ++new_long; | 3754 ++new_long; |
3768 off[nst] = offset; | 3755 off[nst] = offset; |
3769 st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp, | 3756 st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp, |
3770 phase->longcon(con), T_LONG); | 3757 phase->longcon(con), T_LONG, MemNode::unordered); |
3771 } else { | 3758 } else { |
3772 // Omit either if it is a zero. | 3759 // Omit either if it is a zero. |
3773 if (con0 != 0) { | 3760 if (con0 != 0) { |
3774 ++new_int; | 3761 ++new_int; |
3775 off[nst] = offset; | 3762 off[nst] = offset; |
3776 st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp, | 3763 st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp, |
3777 phase->intcon(con0), T_INT); | 3764 phase->intcon(con0), T_INT, MemNode::unordered); |
3778 } | 3765 } |
3779 if (con1 != 0) { | 3766 if (con1 != 0) { |
3780 ++new_int; | 3767 ++new_int; |
3781 offset += BytesPerInt; | 3768 offset += BytesPerInt; |
3782 adr = make_raw_address(offset, phase); | 3769 adr = make_raw_address(offset, phase); |
3783 off[nst] = offset; | 3770 off[nst] = offset; |
3784 st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp, | 3771 st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp, |
3785 phase->intcon(con1), T_INT); | 3772 phase->intcon(con1), T_INT, MemNode::unordered); |
3786 } | 3773 } |
3787 } | 3774 } |
3788 | 3775 |
3789 // Insert second store first, then the first before the second. | 3776 // Insert second store first, then the first before the second. |
3790 // Insert each one just before any overlapping non-constant stores. | 3777 // Insert each one just before any overlapping non-constant stores. |
4034 for (uint i = InitializeNode::RawStores; i < req(); i++) { | 4021 for (uint i = InitializeNode::RawStores; i < req(); i++) { |
4035 Node* st = in(i); | 4022 Node* st = in(i); |
4036 intptr_t st_off = get_store_offset(st, phase); | 4023 intptr_t st_off = get_store_offset(st, phase); |
4037 if (st_off < 0) continue; // ignore dead garbage | 4024 if (st_off < 0) continue; // ignore dead garbage |
4038 if (last_off > st_off) { | 4025 if (last_off > st_off) { |
4039 tty->print_cr("*** bad store offset at %d: %d > %d", i, last_off, st_off); | 4026 tty->print_cr("*** bad store offset at %d: " INTX_FORMAT " > " INTX_FORMAT, i, last_off, st_off); |
4040 this->dump(2); | 4027 this->dump(2); |
4041 assert(false, "ascending store offsets"); | 4028 assert(false, "ascending store offsets"); |
4042 return false; | 4029 return false; |
4043 } | 4030 } |
4044 last_off = st_off + st->as_Store()->memory_size(); | 4031 last_off = st_off + st->as_Store()->memory_size(); |