diff src/share/vm/opto/memnode.cpp @ 18041:52b4284cb496

Merge with jdk8u20-b26
author Gilles Duboscq <duboscq@ssw.jku.at>
date Wed, 15 Oct 2014 16:02:50 +0200
parents 89152779163c 78bbf4d43a14
children 7848fc12602b
--- a/src/share/vm/opto/memnode.cpp	Thu Oct 16 10:21:29 2014 +0200
+++ b/src/share/vm/opto/memnode.cpp	Wed Oct 15 16:02:50 2014 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -306,33 +306,16 @@
     int alias_idx = phase->C->get_alias_index(t_adr->is_ptr());
   }
 
-#ifdef ASSERT
   Node* base = NULL;
-  if (address->is_AddP())
+  if (address->is_AddP()) {
     base = address->in(AddPNode::Base);
+  }
   if (base != NULL && phase->type(base)->higher_equal(TypePtr::NULL_PTR) &&
       !t_adr->isa_rawptr()) {
     // Note: raw address has TOP base and top->higher_equal(TypePtr::NULL_PTR) is true.
-    Compile* C = phase->C;
-    tty->cr();
-    tty->print_cr("===== NULL+offs not RAW address =====");
-    if (C->is_dead_node(this->_idx))    tty->print_cr("'this' is dead");
-    if ((ctl != NULL) && C->is_dead_node(ctl->_idx)) tty->print_cr("'ctl' is dead");
-    if (C->is_dead_node(mem->_idx))     tty->print_cr("'mem' is dead");
-    if (C->is_dead_node(address->_idx)) tty->print_cr("'address' is dead");
-    if (C->is_dead_node(base->_idx))    tty->print_cr("'base' is dead");
-    tty->cr();
-    base->dump(1);
-    tty->cr();
-    this->dump(2);
-    tty->print("this->adr_type():     "); adr_type()->dump(); tty->cr();
-    tty->print("phase->type(address): "); t_adr->dump(); tty->cr();
-    tty->print("phase->type(base):    "); phase->type(address)->dump(); tty->cr();
-    tty->cr();
+    // Skip this node optimization if its address has TOP base.
+    return NodeSentinel; // caller will return NULL
   }
-  assert(base == NULL || t_adr->isa_rawptr() ||
-        !phase->type(base)->higher_equal(TypePtr::NULL_PTR), "NULL+offs not RAW address?");
-#endif
 
   // Avoid independent memory operations
   Node* old_mem = mem;
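Note on the hunk above: MemNode::Ideal_common previously only dumped diagnostics and asserted (under ASSERT) when an oop address folded to NULL plus an offset; it now bails out unconditionally by returning NodeSentinel, which the caller turns into "no progress". A rough sketch of the caller-side convention used by the Ideal methods in this file (paraphrased for illustration, not part of this changeset):

    // Illustrative paraphrase of how an Ideal method consumes Ideal_common's result.
    Node* LoadNode::Ideal(PhaseGVN* phase, bool can_reshape) {
      Node* p = MemNode::Ideal_common(phase, can_reshape);
      if (p != NULL) {
        // NodeSentinel means "give up on this node"; report no transformation.
        return (p == NodeSentinel) ? NULL : p;
      }
      // ... load-specific idealizations continue here ...
      return NULL;
    }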
@@ -657,7 +640,7 @@
       // disregarding "null"-ness.
       // (We make an exception for TypeRawPtr::BOTTOM, which is a bit bucket.)
       const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr();
-      assert(cross_check->meet(tp_notnull) == cross_check,
+      assert(cross_check->meet(tp_notnull) == cross_check->remove_speculative(),
              "real address must not escape from expected memory type");
     }
     #endif
@@ -907,7 +890,7 @@
 
 //----------------------------LoadNode::make-----------------------------------
 // Polymorphic factory method:
-Node *LoadNode::make( PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt ) {
+Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo) {
   Compile* C = gvn.C;
 
   // sanity check the alias category against the created node type
@@ -923,34 +906,34 @@
           rt->isa_oopptr() || is_immutable_value(adr),
           "raw memory operations should have control edge");
   switch (bt) {
-  case T_BOOLEAN: return new (C) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int()    );
-  case T_BYTE:    return new (C) LoadBNode (ctl, mem, adr, adr_type, rt->is_int()    );
-  case T_INT:     return new (C) LoadINode (ctl, mem, adr, adr_type, rt->is_int()    );
-  case T_CHAR:    return new (C) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int()    );
-  case T_SHORT:   return new (C) LoadSNode (ctl, mem, adr, adr_type, rt->is_int()    );
-  case T_LONG:    return new (C) LoadLNode (ctl, mem, adr, adr_type, rt->is_long()   );
-  case T_FLOAT:   return new (C) LoadFNode (ctl, mem, adr, adr_type, rt              );
-  case T_DOUBLE:  return new (C) LoadDNode (ctl, mem, adr, adr_type, rt              );
-  case T_ADDRESS: return new (C) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr()    );
+  case T_BOOLEAN: return new (C) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(),  mo);
+  case T_BYTE:    return new (C) LoadBNode (ctl, mem, adr, adr_type, rt->is_int(),  mo);
+  case T_INT:     return new (C) LoadINode (ctl, mem, adr, adr_type, rt->is_int(),  mo);
+  case T_CHAR:    return new (C) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(),  mo);
+  case T_SHORT:   return new (C) LoadSNode (ctl, mem, adr, adr_type, rt->is_int(),  mo);
+  case T_LONG:    return new (C) LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo);
+  case T_FLOAT:   return new (C) LoadFNode (ctl, mem, adr, adr_type, rt,            mo);
+  case T_DOUBLE:  return new (C) LoadDNode (ctl, mem, adr, adr_type, rt,            mo);
+  case T_ADDRESS: return new (C) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(),  mo);
   case T_OBJECT:
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
-      Node* load  = gvn.transform(new (C) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop()));
+      Node* load  = gvn.transform(new (C) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo));
       return new (C) DecodeNNode(load, load->bottom_type()->make_ptr());
     } else
 #endif
     {
       assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
-      return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr());
+      return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr(), mo);
     }
   }
   ShouldNotReachHere();
   return (LoadNode*)NULL;
 }
 
-LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt) {
+LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
   bool require_atomic = true;
-  return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), require_atomic);
+  return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, require_atomic);
 }
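Note on the two hunks above (and the matching StoreNode::make hunk further down): the load/store factory methods now take an explicit MemNode::MemOrd argument, so every memory access carries its ordering (unordered, acquire, or release) from the moment it is created. A hypothetical call site might look like this (illustrative sketch only; 'is_volatile' and the surrounding variables are assumptions, not code from this changeset):

    // Pick an ordering when creating the load; a volatile read wants acquire.
    MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
    Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type,
                              TypeInt::INT, T_INT, mo);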
 
 
@@ -1002,9 +985,13 @@
     // a synchronized region.
     while (current->is_Proj()) {
       int opc = current->in(0)->Opcode();
-      if ((final && (opc == Op_MemBarAcquire || opc == Op_MemBarAcquireLock)) ||
-          opc == Op_MemBarRelease || opc == Op_MemBarCPUOrder ||
-          opc == Op_MemBarReleaseLock) {
+      if ((final && (opc == Op_MemBarAcquire ||
+                     opc == Op_MemBarAcquireLock ||
+                     opc == Op_LoadFence)) ||
+          opc == Op_MemBarRelease ||
+          opc == Op_StoreFence ||
+          opc == Op_MemBarReleaseLock ||
+          opc == Op_MemBarCPUOrder) {
         Node* mem = current->in(0)->in(TypeFunc::Memory);
         if (mem->is_MergeMem()) {
           MergeMemNode* merge = mem->as_MergeMem();
@@ -1589,35 +1576,33 @@
 
 // Try to constant-fold a stable array element.
 static const Type* fold_stable_ary_elem(const TypeAryPtr* ary, int off, BasicType loadbt) {
+  assert(ary->const_oop(), "array should be constant");
   assert(ary->is_stable(), "array should be stable");
 
-  if (ary->const_oop() != NULL) {
-    // Decode the results of GraphKit::array_element_address.
-    ciArray* aobj = ary->const_oop()->as_array();
-    ciConstant con = aobj->element_value_by_offset(off);
-
-    if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) {
-      const Type* con_type = Type::make_from_constant(con);
-      if (con_type != NULL) {
-        if (con_type->isa_aryptr()) {
-          // Join with the array element type, in case it is also stable.
-          int dim = ary->stable_dimension();
-          con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1);
-        }
-        if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) {
-          con_type = con_type->make_narrowoop();
-        }
+  // Decode the results of GraphKit::array_element_address.
+  ciArray* aobj = ary->const_oop()->as_array();
+  ciConstant con = aobj->element_value_by_offset(off);
+
+  if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) {
+    const Type* con_type = Type::make_from_constant(con);
+    if (con_type != NULL) {
+      if (con_type->isa_aryptr()) {
+        // Join with the array element type, in case it is also stable.
+        int dim = ary->stable_dimension();
+        con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1);
+      }
+      if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) {
+        con_type = con_type->make_narrowoop();
+      }
 #ifndef PRODUCT
-        if (TraceIterativeGVN) {
-          tty->print("FoldStableValues: array element [off=%d]: con_type=", off);
-          con_type->dump(); tty->cr();
-        }
+      if (TraceIterativeGVN) {
+        tty->print("FoldStableValues: array element [off=%d]: con_type=", off);
+        con_type->dump(); tty->cr();
+      }
 #endif //PRODUCT
-        return con_type;
-      }
+      return con_type;
     }
   }
-
   return NULL;
 }
 
@@ -1637,7 +1622,7 @@
   // Try to guess loaded type from pointer type
   if (tp->isa_aryptr()) {
     const TypeAryPtr* ary = tp->is_aryptr();
-    const Type *t = ary->elem();
+    const Type* t = ary->elem();
 
     // Determine whether the reference is beyond the header or not, by comparing
     // the offset against the offset of the start of the array's data.
@@ -1649,10 +1634,9 @@
     const bool off_beyond_header = ((uint)off >= (uint)min_base_off);
 
     // Try to constant-fold a stable array element.
-    if (FoldStableValues && ary->is_stable()) {
-      // Make sure the reference is not into the header
-      if (off_beyond_header && off != Type::OffsetBot) {
-        assert(adr->is_AddP() && adr->in(AddPNode::Offset)->is_Con(), "offset is a constant");
+    if (FoldStableValues && ary->is_stable() && ary->const_oop() != NULL) {
+      // Make sure the reference is not into the header and the offset is constant
+      if (off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) {
         const Type* con_type = fold_stable_ary_elem(ary, off, memory_type());
         if (con_type != NULL) {
           return con_type;
@@ -1681,7 +1665,7 @@
       // t might actually be lower than _type, if _type is a unique
       // concrete subclass of abstract class t.
       if (off_beyond_header) {  // is the offset beyond the header?
-        const Type* jt = t->join(_type);
+        const Type* jt = t->join_speculative(_type);
         // In any case, do not allow the join, per se, to empty out the type.
         if (jt->empty() && !t->empty()) {
           // This can happen if a interface-typed array narrows to a class type.
@@ -2032,12 +2016,12 @@
 #ifdef _LP64
   if (adr_type->is_ptr_to_narrowklass()) {
     assert(UseCompressedClassPointers, "no compressed klasses");
-    Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass()));
+    Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
     return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
   }
 #endif
   assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
-  return new (C) LoadKlassNode(ctl, mem, adr, at, tk);
+  return new (C) LoadKlassNode(ctl, mem, adr, at, tk, MemNode::unordered);
 }
 
 //------------------------------Value------------------------------------------
@@ -2352,45 +2336,46 @@
 //=============================================================================
 //---------------------------StoreNode::make-----------------------------------
 // Polymorphic factory method:
-StoreNode* StoreNode::make( PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt ) {
+StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo) {
+  assert((mo == unordered || mo == release), "unexpected");
   Compile* C = gvn.C;
-  assert( C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
-          ctl != NULL, "raw memory operations should have control edge");
+  assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
+         ctl != NULL, "raw memory operations should have control edge");
 
   switch (bt) {
   case T_BOOLEAN:
-  case T_BYTE:    return new (C) StoreBNode(ctl, mem, adr, adr_type, val);
-  case T_INT:     return new (C) StoreINode(ctl, mem, adr, adr_type, val);
+  case T_BYTE:    return new (C) StoreBNode(ctl, mem, adr, adr_type, val, mo);
+  case T_INT:     return new (C) StoreINode(ctl, mem, adr, adr_type, val, mo);
   case T_CHAR:
-  case T_SHORT:   return new (C) StoreCNode(ctl, mem, adr, adr_type, val);
-  case T_LONG:    return new (C) StoreLNode(ctl, mem, adr, adr_type, val);
-  case T_FLOAT:   return new (C) StoreFNode(ctl, mem, adr, adr_type, val);
-  case T_DOUBLE:  return new (C) StoreDNode(ctl, mem, adr, adr_type, val);
+  case T_SHORT:   return new (C) StoreCNode(ctl, mem, adr, adr_type, val, mo);
+  case T_LONG:    return new (C) StoreLNode(ctl, mem, adr, adr_type, val, mo);
+  case T_FLOAT:   return new (C) StoreFNode(ctl, mem, adr, adr_type, val, mo);
+  case T_DOUBLE:  return new (C) StoreDNode(ctl, mem, adr, adr_type, val, mo);
   case T_METADATA:
   case T_ADDRESS:
   case T_OBJECT:
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
       val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop()));
-      return new (C) StoreNNode(ctl, mem, adr, adr_type, val);
+      return new (C) StoreNNode(ctl, mem, adr, adr_type, val, mo);
     } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
                (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
                 adr->bottom_type()->isa_rawptr())) {
       val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
-      return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val);
+      return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val, mo);
     }
 #endif
     {
-      return new (C) StorePNode(ctl, mem, adr, adr_type, val);
+      return new (C) StorePNode(ctl, mem, adr, adr_type, val, mo);
     }
   }
   ShouldNotReachHere();
   return (StoreNode*)NULL;
 }
 
-StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val) {
+StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
   bool require_atomic = true;
-  return new (C) StoreLNode(ctl, mem, adr, adr_type, val, require_atomic);
+  return new (C) StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
 }
 
 
@@ -2783,12 +2768,12 @@
 
   Node *zero = phase->makecon(TypeLong::ZERO);
   Node *off  = phase->MakeConX(BytesPerLong);
-  mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero);
+  mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
   count--;
   while( count-- ) {
     mem = phase->transform(mem);
     adr = phase->transform(new (phase->C) AddPNode(base,adr,off));
-    mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero);
+    mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
   }
   return mem;
 }
@@ -2832,7 +2817,7 @@
     Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(offset));
     adr = phase->transform(adr);
     const TypePtr* atp = TypeRawPtr::BOTTOM;
-    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
+    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
     mem = phase->transform(mem);
     offset += BytesPerInt;
   }
@@ -2893,7 +2878,7 @@
     Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(done_offset));
     adr = phase->transform(adr);
     const TypePtr* atp = TypeRawPtr::BOTTOM;
-    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
+    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
     mem = phase->transform(mem);
     done_offset += BytesPerInt;
   }
@@ -2977,15 +2962,17 @@
 //------------------------------make-------------------------------------------
 MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
   switch (opcode) {
-  case Op_MemBarAcquire:   return new(C) MemBarAcquireNode(C,  atp, pn);
-  case Op_MemBarRelease:   return new(C) MemBarReleaseNode(C,  atp, pn);
-  case Op_MemBarAcquireLock: return new(C) MemBarAcquireLockNode(C,  atp, pn);
-  case Op_MemBarReleaseLock: return new(C) MemBarReleaseLockNode(C,  atp, pn);
-  case Op_MemBarVolatile:  return new(C) MemBarVolatileNode(C, atp, pn);
-  case Op_MemBarCPUOrder:  return new(C) MemBarCPUOrderNode(C, atp, pn);
-  case Op_Initialize:      return new(C) InitializeNode(C,     atp, pn);
-  case Op_MemBarStoreStore: return new(C) MemBarStoreStoreNode(C,  atp, pn);
-  default:                 ShouldNotReachHere(); return NULL;
+  case Op_MemBarAcquire:     return new(C) MemBarAcquireNode(C, atp, pn);
+  case Op_LoadFence:         return new(C) LoadFenceNode(C, atp, pn);
+  case Op_MemBarRelease:     return new(C) MemBarReleaseNode(C, atp, pn);
+  case Op_StoreFence:        return new(C) StoreFenceNode(C, atp, pn);
+  case Op_MemBarAcquireLock: return new(C) MemBarAcquireLockNode(C, atp, pn);
+  case Op_MemBarReleaseLock: return new(C) MemBarReleaseLockNode(C, atp, pn);
+  case Op_MemBarVolatile:    return new(C) MemBarVolatileNode(C, atp, pn);
+  case Op_MemBarCPUOrder:    return new(C) MemBarCPUOrderNode(C, atp, pn);
+  case Op_Initialize:        return new(C) InitializeNode(C, atp, pn);
+  case Op_MemBarStoreStore:  return new(C) MemBarStoreStoreNode(C, atp, pn);
+  default: ShouldNotReachHere(); return NULL;
   }
 }
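Note on the hunk above: MemBarNode::make gains cases for the new LoadFenceNode and StoreFenceNode, and the final-field barrier walk earlier in this diff now accepts Op_LoadFence/Op_StoreFence alongside the existing acquire/release barriers; these presumably back the Unsafe.loadFence()/storeFence() style fences. As a self-contained analogy (plain C++11, not HotSpot code), they act like one-directional fences:

    // C++11 analogy: a release fence (~ StoreFence) keeps prior stores from
    // sinking below it; an acquire fence (~ LoadFence) keeps later loads from
    // hoisting above it. Together they publish 'data' safely.
    #include <atomic>

    std::atomic<int>  data{0};
    std::atomic<bool> ready{false};

    void producer() {
      data.store(42, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_release);   // ~ StoreFence
      ready.store(true, std::memory_order_relaxed);
    }

    void consumer() {
      while (!ready.load(std::memory_order_relaxed)) { /* spin */ }
      std::atomic_thread_fence(std::memory_order_acquire);    // ~ LoadFence
      int v = data.load(std::memory_order_relaxed);           // sees 42
      (void)v;
    }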
 
@@ -3767,14 +3754,14 @@
       ++new_long;
       off[nst] = offset;
       st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
-                                  phase->longcon(con), T_LONG);
+                                  phase->longcon(con), T_LONG, MemNode::unordered);
     } else {
       // Omit either if it is a zero.
       if (con0 != 0) {
         ++new_int;
         off[nst]  = offset;
         st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
-                                    phase->intcon(con0), T_INT);
+                                    phase->intcon(con0), T_INT, MemNode::unordered);
       }
       if (con1 != 0) {
         ++new_int;
@@ -3782,7 +3769,7 @@
         adr = make_raw_address(offset, phase);
         off[nst]  = offset;
         st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
-                                    phase->intcon(con1), T_INT);
+                                    phase->intcon(con1), T_INT, MemNode::unordered);
       }
     }
 
@@ -4036,7 +4023,7 @@
     intptr_t st_off = get_store_offset(st, phase);
     if (st_off < 0)  continue;  // ignore dead garbage
     if (last_off > st_off) {
-      tty->print_cr("*** bad store offset at %d: %d > %d", i, last_off, st_off);
+      tty->print_cr("*** bad store offset at %d: " INTX_FORMAT " > " INTX_FORMAT, i, last_off, st_off);
       this->dump(2);
       assert(false, "ascending store offsets");
       return false;