diff src/share/vm/opto/parse3.cpp @ 18041:52b4284cb496

Merge with jdk8u20-b26
author:  Gilles Duboscq <duboscq@ssw.jku.at>
date:    Wed, 15 Oct 2014 16:02:50 +0200
parents: 4ca6dc0799b6
--- a/src/share/vm/opto/parse3.cpp	Thu Oct 16 10:21:29 2014 +0200
+++ b/src/share/vm/opto/parse3.cpp	Wed Oct 15 16:02:50 2014 +0200
@@ -227,8 +227,13 @@
   } else {
     type = Type::get_const_basic_type(bt);
   }
+  if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
+    insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
+  }
   // Build the load.
-  Node* ld = make_load(NULL, adr, type, bt, adr_type, is_vol);
+  //
+  MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
+  Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, is_vol);
 
   // Adjust Java stack
   if (type2size[bt] == 1)
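
A minimal C++11 sketch of the ordering the hunk above establishes for a
volatile field load. This is an illustration only, not HotSpot code; the
names guard and read_volatile_like are invented for the example.

    #include <atomic>

    std::atomic<int> guard{0};

    int read_volatile_like() {
      // ~ Op_MemBarVolatile (StoreLoad): on a CPU that is not multiple copy
      // atomic (e.g. ppc64), a full fence is issued before the load so that
      // independent reads of independent writes (IRIW) stay consistent.
      std::atomic_thread_fence(std::memory_order_seq_cst);
      // ~ MemNode::acquire: no later memory access may float above the load.
      return guard.load(std::memory_order_acquire);
    }
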
@@ -288,6 +293,16 @@
   // Round doubles before storing
   if (bt == T_DOUBLE)  val = dstore_rounding(val);
 
+  // Conservatively release stores of object references.
+  const MemNode::MemOrd mo =
+    is_vol ?
+    // Volatile fields need releasing stores.
+    MemNode::release :
+    // Non-volatile fields also need releasing stores if they hold an
+    // object reference, because the object reference might point to
+    // a freshly created object.
+    StoreNode::release_if_reference(bt);
+
   // Store the value.
   Node* store;
   if (bt == T_OBJECT) {
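
The release_if_reference choice above encodes the safe-publication rule:
even a non-volatile store of an object reference must not be reordered with
the initialization of the freshly created object it may publish. A
standalone C++11 sketch of that reasoning (Point, shared and publish are
invented for the example, not HotSpot code):

    #include <atomic>

    struct Point { int x, y; Point() : x(1), y(2) {} };

    std::atomic<Point*> shared{nullptr};

    void publish() {
      Point* p = new Point();  // fields of the fresh object written here
      // ~ StoreNode::release_if_reference(T_OBJECT): release semantics keep
      // the field initialization from sinking below the publishing store.
      shared.store(p, std::memory_order_release);
    }
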
@@ -297,15 +312,24 @@
     } else {
       field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
     }
-    store = store_oop_to_object( control(), obj, adr, adr_type, val, field_type, bt);
+    store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
   } else {
-    store = store_to_memory( control(), adr, val, bt, adr_type, is_vol );
+    store = store_to_memory(control(), adr, val, bt, adr_type, mo, is_vol);
   }
 
   // If reference is volatile, prevent following volatile ops from
   // floating up before the volatile write.
   if (is_vol) {
-    insert_mem_bar(Op_MemBarVolatile); // Use fat membar
+    // If not multiple copy atomic, we do the MemBarVolatile before the load.
+    if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
+      insert_mem_bar(Op_MemBarVolatile); // Use fat membar
+    }
+    // Remember we wrote a volatile field.
+    // For not multiple copy atomic cpu (ppc64) a barrier should be issued
+    // in constructors which have such stores. See do_exits() in parse1.cpp.
+    if (is_field) {
+      set_wrote_volatile(true);
+    }
   }
 
   // If the field is final, the rules of Java say we are in <init> or <clinit>.
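
Together with the first hunk, this moves the fat MemBarVolatile: on a CPU
that is not multiple copy atomic the fence is issued before the volatile
load rather than after the volatile store, and constructors that wrote a
volatile field issue a barrier at exit (see do_exits() in parse1.cpp). A
C++11 sketch of the store side; the preprocessor switch is hypothetical and
merely stands in for support_IRIW_for_not_multiple_copy_atomic_cpu:

    #include <atomic>

    std::atomic<int> guard{0};

    void write_volatile_like(int v) {
      guard.store(v, std::memory_order_release);  // ~ MemNode::release
    #if !defined(NOT_MULTIPLE_COPY_ATOMIC)        // hypothetical switch
      // ~ trailing Op_MemBarVolatile on multiple-copy-atomic CPUs
      std::atomic_thread_fence(std::memory_order_seq_cst);
    #endif
    }
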
@@ -337,7 +361,7 @@
     //   should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
     // An oop is not scavengable if it is in the perm gen.
     if (stable_type != NULL && con_type != NULL && con_type->isa_oopptr())
-      con_type = con_type->join(stable_type);
+      con_type = con_type->join_speculative(stable_type);
     break;
 
   case T_ILLEGAL:
@@ -414,7 +438,7 @@
       Node*    elem   = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
       intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
       Node*    eaddr  = basic_plus_adr(array, offset);
-      store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT);
+      store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT, MemNode::unordered);
     }
   }
   return array;
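
The offset arithmetic in this hunk places oop slots immediately after the
array header, LogBytesPerHeapOop apart; the stores themselves stay
MemNode::unordered because the array is still being initialized. A tiny
worked example in plain C++; the header size and oop width below are
assumptions for a 64-bit VM without compressed oops, not values taken from
this code:

    #include <cstdint>

    const std::intptr_t kHeaderBytes        = 16; // assumed array header size
    const std::intptr_t kLogBytesPerHeapOop = 3;  // assumed 8-byte oops

    std::intptr_t element_offset(std::intptr_t i) {
      // offset = header + ((intptr_t)i << LogBytesPerHeapOop), as above
      return kHeaderBytes + (i << kLogBytesPerHeapOop);
    }
    // element_offset(0) == 16, element_offset(1) == 24, element_offset(2) == 32
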
@@ -503,7 +527,7 @@
       // Fill-in it with values
       for (j = 0; j < ndimensions; j++) {
         Node *dims_elem = array_element_address(dims, intcon(j), T_INT);
-        store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS);
+        store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS, MemNode::unordered);
       }
     }
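
These dimension stores likewise stay MemNode::unordered because the dims
array they fill was allocated just above this hunk and is still invisible to
other threads; ordering matters only when the result is published. A small
C++11 sketch of that pattern (fill_and_publish and published are invented
names):

    #include <atomic>

    std::atomic<int*> published{nullptr};

    void fill_and_publish(int n, const int* lengths) {
      int* dims = new int[n];
      for (int j = 0; j < n; j++) {
        dims[j] = lengths[j];  // plain stores: dims is still thread-local
      }
      published.store(dims, std::memory_order_release);  // publication point
    }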