changeset 6498:6bc8aa568cb9

moved MemoryBarriers to com.oracle.graal.api.code
author Doug Simon <doug.simon@oracle.com>
date Wed, 03 Oct 2012 18:14:17 +0200
parents 64b7dd2075c0
children e2e15876a157
files graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/MemoryBarriers.java graal/com.oracle.graal.asm.amd64/src/com/oracle/max/asm/amd64/AMD64.java graal/com.oracle.graal.asm.amd64/src/com/oracle/max/asm/amd64/AMD64Assembler.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/snippets/UnsafeSnippets.java graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/MembarNode.java graal/com.oracle.max.criutils/src/com/oracle/max/criutils/MemoryBarriers.java
diffstat 7 files changed, 119 insertions(+), 119 deletions(-)
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/MemoryBarriers.java	Wed Oct 03 18:14:17 2012 +0200
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2011, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.api.code;
+
+/**
+ * Constants and intrinsic definition for memory barriers.
+ *
+ * The documentation for each constant is taken from Doug Lea's
+ * <a href="http://gee.cs.oswego.edu/dl/jmm/cookbook.html">The JSR-133 Cookbook for Compiler Writers</a>.
+ * <p>
+ * The {@code JMM_*} constants capture the memory barriers necessary to implement the Java Memory Model
+ * with respect to volatile field accesses. Their values are explained by this
+ * comment from templateTable_i486.cpp in the HotSpot source code:
+ * <pre>
+ * Volatile variables demand their effects be made known to all CPU's in
+ * order.  Store buffers on most chips allow reads & writes to reorder; the
+ * JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
+ * memory barrier (i.e., it's not sufficient that the interpreter does not
+ * reorder volatile references, the hardware also must not reorder them).
+ *
+ * According to the new Java Memory Model (JMM):
+ * (1) All volatiles are serialized wrt to each other.
+ * ALSO reads & writes act as acquire & release, so:
+ * (2) A read cannot let unrelated NON-volatile memory refs that happen after
+ * the read float up to before the read.  It's OK for non-volatile memory refs
+ * that happen before the volatile read to float down below it.
+ * (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
+ * that happen BEFORE the write float down to after the write.  It's OK for
+ * non-volatile memory refs that happen after the volatile write to float up
+ * before it.
+ *
+ * We only put in barriers around volatile refs (they are expensive), not
+ * _between_ memory refs (which would require us to track the flavor of the
+ * previous memory refs).  Requirements (2) and (3) require some barriers
+ * before volatile stores and after volatile loads.  These nearly cover
+ * requirement (1) but miss the volatile-store-volatile-load case.  This final
+ * case is placed after volatile-stores although it could just as well go
+ * before volatile-loads.
+ * </pre>
+ */
+public class MemoryBarriers {
+
+    /**
+     * The sequence {@code Load1; LoadLoad; Load2} ensures that {@code Load1}'s data are loaded before data accessed
+     * by {@code Load2} and all subsequent load instructions are loaded. In general, explicit {@code LoadLoad}
+     * barriers are needed on processors that perform speculative loads and/or out-of-order processing in which
+     * waiting load instructions can bypass waiting stores. On processors that guarantee to always preserve load
+     * ordering, these barriers amount to no-ops.
+     */
+    public static final int LOAD_LOAD   = 0x0001;
+
+    /**
+     * The sequence {@code Load1; LoadStore; Store2} ensures that {@code Load1}'s data are loaded before all data
+     * associated with {@code Store2} and subsequent store instructions are flushed. {@code LoadStore} barriers are
+     * needed only on those out-of-order processors in which waiting store instructions can bypass loads.
+     */
+    public static final int LOAD_STORE  = 0x0002;
+
+    /**
+     * The sequence {@code Store1; StoreLoad; Load2} ensures that {@code Store1}'s data are made visible to other
+     * processors (i.e., flushed to main memory) before data accessed by {@code Load2} and all subsequent load
+     * instructions are loaded. {@code StoreLoad} barriers protect against a subsequent load incorrectly using
+     * {@code Store1}'s data value rather than that from a more recent store to the same location performed by a
+     * different processor. Because of this, on the processors discussed below, a {@code StoreLoad} is strictly
+     * necessary only for separating stores from subsequent loads of the same location(s) as were stored before the
+     * barrier. {@code StoreLoad} barriers are needed on nearly all recent multiprocessors, and are usually the most
+     * expensive kind. Part of the reason they are expensive is that they must disable mechanisms that ordinarily
+     * bypass cache to satisfy loads from write-buffers. This might be implemented by letting the buffer fully
+     * flush, among other possible stalls.
+     */
+    public static final int STORE_LOAD  = 0x0004;
+
+    /**
+     * The sequence {@code Store1; StoreStore; Store2} ensures that {@code Store1}'s data are visible to other
+     * processors (i.e., flushed to memory) before the data associated with {@code Store2} and all subsequent store
+     * instructions. In general, {@code StoreStore} barriers are needed on processors that do not otherwise
+     * guarantee strict ordering of flushes from write buffers and/or caches to other processors or main memory.
+     */
+    public static final int STORE_STORE = 0x0008;
+
+    public static final int JMM_PRE_VOLATILE_WRITE = LOAD_STORE | STORE_STORE;
+    public static final int JMM_POST_VOLATILE_WRITE = STORE_LOAD | STORE_STORE;
+    public static final int JMM_PRE_VOLATILE_READ = 0;
+    public static final int JMM_POST_VOLATILE_READ = LOAD_LOAD | LOAD_STORE;
+
+    public static String barriersString(int barriers) {
+        StringBuilder sb = new StringBuilder();
+        sb.append((barriers & LOAD_LOAD) != 0 ? "LOAD_LOAD " : "");
+        sb.append((barriers & LOAD_STORE) != 0 ? "LOAD_STORE " : "");
+        sb.append((barriers & STORE_LOAD) != 0 ? "STORE_LOAD " : "");
+        sb.append((barriers & STORE_STORE) != 0 ? "STORE_STORE " : "");
+        return sb.toString().trim();
+    }
+}
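
The JMM_* masks above are bit sets over the four barrier constants, and barriersString decodes such a mask back into its names. A minimal sketch of how they decompose (not part of this changeset; it assumes the com.oracle.graal.api.code project is on the classpath):

    import static com.oracle.graal.api.code.MemoryBarriers.*;

    public class MemoryBarriersDemo {
        public static void main(String[] args) {
            // A volatile write is preceded by LoadStore|StoreStore and followed by StoreLoad|StoreStore.
            System.out.println(barriersString(JMM_PRE_VOLATILE_WRITE));   // LOAD_STORE STORE_STORE
            System.out.println(barriersString(JMM_POST_VOLATILE_WRITE));  // STORE_LOAD STORE_STORE

            // A volatile read needs nothing before it and LoadLoad|LoadStore after it.
            System.out.println(barriersString(JMM_PRE_VOLATILE_READ));    // (empty string)
            System.out.println(barriersString(JMM_POST_VOLATILE_READ));   // LOAD_LOAD LOAD_STORE

            // Only the post-volatile-write mask carries the expensive StoreLoad barrier,
            // covering the volatile-store-volatile-load case described in the class comment.
            System.out.println((JMM_POST_VOLATILE_WRITE & STORE_LOAD) != 0);  // true
            System.out.println((JMM_PRE_VOLATILE_WRITE & STORE_LOAD) != 0);   // false
        }
    }
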
--- a/graal/com.oracle.graal.asm.amd64/src/com/oracle/max/asm/amd64/AMD64.java	Wed Oct 03 17:42:12 2012 +0200
+++ b/graal/com.oracle.graal.asm.amd64/src/com/oracle/max/asm/amd64/AMD64.java	Wed Oct 03 18:14:17 2012 +0200
@@ -22,9 +22,9 @@
  */
 package com.oracle.max.asm.amd64;
 
+import static com.oracle.graal.api.code.MemoryBarriers.*;
 import static com.oracle.graal.api.code.Register.RegisterFlag.*;
 import static com.oracle.graal.api.meta.Kind.*;
-import static com.oracle.max.criutils.MemoryBarriers.*;
 
 import com.oracle.graal.api.code.*;
 import com.oracle.graal.api.code.Register.*;
--- a/graal/com.oracle.graal.asm.amd64/src/com/oracle/max/asm/amd64/AMD64Assembler.java	Wed Oct 03 17:42:12 2012 +0200
+++ b/graal/com.oracle.graal.asm.amd64/src/com/oracle/max/asm/amd64/AMD64Assembler.java	Wed Oct 03 18:14:17 2012 +0200
@@ -22,11 +22,11 @@
  */
 package com.oracle.max.asm.amd64;
 
+import static com.oracle.graal.api.code.MemoryBarriers.*;
 import static com.oracle.graal.api.code.ValueUtil.*;
 import static com.oracle.max.asm.NumUtil.*;
 import static com.oracle.max.asm.amd64.AMD64.*;
 import static com.oracle.max.asm.amd64.AMD64AsmOptions.*;
-import static com.oracle.max.criutils.MemoryBarriers.*;
 
 import com.oracle.graal.api.code.*;
 import com.oracle.graal.api.meta.*;
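
The two assembler files above only swap the import, but they are where the barrier masks ultimately become instructions. As a point of reference from the JSR-133 cookbook cited in the class comment: on AMD64 the hardware already preserves LoadLoad, LoadStore and StoreStore ordering, so only a mask containing StoreLoad needs a real fence. A hypothetical helper (not taken from this changeset; the actual lowering in AMD64Assembler is not shown in this diff) illustrating that mapping:

    import static com.oracle.graal.api.code.MemoryBarriers.*;

    // Hypothetical illustration of barrier-to-instruction selection on AMD64.
    class Amd64BarrierSketch {

        static String fenceFor(int barriers) {
            // Only StoreLoad requires an instruction on AMD64 (mfence, or a locked no-op add).
            return (barriers & STORE_LOAD) != 0 ? "lock addl $0, (%rsp)" : "<no instruction needed>";
        }

        public static void main(String[] args) {
            System.out.println(fenceFor(JMM_POST_VOLATILE_WRITE)); // lock addl $0, (%rsp)
            System.out.println(fenceFor(JMM_PRE_VOLATILE_WRITE));  // <no instruction needed>
        }
    }
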
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java	Wed Oct 03 17:42:12 2012 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java	Wed Oct 03 18:14:17 2012 +0200
@@ -22,7 +22,7 @@
  */
 package com.oracle.graal.hotspot.meta;
 
-import static com.oracle.max.criutils.MemoryBarriers.*;
+import static com.oracle.graal.api.code.MemoryBarriers.*;
 
 import java.lang.reflect.*;
 import java.util.*;
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/snippets/UnsafeSnippets.java	Wed Oct 03 17:42:12 2012 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/snippets/UnsafeSnippets.java	Wed Oct 03 18:14:17 2012 +0200
@@ -22,11 +22,11 @@
  */
 package com.oracle.graal.hotspot.snippets;
 
+import com.oracle.graal.api.code.*;
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.nodes.extended.*;
 import com.oracle.graal.nodes.java.*;
 import com.oracle.graal.snippets.*;
-import com.oracle.max.criutils.*;
 
 /**
  * Snippets for {@link sun.misc.Unsafe} methods.
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/MembarNode.java	Wed Oct 03 17:42:12 2012 +0200
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/MembarNode.java	Wed Oct 03 18:14:17 2012 +0200
@@ -22,10 +22,10 @@
  */
 package com.oracle.graal.nodes.extended;
 
+import com.oracle.graal.api.code.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.nodes.spi.*;
 import com.oracle.graal.nodes.type.*;
-import com.oracle.max.criutils.*;
 
 /**
  * Creates a memory barrier.
--- a/graal/com.oracle.max.criutils/src/com/oracle/max/criutils/MemoryBarriers.java	Wed Oct 03 17:42:12 2012 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,114 +0,0 @@
-/*
- * Copyright (c) 2011, 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.max.criutils;
-
-/**
- * Constants and intrinsic definition for memory barriers.
- *
- * The documentation for each constant is taken from Doug Lea's
- * <a href="http://gee.cs.oswego.edu/dl/jmm/cookbook.html">The JSR-133 Cookbook for Compiler Writers</a>.
- * <p>
- * The {@code JMM_*} constants capture the memory barriers necessary to implement the Java Memory Model
- * with respect to volatile field accesses. Their values are explained by this
- * comment from templateTable_i486.cpp in the HotSpot source code:
- * <pre>
- * Volatile variables demand their effects be made known to all CPU's in
- * order.  Store buffers on most chips allow reads & writes to reorder; the
- * JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
- * memory barrier (i.e., it's not sufficient that the interpreter does not
- * reorder volatile references, the hardware also must not reorder them).
- *
- * According to the new Java Memory Model (JMM):
- * (1) All volatiles are serialized wrt to each other.
- * ALSO reads & writes act as acquire & release, so:
- * (2) A read cannot let unrelated NON-volatile memory refs that happen after
- * the read float up to before the read.  It's OK for non-volatile memory refs
- * that happen before the volatile read to float down below it.
- * (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
- * that happen BEFORE the write float down to after the write.  It's OK for
- * non-volatile memory refs that happen after the volatile write to float up
- * before it.
- *
- * We only put in barriers around volatile refs (they are expensive), not
- * _between_ memory refs (which would require us to track the flavor of the
- * previous memory refs).  Requirements (2) and (3) require some barriers
- * before volatile stores and after volatile loads.  These nearly cover
- * requirement (1) but miss the volatile-store-volatile-load case.  This final
- * case is placed after volatile-stores although it could just as well go
- * before volatile-loads.
- * </pre>
- */
-public class MemoryBarriers {
-
-    /**
-     * The sequence {@code Load1; LoadLoad; Load2} ensures that {@code Load1}'s data are loaded before data accessed
-     * by {@code Load2} and all subsequent load instructions are loaded. In general, explicit {@code LoadLoad}
-     * barriers are needed on processors that perform speculative loads and/or out-of-order processing in which
-     * waiting load instructions can bypass waiting stores. On processors that guarantee to always preserve load
-     * ordering, these barriers amount to no-ops.
-     */
-    public static final int LOAD_LOAD   = 0x0001;
-
-    /**
-     * The sequence {@code Load1; LoadStore; Store2} ensures that {@code Load1}'s data are loaded before all data
-     * associated with {@code Store2} and subsequent store instructions are flushed. {@code LoadStore} barriers are
-     * needed only on those out-of-order processors in which waiting store instructions can bypass loads.
-     */
-    public static final int LOAD_STORE  = 0x0002;
-
-    /**
-     * The sequence {@code Store1; StoreLoad; Load2} ensures that {@code Store1}'s data are made visible to other
-     * processors (i.e., flushed to main memory) before data accessed by {@code Load2} and all subsequent load
-     * instructions are loaded. {@code StoreLoad} barriers protect against a subsequent load incorrectly using
-     * {@code Store1}'s data value rather than that from a more recent store to the same location performed by a
-     * different processor. Because of this, on the processors discussed below, a {@code StoreLoad} is strictly
-     * necessary only for separating stores from subsequent loads of the same location(s) as were stored before the
-     * barrier. {@code StoreLoad} barriers are needed on nearly all recent multiprocessors, and are usually the most
-     * expensive kind. Part of the reason they are expensive is that they must disable mechanisms that ordinarily
-     * bypass cache to satisfy loads from write-buffers. This might be implemented by letting the buffer fully
-     * flush, among other possible stalls.
-     */
-    public static final int STORE_LOAD  = 0x0004;
-
-    /**
-     * The sequence {@code Store1; StoreStore; Store2} ensures that {@code Store1}'s data are visible to other
-     * processors (i.e., flushed to memory) before the data associated with {@code Store2} and all subsequent store
-     * instructions. In general, {@code StoreStore} barriers are needed on processors that do not otherwise
-     * guarantee strict ordering of flushes from write buffers and/or caches to other processors or main memory.
-     */
-    public static final int STORE_STORE = 0x0008;
-
-    public static final int JMM_PRE_VOLATILE_WRITE = LOAD_STORE | STORE_STORE;
-    public static final int JMM_POST_VOLATILE_WRITE = STORE_LOAD | STORE_STORE;
-    public static final int JMM_PRE_VOLATILE_READ = 0;
-    public static final int JMM_POST_VOLATILE_READ = LOAD_LOAD | LOAD_STORE;
-
-    public static String barriersString(int barriers) {
-        StringBuilder sb = new StringBuilder();
-        sb.append((barriers & LOAD_LOAD) != 0 ? "LOAD_LOAD " : "");
-        sb.append((barriers & LOAD_STORE) != 0 ? "LOAD_STORE " : "");
-        sb.append((barriers & STORE_LOAD) != 0 ? "STORE_LOAD " : "");
-        sb.append((barriers & STORE_STORE) != 0 ? "STORE_STORE " : "");
-        return sb.toString().trim();
-    }
-}
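
Since the removed com.oracle.max.criutils.MemoryBarriers above is identical to the newly added file apart from its package declaration, code outside this changeset migrates by rewriting a single import; the constant values are unchanged. A minimal sketch of a client after the move (the client class is hypothetical and assumes the Graal projects are on the classpath):

    // old location, removed by this changeset:
    // import static com.oracle.max.criutils.MemoryBarriers.*;

    // new location:
    import static com.oracle.graal.api.code.MemoryBarriers.*;

    public class BarrierMaskClient {
        public static void main(String[] args) {
            System.out.printf("STORE_LOAD = 0x%04x%n", STORE_LOAD);                           // 0x0004
            System.out.printf("JMM_POST_VOLATILE_WRITE = 0x%04x%n", JMM_POST_VOLATILE_WRITE); // 0x000c
        }
    }
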