changeset 7215:7c5f7e7cf2c2

Merge.
author Christian Haeubl <haeubl@ssw.jku.at>
date Tue, 04 Dec 2012 10:10:02 +0100
parents ce248dc0a656, e9fe9d3d94ac (diff shown against e9fe9d3d94ac)
children 5a95c784febf
files src/cpu/x86/vm/c1_globals_x86.hpp src/share/vm/runtime/globals.hpp
diffstat 87 files changed, 2640 insertions(+), 1033 deletions(-)
--- a/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java	Tue Dec 04 10:10:02 2012 +0100
@@ -699,4 +699,16 @@
         append(new CondMoveOp(result, Condition.EQ, load(Constant.TRUE), Constant.FALSE));
         setResult(node, result);
     }
+
+    @Override
+    public void visitBreakpointNode(BreakpointNode node) {
+        Kind[] sig = new Kind[node.arguments.size()];
+        for (int i = 0; i < sig.length; i++) {
+            sig[i] = node.arguments.get(i).kind();
+        }
+
+        CallingConvention cc = frameMap.registerConfig.getCallingConvention(CallingConvention.Type.JavaCall, Kind.Void, sig, target(), false);
+        Value[] parameters = visitInvokeArguments(cc, node.arguments);
+        append(new AMD64BreakpointOp(parameters));
+    }
 }
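
Note on the hunk above: visitBreakpointNode moves here from the HotSpot-specific AMD64 backend (it is deleted from AMD64HotSpotBackend.java further down), and AMD64BreakpointOp moves accordingly from com.oracle.graal.hotspot.amd64 to com.oracle.graal.lir.amd64, so breakpoint support no longer depends on HotSpot code. A minimal sketch of why the calling-convention step is there, restating the hunk with explanatory comments:

    // Sketch (same logic as the hunk above, comments added): JavaCall +
    // Kind.Void lays the arguments out exactly as a Java callee with this
    // signature would receive them, so a native debugger stopped on the
    // INT3 emitted by AMD64BreakpointOp finds every value in a well-known
    // register or stack slot.
    Kind[] sig = new Kind[node.arguments.size()];
    for (int i = 0; i < sig.length; i++) {
        sig[i] = node.arguments.get(i).kind();
    }
    CallingConvention cc = frameMap.registerConfig.getCallingConvention(
            CallingConvention.Type.JavaCall, Kind.Void, sig, target(), false);
    append(new AMD64BreakpointOp(visitInvokeArguments(cc, node.arguments)));
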
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/GraalCompilerTest.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/GraalCompilerTest.java	Tue Dec 04 10:10:02 2012 +0100
@@ -126,7 +126,7 @@
                     canonicalId.set(node, id);
                 }
                 String name = node instanceof ConstantNode ? node.toString(Verbosity.Name) : node.getClass().getSimpleName();
-                result.append("  " + id + "|" + name + "    (" + node.usages().size() + ")\n");
+                result.append("  " + id + "|" + name + "    (" + node.usages().count() + ")\n");
             }
         }
         return result.toString();
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/TypeSystemTest.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/TypeSystemTest.java	Tue Dec 04 10:10:02 2012 +0100
@@ -237,7 +237,7 @@
     }
 
     private static void outputNode(Node node) {
-        System.out.print("  " + node + "    (usage count: " + node.usages().size() + ") (inputs:");
+        System.out.print("  " + node + "    (usage count: " + node.usages().count() + ") (inputs:");
         for (Node input : node.inputs()) {
             System.out.print(" " + input.toString(Verbosity.Id));
         }
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/gen/LIRGenerator.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/gen/LIRGenerator.java	Tue Dec 04 10:10:02 2012 +0100
@@ -383,7 +383,7 @@
             }
         }
         if (block.numberOfSux() >= 1 && !endsWithJump(block)) {
-            NodeSuccessorsIterable successors = block.getEndNode().successors();
+            NodeClassIterable successors = block.getEndNode().successors();
             assert successors.isNotEmpty() : "should have at least one successor : " + block.getEndNode();
 
             emitJump(getLIRBlock((FixedNode) successors.first()), null);
--- a/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/Graph.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/Graph.java	Tue Dec 04 10:10:02 2012 +0100
@@ -303,15 +303,6 @@
                 return new NodeIterator();
             }
 
-            @SuppressWarnings("unchecked")
-            @Override
-            public <F extends Node> NodeIterable<F> filter(Class<F> clazz) {
-                if (IterableNodeType.class.isAssignableFrom(clazz)) {
-                    return getNodes((Class) clazz);
-                }
-                return super.filter(clazz);
-            }
-
             @Override
             public int count() {
                 return getNodeCount();
--- a/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/Node.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/Node.java	Tue Dec 04 10:10:02 2012 +0100
@@ -27,6 +27,7 @@
 
 import com.oracle.graal.graph.Graph.InputChangedListener;
 import com.oracle.graal.graph.NodeClass.*;
+import com.oracle.graal.graph.iterators.*;
 
 
 /**
@@ -131,22 +132,22 @@
     }
 
     /**
-     * Returns an {@link NodeInputsIterable iterable} which can be used to traverse all non-null input edges of this node.
-     * @return an {@link NodeInputsIterable iterable} for all non-null input edges.
+     * Returns an {@link NodeClassIterable iterable} which can be used to traverse all non-null input edges of this node.
+     * @return an {@link NodeClassIterable iterable} for all non-null input edges.
      */
-    public NodeInputsIterable inputs() {
+    public NodeClassIterable inputs() {
         return getNodeClass().getInputIterable(this);
     }
 
     /**
-     * Returns an {@link NodeSuccessorsIterable iterable} which can be used to traverse all non-null successor edges of this node.
-     * @return an {@link NodeSuccessorsIterable iterable} for all non-null successor edges.
+     * Returns an {@link NodeClassIterable iterable} which can be used to traverse all non-null successor edges of this node.
+     * @return an {@link NodeClassIterable iterable} for all non-null successor edges.
      */
-    public NodeSuccessorsIterable successors() {
+    public NodeClassIterable successors() {
         return getNodeClass().getSuccessorIterable(this);
     }
 
-    public final NodeUsagesList usages() {
+    public final NodeIterable<Node> usages() {
         return usages;
     }
 
@@ -551,7 +552,7 @@
         }
 
         if (precision > 0) {
-            if (this.usages.size() > 0) {
+            if (this.usages.count() > 0) {
                 formatter.format(" usages={");
                 int z = 0;
                 for (Node usage : this.usages) {
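
The usages() change above ripples through the rest of this changeset: with the return type widened from the concrete NodeUsagesList to NodeIterable<Node>, every caller switches from the list-specific size() to count() (see the GraalCompilerTest, TypeSystemTest, GraphBuilderPhase, EndNode, IfNode, MethodCallTargetNode, SnippetIntrinsificationPhase and PartialEscapeClosure hunks). A minimal sketch of the migration; the wrapper class and helper name are made up for illustration:

    import com.oracle.graal.graph.Node;
    import com.oracle.graal.graph.iterators.NodeIterable;

    final class UsagesMigrationSketch {
        static boolean hasAtMostOneUsage(Node node) {
            NodeIterable<Node> usages = node.usages(); // was: NodeUsagesList
            return usages.count() <= 1;                // was: usages.size() <= 1
        }
    }
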
--- a/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/NodeClass.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/NodeClass.java	Tue Dec 04 10:10:02 2012 +0100
@@ -216,6 +216,11 @@
         return str.toString();
     }
 
+    /**
+     * Describes an edge slot for a {@link NodeClass}.
+     * @see NodeClass#get(Node, Position)
+     * @see NodeClass#getName(Position)
+     */
     public static final class Position {
         public final boolean input;
         public final int index;
@@ -566,9 +571,9 @@
         }
     }
 
-    public NodeInputsIterable getInputIterable(final Node node) {
+    public NodeClassIterable getInputIterable(final Node node) {
         assert clazz.isInstance(node);
-        return new NodeInputsIterable() {
+        return new NodeClassIterable() {
 
             @Override
             public NodeClassIterator iterator() {
@@ -582,9 +587,9 @@
         };
     }
 
-    public NodeSuccessorsIterable getSuccessorIterable(final Node node) {
+    public NodeClassIterable getSuccessorIterable(final Node node) {
         assert clazz.isInstance(node);
-        return new NodeSuccessorsIterable() {
+        return new NodeClassIterable() {
             @Override
             public NodeClassIterator iterator() {
                 return new NodeClassIterator(node, successorOffsets, directSuccessorCount);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/NodeClassIterable.java	Tue Dec 04 10:10:02 2012 +0100
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.graph;
+
+import com.oracle.graal.graph.NodeClass.NodeClassIterator;
+import com.oracle.graal.graph.NodeClass.Position;
+import com.oracle.graal.graph.iterators.*;
+
+/**
+ * The iterator returned by this iterable can be used to access {@link Position Positions} during iteration using {@link NodeClassIterator#nextPosition()}.
+ */
+public abstract class NodeClassIterable extends AbstractNodeIterable<Node> {
+    @Override
+    public abstract NodeClassIterator iterator();
+}
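
NodeClassIterable replaces the two structurally identical iterables deleted below (NodeInputsIterable and NodeSuccessorsIterable), so inputs() and successors() now share one type. A minimal sketch of the Position-aware iteration its javadoc describes; the exact iterator protocol is an assumption, modeled on the loop shape in the BinaryGraphPrinter hunk later in this changeset:

    import com.oracle.graal.graph.Node;
    import com.oracle.graal.graph.NodeClassIterable;
    import com.oracle.graal.graph.NodeClass.NodeClassIterator;
    import com.oracle.graal.graph.NodeClass.Position;

    final class PositionIterationSketch {
        static void printInputSlots(Node node) {
            NodeClassIterable inputs = node.inputs();
            NodeClassIterator it = inputs.iterator();
            while (it.hasNext()) {
                // Assumed to advance and report the edge slot in one call,
                // matching the printer's loop.
                Position pos = it.nextPosition();
                System.out.println((pos.input ? "input #" : "successor #") + pos.index);
            }
        }
    }
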
--- a/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/NodeInputsIterable.java	Tue Dec 04 10:09:25 2012 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.graph;
-
-import com.oracle.graal.graph.NodeClass.NodeClassIterator;
-import com.oracle.graal.graph.iterators.*;
-
-public abstract class NodeInputsIterable extends AbstractNodeIterable<Node> {
-    @Override
-    public abstract NodeClassIterator iterator();
-}
--- a/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/NodeSuccessorsIterable.java	Tue Dec 04 10:09:25 2012 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.graph;
-
-import com.oracle.graal.graph.NodeClass.NodeClassIterator;
-import com.oracle.graal.graph.iterators.*;
-
-public abstract class NodeSuccessorsIterable extends AbstractNodeIterable<Node> {
-    @Override
-    public abstract NodeClassIterator iterator();
-}
--- a/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/NodeUsagesList.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/NodeUsagesList.java	Tue Dec 04 10:10:02 2012 +0100
@@ -44,10 +44,6 @@
         this.nodes = nodes;
     }
 
-    public int size() {
-        return size;
-    }
-
     @Override
     public boolean isEmpty() {
         return size == 0;
@@ -63,63 +59,6 @@
         return size;
     }
 
-    protected void incModCount() {
-        modCount++;
-    }
-
-    public boolean add(Node node) {
-        incModCount();
-        if (size == nodes.length) {
-            nodes = Arrays.copyOf(nodes, nodes.length * 2 + 1);
-        }
-        nodes[size++] = node;
-        return true;
-    }
-
-    void copyAndClear(NodeUsagesList other) {
-        incModCount();
-        other.incModCount();
-        nodes = other.nodes;
-        size = other.size;
-        nodes = EMPTY_NODE_ARRAY;
-        size = 0;
-    }
-
-    public void clear() {
-        incModCount();
-        nodes = EMPTY_NODE_ARRAY;
-        size = 0;
-    }
-
-    boolean remove(Node node) {
-        int i = 0;
-        incModCount();
-        while (i < size && nodes[i] != node) {
-            i++;
-        }
-        if (i < size) {
-            i++;
-            while (i < size) {
-                nodes[i - 1] = nodes[i];
-                i++;
-            }
-            nodes[--size] = null;
-            return true;
-        } else {
-            return false;
-        }
-    }
-
-    boolean replaceFirst(Node node, Node other) {
-        for (int i = 0; i < size; i++) {
-            if (nodes[i] == node) {
-                nodes[i] = other;
-                return true;
-            }
-        }
-        return false;
-    }
-
     @Override
     public Iterator<Node> iterator() {
         return new Iterator<Node>() {
@@ -160,6 +99,63 @@
         return Arrays.asList(Arrays.copyOf(NodeUsagesList.this.nodes, NodeUsagesList.this.size));
     }
 
+    private void incModCount() {
+        modCount++;
+    }
+
+    boolean add(Node node) {
+        incModCount();
+        if (size == nodes.length) {
+            nodes = Arrays.copyOf(nodes, nodes.length * 2 + 1);
+        }
+        nodes[size++] = node;
+        return true;
+    }
+
+    void copyAndClear(NodeUsagesList other) {
+        incModCount();
+        other.incModCount();
+        nodes = other.nodes;
+        size = other.size;
+        nodes = EMPTY_NODE_ARRAY;
+        size = 0;
+    }
+
+    void clear() {
+        incModCount();
+        nodes = EMPTY_NODE_ARRAY;
+        size = 0;
+    }
+
+    boolean remove(Node node) {
+        int i = 0;
+        incModCount();
+        while (i < size && nodes[i] != node) {
+            i++;
+        }
+        if (i < size) {
+            i++;
+            while (i < size) {
+                nodes[i - 1] = nodes[i];
+                i++;
+            }
+            nodes[--size] = null;
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    boolean replaceFirst(Node node, Node other) {
+        for (int i = 0; i < size; i++) {
+            if (nodes[i] == node) {
+                nodes[i] = other;
+                return true;
+            }
+        }
+        return false;
+    }
+
     @Override
     public String toString() {
         StringBuilder str = new StringBuilder();
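
Besides moving the mutators below the read-only API, the NodeUsagesList hunk tightens visibility: the redundant public size() is gone (count() already returns it), add() and clear() drop to package-private, and incModCount becomes private, so code outside com.oracle.graal.graph can no longer edit a usages list directly. A minimal sketch of the indirect route such code takes instead; the signature is assumed from the replaceAtUsages(null) call in the SnippetIntrinsificationPhase hunk below:

    import com.oracle.graal.graph.Node;

    final class UsagesMutationSketch {
        static void redirect(Node oldNode, Node newNode) {
            // Rewires every usage edge; the usages lists stay consistent
            // without being touched directly.
            oldNode.replaceAtUsages(newNode);
        }
    }
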
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64BreakpointOp.java	Tue Dec 04 10:09:25 2012 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.hotspot.amd64;
-
-import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
-
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.lir.LIRInstruction.Opcode;
-import com.oracle.graal.lir.amd64.*;
-import com.oracle.graal.lir.asm.*;
-
-/**
- * Emits a breakpoint.
- */
-@Opcode("BREAKPOINT")
-public class AMD64BreakpointOp extends AMD64LIRInstruction {
-
-    /**
-     * A set of values loaded into the Java ABI parameter locations (for inspection by a debugger).
-     */
-    @Use({REG, STACK}) protected Value[] parameters;
-
-    public AMD64BreakpointOp(Value[] parameters) {
-        this.parameters = parameters;
-    }
-
-    @Override
-    public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler asm) {
-        asm.int3();
-    }
-}
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java	Tue Dec 04 10:10:02 2012 +0100
@@ -88,19 +88,6 @@
         }
 
         @Override
-        public void visitBreakpointNode(BreakpointNode i) {
-            Kind[] sig = new Kind[i.arguments.size()];
-            int pos = 0;
-            for (ValueNode arg : i.arguments) {
-                sig[pos++] = arg.kind();
-            }
-
-            CallingConvention cc = frameMap.registerConfig.getCallingConvention(CallingConvention.Type.JavaCall, Kind.Void, sig, target(), false);
-            Value[] parameters = visitInvokeArguments(cc, i.arguments);
-            append(new AMD64BreakpointOp(parameters));
-        }
-
-        @Override
         public void visitExceptionObject(ExceptionObjectNode x) {
             HotSpotVMConfig config = runtime().config;
             RegisterValue thread = runtime().threadRegister().asValue();
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotRuntime.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotRuntime.java	Tue Dec 04 10:10:02 2012 +0100
@@ -72,13 +72,13 @@
                 /* arg0:         a */ arg(0, Kind.Double),
                 /* arg1:         b */ arg(1, Kind.Double));
 
-        addRuntimeCall(MONITORENTER, config.fastMonitorEnterStub,
+        addRuntimeCall(MONITORENTER, config.monitorEnterStub,
                 /*        temps */ new Register[] {rax, rbx},
                 /*          ret */ ret(Kind.Void),
                 /* arg0: object */ arg(0, Kind.Object),
                 /* arg1:   lock */ arg(1, word));
 
-        addRuntimeCall(MONITOREXIT, c.fastMonitorExitStub,
+        addRuntimeCall(MONITOREXIT, c.monitorExitStub,
                 /*        temps */ new Register[] {rax, rbx},
                 /*          ret */ ret(Kind.Void),
                 /* arg0: object */ arg(0, Kind.Object),
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotVMConfig.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotVMConfig.java	Tue Dec 04 10:10:02 2012 +0100
@@ -24,7 +24,7 @@
 
 
 /**
- * Used to communicate configuration details, runtime offsets, etc. to graal upon compileMethod.
+ * Used to communicate configuration details, runtime offsets, etc. to Graal upon compileMethod.
  */
 public final class HotSpotVMConfig extends CompilerObject {
 
@@ -245,8 +245,8 @@
     public long inlineCacheMissStub;
     public long handleExceptionStub;
     public long handleDeoptStub;
-    public long fastMonitorEnterStub;
-    public long fastMonitorExitStub;
+    public long monitorEnterStub;
+    public long monitorExitStub;
     public long verifyOopStub;
     public long vmErrorStub;
     public long deoptimizeStub;
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/snippets/ArrayCopySnippets.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/snippets/ArrayCopySnippets.java	Tue Dec 04 10:10:02 2012 +0100
@@ -28,6 +28,7 @@
 import com.oracle.graal.hotspot.nodes.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.nodes.extended.*;
+import com.oracle.graal.nodes.java.*;
 import com.oracle.graal.nodes.spi.*;
 import com.oracle.graal.nodes.type.*;
 import com.oracle.graal.snippets.*;
@@ -43,6 +44,7 @@
 
     @Snippet
     public static void vectorizedCopy(Object src, int srcPos, Object dest, int destPos, int length, @ConstantParameter("baseKind") Kind baseKind) {
+        checkInputs(src, srcPos, dest, destPos, length);
         int header = arrayBaseOffset(baseKind);
         int elementSize = arrayIndexScale(baseKind);
         long byteLength = (long) length * elementSize;
@@ -70,68 +72,43 @@
     }
 
     @Snippet
-    public static void arraycopy(byte[] src, int srcPos, byte[] dest, int destPos, int length) {
+    public static void checkInputs(Object src, int srcPos, Object dest, int destPos, int length) {
         if (src == null || dest == null) {
             throw new NullPointerException();
         }
-        if (srcPos < 0 || destPos < 0 || length < 0 || srcPos + length > src.length || destPos + length > dest.length) {
+        if (srcPos < 0 || destPos < 0 || length < 0 || srcPos + length > ArrayLengthNode.arrayLength(src) || destPos + length > ArrayLengthNode.arrayLength(dest)) {
             throw new IndexOutOfBoundsException();
         }
+    }
+
+    @Snippet
+    public static void arraycopy(byte[] src, int srcPos, byte[] dest, int destPos, int length) {
         vectorizedCopy(src, srcPos, dest, destPos, length, Kind.Byte);
     }
 
     @Snippet
     public static void arraycopy(char[] src, int srcPos, char[] dest, int destPos, int length) {
-        if (src == null || dest == null) {
-            throw new NullPointerException();
-        }
-        if (srcPos < 0 || destPos < 0 || length < 0 || srcPos + length > src.length || destPos + length > dest.length) {
-            throw new IndexOutOfBoundsException();
-        }
         vectorizedCopy(src, srcPos, dest, destPos, length, Kind.Char);
     }
 
     @Snippet
     public static void arraycopy(short[] src, int srcPos, short[] dest, int destPos, int length) {
-        if (src == null || dest == null) {
-            throw new NullPointerException();
-        }
-        if (srcPos < 0 || destPos < 0 || length < 0 || srcPos + length > src.length || destPos + length > dest.length) {
-            throw new IndexOutOfBoundsException();
-        }
         vectorizedCopy(src, srcPos, dest, destPos, length, Kind.Short);
     }
 
     @Snippet
     public static void arraycopy(int[] src, int srcPos, int[] dest, int destPos, int length) {
-        if (src == null || dest == null) {
-            throw new NullPointerException();
-        }
-        if (srcPos < 0 || destPos < 0 || length < 0 || srcPos + length > src.length || destPos + length > dest.length) {
-            throw new IndexOutOfBoundsException();
-        }
         vectorizedCopy(src, srcPos, dest, destPos, length, Kind.Int);
     }
 
     @Snippet
     public static void arraycopy(float[] src, int srcPos, float[] dest, int destPos, int length) {
-        if (src == null || dest == null) {
-            throw new NullPointerException();
-        }
-        if (srcPos < 0 || destPos < 0 || length < 0 || srcPos + length > src.length || destPos + length > dest.length) {
-            throw new IndexOutOfBoundsException();
-        }
         vectorizedCopy(src, srcPos, dest, destPos, length, Kind.Float);
     }
 
     @Snippet
     public static void arraycopy(long[] src, int srcPos, long[] dest, int destPos, int length) {
-        if (src == null || dest == null) {
-            throw new NullPointerException();
-        }
-        if (srcPos < 0 || destPos < 0 || length < 0 || srcPos + length > src.length || destPos + length > dest.length) {
-            throw new IndexOutOfBoundsException();
-        }
+        checkInputs(src, srcPos, dest, destPos, length);
         Kind baseKind = Kind.Long;
         int header = arrayBaseOffset(baseKind);
         long byteLength = (long) length * arrayIndexScale(baseKind);
@@ -152,12 +129,7 @@
 
     @Snippet
     public static void arraycopy(double[] src, int srcPos, double[] dest, int destPos, int length) {
-        if (src == null || dest == null) {
-            throw new NullPointerException();
-        }
-        if (srcPos < 0 || destPos < 0 || length < 0 || srcPos + length > src.length || destPos + length > dest.length) {
-            throw new IndexOutOfBoundsException();
-        }
+        checkInputs(src, srcPos, dest, destPos, length);
         Kind baseKind = Kind.Double;
         int header = arrayBaseOffset(baseKind);
         long byteLength = (long) length * arrayIndexScale(baseKind);
@@ -179,12 +151,7 @@
     // Does NOT perform store checks
     @Snippet
     public static void arraycopy(Object[] src, int srcPos, Object[] dest, int destPos, int length) {
-        if (src == null || dest == null) {
-            throw new NullPointerException();
-        }
-        if (srcPos < 0 || destPos < 0 || length < 0 || srcPos + length > src.length || destPos + length > dest.length) {
-            throw new IndexOutOfBoundsException();
-        }
+        checkInputs(src, srcPos, dest, destPos, length);
         final int scale = arrayIndexScale(Kind.Object);
         int header = arrayBaseOffset(Kind.Object);
         if (src == dest && srcPos < destPos) { // bad aliased case
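
The ArrayCopySnippets hunks above fold the per-overload null and bounds preambles into one checkInputs snippet: vectorizedCopy now performs the check for the element-wise overloads, while the long, double and Object overloads call checkInputs directly. ArrayLengthNode.arrayLength stands in for the .length field access so a single guard can serve every array kind through an Object parameter. A plain-Java equivalent for reference, with reflective Array.getLength playing the role of the intrinsic:

    import java.lang.reflect.Array;

    final class CheckInputsSketch {
        static void checkInputs(Object src, int srcPos, Object dest, int destPos, int length) {
            if (src == null || dest == null) {
                throw new NullPointerException();
            }
            if (srcPos < 0 || destPos < 0 || length < 0
                    || srcPos + length > Array.getLength(src)
                    || destPos + length > Array.getLength(dest)) {
                throw new IndexOutOfBoundsException();
            }
        }
    }
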
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/snippets/NewObjectSnippets.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/snippets/NewObjectSnippets.java	Tue Dec 04 10:10:02 2012 +0100
@@ -61,9 +61,9 @@
         Word thread = thread();
         Word top = loadWordFromWord(thread, threadTlabTopOffset());
         Word end = loadWordFromWord(thread, threadTlabEndOffset());
-        Word available = end.minus(top);
-        if (available.aboveOrEqual(Word.fromInt(size))) {
-            Word newTop = top.plus(size);
+        Word newTop = top.plus(size);
+        // this check might lead to problems if the TLAB is within 16GB of the address space end (checked in c++ code)
+        if (newTop.belowOrEqual(end)) {
             storeObject(thread, 0, threadTlabTopOffset(), newTop);
             return top;
         }
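
The TLAB fast path above now computes newTop (which the store needs anyway) and compares it against end directly, dropping the separate end.minus(top) computation. The trade-off is the wraparound case the new comment flags: a sketch of the hazard in plain long arithmetic, assuming Word.belowOrEqual is an unsigned pointer comparison:

    final class TlabCheckSketch {
        static boolean fits(long top, long end, int size) {
            long newTop = top + size;                       // may wrap past 2^64 - 1
            return Long.compareUnsigned(newTop, end) <= 0;  // belowOrEqual(end)
        }
        // If top sits within size bytes of the end of the address space,
        // newTop wraps to a small value and the check spuriously passes;
        // per the comment, the C++ side keeps TLABs clear of that region.
    }
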
--- a/graal/com.oracle.graal.interpreter/src/com/oracle/graal/interpreter/BytecodeInterpreter.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.interpreter/src/com/oracle/graal/interpreter/BytecodeInterpreter.java	Tue Dec 04 10:10:02 2012 +0100
@@ -65,11 +65,11 @@
         GraalRuntime runtime = Graal.getRuntime();
         this.runtimeInterface = runtime.getCapability(RuntimeInterpreterInterface.class);
         if (this.runtimeInterface == null) {
-            throw new UnsupportedOperationException("The provided graal runtime does not support the required capability " + RuntimeInterpreterInterface.class.getName() + ".");
+            throw new UnsupportedOperationException("The provided Graal runtime does not support the required capability " + RuntimeInterpreterInterface.class.getName() + ".");
         }
         this.metaAccessProvider = runtime.getCapability(MetaAccessProvider.class);
         if (this.metaAccessProvider == null) {
-            throw new UnsupportedOperationException("The provided graal runtime does not support the required capability " + MetaAccessProvider.class.getName() + ".");
+            throw new UnsupportedOperationException("The provided Graal runtime does not support the required capability " + MetaAccessProvider.class.getName() + ".");
         }
 
         this.rootMethod = resolveRootMethod();
--- a/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPhase.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPhase.java	Tue Dec 04 10:10:02 2012 +0100
@@ -202,7 +202,7 @@
 
         // remove dead FrameStates
         for (Node n : currentGraph.getNodes(FrameState.class)) {
-            if (n.usages().size() == 0 && n.predecessor() == null) {
+            if (n.usages().count() == 0 && n.predecessor() == null) {
                 n.safeDelete();
             }
         }
--- a/graal/com.oracle.graal.jtt/src/com/oracle/graal/jtt/jdk/IntegerBits.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.jtt/src/com/oracle/graal/jtt/jdk/IntegerBits.java	Tue Dec 04 10:10:02 2012 +0100
@@ -28,20 +28,20 @@
 public class IntegerBits {
     @SuppressWarnings("unused")
     private static int init = Integer.reverseBytes(42);
-    private int original = 0x01020304;
-    private int reversed = 0x04030201;
-    private int v = 0b1000;
-    private int zero = 0;
+    private static int original = 0x01020304;
+    private static int reversed = 0x04030201;
+    private static int v = 0b1000;
+    private static int zero = 0;
 
-    public int test(int o) {
+    public static int test(int o) {
         return Integer.reverseBytes(o);
     }
 
-    public int test2(int o) {
+    public static int test2(int o) {
         return Integer.numberOfLeadingZeros(o);
     }
 
-    public int test3(int o) {
+    public static int test3(int o) {
         return Integer.numberOfTrailingZeros(o);
     }
 
--- a/graal/com.oracle.graal.jtt/src/com/oracle/graal/jtt/jdk/LongBits.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.jtt/src/com/oracle/graal/jtt/jdk/LongBits.java	Tue Dec 04 10:10:02 2012 +0100
@@ -28,21 +28,21 @@
 public class LongBits {
     @SuppressWarnings("unused")
     private static long init = Long.reverseBytes(42);
-    private long original = 0x0102030405060708L;
-    private long reversed = 0x0807060504030201L;
-    private long v = 0b1000L;
-    private long v2 = 0x0100000000L;
-    private long zero = 0L;
+    private static long original = 0x0102030405060708L;
+    private static long reversed = 0x0807060504030201L;
+    private static long v = 0b1000L;
+    private static long v2 = 0x0100000000L;
+    private static long zero = 0L;
 
-    public long test(long o) {
+    public static long test(long o) {
         return Long.reverseBytes(o);
     }
 
-    public int test2(long o) {
+    public static int test2(long o) {
         return Long.numberOfLeadingZeros(o);
     }
 
-    public int test3(long o) {
+    public static int test3(long o) {
         return Long.numberOfTrailingZeros(o);
     }
 
--- a/graal/com.oracle.graal.jtt/src/com/oracle/graal/jtt/loop/LoopLastIndexOf.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.jtt/src/com/oracle/graal/jtt/loop/LoopLastIndexOf.java	Tue Dec 04 10:10:02 2012 +0100
@@ -29,10 +29,10 @@
  */
 public class LoopLastIndexOf {
 
-    private final char[] v1 = new char[]{'a', 'b', 'c', 'd', 'a', 'b', 'c', 'd', 'a', 'b', 'c', 'd'};
-    private final char[] v2 = new char[]{'d', 'a'};
-    private final char[] v3 = new char[]{'d', 'b', 'c'};
-    private final char[] v4 = new char[]{'z', 'a', 'b', 'c'};
+    private static final char[] v1 = new char[]{'a', 'b', 'c', 'd', 'a', 'b', 'c', 'd', 'a', 'b', 'c', 'd'};
+    private static final char[] v2 = new char[]{'d', 'a'};
+    private static final char[] v3 = new char[]{'d', 'b', 'c'};
+    private static final char[] v4 = new char[]{'z', 'a', 'b', 'c'};
 
     public static int test(char[] source, int sourceOffset, int sourceCount, char[] target, int targetOffset, int targetCount, int fromIndexParam) {
         int rightIndex = sourceCount - targetCount;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64BreakpointOp.java	Tue Dec 04 10:10:02 2012 +0100
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.lir.LIRInstruction.Opcode;
+import com.oracle.graal.lir.asm.*;
+
+/**
+ * Emits a breakpoint.
+ */
+@Opcode("BREAKPOINT")
+public class AMD64BreakpointOp extends AMD64LIRInstruction {
+
+    /**
+     * A set of values loaded into the Java ABI parameter locations (for inspection by a debugger).
+     */
+    @Use({REG, STACK}) protected Value[] parameters;
+
+    public AMD64BreakpointOp(Value[] parameters) {
+        this.parameters = parameters;
+    }
+
+    @Override
+    public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler asm) {
+        asm.int3();
+    }
+}
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/EndNode.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/EndNode.java	Tue Dec 04 10:10:02 2012 +0100
@@ -45,7 +45,7 @@
 
     @Override
     public boolean verify() {
-        assertTrue(usages().size() <= 1, "at most one usage");
+        assertTrue(usages().count() <= 1, "at most one usage");
         return super.verify();
     }
 
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/IfNode.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/IfNode.java	Tue Dec 04 10:10:02 2012 +0100
@@ -26,6 +26,7 @@
 
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.graph.*;
+import com.oracle.graal.graph.iterators.*;
 import com.oracle.graal.nodes.PhiNode.PhiType;
 import com.oracle.graal.nodes.calc.*;
 import com.oracle.graal.nodes.spi.*;
@@ -238,7 +239,7 @@
         }
 
         // Only consider merges with a single usage that is both a phi and an operand of the comparison
-        NodeUsagesList mergeUsages = merge.usages();
+        NodeIterable<Node> mergeUsages = merge.usages();
         if (mergeUsages.count() != 1) {
             return false;
         }
@@ -249,7 +250,7 @@
 
         // Ensure phi is used by at most the comparison and the merge's frame state (if any)
         PhiNode phi = (PhiNode) singleUsage;
-        NodeUsagesList phiUsages = phi.usages();
+        NodeIterable<Node> phiUsages = phi.usages();
         if (phiUsages.count() > 2) {
             return false;
         }
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/java/MethodCallTargetNode.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/java/MethodCallTargetNode.java	Tue Dec 04 10:10:02 2012 +0100
@@ -98,7 +98,7 @@
 
     @Override
     public boolean verify() {
-        assert usages().size() <= 1 : "call target may only be used by a single invoke";
+        assert usages().count() <= 1 : "call target may only be used by a single invoke";
         for (Node n : usages()) {
             assertTrue(n instanceof Invoke, "call target can only be used from an invoke (%s)", n);
         }
--- a/graal/com.oracle.graal.printer/src/com/oracle/graal/printer/BinaryGraphPrinter.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.printer/src/com/oracle/graal/printer/BinaryGraphPrinter.java	Tue Dec 04 10:10:02 2012 +0100
@@ -382,7 +382,7 @@
                 writePropertyObject(entry.getValue());
             }
             // successors
-            NodeSuccessorsIterable successors = node.successors();
+            NodeClassIterable successors = node.successors();
             writeShort((char) successors.count());
             NodeClassIterator suxIt = successors.iterator();
             while (suxIt.hasNext()) {
@@ -392,7 +392,7 @@
                 writeShort((char) pos.index);
             }
             //inputs
-            NodeInputsIterable inputs = node.inputs();
+            NodeClassIterable inputs = node.inputs();
             writeShort((char) inputs.count());
             NodeClassIterator inIt = inputs.iterator();
             while (inIt.hasNext()) {
--- a/graal/com.oracle.graal.snippets/src/com/oracle/graal/snippets/SnippetIntrinsificationPhase.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.snippets/src/com/oracle/graal/snippets/SnippetIntrinsificationPhase.java	Tue Dec 04 10:10:02 2012 +0100
@@ -206,13 +206,13 @@
                     Type boundType = typeVariable.getBounds()[0];
                     if (boundType instanceof Class && ((Class) boundType).getSuperclass() == null) {
                         // Unbound generic => try boxing elimination
-                        if (node.usages().size() == 2) {
+                        if (node.usages().count() == 2) {
                             if (node instanceof Invoke) {
                                 Invoke invokeNode = (Invoke) node;
                                 MethodCallTargetNode callTarget = invokeNode.methodCallTarget();
                                 if (pool.isBoxingMethod(callTarget.targetMethod())) {
                                     FrameState stateAfter = invokeNode.stateAfter();
-                                    assert stateAfter.usages().size() == 1;
+                                    assert stateAfter.usages().count() == 1;
                                     invokeNode.node().replaceAtUsages(null);
                                     ValueNode result = callTarget.arguments().get(0);
                                     StructuredGraph graph = (StructuredGraph) node.graph();
--- a/graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/PartialEscapeClosure.java	Tue Dec 04 10:09:25 2012 +0100
+++ b/graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/PartialEscapeClosure.java	Tue Dec 04 10:10:02 2012 +0100
@@ -151,7 +151,7 @@
             StateSplit split = (StateSplit) node;
             FrameState stateAfter = split.stateAfter();
             if (stateAfter != null) {
-                if (stateAfter.usages().size() > 1) {
+                if (stateAfter.usages().count() > 1) {
                     stateAfter = (FrameState) stateAfter.copyWithInputs();
                     split.setStateAfter(stateAfter);
                 }
--- a/make/bsd/makefiles/graal.make	Tue Dec 04 10:09:25 2012 +0100
+++ b/make/bsd/makefiles/graal.make	Tue Dec 04 10:10:02 2012 +0100
@@ -29,4 +29,4 @@
 
 VM_SUBDIR = graal
 
-CFLAGS += -DGRAAL -DCOMPILER1
+CFLAGS += -DGRAAL
--- a/make/bsd/makefiles/vm.make	Tue Dec 04 10:09:25 2012 +0100
+++ b/make/bsd/makefiles/vm.make	Tue Dec 04 10:10:02 2012 +0100
@@ -175,8 +175,6 @@
 
 SHARK_PATHS := $(GAMMADIR)/src/share/vm/shark
 
-GRAAL_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1)
-GRAAL_PATHS += $(HS_COMMON_SRC)/share/vm/c1
 GRAAL_PATHS += $(call altsrc,$(HS_COMMON_SRC)/share/vm/graal)
 GRAAL_PATHS += $(HS_COMMON_SRC)/share/vm/graal
 
@@ -194,19 +192,19 @@
 COMPILER1_SPECIFIC_FILES := c1_\*
 SHARK_SPECIFIC_FILES     := shark
 ZERO_SPECIFIC_FILES      := zero
-GRAAL_SPECIFIC_FILES     := graal
+GRAAL_SPECIFIC_FILES     := graal\*
 
 # Always exclude these.
 Src_Files_EXCLUDE += jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp
 
 # Exclude per type.
-Src_Files_EXCLUDE/CORE      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
-Src_Files_EXCLUDE/COMPILER1 := $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
-Src_Files_EXCLUDE/COMPILER2 := $(COMPILER1_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES)
-Src_Files_EXCLUDE/TIERED    := $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES)
-Src_Files_EXCLUDE/ZERO      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
-Src_Files_EXCLUDE/SHARK     := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES)
-Src_Files_EXCLUDE/GRAAL     := $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/CORE      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/COMPILER1 := $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/COMPILER2 := $(COMPILER1_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES)
+Src_Files_EXCLUDE/TIERED    := $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES)
+Src_Files_EXCLUDE/ZERO      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/SHARK     := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES)
+Src_Files_EXCLUDE/GRAAL     := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
 
 Src_Files_EXCLUDE +=  $(Src_Files_EXCLUDE/$(TYPE))
 
--- a/make/linux/makefiles/graal.make	Tue Dec 04 10:09:25 2012 +0100
+++ b/make/linux/makefiles/graal.make	Tue Dec 04 10:10:02 2012 +0100
@@ -29,4 +29,4 @@
 
 VM_SUBDIR = graal
 
-CFLAGS += -DGRAAL -DCOMPILER1
+CFLAGS += -DGRAAL
--- a/make/linux/makefiles/vm.make	Tue Dec 04 10:09:25 2012 +0100
+++ b/make/linux/makefiles/vm.make	Tue Dec 04 10:10:02 2012 +0100
@@ -177,8 +177,6 @@
 
 SHARK_PATHS := $(GAMMADIR)/src/share/vm/shark
 
-GRAAL_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1)
-GRAAL_PATHS += $(HS_COMMON_SRC)/share/vm/c1
 GRAAL_PATHS += $(call altsrc,$(HS_COMMON_SRC)/share/vm/graal)
 GRAAL_PATHS += $(HS_COMMON_SRC)/share/vm/graal
 
@@ -196,19 +194,19 @@
 COMPILER1_SPECIFIC_FILES := c1_\*
 SHARK_SPECIFIC_FILES     := shark
 ZERO_SPECIFIC_FILES      := zero
-GRAAL_SPECIFIC_FILES     := graal
+GRAAL_SPECIFIC_FILES     := graal\*
 
 # Always exclude these.
 Src_Files_EXCLUDE += jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp
 
 # Exclude per type.
-Src_Files_EXCLUDE/CORE      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
-Src_Files_EXCLUDE/COMPILER1 := $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
-Src_Files_EXCLUDE/COMPILER2 := $(COMPILER1_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES)
-Src_Files_EXCLUDE/TIERED    := $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES)
-Src_Files_EXCLUDE/ZERO      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
-Src_Files_EXCLUDE/SHARK     := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES)
-Src_Files_EXCLUDE/GRAAL     := $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/CORE      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/COMPILER1 := $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/COMPILER2 := $(COMPILER1_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES)
+Src_Files_EXCLUDE/TIERED    := $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES)
+Src_Files_EXCLUDE/ZERO      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/SHARK     := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES)
+Src_Files_EXCLUDE/GRAAL     := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
 
 Src_Files_EXCLUDE +=  $(Src_Files_EXCLUDE/$(TYPE))
 
--- a/make/solaris/makefiles/graal.make	Tue Dec 04 10:09:25 2012 +0100
+++ b/make/solaris/makefiles/graal.make	Tue Dec 04 10:10:02 2012 +0100
@@ -29,4 +29,4 @@
 
 VM_SUBDIR = graal
 
-CFLAGS += -DGRAAL -DCOMPILER1
+CFLAGS += -DGRAAL
--- a/make/solaris/makefiles/vm.make	Tue Dec 04 10:09:25 2012 +0100
+++ b/make/solaris/makefiles/vm.make	Tue Dec 04 10:10:02 2012 +0100
@@ -190,8 +190,6 @@
 COMPILER2_PATHS += $(HS_COMMON_SRC)/share/vm/libadt
 COMPILER2_PATHS +=  $(GENERATED)/adfiles
 
-GRAAL_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1)
-GRAAL_PATHS += $(HS_COMMON_SRC)/share/vm/c1
 GRAAL_PATHS += $(call altsrc,$(HS_COMMON_SRC)/share/vm/graal)
 GRAAL_PATHS += $(HS_COMMON_SRC)/share/vm/graal
 
@@ -215,13 +213,13 @@
 Src_Files_EXCLUDE := dtrace jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp
 
 # Exclude per type.
-Src_Files_EXCLUDE/CORE      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
-Src_Files_EXCLUDE/COMPILER1 := $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
-Src_Files_EXCLUDE/COMPILER2 := $(COMPILER1_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES)
-Src_Files_EXCLUDE/TIERED    := $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES)
-Src_Files_EXCLUDE/ZERO      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
-Src_Files_EXCLUDE/SHARK     := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES)
-Src_Files_EXCLUDE/GRAAL     := $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/CORE      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/COMPILER1 := $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/COMPILER2 := $(COMPILER1_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES)
+Src_Files_EXCLUDE/TIERED    := $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES)
+Src_Files_EXCLUDE/ZERO      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/SHARK     := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES)
+Src_Files_EXCLUDE/GRAAL     := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) $(GRAAL_SPECIFIC_FILES) ciTypeFlow.cpp
 
 Src_Files_EXCLUDE +=  $(Src_Files_EXCLUDE/$(TYPE))
 
--- a/mx/projects	Tue Dec 04 10:09:25 2012 +0100
+++ b/mx/projects	Tue Dec 04 10:10:02 2012 +0100
@@ -19,8 +19,8 @@
 library@JACOCOREPORT@path=lib/jacocoreport.jar
 library@JACOCOREPORT@urls=http://lafo.ssw.uni-linz.ac.at/jacoco/jacocoreport.jar
 
-library@DACAPO_SCALA@path=lib/dacapo-scala-0.1.0.jar
-library@DACAPO_SCALA@urls=http://repo.scalabench.org/snapshots/org/scalabench/benchmarks/scala-benchmark-suite/0.1.0-SNAPSHOT/scala-benchmark-suite-0.1.0-20110908.085753-2.jar
+library@DACAPO_SCALA@path=lib/dacapo-scala-0.1.0-20120216.jar
+library@DACAPO_SCALA@urls=http://repo.scalabench.org/snapshots/org/scalabench/benchmarks/scala-benchmark-suite/0.1.0-SNAPSHOT/scala-benchmark-suite-0.1.0-20120216.103539-3.jar
 
 # graal.api.runtime
 project@com.oracle.graal.api.runtime@subDir=graal
--- a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -35,7 +35,7 @@
 #include "runtime/os.hpp"
 #include "runtime/stubRoutines.hpp"
 
-int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case, bool use_basic_object_lock) {
+int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case) {
   const int aligned_mask = BytesPerWord -1;
   const int hdr_offset = oopDesc::mark_offset_in_bytes();
   assert(hdr == rax, "hdr must be rax, for the cmpxchg instruction");
@@ -45,10 +45,8 @@
 
   verify_oop(obj);
 
-  if (use_basic_object_lock) {
-    // save object being locked into the BasicObjectLock
-    movptr(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj);
-  }
+  // save object being locked into the BasicObjectLock
+  movptr(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj);
 
   if (UseBiasedLocking) {
     assert(scratch != noreg, "should have scratch register at this point");
@@ -100,7 +98,7 @@
 }
 
 
-void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case, bool use_basic_object_lock) {
+void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
   const int aligned_mask = BytesPerWord -1;
   const int hdr_offset = oopDesc::mark_offset_in_bytes();
   assert(disp_hdr == rax, "disp_hdr must be rax, for the cmpxchg instruction");
@@ -108,10 +106,8 @@
   Label done;
 
   if (UseBiasedLocking) {
-    if (use_basic_object_lock) {
-      // load object
-      movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
-    }
+    // load object
+    movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
     biased_locking_exit(obj, hdr, done);
   }
 
@@ -122,10 +118,8 @@
   // if we had recursive locking, we are done
   jcc(Assembler::zero, done);
   if (!UseBiasedLocking) {
-    if (use_basic_object_lock) {
-      // load object
-      movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
-    }
+    // load object
+    movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
   }
   verify_oop(obj);
   // test if object header is pointing to the displaced header, and if so, restore
@@ -290,13 +284,10 @@
     }
   }
 
-#ifndef GRAAL
-  // TODO(thomaswue): Check how we can access the flag without a ciEnv object.
   if (CURRENT_ENV->dtrace_alloc_probes()) {
     assert(obj == rax, "must be");
     call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
   }
-#endif
 
   verify_oop(obj);
 }
@@ -326,14 +317,10 @@
   const Register len_zero = len;
   initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero);
 
-
-#ifndef GRAAL
-  // TODO(thomaswue): Check how we can access the flag without a ciEnv object.
   if (CURRENT_ENV->dtrace_alloc_probes()) {
     assert(obj == rax, "must be");
     call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
   }
-#endif
 
   verify_oop(obj);
 }
--- a/src/cpu/x86/vm/c1_MacroAssembler_x86.hpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -51,13 +51,13 @@
   // disp_hdr: must point to the displaced header location, contents preserved
   // scratch : scratch register, contents destroyed
   // returns code offset at which to add null check debug information
-  int lock_object  (Register swap, Register obj, Register disp_hdr, Register scratch, Label& slow_case, bool use_basic_object_lock = true);
+  int lock_object  (Register swap, Register obj, Register disp_hdr, Register scratch, Label& slow_case);
 
   // unlocking
   // hdr     : contents destroyed
   // obj     : must point to the object to lock, contents preserved
   // disp_hdr: must be eax & must point to the displaced header location, contents destroyed
-  void unlock_object(Register swap, Register obj, Register lock, Label& slow_case, bool use_basic_object_lock = true);
+  void unlock_object(Register swap, Register obj, Register lock, Label& slow_case);
 
   void initialize_object(
     Register obj,                      // result: pointer to object after successful allocation
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -38,7 +38,6 @@
 #include "runtime/vframeArray.hpp"
 #include "vmreg_x86.inline.hpp"
 
-static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true);
 
 // Implementation of StubAssembler
 
@@ -113,14 +112,6 @@
     if (metadata_result->is_valid()) {
       movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
     }
-#ifdef GRAAL
-    // (thomaswue) Deoptimize in case of an exception.
-    restore_live_registers(this, false);
-    movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
-    leave();
-    movl(rscratch1, Deoptimization::make_trap_request(Deoptimization::Reason_constraint, Deoptimization::Action_reinterpret));
-    jump(RuntimeAddress(SharedRuntime::deopt_blob()->uncommon_trap()));
-#else
     if (frame_size() == no_frame_size) {
       leave();
       jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
@@ -129,7 +120,6 @@
     } else {
       jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
     }
-#endif
     bind(L);
   }
   // get oop results if there are any and reset the values in the thread
@@ -561,7 +551,7 @@
 }
 
 
-static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers/* = true*/) {
+static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
   __ block_comment("restore_live_registers");
 
   restore_fpu(sasm, restore_fpu_registers);
@@ -615,25 +605,6 @@
 // has_argument: true if the exception needs an argument (passed on stack because registers must be preserved)
 
 OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
-  OopMapSet* oop_maps = new OopMapSet();
-#ifdef GRAAL
-  OopMap* oop_map = save_live_registers(sasm, 1);
-
-  // now all registers are saved and can be used freely
-  // verify that no old value is used accidentally
-  __ invalidate_registers(true, true, true, true, true, true);
-
-  // registers used by this stub
-  const Register temp_reg = rbx;
-
-  // load argument for exception that is passed as an argument into the stub
-  if (has_argument) {
-    __ movptr(c_rarg1, r10);
-  }
-  int call_offset = __ call_RT(noreg, noreg, target, has_argument ? 1 : 0);
-
-  oop_maps->add_gc_map(call_offset, oop_map);
-#else
   // preserve all registers
   int num_rt_args = has_argument ? 2 : 1;
   OopMap* oop_map = save_live_registers(sasm, num_rt_args);
@@ -656,8 +627,8 @@
   }
   int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);
 
+  OopMapSet* oop_maps = new OopMapSet();
   oop_maps->add_gc_map(call_offset, oop_map);
-#endif
 
   __ stop("should not reach here");
 
@@ -1002,6 +973,7 @@
   return oop_maps;
 }
 
+
 OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 
   // for better readability
@@ -1048,10 +1020,6 @@
 
           __ push(rdi);
           __ push(rbx);
-#ifdef GRAAL
-          __ push(rcx);
-          __ push(rsi);
-#endif
 
           if (id == fast_new_instance_init_check_id) {
             // make sure the klass is initialized
@@ -1090,10 +1058,6 @@
 
           __ initialize_object(obj, klass, obj_size, 0, t1, t2);
           __ verify_oop(obj);
-#ifdef GRAAL
-          __ pop(rsi);
-          __ pop(rcx);
-#endif
           __ pop(rbx);
           __ pop(rdi);
           __ ret(0);
@@ -1107,19 +1071,11 @@
 
           __ initialize_object(obj, klass, obj_size, 0, t1, t2);
           __ verify_oop(obj);
-#ifdef GRAAL
-          __ pop(rsi);
-          __ pop(rcx);
-#endif
           __ pop(rbx);
           __ pop(rdi);
           __ ret(0);
 
           __ bind(slow_path);
-#ifdef GRAAL
-          __ pop(rsi);
-          __ pop(rcx);
-#endif
           __ pop(rbx);
           __ pop(rdi);
         }
@@ -1310,13 +1266,8 @@
         // will be placed in C ABI locations
 
 #ifdef _LP64
-#ifdef GRAAL
-        __ verify_oop(j_rarg0);
-        __ mov(rax, j_rarg0);
-#else
         __ verify_oop(c_rarg0);
         __ mov(rax, c_rarg0);
-#endif
 #else
         // The object is passed on the stack and we haven't pushed a
         // frame yet so it's one word away from top of stack.
@@ -1446,17 +1397,9 @@
         __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass
 
         Label miss;
-#ifdef GRAAL
-        Label success;
-          __ check_klass_subtype_fast_path(rsi, rax, rcx, &success, &miss, NULL);
-#endif
-
         __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);
 
         // fallthrough on success:
-#ifdef GRAAL
-        __ bind(success);
-#endif
         __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
         __ pop(rax);
         __ pop(rcx);
@@ -1862,242 +1805,6 @@
       }
       break;
 #endif // !SERIALGC
-#ifdef GRAAL
-    case graal_unwind_exception_call_id: {
-      // remove the frame from the stack
-      __ movptr(rsp, rbp);
-      __ pop(rbp);
-      // exception_oop is passed using ordinary java calling conventions
-      __ movptr(rax, j_rarg0);
-
-      Label nonNullExceptionOop;
-      __ testptr(rax, rax);
-      __ jcc(Assembler::notZero, nonNullExceptionOop);
-      {
-        __ enter();
-        oop_maps = new OopMapSet();
-        OopMap* oop_map = save_live_registers(sasm, 0);
-        int call_offset = __ call_RT(rax, noreg, (address)graal_create_null_exception, 0);
-        oop_maps->add_gc_map(call_offset, oop_map);
-        __ leave();
-      }
-      __ bind(nonNullExceptionOop);
-
-      __ set_info("unwind_exception", dont_gc_arguments);
-      // note: no stubframe since we are about to leave the current
-      //       activation and we are calling a leaf VM function only.
-      generate_unwind_exception(sasm);
-      __ should_not_reach_here();
-      break;
-    }
-
-    case graal_OSR_migration_end_id: {
-    __ enter();
-    save_live_registers(sasm, 0);
-    __ movptr(c_rarg0, j_rarg0);
-    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end)));
-    restore_live_registers(sasm);
-    __ leave();
-    __ ret(0);
-      break;
-    }
-
-    case graal_set_deopt_info_id: {
-    __ movptr(Address(r15_thread, JavaThread::graal_deopt_info_offset()), rscratch1);
-    __ ret(0);
-      break;
-    }
-
-    case graal_create_null_pointer_exception_id: {
-		__ enter();
-		oop_maps = new OopMapSet();
-		OopMap* oop_map = save_live_registers(sasm, 0);
-		int call_offset = __ call_RT(rax, noreg, (address)graal_create_null_exception, 0);
-		oop_maps->add_gc_map(call_offset, oop_map);
-		__ leave();
-		__ ret(0);
-      break;
-    }
-
-    case graal_create_out_of_bounds_exception_id: {
-		__ enter();
-		oop_maps = new OopMapSet();
-		OopMap* oop_map = save_live_registers(sasm, 0);
-		int call_offset = __ call_RT(rax, noreg, (address)graal_create_out_of_bounds_exception, j_rarg0);
-		oop_maps->add_gc_map(call_offset, oop_map);
-		__ leave();
-		__ ret(0);
-      break;
-    }
-
-    case graal_vm_error_id: {
-      __ enter();
-      oop_maps = new OopMapSet();
-      OopMap* oop_map = save_live_registers(sasm, 0);
-      int call_offset = __ call_RT(noreg, noreg, (address)graal_vm_error, j_rarg0, j_rarg1, j_rarg2);
-      oop_maps->add_gc_map(call_offset, oop_map);
-      restore_live_registers(sasm);
-      __ leave();
-      __ ret(0);
-      break;
-    }
-
-    case graal_log_printf_id: {
-      __ enter();
-      oop_maps = new OopMapSet();
-      OopMap* oop_map = save_live_registers(sasm, 0);
-      int call_offset = __ call_RT(noreg, noreg, (address)graal_log_printf, j_rarg0, j_rarg1, j_rarg2);
-      oop_maps->add_gc_map(call_offset, oop_map);
-      restore_live_registers(sasm);
-      __ leave();
-      __ ret(0);
-      break;
-    }
-
-    case graal_log_primitive_id: {
-      __ enter();
-      oop_maps = new OopMapSet();
-      OopMap* oop_map = save_live_registers(sasm, 0);
-      int call_offset = __ call_RT(noreg, noreg, (address)graal_log_primitive, j_rarg0, j_rarg1, j_rarg2);
-      oop_maps->add_gc_map(call_offset, oop_map);
-      restore_live_registers(sasm);
-      __ leave();
-      __ ret(0);
-      break;
-    }
-
-    case graal_log_object_id: {
-      __ enter();
-      oop_maps = new OopMapSet();
-      OopMap* oop_map = save_live_registers(sasm, 0);
-      int call_offset = __ call_RT(noreg, noreg, (address)graal_log_object, j_rarg0, j_rarg1);
-      oop_maps->add_gc_map(call_offset, oop_map);
-      restore_live_registers(sasm);
-      __ leave();
-      __ ret(0);
-      break;
-    }
-
-    case graal_verify_oop_id: {
-      // We use enter & leave so that a better stack trace is produced in the hs_err file
-      __ enter();
-      __ verify_oop(r13, "graal verify oop");
-      __ leave();
-      __ ret(0);
-      break;
-    }
-
-    case graal_arithmetic_frem_id: {
-      __ subptr(rsp, 8);
-      __ movflt(Address(rsp, 0), xmm1);
-      __ fld_s(Address(rsp, 0));
-      __ movflt(Address(rsp, 0), xmm0);
-      __ fld_s(Address(rsp, 0));
-      Label L;
-      __ bind(L);
-      __ fprem();
-      __ fwait();
-      __ fnstsw_ax();
-      __ testl(rax, 0x400);
-      __ jcc(Assembler::notZero, L);
-      __ fxch(1);
-      __ fpop();
-      __ fstp_s(Address(rsp, 0));
-      __ movflt(xmm0, Address(rsp, 0));
-      __ addptr(rsp, 8);
-      __ ret(0);
-      break;
-    }
-    case graal_arithmetic_drem_id: {
-      __ subptr(rsp, 8);
-      __ movdbl(Address(rsp, 0), xmm1);
-      __ fld_d(Address(rsp, 0));
-      __ movdbl(Address(rsp, 0), xmm0);
-      __ fld_d(Address(rsp, 0));
-      Label L;
-      __ bind(L);
-      __ fprem();
-      __ fwait();
-      __ fnstsw_ax();
-      __ testl(rax, 0x400);
-      __ jcc(Assembler::notZero, L);
-      __ fxch(1);
-      __ fpop();
-      __ fstp_d(Address(rsp, 0));
-      __ movdbl(xmm0, Address(rsp, 0));
-      __ addptr(rsp, 8);
-      __ ret(0);
-      break;
-    }
-    case graal_monitorenter_id: {
-      Label slow_case;
-
-      Register obj = j_rarg0;
-      Register lock = j_rarg1;
-
-      Register scratch1 = rax;
-      Register scratch2 = rbx;
-      assert_different_registers(obj, lock, scratch1, scratch2);
-
-      // copied from LIR_Assembler::emit_lock
-      if (UseFastLocking) {
-        assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
-        __ lock_object(scratch1, obj, lock, scratch2, slow_case, false);
-      __ ret(0);
-      }
-
-      __ bind(slow_case);
-      {
-        StubFrame f(sasm, "graal_monitorenter", dont_gc_arguments);
-        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);
-
-        // Called with store_parameter and not C abi
-        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, graal_monitorenter), obj, lock);
-
-        oop_maps = new OopMapSet();
-        oop_maps->add_gc_map(call_offset, map);
-        restore_live_registers(sasm, save_fpu_registers);
-      }
-      __ ret(0);
-      break;
-    }
-    case graal_monitorexit_id: {
-      Label slow_case;
-
-      Register obj = j_rarg0;
-      Register lock = j_rarg1;
-
-      // needed in rax later on...
-      Register lock2 = rax;
-      __ mov(lock2, lock);
-      Register scratch1 = rbx;
-      assert_different_registers(obj, lock, scratch1, lock2);
-
-      // copied from LIR_Assembler::emit_lock
-      if (UseFastLocking) {
-        assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
-        __ unlock_object(scratch1, obj, lock2, slow_case, false);
-      __ ret(0);
-      }
-
-      __ bind(slow_case);
-      {
-        StubFrame f(sasm, "graal_monitorexit", dont_gc_arguments);
-        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);
-
-        // note: really a leaf routine but must setup last java sp
-        //       => use call_RT for now (speed can be improved by
-        //       doing last java sp setup manually)
-        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, graal_monitorexit), obj, lock);
-
-        oop_maps = new OopMapSet();
-        oop_maps->add_gc_map(call_offset, map);
-        restore_live_registers(sasm, save_fpu_registers);
-      }
-      __ ret(0);
-      break;
-    }
-#endif
 
     default:
       { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
--- a/src/cpu/x86/vm/c1_globals_x86.hpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/cpu/x86/vm/c1_globals_x86.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -37,40 +37,26 @@
 define_pd_global(bool, ResizeTLAB,                   true );
 define_pd_global(bool, InlineIntrinsics,             true );
 define_pd_global(bool, PreferInterpreterNativeStubs, false);
+define_pd_global(bool, ProfileTraps,                 false);
+define_pd_global(bool, UseOnStackReplacement,        true );
 define_pd_global(bool, TieredCompilation,            false);
+define_pd_global(intx, CompileThreshold,             1500 );
 define_pd_global(intx, BackEdgeThreshold,            100000);
 
 define_pd_global(intx, OnStackReplacePercentage,     933  );
 define_pd_global(intx, FreqInlineSize,               325  );
 define_pd_global(intx, NewSizeThreadIncrease,        4*K  );
-define_pd_global(uintx,MetaspaceSize,                12*M );
-define_pd_global(bool, NeverActAsServerClassMachine, true );
-define_pd_global(uint64_t,MaxRAM,                    1ULL*G);
-define_pd_global(bool, CICompileOSR,                 true );
-
-#ifdef GRAAL
-define_pd_global(bool, ProfileTraps,                 true );
-define_pd_global(bool, UseOnStackReplacement,        true);
-define_pd_global(intx, CompileThreshold,             10000 );
-define_pd_global(intx, InitialCodeCacheSize,         16*M  );
-define_pd_global(intx, ReservedCodeCacheSize,        64*M );
-define_pd_global(bool, ProfileInterpreter,           true );
-define_pd_global(intx, CodeCacheExpansionSize,       64*K );
-define_pd_global(uintx,CodeCacheMinBlockLength,      4);
-define_pd_global(intx, TypeProfileWidth,             8);
-#else
-define_pd_global(bool, ProfileTraps,                 false);
-define_pd_global(bool, UseOnStackReplacement,        true );
-define_pd_global(intx, CompileThreshold,             1500 );
 define_pd_global(intx, InitialCodeCacheSize,         160*K);
 define_pd_global(intx, ReservedCodeCacheSize,        32*M );
 define_pd_global(bool, ProfileInterpreter,           false);
 define_pd_global(intx, CodeCacheExpansionSize,       32*K );
 define_pd_global(uintx,CodeCacheMinBlockLength,      1);
-define_pd_global(intx, TypeProfileWidth,             0);
-#endif // GRAAL
+define_pd_global(uintx,MetaspaceSize,                12*M );
+define_pd_global(bool, NeverActAsServerClassMachine, true );
+define_pd_global(uint64_t,MaxRAM,                    1ULL*G);
+define_pd_global(bool, CICompileOSR,                 true );
 #endif // !TIERED
-
+define_pd_global(bool, UseTypeProfile,               false);
 define_pd_global(bool, RoundFPResults,               true );
 
 define_pd_global(bool, LIRFillDelaySlots,            false);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/x86/vm/graalGlobals_x86.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_X86_VM_GRAALGLOBALS_X86_HPP
+#define CPU_X86_VM_GRAALGLOBALS_X86_HPP
+
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+// Sets the default values for platform dependent flags used by the Graal compiler.
+// (see graalGlobals.hpp)
+
+define_pd_global(bool, BackgroundCompilation,        true );
+define_pd_global(bool, UseTLAB,                      true );
+define_pd_global(bool, ResizeTLAB,                   true );
+define_pd_global(bool, InlineIntrinsics,             true );
+define_pd_global(bool, PreferInterpreterNativeStubs, false);
+define_pd_global(bool, TieredCompilation,            false);
+define_pd_global(intx, BackEdgeThreshold,            100000);
+
+define_pd_global(intx, OnStackReplacePercentage,     933  );
+define_pd_global(intx, FreqInlineSize,               325  );
+define_pd_global(intx, NewSizeThreadIncrease,        4*K  );
+define_pd_global(uintx,MetaspaceSize,                12*M );
+define_pd_global(uintx,MaxPermSize,                  64*M );
+define_pd_global(bool, NeverActAsServerClassMachine, true );
+define_pd_global(uint64_t,MaxRAM,                    1ULL*G);
+define_pd_global(bool, CICompileOSR,                 true );
+
+define_pd_global(bool, ProfileTraps,                 true );
+define_pd_global(bool, UseOnStackReplacement,        true);
+define_pd_global(intx, CompileThreshold,             10000 );
+define_pd_global(intx, InitialCodeCacheSize,         16*M  );
+define_pd_global(intx, ReservedCodeCacheSize,        64*M );
+define_pd_global(bool, ProfileInterpreter,           true );
+define_pd_global(intx, CodeCacheExpansionSize,       64*K );
+define_pd_global(uintx,CodeCacheMinBlockLength,      4);
+define_pd_global(intx, TypeProfileWidth,             8);
+
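+// These values match the defaults previously guarded by #ifdef GRAAL in
+// c1_globals_x86.hpp (removed above): interpreter profiling stays enabled
+// and the code cache is sized well beyond the client compiler defaults.
+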
+define_pd_global(bool, RoundFPResults,               true );
+
+define_pd_global(bool, LIRFillDelaySlots,            false);
+define_pd_global(bool, OptimizeSinglePrecision,      true );
+define_pd_global(bool, CSEArrayLength,               false);
+define_pd_global(bool, TwoOperandLIRForm,            true );
+
+define_pd_global(intx, SafepointPollOffset,          256  );
+
+#endif // CPU_X86_VM_GRAALGLOBALS_X86_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/x86/vm/graalRuntime_x86.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -0,0 +1,1193 @@
+/*
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "graal/graalRuntime.hpp"
+#include "interpreter/interpreter.hpp"
+#include "nativeInst_x86.hpp"
+#include "oops/compiledICHolder.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "register_x86.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/signature.hpp"
+#include "runtime/vframeArray.hpp"
+#include "vmreg_x86.inline.hpp"
+
+static void restore_live_registers(GraalStubAssembler* sasm, bool restore_fpu_registers = true);
+
+// Implementation of GraalStubAssembler
+
+int GraalStubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
+  // setup registers
+  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
+  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
+  assert(oop_result1 != thread && metadata_result != thread, "registers must be different");
+  assert(args_size >= 0, "illegal args_size");
+  bool align_stack = false;
+#ifdef _LP64
+  // At a method handle call, the stack may not be properly aligned
+  // when returning with an exception.
+  align_stack = false; // no GraalRuntime::handle_exception_from_callee_id stub exists yet
+#endif
+
+#ifdef _LP64
+  mov(c_rarg0, thread);
+  set_num_rt_args(0); // Nothing on stack
+#else
+  set_num_rt_args(1 + args_size);
+
+  // push java thread (becomes first argument of C function)
+  get_thread(thread);
+  push(thread);
+#endif // _LP64
+
+  int call_offset;
+  if (!align_stack) {
+    set_last_Java_frame(thread, noreg, rbp, NULL);
+  } else {
+    address the_pc = pc();
+    call_offset = offset();
+    set_last_Java_frame(thread, noreg, rbp, the_pc);
+    andptr(rsp, -(StackAlignmentInBytes));    // Align stack
+  }
+
+  // do the call
+  call(RuntimeAddress(entry));
+  if (!align_stack) {
+    call_offset = offset();
+  }
+  // verify callee-saved register
+#ifdef ASSERT
+  guarantee(thread != rax, "change this code");
+  push(rax);
+  { Label L;
+    get_thread(rax);
+    cmpptr(thread, rax);
+    jcc(Assembler::equal, L);
+    int3();
+    stop("GraalStubAssembler::call_RT: rdi not callee saved?");
+    bind(L);
+  }
+  pop(rax);
+#endif
+  reset_last_Java_frame(thread, true, align_stack);
+
+  // discard thread and arguments
+  NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));
+
+  // check for pending exceptions
+  { Label L;
+    cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
+    jcc(Assembler::equal, L);
+    // exception pending => remove activation and forward to exception handler
+    movptr(rax, Address(thread, Thread::pending_exception_offset()));
+    // make sure that the vm_results are cleared
+    if (oop_result1->is_valid()) {
+      movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
+    }
+    if (metadata_result->is_valid()) {
+      movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
+    }
+#ifdef GRAAL
+    // (thomaswue) Deoptimize in case of an exception.
+    restore_live_registers(this, false);
+    movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
+    leave();
+    movl(rscratch1, Deoptimization::make_trap_request(Deoptimization::Reason_constraint, Deoptimization::Action_reinterpret));
+    jump(RuntimeAddress(SharedRuntime::deopt_blob()->uncommon_trap()));
+#else
+    if (frame_size() == no_frame_size) {
+      leave();
+      jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
+    } else if (_stub_id == GraalRuntime::forward_exception_id) {
+      should_not_reach_here();
+    } else {
+      jump(RuntimeAddress(GraalRuntime::entry_for(GraalRuntime::forward_exception_id)));
+    }
+#endif
+    bind(L);
+  }
+  // get oop results if there are any and reset the values in the thread
+  if (oop_result1->is_valid()) {
+    get_vm_result(oop_result1, thread);
+  }
+  if (metadata_result->is_valid()) {
+    get_vm_result_2(metadata_result, thread);
+  }
+  return call_offset;
+}
+
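+// call_RT is the workhorse for calling from stub code into the VM runtime:
+// it passes the current thread as the implicit first C argument, records a
+// last-Java-frame so the stack stays walkable, performs the call, forwards
+// any pending exception, and returns the code offset of the call site so the
+// caller can attach a GC map to it (see the stubs in generate_code_for below).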
+
+int GraalStubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
+#ifdef _LP64
+  mov(c_rarg1, arg1);
+#else
+  push(arg1);
+#endif // _LP64
+  return call_RT(oop_result1, metadata_result, entry, 1);
+}
+
+
+int GraalStubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
+#ifdef _LP64
+  if (c_rarg1 == arg2) {
+    if (c_rarg2 == arg1) {
+      xchgq(arg1, arg2);
+    } else {
+      mov(c_rarg2, arg2);
+      mov(c_rarg1, arg1);
+    }
+  } else {
+    mov(c_rarg1, arg1);
+    mov(c_rarg2, arg2);
+  }
+#else
+  push(arg2);
+  push(arg1);
+#endif // _LP64
+  return call_RT(oop_result1, metadata_result, entry, 2);
+}
+
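+// The two-register overload above must shuffle arguments without clobbering:
+// if arg1 already sits in c_rarg2 and arg2 in c_rarg1, two plain moves would
+// overwrite one value, so that case is handled with a single xchgq instead.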
+
+int GraalStubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
+#ifdef _LP64
+  // if there is any conflict use the stack
+  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
+      arg2 == c_rarg1 || arg2 == c_rarg3 ||
+      arg3 == c_rarg1 || arg3 == c_rarg2) {
+    push(arg3);
+    push(arg2);
+    push(arg1);
+    pop(c_rarg1);
+    pop(c_rarg2);
+    pop(c_rarg3);
+  } else {
+    mov(c_rarg1, arg1);
+    mov(c_rarg2, arg2);
+    mov(c_rarg3, arg3);
+  }
+#else
+  push(arg3);
+  push(arg2);
+  push(arg1);
+#endif // _LP64
+  return call_RT(oop_result1, metadata_result, entry, 3);
+}
+
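+// With three arguments the number of possible register conflicts grows, so
+// rather than enumerating the permutations, the values take a round trip
+// through the stack: pushing arg3, arg2, arg1 and popping into c_rarg1,
+// c_rarg2, c_rarg3 realizes an arbitrary permutation safely.
+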
+// Implementation of GraalStubFrame
+
+class GraalStubFrame: public StackObj {
+ private:
+  GraalStubAssembler* _sasm;
+
+ public:
+  GraalStubFrame(GraalStubAssembler* sasm, const char* name, bool must_gc_arguments);
+  ~GraalStubFrame();
+};
+
+
+#define __ _sasm->
+
+GraalStubFrame::GraalStubFrame(GraalStubAssembler* sasm, const char* name, bool must_gc_arguments) {
+  _sasm = sasm;
+  __ set_info(name, must_gc_arguments);
+  __ enter();
+}
+
+GraalStubFrame::~GraalStubFrame() {
+  __ leave();
+  __ ret(0);
+}
+
+#undef __
+
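+// GraalStubFrame is an RAII helper: constructing one emits the stub prologue
+// (set_info + enter) and its destructor emits the standard epilogue
+// (leave + ret(0)), so a stub body wrapped in
+//
+//   { GraalStubFrame f(sasm, "new_multi_array", dont_gc_arguments);
+//     ... body ...
+//   }
+//
+// simply falls off the end. Stubs with an irregular epilogue, such as
+// graal_unwind_exception_call_id below, emit enter/leave themselves.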
+
+// Implementation of GraalRuntime
+
+const int float_regs_as_doubles_size_in_slots = FloatRegisterImpl::number_of_registers * 2;
+const int xmm_regs_as_doubles_size_in_slots = XMMRegisterImpl::number_of_registers * 2;
+
+// Stack layout for saving/restoring  all the registers needed during a runtime
+// call (this includes deoptimization)
+// Note that users of this frame may well have arguments to some runtime call
+// while these values are on the stack. These positions neglect those arguments,
+// but the code in save_live_registers takes the argument count into account.
+//
+#ifdef _LP64
+  #define SLOT2(x) x,
+  #define SLOT_PER_WORD 2
+#else
+  #define SLOT2(x)
+  #define SLOT_PER_WORD 1
+#endif // _LP64
+
+enum reg_save_layout {
+  // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that
+  // happen and will assert if the stack size we create is misaligned
+#ifdef _LP64
+  align_dummy_0, align_dummy_1,
+#endif // _LP64
+#ifdef _WIN64
+  // Windows always allocates space for its argument registers (see
+  // frame::arg_reg_save_area_bytes).
+  arg_reg_save_1, arg_reg_save_1H,                                                          // 0, 4
+  arg_reg_save_2, arg_reg_save_2H,                                                          // 8, 12
+  arg_reg_save_3, arg_reg_save_3H,                                                          // 16, 20
+  arg_reg_save_4, arg_reg_save_4H,                                                          // 24, 28
+#endif // _WIN64
+  xmm_regs_as_doubles_off,                                                                  // 32
+  float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots,  // 160
+  fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots,          // 224
+  // fpu_state_end_off is exclusive
+  fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD),                // 352
+  marker = fpu_state_end_off, SLOT2(markerH)                                                // 352, 356
+  extra_space_offset,                                                                       // 360
+#ifdef _LP64
+  r15_off = extra_space_offset, r15H_off,                                                   // 360, 364
+  r14_off, r14H_off,                                                                        // 368, 372
+  r13_off, r13H_off,                                                                        // 376, 380
+  r12_off, r12H_off,                                                                        // 384, 388
+  r11_off, r11H_off,                                                                        // 392, 396
+  r10_off, r10H_off,                                                                        // 400, 404
+  r9_off, r9H_off,                                                                          // 408, 412
+  r8_off, r8H_off,                                                                          // 416, 420
+  rdi_off, rdiH_off,                                                                        // 424, 428
+#else
+  rdi_off = extra_space_offset,
+#endif // _LP64
+  rsi_off, SLOT2(rsiH_off)                                                                  // 432, 436
+  rbp_off, SLOT2(rbpH_off)                                                                  // 440, 444
+  rsp_off, SLOT2(rspH_off)                                                                  // 448, 452
+  rbx_off, SLOT2(rbxH_off)                                                                  // 456, 460
+  rdx_off, SLOT2(rdxH_off)                                                                  // 464, 468
+  rcx_off, SLOT2(rcxH_off)                                                                  // 472, 476
+  rax_off, SLOT2(raxH_off)                                                                  // 480, 484
+  saved_rbp_off, SLOT2(saved_rbpH_off)                                                      // 488, 492
+  return_off, SLOT2(returnH_off)                                                            // 496, 500
+  reg_save_frame_size   // As noted: neglects any parameters to runtime                     // 504
+};
+
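+// All offsets in reg_save_layout are in 32-bit stack slots. After
+// save_live_registers has pushed the integer registers and opened the extra
+// space, a saved register lives at rsp + off * VMRegImpl::stack_slot_size;
+// e.g. on _LP64 the saved rax is found at rsp + rax_off * stack_slot_size,
+// which is exactly the slot recorded for it in generate_oop_map below.
+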
+// Save registers which might be killed by calls into the runtime.
+// Tries to be smart about FP registers.  In particular we separate
+// saving and describing the FPU registers for deoptimization since we
+// have to save the FPU registers twice if we describe them and on P4
+// saving FPU registers which don't contain anything appears
+// expensive.  The deopt blob is the only thing which needs to
+// describe FPU registers.  In all other cases it should be sufficient
+// to simply save their current value.
+
+static OopMap* generate_oop_map(GraalStubAssembler* sasm, int num_rt_args,
+                                bool save_fpu_registers = true) {
+
+  // In 64bit all the args are in regs so there are no additional stack slots
+  LP64_ONLY(num_rt_args = 0);
+  LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
+  int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread
+  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word );
+
+  // record saved value locations in an OopMap
+  // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
+  OopMap* map = new OopMap(frame_size_in_slots, 0);
+  map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
+#ifdef _LP64
+  map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args),  r8->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args),  r9->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg());
+  map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg());
+
+  // This is stupid but needed.
+  map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next());
+
+  map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args),  r8->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args),  r9->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next());
+  map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next());
+#endif // _LP64
+
+  if (save_fpu_registers) {
+    if (UseSSE < 2) {
+      int fpu_off = float_regs_as_doubles_off;
+      for (int n = 0; n < FloatRegisterImpl::number_of_registers; n++) {
+        VMReg fpu_name_0 = as_FloatRegister(n)->as_VMReg();
+        map->set_callee_saved(VMRegImpl::stack2reg(fpu_off +     num_rt_args), fpu_name_0);
+        // %%% This is really a waste but we'll keep things as they were for now
+        if (true) {
+          map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next());
+        }
+        fpu_off += 2;
+      }
+      assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots");
+    }
+
+    if (UseSSE >= 2) {
+      int xmm_off = xmm_regs_as_doubles_off;
+      for (int n = 0; n < XMMRegisterImpl::number_of_registers; n++) {
+        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
+        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off +     num_rt_args), xmm_name_0);
+        // %%% This is really a waste but we'll keep things as they were for now
+        if (true) {
+          map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next());
+        }
+        xmm_off += 2;
+      }
+      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
+
+    } else if (UseSSE == 1) {
+      int xmm_off = xmm_regs_as_doubles_off;
+      for (int n = 0; n < XMMRegisterImpl::number_of_registers; n++) {
+        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
+        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off +     num_rt_args), xmm_name_0);
+        xmm_off += 2;
+      }
+      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
+    }
+  }
+
+  return map;
+}
+
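+// Note on the "H" entries: VMReg models a 64-bit register as two 32-bit
+// halves, so both the low slot and its ->next() half have to be recorded for
+// the map to describe the whole register.
+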
+#define __ sasm->
+
+static OopMap* save_live_registers(GraalStubAssembler* sasm, int num_rt_args,
+                                   bool save_fpu_registers = true) {
+  __ block_comment("save_live_registers");
+
+  __ pusha();         // integer registers
+
+  // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
+  // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");
+
+  __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
+
+#ifdef ASSERT
+  __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
+#endif
+
+  if (save_fpu_registers) {
+    if (UseSSE < 2) {
+      // save FPU stack
+      __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
+      __ fwait();
+
+#ifdef ASSERT
+      Label ok;
+      __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
+      __ jccb(Assembler::equal, ok);
+      __ stop("corrupted control word detected");
+      __ bind(ok);
+#endif
+
+      // Reset the control word to guard against exceptions being unmasked
+      // since fstp_d can cause FPU stack underflow exceptions.  Write it
+      // into the on stack copy and then reload that to make sure that the
+      // current and future values are correct.
+      __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
+      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
+
+      // Save the FPU registers in de-opt-able form
+      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
+      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
+      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
+      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
+      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
+      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
+      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
+      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
+    }
+
+    if (UseSSE >= 2) {
+      // save XMM registers
+      // XMM registers can contain float or double values, but this is not known here,
+      // so always save them as doubles.
+      // note that float values are _not_ converted automatically, so for float values
+      // the second word contains only garbage data.
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0), xmm0);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8), xmm1);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
+#ifdef _LP64
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64), xmm8);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72), xmm9);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80), xmm10);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88), xmm11);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96), xmm12);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104), xmm13);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112), xmm14);
+      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120), xmm15);
+#endif // _LP64
+    } else if (UseSSE == 1) {
+      // save XMM registers as float because double not supported without SSE2
+      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0), xmm0);
+      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8), xmm1);
+      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
+      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
+      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
+      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
+      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
+      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
+    }
+  }
+
+  // FPU stack must be empty now
+  __ verify_FPU(0, "save_live_registers");
+
+  return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
+}
+
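+// save_live_registers and its counterpart restore_live_registers (below) are
+// always used as a pair around a call_RT, as in the canonical stub shape used
+// throughout generate_code_for:
+//
+//   __ enter();
+//   oop_maps = new OopMapSet();
+//   OopMap* oop_map = save_live_registers(sasm, 0);
+//   int call_offset = __ call_RT(noreg, noreg, (address)graal_log_object, j_rarg0, j_rarg1);
+//   oop_maps->add_gc_map(call_offset, oop_map);
+//   restore_live_registers(sasm);
+//   __ leave();
+//   __ ret(0);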
+
+static void restore_fpu(GraalStubAssembler* sasm, bool restore_fpu_registers = true) {
+  if (restore_fpu_registers) {
+    if (UseSSE >= 2) {
+      // restore XMM registers
+      __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
+      __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
+      __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
+      __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
+      __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
+      __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
+      __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
+      __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
+#ifdef _LP64
+      __ movdbl(xmm8, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64));
+      __ movdbl(xmm9, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72));
+      __ movdbl(xmm10, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80));
+      __ movdbl(xmm11, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88));
+      __ movdbl(xmm12, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96));
+      __ movdbl(xmm13, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104));
+      __ movdbl(xmm14, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112));
+      __ movdbl(xmm15, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120));
+#endif // _LP64
+    } else if (UseSSE == 1) {
+      // restore XMM registers
+      __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
+      __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
+      __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
+      __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
+      __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
+      __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
+      __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
+      __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
+    }
+
+    if (UseSSE < 2) {
+      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
+    } else {
+      // check that FPU stack is really empty
+      __ verify_FPU(0, "restore_live_registers");
+    }
+
+  } else {
+    // check that FPU stack is really empty
+    __ verify_FPU(0, "restore_live_registers");
+  }
+
+#ifdef ASSERT
+  {
+    Label ok;
+    __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
+    __ jcc(Assembler::equal, ok);
+    __ stop("bad offsets in frame");
+    __ bind(ok);
+  }
+#endif // ASSERT
+
+  __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
+}
+
+
+static void restore_live_registers(GraalStubAssembler* sasm, bool restore_fpu_registers/* = true*/) {
+  __ block_comment("restore_live_registers");
+
+  restore_fpu(sasm, restore_fpu_registers);
+  __ popa();
+}
+
+
+static void restore_live_registers_except_rax(GraalStubAssembler* sasm, bool restore_fpu_registers = true) {
+  __ block_comment("restore_live_registers_except_rax");
+
+  restore_fpu(sasm, restore_fpu_registers);
+
+#ifdef _LP64
+  __ movptr(r15, Address(rsp, 0));
+  __ movptr(r14, Address(rsp, wordSize));
+  __ movptr(r13, Address(rsp, 2 * wordSize));
+  __ movptr(r12, Address(rsp, 3 * wordSize));
+  __ movptr(r11, Address(rsp, 4 * wordSize));
+  __ movptr(r10, Address(rsp, 5 * wordSize));
+  __ movptr(r9,  Address(rsp, 6 * wordSize));
+  __ movptr(r8,  Address(rsp, 7 * wordSize));
+  __ movptr(rdi, Address(rsp, 8 * wordSize));
+  __ movptr(rsi, Address(rsp, 9 * wordSize));
+  __ movptr(rbp, Address(rsp, 10 * wordSize));
+  // skip rsp
+  __ movptr(rbx, Address(rsp, 12 * wordSize));
+  __ movptr(rdx, Address(rsp, 13 * wordSize));
+  __ movptr(rcx, Address(rsp, 14 * wordSize));
+
+  __ addptr(rsp, 16 * wordSize);
+#else
+
+  __ pop(rdi);
+  __ pop(rsi);
+  __ pop(rbp);
+  __ pop(rbx); // skip this value
+  __ pop(rbx);
+  __ pop(rdx);
+  __ pop(rcx);
+  __ addptr(rsp, BytesPerWord);
+#endif // _LP64
+}
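+
+// The _except_rax variant exists for stubs whose runtime call returns a value
+// in rax (e.g. a freshly allocated object in graal_new_instance_id below);
+// rax must survive the restore, so its slot is skipped.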
+
+OopMapSet* GraalRuntime::generate_handle_exception(StubID id, GraalStubAssembler *sasm) {
+  __ block_comment("generate_handle_exception");
+
+  // incoming parameters
+  const Register exception_oop = rax;
+  const Register exception_pc  = rdx;
+  // other registers used in this stub
+  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
+
+  // Save registers, if required.
+  OopMapSet* oop_maps = new OopMapSet();
+  OopMap* oop_map = NULL;
+  switch (id) {
+    case graal_handle_exception_nofpu_id:
+      // At this point all registers MAY be live.
+      oop_map = save_live_registers(sasm, 1 /*thread*/, id != graal_handle_exception_nofpu_id);
+      break;
+    default:  ShouldNotReachHere();
+  }
+
+#ifdef TIERED
+  // C2 can leave the fpu stack dirty
+  if (UseSSE < 2) {
+    __ empty_FPU_stack();
+  }
+#endif // TIERED
+
+  // verify that only rax and rdx are valid at this time
+#ifdef ASSERT
+  __ movptr(rbx, 0xDEAD);
+  __ movptr(rcx, 0xDEAD);
+  __ movptr(rsi, 0xDEAD);
+  __ movptr(rdi, 0xDEAD);
+#endif
+
+  // verify that rax contains a valid exception
+  __ verify_not_null_oop(exception_oop);
+
+  // load address of JavaThread object for thread-local data
+  NOT_LP64(__ get_thread(thread);)
+
+#ifdef ASSERT
+  // check that fields in JavaThread for exception oop and issuing pc are
+  // empty before writing to them
+  Label oop_empty;
+  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
+  __ jcc(Assembler::equal, oop_empty);
+  __ stop("exception oop already set");
+  __ bind(oop_empty);
+
+  Label pc_empty;
+  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
+  __ jcc(Assembler::equal, pc_empty);
+  __ stop("exception pc already set");
+  __ bind(pc_empty);
+#endif
+
+  // save exception oop and issuing pc into JavaThread
+  // (exception handler will load it from here)
+  __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
+  __ movptr(Address(thread, JavaThread::exception_pc_offset()),  exception_pc);
+
+  // patch throwing pc into return address (has bci & oop map)
+  __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);
+
+  // compute the exception handler.
+  // the exception oop and the throwing pc are read from the fields in JavaThread
+  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
+  oop_maps->add_gc_map(call_offset, oop_map);
+
+  // rax: handler address
+  //      will be the deopt blob if nmethod was deoptimized while we looked up
+  //      handler regardless of whether handler existed in the nmethod.
+
+  // only rax is valid at this time; all other registers have been destroyed by the runtime call
+#ifdef ASSERT
+  __ movptr(rbx, 0xDEAD);
+  __ movptr(rcx, 0xDEAD);
+  __ movptr(rdx, 0xDEAD);
+  __ movptr(rsi, 0xDEAD);
+  __ movptr(rdi, 0xDEAD);
+#endif
+
+  // patch the return address, this stub will directly return to the exception handler
+  __ movptr(Address(rbp, 1*BytesPerWord), rax);
+
+  switch (id) {
+    case graal_handle_exception_nofpu_id:
+      // Restore the registers that were saved at the beginning.
+      restore_live_registers(sasm, id != graal_handle_exception_nofpu_id);
+      break;
+    default:  ShouldNotReachHere();
+  }
+
+  return oop_maps;
+}
+
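+// generate_handle_exception patches the stub frame's return slot twice: first
+// with the throwing pc, so the runtime call that looks up the handler sees
+// the correct caller state, then with the handler address returned in rax, so
+// that the GraalStubFrame's ordinary leave()/ret(0) returns straight into the
+// exception handler.
+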
+void GraalRuntime::generate_unwind_exception(GraalStubAssembler *sasm) {
+  // incoming parameters
+  const Register exception_oop = rax;
+  // callee-saved copy of exception_oop during runtime call
+  const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
+  // other registers used in this stub
+  const Register exception_pc = rdx;
+  const Register handler_addr = rbx;
+  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
+
+  // verify that only rax is valid at this time
+#ifdef ASSERT
+  __ movptr(rbx, 0xDEAD);
+  __ movptr(rcx, 0xDEAD);
+  __ movptr(rdx, 0xDEAD);
+  __ movptr(rsi, 0xDEAD);
+  __ movptr(rdi, 0xDEAD);
+#endif
+
+#ifdef ASSERT
+  // check that fields in JavaThread for exception oop and issuing pc are empty
+  NOT_LP64(__ get_thread(thread);)
+  Label oop_empty;
+  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
+  __ jcc(Assembler::equal, oop_empty);
+  __ stop("exception oop must be empty");
+  __ bind(oop_empty);
+
+  Label pc_empty;
+  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
+  __ jcc(Assembler::equal, pc_empty);
+  __ stop("exception pc must be empty");
+  __ bind(pc_empty);
+#endif
+
+  // clear the FPU stack in case any FPU results are left behind
+  __ empty_FPU_stack();
+
+  // save exception_oop in callee-saved register to preserve it during runtime calls
+  __ verify_not_null_oop(exception_oop);
+  __ movptr(exception_oop_callee_saved, exception_oop);
+
+  NOT_LP64(__ get_thread(thread);)
+  // Get return address (is on top of stack after leave).
+  __ movptr(exception_pc, Address(rsp, 0));
+
+  // search the exception handler address of the caller (using the return address)
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
+  // rax: exception handler address of the caller
+
+  // Only rax (handler address) and the callee-saved exception oop copy (rsi/r14)
+  // are valid at this time; all other registers have been destroyed by the call.
+#ifdef ASSERT
+  __ movptr(rbx, 0xDEAD);
+  __ movptr(rcx, 0xDEAD);
+  __ movptr(rdx, 0xDEAD);
+  __ movptr(rdi, 0xDEAD);
+#endif
+
+  // move result of call into correct register
+  __ movptr(handler_addr, rax);
+
+  // Restore exception oop to RAX (required convention of exception handler).
+  __ movptr(exception_oop, exception_oop_callee_saved);
+
+  // verify that there is really a valid exception in rax
+  __ verify_not_null_oop(exception_oop);
+
+  // get throwing pc (= return address).
+  // rdx has been destroyed by the call, so it must be set again
+  // the pop is also necessary to simulate the effect of a ret(0)
+  __ pop(exception_pc);
+
+  // Restore SP from BP if the exception PC is a method handle call site.
+  NOT_LP64(__ get_thread(thread);)
+  __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
+  __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
+
+  // continue at exception handler (return address removed)
+  // note: do *not* remove arguments when unwinding the
+  //       activation since the caller assumes having
+  //       all arguments on the stack when entering the
+  //       runtime to determine the exception handler
+  //       (GC happens at call site with arguments!)
+  // rax: exception oop
+  // rdx: throwing pc
+  // rbx: exception handler
+  __ jmp(handler_addr);
+}
+
+OopMapSet* GraalRuntime::generate_code_for(StubID id, GraalStubAssembler* sasm) {
+
+  // for better readability
+  const bool must_gc_arguments = true;
+  const bool dont_gc_arguments = false;
+
+  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
+  bool save_fpu_registers = true;
+
+  // stub code & info for the different stubs
+  OopMapSet* oop_maps = NULL;
+  switch (id) {
+
+    case graal_new_instance_id:
+      {
+        Register klass = rdx; // Incoming
+        Register obj   = rax; // Result
+        __ set_info("new_instance", dont_gc_arguments);
+        __ enter();
+        OopMap* map = save_live_registers(sasm, 2);
+        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
+        oop_maps = new OopMapSet();
+        oop_maps->add_gc_map(call_offset, map);
+        restore_live_registers_except_rax(sasm);
+        __ verify_oop(obj);
+        __ leave();
+        __ ret(0);
+
+        // rax: new instance
+      }
+
+      break;
+
+    case graal_new_type_array_id:
+    case graal_new_object_array_id:
+      {
+        Register length   = rbx; // Incoming
+        Register klass    = rdx; // Incoming
+        Register obj      = rax; // Result
+
+        if (id == graal_new_type_array_id) {
+          __ set_info("new_type_array", dont_gc_arguments);
+        } else {
+          __ set_info("new_object_array", dont_gc_arguments);
+        }
+
+#ifdef ASSERT
+        // assert object type is really an array of the proper kind
+        {
+          Label ok;
+          Register t0 = obj;
+          __ movl(t0, Address(klass, Klass::layout_helper_offset()));
+          __ sarl(t0, Klass::_lh_array_tag_shift);
+          int tag = ((id == graal_new_type_array_id)
+                     ? Klass::_lh_array_tag_type_value
+                     : Klass::_lh_array_tag_obj_value);
+          __ cmpl(t0, tag);
+          __ jcc(Assembler::equal, ok);
+          __ stop("assert(is an array klass)");
+          __ should_not_reach_here();
+          __ bind(ok);
+        }
+#endif // ASSERT
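+        // (The layout helper encodes the array tag in its topmost bits, so
+        // the arithmetic shift right by _lh_array_tag_shift above leaves just
+        // the sign-extended tag for the comparison.)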
+        __ enter();
+        OopMap* map = save_live_registers(sasm, 3);
+        int call_offset;
+        if (id == graal_new_type_array_id) {
+          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
+        } else {
+          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
+        }
+
+        oop_maps = new OopMapSet();
+        oop_maps->add_gc_map(call_offset, map);
+        restore_live_registers_except_rax(sasm);
+
+        __ verify_oop(obj);
+        __ leave();
+        __ ret(0);
+
+        // rax: new array
+      }
+      break;
+
+    case graal_new_multi_array_id:
+      { GraalStubFrame f(sasm, "new_multi_array", dont_gc_arguments);
+        // rax: klass
+        // rbx: rank
+        // rcx: address of 1st dimension
+        OopMap* map = save_live_registers(sasm, 4);
+        int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);
+
+        oop_maps = new OopMapSet();
+        oop_maps->add_gc_map(call_offset, map);
+        restore_live_registers_except_rax(sasm);
+
+        // rax: new multi array
+        __ verify_oop(rax);
+      }
+      break;
+
+    case graal_register_finalizer_id:
+      {
+        __ set_info("register_finalizer", dont_gc_arguments);
+
+        // This is called via call_runtime so the arguments
+        // will be placed in C ABI locations
+
+#ifdef _LP64
+        __ verify_oop(j_rarg0);
+        __ mov(rax, j_rarg0);
+#else
+        // The object is passed on the stack and we haven't pushed a
+        // frame yet so it's one word away from top of stack.
+        __ movptr(rax, Address(rsp, 1 * BytesPerWord));
+        __ verify_oop(rax);
+#endif // _LP64
+
+        // load the klass and check the has finalizer flag
+        Label register_finalizer;
+        Register t = rsi;
+        __ load_klass(t, rax);
+        __ movl(t, Address(t, Klass::access_flags_offset()));
+        __ testl(t, JVM_ACC_HAS_FINALIZER);
+        __ jcc(Assembler::notZero, register_finalizer);
+        __ ret(0);
+
+        __ bind(register_finalizer);
+        __ enter();
+        OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
+        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
+        oop_maps = new OopMapSet();
+        oop_maps->add_gc_map(call_offset, oop_map);
+
+        // Now restore all the live registers
+        restore_live_registers(sasm);
+
+        __ leave();
+        __ ret(0);
+      }
+      break;
+
+    case graal_handle_exception_nofpu_id:
+      { GraalStubFrame f(sasm, "handle_exception", dont_gc_arguments);
+        oop_maps = generate_handle_exception(id, sasm);
+      }
+      break;
+
+    case graal_slow_subtype_check_id:
+      {
+        // Typical calling sequence:
+        // __ push(klass_RInfo);  // object klass or other subclass
+        // __ push(sup_k_RInfo);  // array element klass or other superclass
+        // __ call(slow_subtype_check);
+        // Note that the subclass is pushed first, and is therefore deepest.
+        // Previous versions of this code reversed the names 'sub' and 'super'.
+        // This was operationally harmless but made the code unreadable.
+        enum layout {
+          rax_off, SLOT2(raxH_off)
+          rcx_off, SLOT2(rcxH_off)
+          rsi_off, SLOT2(rsiH_off)
+          rdi_off, SLOT2(rdiH_off)
+          // saved_rbp_off, SLOT2(saved_rbpH_off)
+          return_off, SLOT2(returnH_off)
+          sup_k_off, SLOT2(sup_kH_off)
+          klass_off, SLOT2(superH_off)
+          framesize,
+          result_off = klass_off  // deepest argument is also the return value
+        };
+
+        __ set_info("slow_subtype_check", dont_gc_arguments);
+        __ push(rdi);
+        __ push(rsi);
+        __ push(rcx);
+        __ push(rax);
+
+        // This is called by pushing args and not with C abi
+        __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
+        __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass
+
+        Label miss;
+        Label success;
+        __ check_klass_subtype_fast_path(rsi, rax, rcx, &success, &miss, NULL);
+
+        __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);
+
+        // fallthrough on success:
+        __ bind(success);
+        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
+        __ pop(rax);
+        __ pop(rcx);
+        __ pop(rsi);
+        __ pop(rdi);
+        __ ret(0);
+
+        __ bind(miss);
+        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
+        __ pop(rax);
+        __ pop(rcx);
+        __ pop(rsi);
+        __ pop(rdi);
+        __ ret(0);
+      }
+      break;
+
+    case graal_unwind_exception_call_id: {
+      // remove the frame from the stack
+      __ movptr(rsp, rbp);
+      __ pop(rbp);
+      // exception_oop is passed using ordinary java calling conventions
+      __ movptr(rax, j_rarg0);
+
+      Label nonNullExceptionOop;
+      __ testptr(rax, rax);
+      __ jcc(Assembler::notZero, nonNullExceptionOop);
+      {
+        __ enter();
+        oop_maps = new OopMapSet();
+        OopMap* oop_map = save_live_registers(sasm, 0);
+        int call_offset = __ call_RT(rax, noreg, (address)graal_create_null_exception, 0);
+        oop_maps->add_gc_map(call_offset, oop_map);
+        __ leave();
+      }
+      __ bind(nonNullExceptionOop);
+
+      __ set_info("unwind_exception", dont_gc_arguments);
+      // note: no stub frame, since we are about to leave the current
+      //       activation and only call a leaf VM function.
+      generate_unwind_exception(sasm);
+      __ should_not_reach_here();
+      break;
+    }
+
+    case graal_OSR_migration_end_id: {
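+      // SharedRuntime::OSR_migration_end is a VM leaf that frees the C-heap
+      // buffer (passed in j_rarg0) into which the interpreter frame's locals
+      // and monitors were packed when on-stack replacement began.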
+      __ enter();
+      save_live_registers(sasm, 0);
+      __ movptr(c_rarg0, j_rarg0);
+      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end)));
+      restore_live_registers(sasm);
+      __ leave();
+      __ ret(0);
+      break;
+    }
+
+    case graal_set_deopt_info_id: {
+      __ movptr(Address(r15_thread, JavaThread::graal_deopt_info_offset()), rscratch1);
+      __ ret(0);
+      break;
+    }
+
+    case graal_create_null_pointer_exception_id: {
+      __ enter();
+      oop_maps = new OopMapSet();
+      OopMap* oop_map = save_live_registers(sasm, 0);
+      int call_offset = __ call_RT(rax, noreg, (address)graal_create_null_exception, 0);
+      oop_maps->add_gc_map(call_offset, oop_map);
+      __ leave();
+      __ ret(0);
+      break;
+    }
+
+    case graal_create_out_of_bounds_exception_id: {
+      __ enter();
+      oop_maps = new OopMapSet();
+      OopMap* oop_map = save_live_registers(sasm, 0);
+      int call_offset = __ call_RT(rax, noreg, (address)graal_create_out_of_bounds_exception, j_rarg0);
+      oop_maps->add_gc_map(call_offset, oop_map);
+      __ leave();
+      __ ret(0);
+      break;
+    }
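+
+    // Note on the two exception stubs above: assuming GraalStubAssembler::call_RT
+    // mirrors C1's StubAssembler::call_RT, passing rax as the first argument makes
+    // call_RT copy the VM result -- the exception oop published via
+    // thread->set_vm_result() -- into rax before these stubs return to compiled code.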
+
+    case graal_vm_error_id: {
+      __ enter();
+      oop_maps = new OopMapSet();
+      OopMap* oop_map = save_live_registers(sasm, 0);
+      int call_offset = __ call_RT(noreg, noreg, (address)graal_vm_error, j_rarg0, j_rarg1, j_rarg2);
+      oop_maps->add_gc_map(call_offset, oop_map);
+      restore_live_registers(sasm);
+      __ leave();
+      __ ret(0);
+      break;
+    }
+
+    case graal_log_printf_id: {
+      __ enter();
+      oop_maps = new OopMapSet();
+      OopMap* oop_map = save_live_registers(sasm, 0);
+      int call_offset = __ call_RT(noreg, noreg, (address)graal_log_printf, j_rarg0, j_rarg1, j_rarg2);
+      oop_maps->add_gc_map(call_offset, oop_map);
+      restore_live_registers(sasm);
+      __ leave();
+      __ ret(0);
+      break;
+    }
+
+    case graal_log_primitive_id: {
+      __ enter();
+      oop_maps = new OopMapSet();
+      OopMap* oop_map = save_live_registers(sasm, 0);
+      int call_offset = __ call_RT(noreg, noreg, (address)graal_log_primitive, j_rarg0, j_rarg1, j_rarg2);
+      oop_maps->add_gc_map(call_offset, oop_map);
+      restore_live_registers(sasm);
+      __ leave();
+      __ ret(0);
+      break;
+    }
+
+    case graal_log_object_id: {
+      __ enter();
+      oop_maps = new OopMapSet();
+      OopMap* oop_map = save_live_registers(sasm, 0);
+      int call_offset = __ call_RT(noreg, noreg, (address)graal_log_object, j_rarg0, j_rarg1);
+      oop_maps->add_gc_map(call_offset, oop_map);
+      restore_live_registers(sasm);
+      __ leave();
+      __ ret(0);
+      break;
+    }
+
+    case graal_verify_oop_id: {
+      // We use enter & leave so that a better stack trace is produced in the hs_err file
+      __ enter();
+      __ verify_oop(r13, "Graal verify oop");
+      __ leave();
+      __ ret(0);
+      break;
+    }
+
+    case graal_arithmetic_frem_id: {
+      __ subptr(rsp, 8);
+      __ movflt(Address(rsp, 0), xmm1);
+      __ fld_s(Address(rsp, 0));
+      __ movflt(Address(rsp, 0), xmm0);
+      __ fld_s(Address(rsp, 0));
+      Label L;
+      __ bind(L);
+      __ fprem();
+      __ fwait();
+      __ fnstsw_ax();
+      __ testl(rax, 0x400);
+      __ jcc(Assembler::notZero, L);
+      __ fxch(1);
+      __ fpop();
+      __ fstp_s(Address(rsp, 0));
+      __ movflt(xmm0, Address(rsp, 0));
+      __ addptr(rsp, 8);
+      __ ret(0);
+      break;
+    }
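+    // The fprem loop above (and the identical one in the drem stub below) is
+    // required because x87 FPREM produces only a partial remainder: the C2
+    // flag (bit 10, mask 0x400) of the FPU status word read by fnstsw_ax
+    // stays set while the reduction is incomplete. Once C2 clears, fxch/fpop
+    // drop the divisor, leaving the remainder on top of the x87 stack.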
+    case graal_arithmetic_drem_id: {
+      __ subptr(rsp, 8);
+      __ movdbl(Address(rsp, 0), xmm1);
+      __ fld_d(Address(rsp, 0));
+      __ movdbl(Address(rsp, 0), xmm0);
+      __ fld_d(Address(rsp, 0));
+      Label L;
+      __ bind(L);
+      __ fprem();
+      __ fwait();
+      __ fnstsw_ax();
+      __ testl(rax, 0x400);
+      __ jcc(Assembler::notZero, L);
+      __ fxch(1);
+      __ fpop();
+      __ fstp_d(Address(rsp, 0));
+      __ movdbl(xmm0, Address(rsp, 0));
+      __ addptr(rsp, 8);
+      __ ret(0);
+      break;
+    }
+    case graal_monitorenter_id: {
+      Register obj = j_rarg0;
+      Register lock = j_rarg1;
+      {
+        GraalStubFrame f(sasm, "graal_monitorenter", dont_gc_arguments);
+        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);
+
+        // obj and lock arrive in j_rarg0/j_rarg1 (Java calling convention), not via the C ABI
+        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, graal_monitorenter), obj, lock);
+
+        oop_maps = new OopMapSet();
+        oop_maps->add_gc_map(call_offset, map);
+        restore_live_registers(sasm, save_fpu_registers);
+      }
+      __ ret(0);
+      break;
+    }
+    case graal_monitorexit_id: {
+      Register obj = j_rarg0;
+      Register lock = j_rarg1;
+      {
+        GraalStubFrame f(sasm, "graal_monitorexit", dont_gc_arguments);
+        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);
+
+        // note: really a leaf routine, but it must set up last_Java_sp
+        //       => use call_RT for now (speed can be improved by
+        //       doing the last_Java_sp setup manually)
+        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, graal_monitorexit), obj, lock);
+
+        oop_maps = new OopMapSet();
+        oop_maps->add_gc_map(call_offset, map);
+        restore_live_registers(sasm, save_fpu_registers);
+      }
+      __ ret(0);
+      break;
+    }
+
+    default:
+      { GraalStubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
+        __ movptr(rax, (int)id);
+        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
+        __ should_not_reach_here();
+      }
+      break;
+  }
+  return oop_maps;
+}
+
+#undef __
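+
+// For reference, stub lookup is assumed to mirror the C1 pattern, roughly:
+//
+//   address GraalRuntime::entry_for(StubID id) {
+//     assert(0 <= id && id < number_of_ids, "illegal stub id");
+//     return _blobs[id]->code_begin();
+//   }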
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/x86/vm/graalStubAssembler_x86.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "graal/graalRuntime.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "gc_interface/collectedHeap.hpp"
+#include "interpreter/interpreter.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/markOop.hpp"
+#include "runtime/basicLock.hpp"
+#include "runtime/biasedLocking.hpp"
+#include "runtime/os.hpp"
+#include "runtime/stubRoutines.hpp"
+
+#ifndef PRODUCT
+
+void GraalStubAssembler::verify_stack_oop(int stack_offset) {
+  if (!VerifyOops) return;
+  verify_oop_addr(Address(rsp, stack_offset));
+}
+
+void GraalStubAssembler::verify_not_null_oop(Register r) {
+  if (!VerifyOops) return;
+  Label not_null;
+  testptr(r, r);
+  jcc(Assembler::notZero, not_null);
+  stop("non-null oop required");
+  bind(not_null);
+  verify_oop(r);
+}
+
+#endif // ifndef PRODUCT
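+
+// Usage sketch (illustrative): a stub can assert an oop invariant, e.g.
+//
+//   sasm->verify_not_null_oop(rax);  // stops the VM if rax is NULL; otherwise verifies it as an oop
+//
+// Both helpers return immediately unless -XX:+VerifyOops is set, and are only
+// compiled in non-PRODUCT builds (see the guard above).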
--- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -1763,7 +1763,8 @@
 
   int vep_offset = ((intptr_t)__ pc()) - start;
 
-#ifdef COMPILER1 || GRAAL
+#if defined(COMPILER1) || defined(GRAAL)
+
   if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
     // Object.hashCode can pull the hashCode from the header word
     // instead of doing a full VM transition once it's been computed.
--- a/src/share/vm/c1/c1_Runtime1.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -205,14 +205,6 @@
 #if defined(SPARC) || defined(PPC)
     case handle_exception_nofpu_id:  // Unused on sparc
 #endif
-#ifdef GRAAL
-    case graal_verify_oop_id:
-    case graal_unwind_exception_call_id:
-    case graal_OSR_migration_end_id:
-    case graal_arithmetic_frem_id:
-    case graal_arithmetic_drem_id:
-    case graal_set_deopt_info_id:
-#endif
       break;
 
     // All other stubs should have oopmaps
@@ -539,9 +531,8 @@
     if (TraceExceptions) {
       ttyLocker ttyl;
       ResourceMark rm;
-      int offset = pc - nm->code_begin();
-      tty->print_cr("Exception <%s> (0x%x) thrown in compiled method <%s> at PC " PTR_FORMAT " [" PTR_FORMAT "+%d] for thread 0x%x",
-                    exception->print_value_string(), (address)exception(), nm->method()->print_value_string(), pc, nm->code_begin(), offset, thread);
+      tty->print_cr("Exception <%s> (0x%x) thrown in compiled method <%s> at PC " PTR_FORMAT " for thread 0x%x",
+                    exception->print_value_string(), (address)exception(), nm->method()->print_value_string(), pc, thread);
     }
     // for AbortVMOnException flag
     NOT_PRODUCT(Exceptions::debug_check_abort(exception));
@@ -559,7 +550,7 @@
     thread->set_exception_pc(pc);
 
     // the exception cache is used only by non-implicit exceptions
-    if (continuation != NULL && !SharedRuntime::deopt_blob()->contains(continuation)) {
+    if (continuation != NULL) {
       nm->add_handler_for_exception_and_pc(exception, pc, continuation);
     }
   }
@@ -652,158 +643,6 @@
   SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError());
 JRT_END
 
-#ifdef GRAAL
-
-JRT_ENTRY(void, Runtime1::graal_create_null_exception(JavaThread* thread))
-  thread->set_vm_result(Exceptions::new_exception(thread, vmSymbols::java_lang_NullPointerException(), NULL)());
-JRT_END
-
-JRT_ENTRY(void, Runtime1::graal_create_out_of_bounds_exception(JavaThread* thread, jint index))
-  char message[jintAsStringSize];
-  sprintf(message, "%d", index);
-  thread->set_vm_result(Exceptions::new_exception(thread, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message)());
-JRT_END
-
-JRT_ENTRY_NO_ASYNC(void, Runtime1::graal_monitorenter(JavaThread* thread, oopDesc* obj, BasicLock* lock))
-  NOT_PRODUCT(_monitorenter_slowcase_cnt++;)
-#ifdef ASSERT
-  if (TraceGraal >= 3) {
-    char type[1024];
-    obj->klass()->name()->as_C_string(type, 1024);
-    markOop mark = obj->mark();
-    tty->print_cr("entered locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, obj, type, mark, lock);
-    tty->flush();
-  }
-  if (PrintBiasedLockingStatistics) {
-    Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
-  }
-#endif
-  Handle h_obj(thread, obj);
-  assert(h_obj()->is_oop(), "must be NULL or an object");
-  if (UseBiasedLocking) {
-    // Retry fast entry if bias is revoked to avoid unnecessary inflation
-    ObjectSynchronizer::fast_enter(h_obj, lock, true, CHECK);
-  } else {
-    if (UseFastLocking) {
-      // When using fast locking, the compiled code has already tried the fast case
-      ObjectSynchronizer::slow_enter(h_obj, lock, THREAD);
-    } else {
-      ObjectSynchronizer::fast_enter(h_obj, lock, false, THREAD);
-    }
-  }
-#ifdef ASSERT
-  if (TraceGraal >= 3) {
-    tty->print_cr("exiting locking");
-    tty->print_cr("");
-    tty->print_cr("done");
-  }
-#endif
-JRT_END
-
-
-JRT_LEAF(void, Runtime1::graal_monitorexit(JavaThread* thread, oopDesc* obj, BasicLock* lock))
-  NOT_PRODUCT(_monitorexit_slowcase_cnt++;)
-  assert(thread == JavaThread::current(), "threads must correspond");
-  assert(thread->last_Java_sp(), "last_Java_sp must be set");
-  // monitorexit is non-blocking (leaf routine) => no exceptions can be thrown
-  EXCEPTION_MARK;
-
-#ifdef DEBUG
-  if (!obj->is_oop()) {
-    ResetNoHandleMark rhm;
-    nmethod* method = thread->last_frame().cb()->as_nmethod_or_null();
-    if (method != NULL) {
-      tty->print_cr("ERROR in monitorexit in method %s wrong obj " INTPTR_FORMAT, method->name(), obj);
-    }
-    thread->print_stack_on(tty);
-    assert(false, "invalid lock object pointer dected");
-  }
-#endif
-
-  if (UseFastLocking) {
-    // When using fast locking, the compiled code has already tried the fast case
-    ObjectSynchronizer::slow_exit(obj, lock, THREAD);
-  } else {
-    ObjectSynchronizer::fast_exit(obj, lock, THREAD);
-  }
-#ifdef ASSERT
-  if (TraceGraal >= 3) {
-    char type[1024];
-    obj->klass()->name()->as_C_string(type, 1024);
-    tty->print_cr("exited locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, obj, type, obj->mark(), lock);
-    tty->flush();
-  }
-#endif
-JRT_END
-
-JRT_ENTRY(void, Runtime1::graal_log_object(JavaThread* thread, oop obj, jint flags))
-  bool string =  mask_bits_are_true(flags, LOG_OBJECT_STRING);
-  bool address = mask_bits_are_true(flags, LOG_OBJECT_ADDRESS);
-  bool newline = mask_bits_are_true(flags, LOG_OBJECT_NEWLINE);
-  if (!string) {
-    if (!address && obj->is_oop_or_null(true)) {
-      char buf[O_BUFLEN];
-      tty->print("%s@%p", obj->klass()->name()->as_C_string(buf, O_BUFLEN), obj);
-    } else {
-      tty->print("%p", obj);
-    }
-  } else {
-    ResourceMark rm;
-    assert(obj != NULL && java_lang_String::is_instance(obj), "must be");
-    char *buf = java_lang_String::as_utf8_string(obj);
-    tty->print(buf);
-  }
-  if (newline) {
-    tty->cr();
-  }
-JRT_END
-
-JRT_ENTRY(void, Runtime1::graal_vm_error(JavaThread* thread, oop where, oop format, jlong value))
-  ResourceMark rm;
-  assert(where == NULL || java_lang_String::is_instance(where), "must be");
-  const char *error_msg = where == NULL ? "<internal Graal error>" : java_lang_String::as_utf8_string(where);
-  char *detail_msg = NULL;
-  if (format != NULL) {
-    const char* buf = java_lang_String::as_utf8_string(format);
-    size_t detail_msg_length = strlen(buf) * 2;
-    detail_msg = (char *) NEW_RESOURCE_ARRAY(u_char, detail_msg_length);
-    jio_snprintf(detail_msg, detail_msg_length, buf, value);
-  }
-  report_vm_error(__FILE__, __LINE__, error_msg, detail_msg);
-JRT_END
-
-JRT_ENTRY(void, Runtime1::graal_log_printf(JavaThread* thread, oop format, jlong val))
-  ResourceMark rm;
-  assert(format != NULL && java_lang_String::is_instance(format), "must be");
-  char *buf = java_lang_String::as_utf8_string(format);
-  tty->print(buf, val);
-JRT_END
-
-JRT_ENTRY(void, Runtime1::graal_log_primitive(JavaThread* thread, jchar typeChar, jlong value, jboolean newline))
-  union {
-      jlong l;
-      jdouble d;
-      jfloat f;
-  } uu;
-  uu.l = value;
-  switch (typeChar) {
-    case 'z': tty->print(value == 0 ? "false" : "true"); break;
-    case 'b': tty->print("%d", (jbyte) value); break;
-    case 'c': tty->print("%c", (jchar) value); break;
-    case 's': tty->print("%d", (jshort) value); break;
-    case 'i': tty->print("%d", (jint) value); break;
-    case 'f': tty->print("%f", uu.f); break;
-    case 'j': tty->print(INT64_FORMAT, value); break;
-    case 'd': tty->print("%lf", uu.d); break;
-    default: assert(false, "unknown typeChar"); break;
-  }
-  if (newline) {
-    tty->cr();
-  }
-JRT_END
-
-#endif /* GRAAL */
-
 
 JRT_ENTRY_NO_ASYNC(void, Runtime1::monitorenter(JavaThread* thread, oopDesc* obj, BasicObjectLock* lock))
   NOT_PRODUCT(_monitorenter_slowcase_cnt++;)
@@ -1048,9 +887,7 @@
           mirror = Handle(THREAD, m);
         }
         break;
-      default:
-        tty->print_cr("Unhandled bytecode: %d stub_id=%d caller=%s bci=%d pc=%d", code, stub_id, caller_method->name()->as_C_string(), bci, caller_frame.pc());
-        Unimplemented();
+      default: Unimplemented();
     }
     // convert to handle
     load_klass = KlassHandle(THREAD, k);
--- a/src/share/vm/c1/c1_Runtime1.hpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/c1/c1_Runtime1.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -36,57 +36,7 @@
 // The Runtime1 holds all assembly stubs and VM
 // runtime routines needed by code generated
 // by the Compiler1.
-#ifdef GRAAL
-#define RUNTIME1_STUBS(stub, last_entry) \
-  stub(dtrace_object_alloc)          \
-  stub(unwind_exception)             \
-  stub(forward_exception)            \
-  stub(throw_range_check_failed)       /* throws ArrayIndexOutOfBoundsException */ \
-  stub(throw_index_exception)          /* throws IndexOutOfBoundsException */ \
-  stub(throw_div0_exception)         \
-  stub(throw_null_pointer_exception) \
-  stub(register_finalizer)           \
-  stub(new_instance)                 \
-  stub(fast_new_instance)            \
-  stub(fast_new_instance_init_check) \
-  stub(new_type_array)               \
-  stub(new_object_array)             \
-  stub(new_multi_array)              \
-  stub(handle_exception_nofpu)         /* optimized version that does not preserve fpu registers */ \
-  stub(handle_exception)             \
-  stub(handle_exception_from_callee) \
-  stub(throw_array_store_exception)  \
-  stub(throw_class_cast_exception)   \
-  stub(throw_incompatible_class_change_error)   \
-  stub(slow_subtype_check)           \
-  stub(monitorenter)                 \
-  stub(monitorenter_nofpu)             /* optimized version that does not preserve fpu registers */ \
-  stub(monitorexit)                  \
-  stub(monitorexit_nofpu)              /* optimized version that does not preserve fpu registers */ \
-  stub(deoptimize)                   \
-  stub(access_field_patching)        \
-  stub(load_klass_patching)          \
-  stub(load_mirror_patching)         \
-  stub(g1_pre_barrier_slow)          \
-  stub(g1_post_barrier_slow)         \
-  stub(fpu2long_stub)                \
-  stub(counter_overflow)             \
-  stub(graal_unwind_exception_call)  \
-  stub(graal_OSR_migration_end)      \
-  stub(graal_arithmetic_frem)        \
-  stub(graal_arithmetic_drem)        \
-  stub(graal_monitorenter)           \
-  stub(graal_monitorexit)            \
-  stub(graal_verify_oop)             \
-  stub(graal_vm_error)               \
-  stub(graal_set_deopt_info)         \
-  stub(graal_create_null_pointer_exception) \
-  stub(graal_create_out_of_bounds_exception) \
-  stub(graal_log_object)             \
-  stub(graal_log_printf)             \
-  stub(graal_log_primitive)          \
-  last_entry(number_of_ids)
-#else
+
 #define RUNTIME1_STUBS(stub, last_entry) \
   stub(dtrace_object_alloc)          \
   stub(unwind_exception)             \
@@ -122,7 +72,6 @@
   stub(fpu2long_stub)                \
   stub(counter_overflow)             \
   last_entry(number_of_ids)
-#endif
 
 #define DECLARE_STUB_ID(x)       x ## _id ,
 #define DECLARE_LAST_STUB_ID(x)  x
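 
 // For illustration (assumed expansion): applying the macros above as
 //   RUNTIME1_STUBS(DECLARE_STUB_ID, DECLARE_LAST_STUB_ID)
 // token-pastes each stub(x) into 'x_id ,', so the stub list becomes the body
 // of the StubID enum, e.g.
 //   enum StubID { dtrace_object_alloc_id , ... , number_of_ids };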
@@ -201,25 +150,9 @@
   static void throw_class_cast_exception(JavaThread* thread, oopDesc* object);
   static void throw_incompatible_class_change_error(JavaThread* thread);
   static void throw_array_store_exception(JavaThread* thread, oopDesc* object);
+
   static void monitorenter(JavaThread* thread, oopDesc* obj, BasicObjectLock* lock);
   static void monitorexit (JavaThread* thread, BasicObjectLock* lock);
-#ifdef GRAAL
-  static void graal_create_null_exception(JavaThread* thread);
-  static void graal_create_out_of_bounds_exception(JavaThread* thread, jint index);
-  static void graal_monitorenter(JavaThread* thread, oopDesc* obj, BasicLock* lock);
-  static void graal_monitorexit (JavaThread* thread, oopDesc* obj, BasicLock* lock);
-  static void graal_vm_error(JavaThread* thread, oop where, oop format, jlong value);
-  static void graal_log_printf(JavaThread* thread, oop format, jlong value);
-  static void graal_log_primitive(JavaThread* thread, jchar typeChar, jlong value, jboolean newline);
-
-  // Note: Must be kept in sync with constants in com.oracle.graal.snippets.Log
-  enum {
-    LOG_OBJECT_NEWLINE = 0x01,
-    LOG_OBJECT_STRING  = 0x02,
-    LOG_OBJECT_ADDRESS = 0x04
-  };
-  static void graal_log_object(JavaThread* thread, oop msg, jint flags);
-#endif // GRAAL
 
   static void deoptimize(JavaThread* thread);
 
--- a/src/share/vm/c1/c1_globals.hpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/c1/c1_globals.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -56,14 +56,6 @@
 //
 #define C1_FLAGS(develop, develop_pd, product, product_pd, notproduct)      \
                                                                             \
-  product(bool, DebugGraal, true,                                           \
-          "Enable JVMTI for the compiler thread")                           \
-  product(bool, BootstrapGraal, true,                                       \
-          "Bootstrap graal before running Java main method")                \
-  product(ccstr, GraalClassPath, NULL,                                      \
-          "Use the defined graal class path instead of searching for the classes") \
-  product(intx, TraceGraal, 0,                                              \
-          "Trace level for graal")                                          \
   product(bool, TraceSignals, false,                                        \
           "Trace signals and implicit exception handling")                  \
   /* Printing */                                                            \
--- a/src/share/vm/ci/ciEnv.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/ci/ciEnv.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -86,7 +86,7 @@
 // ciEnv::ciEnv
 ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter) {
   VM_ENTRY_MARK;
-  CompilerThread* compiler_thread = CompilerThread::current();
+
   // Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
   thread->set_env(this);
   assert(ciEnv::current() == this, "sanity");
@@ -104,7 +104,7 @@
 
   _system_dictionary_modification_counter = system_dictionary_modification_counter;
   _num_inlined_bytecodes = 0;
-  assert(task == NULL || compiler_thread->task() == task, "sanity");
+  assert(task == NULL || thread->task() == task, "sanity");
   _task = task;
   _log = NULL;
 
@@ -141,11 +141,7 @@
   ASSERT_IN_VM;
 
   // Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
-#ifdef GRAAL
-  JavaThread* current_thread = JavaThread::current();
-#else
   CompilerThread* current_thread = CompilerThread::current();
-#endif
   assert(current_thread->env() == NULL, "must be");
   current_thread->set_env(this);
   assert(ciEnv::current() == this, "sanity");
@@ -158,10 +154,8 @@
   _break_at_compile = false;
   _compiler_data = NULL;
 #ifndef PRODUCT
-#ifndef GRAAL
   assert(firstEnv, "must be first");
   firstEnv = false;
-#endif
 #endif /* !PRODUCT */
 
   _system_dictionary_modification_counter = 0;
@@ -194,16 +188,11 @@
 }
 
 ciEnv::~ciEnv() {
-#ifdef GRAAL
-  _factory->remove_symbols();
-  JavaThread::current()->set_env(NULL);
-#else
   CompilerThread* current_thread = CompilerThread::current();
   _factory->remove_symbols();
   // Need safepoint to clear the env on the thread.  RedefineClasses might
   // be reading it.
   GUARDED_VM_ENTRY(current_thread->set_env(NULL);)
-#endif
 }
 
 // ------------------------------------------------------------------
@@ -780,8 +769,8 @@
       Method* m = lookup_method(accessor->get_instanceKlass(), lookup, name_sym, sig_sym, bc);
       if (m != NULL &&
           (bc == Bytecodes::_invokestatic
-           ?  InstanceKlass::cast(m->method_holder())->is_not_initialized()
-           : !InstanceKlass::cast(m->method_holder())->is_loaded())) {
+           ?  m->method_holder()->is_not_initialized()
+           : !m->method_holder()->is_loaded())) {
         m = NULL;
       }
 #ifdef ASSERT
@@ -927,7 +916,7 @@
 
 // ------------------------------------------------------------------
 // ciEnv::register_method
-nmethod* ciEnv::register_method(ciMethod* target,
+void ciEnv::register_method(ciMethod* target,
                             int entry_bci,
                             CodeOffsets* offsets,
                             int orig_pc_offset,
@@ -995,7 +984,7 @@
       // If the code buffer is created on each compile attempt
       // as in C2, then it must be freed.
       code_buffer->free_blob();
-      return NULL;
+      return;
     }
 
     assert(offsets->value(CodeOffsets::Deopt) != -1, "must have deopt entry");
@@ -1073,7 +1062,7 @@
                         method_name,
                         entry_bci);
         }
-        InstanceKlass::cast(method->method_holder())->add_osr_nmethod(nm);
+        method->method_holder()->add_osr_nmethod(nm);
 
       }
     }
@@ -1083,7 +1072,6 @@
     nm->post_compiled_method_load_event();
   }
 
-  return nm;
 }
 
 
--- a/src/share/vm/ci/ciEnv.hpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/ci/ciEnv.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -45,9 +45,6 @@
 
   friend class CompileBroker;
   friend class Dependencies;  // for get_object, during logging
-#ifdef GRAAL
-  friend class CodeInstaller;
-#endif
 
   static fileStream* _replay_data_stream;
 
@@ -105,8 +102,6 @@
   ciInstance* _the_null_string;      // The Java string "null"
   ciInstance* _the_min_jint_string; // The Java string "-2147483648"
 
-public:
-
   // Look up a klass by name from a particular class loader (the accessor's).
   // If require_local, result must be defined in that class loader, or NULL.
   // If !require_local, a result from remote class loader may be reported,
@@ -137,8 +132,6 @@
                                  int method_index, Bytecodes::Code bc,
                                  ciInstanceKlass* loading_klass);
 
-private:
-
   // Implementation methods for loading and constant pool access.
   ciKlass* get_klass_by_name_impl(ciKlass* accessing_klass,
                                   constantPoolHandle cpool,
@@ -166,7 +159,6 @@
                            Symbol*         sig,
                            Bytecodes::Code bc);
 
-  public:
   // Get a ciObject from the object factory.  Ensures uniqueness
   // of ciObjects.
   ciObject* get_object(oop o) {
@@ -176,7 +168,6 @@
       return _factory->get(o);
     }
   }
-  private:
 
   ciSymbol* get_symbol(Symbol* o) {
     if (o == NULL) {
@@ -362,7 +353,7 @@
   uint compile_id();  // task()->compile_id()
 
   // Register the result of a compilation.
-  nmethod* register_method(ciMethod*             target,
+  void register_method(ciMethod*                 target,
                        int                       entry_bci,
                        CodeOffsets*              offsets,
                        int                       orig_pc_offset,
@@ -430,10 +421,10 @@
   Arena*    arena() { return _arena; }
 
   // What is the current compilation environment?
-  static ciEnv* current() { return JavaThread::current()->env(); }
+  static ciEnv* current() { return CompilerThread::current()->env(); }
 
   // Overload with current thread argument
-  static ciEnv* current(JavaThread *thread) { return thread->env(); }
+  static ciEnv* current(CompilerThread *thread) { return thread->env(); }
 
   // Per-compiler data.  (Used by C2 to publish the Compile* pointer.)
   void* compiler_data() { return _compiler_data; }
--- a/src/share/vm/ci/ciKlass.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/ci/ciKlass.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -71,9 +71,8 @@
   if (this == that) {
     return true;
   }
-#ifndef GRAAL
+
   VM_ENTRY_MARK;
-#endif
   Klass* this_klass = get_Klass();
   Klass* that_klass = that->get_Klass();
   bool result = this_klass->is_subtype_of(that_klass);
--- a/src/share/vm/ci/ciObjectFactory.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/ci/ciObjectFactory.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -84,13 +84,11 @@
   _arena = arena;
   _ci_metadata = new (arena) GrowableArray<ciMetadata*>(arena, expected_size, 0, NULL);
 
-#ifndef GRAAL
   // If the shared ci objects exist append them to this factory's objects
 
   if (_shared_ci_metadata != NULL) {
     _ci_metadata->appendAll(_shared_ci_metadata);
   }
-#endif
 
   _unloaded_methods = new (arena) GrowableArray<ciMethod*>(arena, 4, 0, NULL);
   _unloaded_klasses = new (arena) GrowableArray<ciKlass*>(arena, 8, 0, NULL);
@@ -207,7 +205,6 @@
 
 
 ciSymbol* ciObjectFactory::get_symbol(Symbol* key) {
-#ifndef GRAAL
   vmSymbols::SID sid = vmSymbols::find_sid(key);
   if (sid != vmSymbols::NO_SID) {
     // do not pollute the main cache with it
@@ -215,8 +212,6 @@
   }
 
   assert(vmSymbols::find_sid(key) == vmSymbols::NO_SID, "");
-#endif
-
   ciSymbol* s = new (arena()) ciSymbol(key, vmSymbols::NO_SID);
   _symbols->push(s);
   return s;
--- a/src/share/vm/ci/ciUtilities.hpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/ci/ciUtilities.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -36,7 +36,7 @@
 
 // Bring the compilation thread into the VM state.
 #define VM_ENTRY_MARK                       \
-  JavaThread* thread=JavaThread::current(); \
+  CompilerThread* thread=CompilerThread::current(); \
   ThreadInVMfromNative __tiv(thread);       \
   ResetNoHandleMark rnhm;                   \
   HandleMarkCleaner __hm(thread);           \
@@ -47,7 +47,7 @@
 
 // Bring the compilation thread into the VM state.  No handle mark.
 #define VM_QUICK_ENTRY_MARK                 \
-  JavaThread* thread=JavaThread::current(); \
+  CompilerThread* thread=CompilerThread::current(); \
   ThreadInVMfromNative __tiv(thread);       \
 /*                                          \
  * [TODO] The NoHandleMark line does nothing but declare a function prototype \
@@ -60,7 +60,7 @@
 
 
 #define EXCEPTION_CONTEXT \
-  JavaThread* thread=JavaThread::current(); \
+  CompilerThread* thread=CompilerThread::current(); \
   Thread* THREAD = thread;
 
 
--- a/src/share/vm/code/codeBlob.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/code/codeBlob.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -134,10 +134,11 @@
   cb->copy_code_and_locs_to(this);
   set_oop_maps(oop_maps);
   _frame_size = frame_size;
-#ifdef COMPILER1
+#if defined(COMPILER1) || defined(GRAAL)
+
   // probably wrong for tiered
   assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
-#endif // COMPILER1
+#endif // COMPILER1 || GRAAL
 }
 
 
@@ -383,7 +384,7 @@
   _unpack_offset           = unpack_offset;
   _unpack_with_exception   = unpack_with_exception_offset;
   _unpack_with_reexecution = unpack_with_reexecution_offset;
-#ifdef COMPILER1
+#if defined(COMPILER1) || defined(GRAAL)
   _unpack_with_exception_in_tls   = -1;
 #endif
 }
--- a/src/share/vm/code/codeBlob.hpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/code/codeBlob.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -105,6 +105,7 @@
 
   virtual bool is_compiled_by_c2() const         { return false; }
   virtual bool is_compiled_by_c1() const         { return false; }
+  virtual bool is_compiled_by_graal() const      { return false; }
 
   // Casting
   nmethod* as_nmethod_or_null()                  { return is_nmethod() ? (nmethod*) this : NULL; }
@@ -357,7 +358,7 @@
 
   int _unpack_with_exception_in_tls;
 
-  // (thomaswue) Offset when graal calls uncommon_trap.
+  // (thomaswue) Offset when Graal calls uncommon_trap.
   int _uncommon_trap_offset;
   int _implicit_exception_uncommon_trap_offset;
 
@@ -415,7 +416,7 @@
   }
   address unpack_with_exception_in_tls() const   { return code_begin() + _unpack_with_exception_in_tls; }
 
-  // (thomaswue) Offset when graal calls uncommon_trap.
+  // (thomaswue) Offset when Graal calls uncommon_trap.
   void set_uncommon_trap_offset(int offset) {
     _uncommon_trap_offset = offset;
     assert(contains(code_begin() + _uncommon_trap_offset), "must be PC inside codeblob");
--- a/src/share/vm/code/compiledIC.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/code/compiledIC.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -239,8 +239,8 @@
   // for calling directly to vep without using the inline cache (i.e., cached_value == NULL)
 #ifdef ASSERT
   CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
-  bool is_c1_method = caller->is_compiled_by_c1();
-  assert( is_c1_method ||
+  bool is_c1_or_graal_method = caller->is_compiled_by_c1() || caller->is_compiled_by_graal();
+  assert( is_c1_or_graal_method ||
          !is_monomorphic ||
          is_optimized() ||
          (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
--- a/src/share/vm/code/nmethod.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/code/nmethod.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -102,6 +102,11 @@
   if (is_native_method()) return false;
   return compiler()->is_c1();
 }
+bool nmethod::is_compiled_by_graal() const {
+  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
+  if (is_native_method()) return false;
+  return compiler()->is_graal();
+}
 bool nmethod::is_compiled_by_c2() const {
   if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
   if (is_native_method()) return false;
@@ -864,7 +869,7 @@
 #ifdef GRAAL
     _graal_installed_code = installed_code();
 
-    // graal produces no (!) stub section
+    // Graal might not produce any stub sections
     if (offsets->value(CodeOffsets::Exceptions) != -1) {
       _exception_offset        = code_offset()          + offsets->value(CodeOffsets::Exceptions);
     } else {
@@ -2552,6 +2557,8 @@
     tty->print("(c2) ");
   } else if (is_compiled_by_shark()) {
     tty->print("(shark) ");
+  } else if (is_compiled_by_graal()) {
+    tty->print("(Graal) ");
   } else {
     tty->print("(nm) ");
   }
--- a/src/share/vm/code/nmethod.hpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/code/nmethod.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -352,6 +352,7 @@
   bool is_osr_method() const                      { return _entry_bci != InvocationEntryBci; }
 
   bool is_compiled_by_c1() const;
+  bool is_compiled_by_graal() const;
   bool is_compiled_by_c2() const;
   bool is_compiled_by_shark() const;
 
--- a/src/share/vm/compiler/abstractCompiler.hpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/compiler/abstractCompiler.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -50,26 +50,36 @@
   // Missing feature tests
   virtual bool supports_native()                 { return true; }
   virtual bool supports_osr   ()                 { return true; }
-#if defined(TIERED) || ( !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK))
+#if defined(TIERED) || ( !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK) && !defined(GRAAL))
   virtual bool is_c1   ()                        { return false; }
   virtual bool is_c2   ()                        { return false; }
   virtual bool is_shark()                        { return false; }
+  virtual bool is_graal()                        { return false; }
 #else
 #ifdef COMPILER1
   bool is_c1   ()                                { return true; }
   bool is_c2   ()                                { return false; }
   bool is_shark()                                { return false; }
+  bool is_graal()                                { return false; }
 #endif // COMPILER1
 #ifdef COMPILER2
   bool is_c1   ()                                { return false; }
   bool is_c2   ()                                { return true; }
   bool is_shark()                                { return false; }
+  bool is_graal()                                { return false; }
 #endif // COMPILER2
 #ifdef SHARK
   bool is_c1   ()                                { return false; }
   bool is_c2   ()                                { return false; }
   bool is_shark()                                { return true; }
+  bool is_graal()                                { return false; }
 #endif // SHARK
+#ifdef GRAAL
+  bool is_c1   ()                                { return false; }
+  bool is_c2   ()                                { return false; }
+  bool is_shark()                                { return false; }
+  bool is_graal()                                { return true; }
+#endif // GRAAL
 #endif // TIERED
 
   void mark_initialized()                        { _is_initialized = true; }
--- a/src/share/vm/compiler/compileBroker.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/compiler/compileBroker.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -51,6 +51,9 @@
 #ifdef COMPILER1
 #include "c1/c1_Compiler.hpp"
 #endif
+#ifdef GRAAL
+#include "graal/graalCompiler.hpp"
+#endif
 #ifdef COMPILER2
 #include "opto/c2compiler.hpp"
 #endif
--- a/src/share/vm/compiler/oopMap.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/compiler/oopMap.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -319,6 +319,7 @@
 static void add_derived_oop(oop* base, oop* derived) {
 #ifndef TIERED
   COMPILER1_PRESENT(ShouldNotReachHere();)
+  GRAAL_ONLY(ShouldNotReachHere();)
 #endif // TIERED
 #ifdef COMPILER2
   DerivedPointerTable::add(derived, base);
@@ -380,6 +381,7 @@
     if (!oms.is_done()) {
 #ifndef TIERED
       COMPILER1_PRESENT(ShouldNotReachHere();)
+      GRAAL_ONLY(ShouldNotReachHere();)
 #endif // !TIERED
       // Protect the operation on the derived pointers.  This
       // protects the addition of derived pointers to the shared
@@ -502,7 +504,7 @@
 
   // Check that runtime stubs save all callee-saved registers
 #ifdef COMPILER2
-  assert(cb->is_compiled_by_c1() || !cb->is_runtime_stub() ||
+  assert(cb->is_compiled_by_c1() || cb->is_compiled_by_graal() || !cb->is_runtime_stub() ||
          (nof_callee >= SAVED_ON_ENTRY_REG_COUNT || nof_callee >= C_SAVED_ON_ENTRY_REG_COUNT),
          "must save all");
 #endif // COMPILER2
@@ -521,6 +523,7 @@
 bool OopMap::has_derived_pointer() const {
 #ifndef TIERED
   COMPILER1_PRESENT(return false);
+  GRAAL_ONLY(return false);
 #endif // !TIERED
 #ifdef COMPILER2
   OopMapStream oms((OopMap*)this,OopMapValue::derived_oop_value);
--- a/src/share/vm/graal/graalCodeInstaller.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/graal/graalCodeInstaller.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -29,7 +29,7 @@
 #include "graal/graalJavaAccess.hpp"
 #include "graal/graalCompilerToVM.hpp"
 #include "graal/graalVmIds.hpp"
-#include "c1/c1_Runtime1.hpp"
+#include "graal/graalRuntime.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "vmreg_x86.inline.hpp"
 
--- a/src/share/vm/graal/graalCompiler.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/graal/graalCompiler.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -30,7 +30,7 @@
 #include "graal/graalCompilerToVM.hpp"
 #include "graal/graalVmIds.hpp"
 #include "graal/graalEnv.hpp"
-#include "c1/c1_Runtime1.hpp"
+#include "graal/graalRuntime.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/compilationPolicy.hpp"
 
@@ -49,10 +49,15 @@
   JavaThread* THREAD = JavaThread::current();
   TRACE_graal_1("GraalCompiler::initialize");
 
+  unsigned long heap_end = (unsigned long) Universe::heap()->reserved_region().end();
+  unsigned long allocation_end = heap_end + 16L * 1024 * 1024 * 1024;
+  guarantee(heap_end < allocation_end, "heap end too close to end of address space (might lead to erroneous TLAB allocations)");
+  NOT_LP64(error("check TLAB allocation code for address space conflicts"));
+
   _deopted_leaf_graph_count = 0;
 
   initialize_buffer_blob();
-  Runtime1::initialize(THREAD->get_buffer_blob());
+  GraalRuntime::initialize(THREAD->get_buffer_blob());
 
   JNIEnv *env = ((JavaThread *) Thread::current())->jni_environment();
   jclass klass = env->FindClass("com/oracle/graal/hotspot/bridge/CompilerToVMImpl");
@@ -65,14 +70,14 @@
   ResourceMark rm;
   HandleMark hm;
   {
-    VM_ENTRY_MARK;
+    GRAAL_VM_ENTRY_MARK;
     check_pending_exception("Could not register natives");
   }
 
   graal_compute_offsets();
 
   {
-    VM_ENTRY_MARK;
+    GRAAL_VM_ENTRY_MARK;
     HandleMark hm;
     VMToCompiler::setDefaultOptions();
     for (int i = 0; i < Arguments::num_graal_args(); ++i) {
@@ -139,19 +144,14 @@
 
   JavaThread* THREAD = JavaThread::current();
   if (THREAD->get_buffer_blob() == NULL) {
-    // setup CodeBuffer.  Preallocate a BufferBlob of size
-    // NMethodSizeLimit plus some extra space for constants.
-    int code_buffer_size = Compilation::desired_max_code_buffer_size() +
-      Compilation::desired_max_constant_size();
-    BufferBlob* blob = BufferBlob::create("graal temporary CodeBuffer",
-                                          code_buffer_size);
+    BufferBlob* blob = BufferBlob::create("Graal thread-local CodeBuffer", GraalNMethodSizeLimit);
     guarantee(blob != NULL, "must create code buffer");
     THREAD->set_buffer_blob(blob);
   }
 }
 
 void GraalCompiler::compile_method(methodHandle method, int entry_bci, jboolean blocking) {
-  EXCEPTION_CONTEXT
+  GRAAL_EXCEPTION_CONTEXT
   if (!_initialized) {
     method->clear_queued_for_compilation();
     method->invocation_counter()->reset();
--- a/src/share/vm/graal/graalCompiler.hpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/graal/graalCompiler.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -48,12 +48,11 @@
 
   virtual const char* name() { return "G"; }
 
-  // Native / OSR not supported
-  virtual bool supports_native()                 { return true; }
+  virtual bool supports_native()                 { return false; }
   virtual bool supports_osr   ()                 { return true; }
 
-  // Pretend to be C1
-  bool is_c1   ()                                { return true; }
+  bool is_graal()                                { return true; }
+  bool is_c1   ()                                { return false; }
   bool is_c2   ()                                { return false; }
 
   // Initialization
--- a/src/share/vm/graal/graalCompilerToVM.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/graal/graalCompilerToVM.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -27,7 +27,7 @@
 #include "oops/generateOopMap.hpp"
 #include "oops/fieldStreams.hpp"
 #include "runtime/javaCalls.hpp"
-#include "c1/c1_Runtime1.hpp"
+#include "graal/graalRuntime.hpp"
 #include "compiler/compileBroker.hpp"
 #include "compiler/compilerOracle.hpp"
 #include "graal/graalCompilerToVM.hpp"
@@ -48,7 +48,7 @@
 #define C2V_VMENTRY(result_type, name, signature) \
   JNIEXPORT result_type JNICALL c2v_ ## name signature { \
   TRACE_graal_3("CompilerToVM::" #name); \
-  VM_ENTRY_MARK; \
+  GRAAL_VM_ENTRY_MARK; \
 
 // Entry to native method implementation that calls a JNI function
 // and hence cannot transition current thread to '_thread_in_vm'.
@@ -669,41 +669,36 @@
   set_int(env, config, "bciProfileWidth", BciProfileWidth);
   set_int(env, config, "typeProfileWidth", TypeProfileWidth);
 
-  // We use the fast path stub so that we get TLAB refills whenever possible instead of
-  // unconditionally allocating directly from the heap (which the slow path does).
-  // The stub must also do initialization when the compiled check fails.
-  Runtime1::StubID newInstanceStub = Runtime1::fast_new_instance_init_check_id;
-
   set_long(env, config, "debugStub", VmIds::addStub((address)warning));
-  set_long(env, config, "instanceofStub", VmIds::addStub(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
-  set_long(env, config, "newInstanceStub", VmIds::addStub(Runtime1::entry_for(newInstanceStub)));
-  set_long(env, config, "newTypeArrayStub", VmIds::addStub(Runtime1::entry_for(Runtime1::new_type_array_id)));
-  set_long(env, config, "newObjectArrayStub", VmIds::addStub(Runtime1::entry_for(Runtime1::new_object_array_id)));
-  set_long(env, config, "newMultiArrayStub", VmIds::addStub(Runtime1::entry_for(Runtime1::new_multi_array_id)));
+  set_long(env, config, "instanceofStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_slow_subtype_check_id)));
+  set_long(env, config, "newInstanceStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_new_instance_id)));
+  set_long(env, config, "newTypeArrayStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_new_type_array_id)));
+  set_long(env, config, "newObjectArrayStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_new_object_array_id)));
+  set_long(env, config, "newMultiArrayStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_new_multi_array_id)));
   set_long(env, config, "inlineCacheMissStub", VmIds::addStub(SharedRuntime::get_ic_miss_stub()));
-  set_long(env, config, "handleExceptionStub", VmIds::addStub(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id)));
+  set_long(env, config, "handleExceptionStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_handle_exception_nofpu_id)));
   set_long(env, config, "handleDeoptStub", VmIds::addStub(SharedRuntime::deopt_blob()->unpack()));
-  set_long(env, config, "fastMonitorEnterStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_monitorenter_id)));
-  set_long(env, config, "fastMonitorExitStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_monitorexit_id)));
-  set_long(env, config, "verifyOopStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_verify_oop_id)));
-  set_long(env, config, "vmErrorStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_vm_error_id)));
+  set_long(env, config, "monitorEnterStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_monitorenter_id)));
+  set_long(env, config, "monitorExitStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_monitorexit_id)));
+  set_long(env, config, "verifyOopStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_verify_oop_id)));
+  set_long(env, config, "vmErrorStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_vm_error_id)));
   set_long(env, config, "deoptimizeStub", VmIds::addStub(SharedRuntime::deopt_blob()->uncommon_trap()));
-  set_long(env, config, "unwindExceptionStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_unwind_exception_call_id)));
-  set_long(env, config, "osrMigrationEndStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_OSR_migration_end_id)));
-  set_long(env, config, "registerFinalizerStub", VmIds::addStub(Runtime1::entry_for(Runtime1::register_finalizer_id)));
-  set_long(env, config, "setDeoptInfoStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_set_deopt_info_id)));
-  set_long(env, config, "createNullPointerExceptionStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_create_null_pointer_exception_id)));
-  set_long(env, config, "createOutOfBoundsExceptionStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_create_out_of_bounds_exception_id)));
+  set_long(env, config, "unwindExceptionStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_unwind_exception_call_id)));
+  set_long(env, config, "osrMigrationEndStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_OSR_migration_end_id)));
+  set_long(env, config, "registerFinalizerStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_register_finalizer_id)));
+  set_long(env, config, "setDeoptInfoStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_set_deopt_info_id)));
+  set_long(env, config, "createNullPointerExceptionStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_create_null_pointer_exception_id)));
+  set_long(env, config, "createOutOfBoundsExceptionStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_create_out_of_bounds_exception_id)));
   set_long(env, config, "javaTimeMillisStub", VmIds::addStub(CAST_FROM_FN_PTR(address, os::javaTimeMillis)));
   set_long(env, config, "javaTimeNanosStub", VmIds::addStub(CAST_FROM_FN_PTR(address, os::javaTimeNanos)));
-  set_long(env, config, "arithmeticFremStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_arithmetic_frem_id)));
-  set_long(env, config, "arithmeticDremStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_arithmetic_drem_id)));
+  set_long(env, config, "arithmeticFremStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_arithmetic_frem_id)));
+  set_long(env, config, "arithmeticDremStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_arithmetic_drem_id)));
   set_long(env, config, "arithmeticSinStub", VmIds::addStub(CAST_FROM_FN_PTR(address, SharedRuntime::dsin)));
   set_long(env, config, "arithmeticCosStub", VmIds::addStub(CAST_FROM_FN_PTR(address, SharedRuntime::dcos)));
   set_long(env, config, "arithmeticTanStub", VmIds::addStub(CAST_FROM_FN_PTR(address, SharedRuntime::dtan)));
-  set_long(env, config, "logPrimitiveStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_log_primitive_id)));
-  set_long(env, config, "logObjectStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_log_object_id)));
-  set_long(env, config, "logPrintfStub", VmIds::addStub(Runtime1::entry_for(Runtime1::graal_log_printf_id)));
+  set_long(env, config, "logPrimitiveStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_log_primitive_id)));
+  set_long(env, config, "logObjectStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_log_object_id)));
+  set_long(env, config, "logPrintfStub", VmIds::addStub(GraalRuntime::entry_for(GraalRuntime::graal_log_printf_id)));
 
 
   BarrierSet* bs = Universe::heap()->barrier_set();
--- a/src/share/vm/graal/graalEnv.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/graal/graalEnv.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -42,7 +42,7 @@
 #include "runtime/reflection.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "utilities/dtrace.hpp"
-#include "c1/c1_Runtime1.hpp"
+#include "graal/graalRuntime.hpp"
 
 // ------------------------------------------------------------------
 // Note: the logic of this method should mirror the logic of
@@ -70,7 +70,7 @@
                                           constantPoolHandle& cpool,
                                           Symbol* sym,
                                           bool require_local) {
-  EXCEPTION_CONTEXT;
+  GRAAL_EXCEPTION_CONTEXT;
 
   // Now we need to check the SystemDictionary
   if (sym->byte_at(0) == 'L' &&
@@ -162,7 +162,7 @@
                                         int index,
                                         bool& is_accessible,
                                         KlassHandle& accessor) {
-  EXCEPTION_CONTEXT;
+  GRAAL_EXCEPTION_CONTEXT;
   KlassHandle klass (THREAD, ConstantPool::klass_at_if_loaded(cpool, index));
   Symbol* klass_name = NULL;
   if (klass.is_null()) {
@@ -233,7 +233,7 @@
 // in the accessor klass.
 void GraalEnv::get_field_by_index_impl(instanceKlassHandle& klass, fieldDescriptor& field_desc,
                                         int index) {
-  EXCEPTION_CONTEXT;
+  GRAAL_EXCEPTION_CONTEXT;
 
   assert(klass->is_linked(), "must be linked before using its constant-pool");
 
@@ -285,7 +285,7 @@
                                Symbol*       name,
                                Symbol*       sig,
                                Bytecodes::Code bc) {
-  EXCEPTION_CONTEXT;
+  GRAAL_EXCEPTION_CONTEXT;
   LinkResolver::check_klass_accessability(h_accessor, h_holder, KILL_COMPILE_ON_FATAL_(NULL));
   methodHandle dest_method;
   switch (bc) {
@@ -421,7 +421,7 @@
                                 bool has_debug_info,
                                 bool has_unsafe_access,
                                 Handle installed_code) {
-  EXCEPTION_CONTEXT;
+  GRAAL_EXCEPTION_CONTEXT;
   NMethodSweeper::possibly_sweep();
   nm = NULL;
   int comp_level = CompLevel_simple;
--- a/src/share/vm/graal/graalEnv.hpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/graal/graalEnv.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -34,6 +34,19 @@
 
 class CompileTask;
 
+// Bring the Graal compiler thread into the VM state.
+#define GRAAL_VM_ENTRY_MARK                       \
+  JavaThread* thread = JavaThread::current(); \
+  ThreadInVMfromNative __tiv(thread);       \
+  ResetNoHandleMark rnhm;                   \
+  HandleMarkCleaner __hm(thread);           \
+  Thread* THREAD = thread;                  \
+  debug_only(VMNativeEntryWrapper __vew;)
+
+#define GRAAL_EXCEPTION_CONTEXT \
+  JavaThread* thread=JavaThread::current(); \
+  Thread* THREAD = thread;
+
 //
 // This class is the top level broker for requests from the compiler
 // to the VM.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/graal/graalGlobals.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "graal/graalGlobals.hpp"
+
+GRAAL_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/graal/graalGlobals.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GRAAL_GRAALGLOBALS_HPP
+#define SHARE_VM_GRAAL_GRAALGLOBALS_HPP
+
+#include "runtime/globals.hpp"
+#ifdef TARGET_ARCH_x86
+# include "graalGlobals_x86.hpp"
+#endif
+#ifdef TARGET_ARCH_sparc
+# include "graalGlobals_sparc.hpp"
+#endif
+#ifdef TARGET_ARCH_arm
+# include "graalGlobals_arm.hpp"
+#endif
+#ifdef TARGET_ARCH_ppc
+# include "graalGlobals_ppc.hpp"
+#endif
+
+//
+// Defines all global flags used by the Graal compiler.
+//
+#define GRAAL_FLAGS(develop, develop_pd, product, product_pd, notproduct)      \
+                                                                            \
+  product(bool, DebugGraal, true,                                           \
+          "Enable JVMTI for the compiler thread")                           \
+                                                                            \
+  product(bool, BootstrapGraal, true,                                       \
+          "Bootstrap Graal before running Java main method")                \
+                                                                            \
+  product(ccstr, GraalClassPath, NULL,                                      \
+          "Use the class path for Graal classes")                           \
+                                                                            \
+  product(intx, TraceGraal, 0,                                              \
+          "Trace level for Graal")                                          \
+                                                                            \
+  product(bool, TraceSignals, false,                                        \
+          "Trace signals and implicit exception handling")                  \
+                                                                            \
+  product_pd(intx, SafepointPollOffset,                                     \
+          "Offset added to polling address (Intel only)")                   \
+                                                                            \
+  develop(bool, UseFastNewInstance, true,                                   \
+          "Use fast inlined instance allocation")                           \
+                                                                            \
+  develop(bool, UseFastNewTypeArray, true,                                  \
+          "Use fast inlined type array allocation")                         \
+                                                                            \
+  develop(bool, UseFastNewObjectArray, true,                                \
+          "Use fast inlined object array allocation")                       \
+                                                                            \
+  develop(bool, UseFastLocking, true,                                       \
+          "Use fast inlined locking code")                                  \
+                                                                            \
+  develop(intx, GraalNMethodSizeLimit, (64*K)*wordSize,                     \
+          "Maximum size of a compiled method.")                             \
+                                                                            \
+  notproduct(bool, PrintSimpleStubs, false,                                 \
+          "Print SimpleStubs")                                              \
+                                                                            \
+
+
+// Read default values for Graal globals
+
+GRAAL_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_NOTPRODUCT_FLAG)
+
+#endif // SHARE_VM_GRAAL_GRAALGLOBALS_HPP
--- a/src/share/vm/graal/graalRuntime.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/graal/graalRuntime.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -24,7 +24,579 @@
 #include "precompiled.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "prims/jvm.h"
+#include "graal/graalRuntime.hpp"
 #include "graal/graalVMToCompiler.hpp"
+#include "asm/codeBuffer.hpp"
+#include "runtime/biasedLocking.hpp"
+
+// Implementation of GraalStubAssembler
+
+GraalStubAssembler::GraalStubAssembler(CodeBuffer* code, const char* name, int stub_id) : MacroAssembler(code) {
+  _name = name;
+  _must_gc_arguments = false;
+  _frame_size = no_frame_size;
+  _num_rt_args = 0;
+  _stub_id = stub_id;
+}
+
+
+void GraalStubAssembler::set_info(const char* name, bool must_gc_arguments) {
+  _name = name;
+  _must_gc_arguments = must_gc_arguments;
+}
+
+
+void GraalStubAssembler::set_frame_size(int size) {
+  if (_frame_size == no_frame_size) {
+    _frame_size = size;
+  }
+  assert(_frame_size == size, "can't change the frame size");
+}
+
+
+void GraalStubAssembler::set_num_rt_args(int args) {
+  if (_num_rt_args == 0) {
+    _num_rt_args = args;
+  }
+  assert(_num_rt_args == args, "can't change the number of args");
+}
+
+// Implementation of GraalRuntime
+
+CodeBlob* GraalRuntime::_blobs[GraalRuntime::number_of_ids];
+const char* GraalRuntime::_blob_names[] = {
+  GRAAL_STUBS(STUB_NAME, LAST_STUB_NAME)
+};
+
+// Simple helper to see if the caller of a runtime stub which
+// entered the VM has been deoptimized
+
+static bool caller_is_deopted() {
+  JavaThread* thread = JavaThread::current();
+  RegisterMap reg_map(thread, false);
+  frame runtime_frame = thread->last_frame();
+  frame caller_frame = runtime_frame.sender(&reg_map);
+  assert(caller_frame.is_compiled_frame(), "must be compiled");
+  return caller_frame.is_deoptimized_frame();
+}
+
+// Stress deoptimization
+static void deopt_caller() {
+  if (!caller_is_deopted()) {
+    JavaThread* thread = JavaThread::current();
+    RegisterMap reg_map(thread, false);
+    frame runtime_frame = thread->last_frame();
+    frame caller_frame = runtime_frame.sender(&reg_map);
+    Deoptimization::deoptimize_frame(thread, caller_frame.id(), Deoptimization::Reason_constraint);
+    assert(caller_is_deopted(), "Must be deoptimized");
+  }
+}
+
+static bool setup_code_buffer(CodeBuffer* code) {
+  // Preallocate a buffer for the shared relocation information:
+  int locs_buffer_size = 1 * (relocInfo::length_limit + sizeof(relocInfo));
+  char* locs_buffer = NEW_RESOURCE_ARRAY(char, locs_buffer_size);
+  code->insts()->initialize_shared_locs((relocInfo*)locs_buffer,
+                                        locs_buffer_size / sizeof(relocInfo));
+
+  // Global stubs have neither constants nor local stubs
+  code->initialize_consts_size(0);
+  code->initialize_stubs_size(0);
+
+  return true;
+}
+
+void GraalRuntime::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
+  assert(0 <= id && id < number_of_ids, "illegal stub id");
+  ResourceMark rm;
+  // create code buffer for code storage
+  CodeBuffer code(buffer_blob);
+
+  setup_code_buffer(&code);
+
+  // create assembler for code generation
+  GraalStubAssembler* sasm = new GraalStubAssembler(&code, name_for(id), id);
+  // generate code for runtime stub
+  OopMapSet* oop_maps;
+  oop_maps = generate_code_for(id, sasm);
+  assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
+         "if stub has an oop map it must have a valid frame size");
+
+#ifdef ASSERT
+  // Make sure that stubs that need oopmaps have them
+  switch (id) {
+    // These stubs don't need to have an oopmap
+    case graal_slow_subtype_check_id:
+#if defined(SPARC) || defined(PPC)
+    case graal_handle_exception_nofpu_id:  // Unused on sparc
+#endif
+#ifdef GRAAL
+    case graal_verify_oop_id:
+    case graal_unwind_exception_call_id:
+    case graal_OSR_migration_end_id:
+    case graal_arithmetic_frem_id:
+    case graal_arithmetic_drem_id:
+    case graal_set_deopt_info_id:
+#endif
+      break;
+
+    // All other stubs should have oopmaps
+    default:
+      assert(oop_maps != NULL, "must have an oopmap");
+  }
+#endif
+
+  // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
+  sasm->align(BytesPerWord);
+  // make sure all code is in code buffer
+  sasm->flush();
+  // create blob - distinguish a few special cases
+  CodeBlob* blob = RuntimeStub::new_runtime_stub(name_for(id),
+                                                 &code,
+                                                 CodeOffsets::frame_never_safe,
+                                                 sasm->frame_size(),
+                                                 oop_maps,
+                                                 sasm->must_gc_arguments());
+  // install blob
+  assert(blob != NULL, "blob must exist");
+  _blobs[id] = blob;
+}
+
+
+void GraalRuntime::initialize(BufferBlob* blob) {
+  // generate stubs
+  for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
+  // printing
+#ifndef PRODUCT
+  if (PrintSimpleStubs) {
+    ResourceMark rm;
+    for (int id = 0; id < number_of_ids; id++) {
+      _blobs[id]->print();
+      if (_blobs[id]->oop_maps() != NULL) {
+        _blobs[id]->oop_maps()->print();
+      }
+    }
+  }
+#endif
+}
+
+
+CodeBlob* GraalRuntime::blob_for(StubID id) {
+  assert(0 <= id && id < number_of_ids, "illegal stub id");
+  return _blobs[id];
+}
+
+
+const char* GraalRuntime::name_for(StubID id) {
+  assert(0 <= id && id < number_of_ids, "illegal stub id");
+  return _blob_names[id];
+}
+
+const char* GraalRuntime::name_for_address(address entry) {
+  for (int id = 0; id < number_of_ids; id++) {
+    if (entry == entry_for((StubID)id)) return name_for((StubID)id);
+  }
+
+#define FUNCTION_CASE(a, f) \
+  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f
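+  // e.g. FUNCTION_CASE(entry, os::javaTimeMillis) compares 'entry' against the
+  // address of os::javaTimeMillis and, on a match, returns "os::javaTimeMillis".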
+
+  FUNCTION_CASE(entry, os::javaTimeMillis);
+  FUNCTION_CASE(entry, os::javaTimeNanos);
+  FUNCTION_CASE(entry, SharedRuntime::OSR_migration_end);
+  FUNCTION_CASE(entry, SharedRuntime::d2f);
+  FUNCTION_CASE(entry, SharedRuntime::d2i);
+  FUNCTION_CASE(entry, SharedRuntime::d2l);
+  FUNCTION_CASE(entry, SharedRuntime::dcos);
+  FUNCTION_CASE(entry, SharedRuntime::dexp);
+  FUNCTION_CASE(entry, SharedRuntime::dlog);
+  FUNCTION_CASE(entry, SharedRuntime::dlog10);
+  FUNCTION_CASE(entry, SharedRuntime::dpow);
+  FUNCTION_CASE(entry, SharedRuntime::drem);
+  FUNCTION_CASE(entry, SharedRuntime::dsin);
+  FUNCTION_CASE(entry, SharedRuntime::dtan);
+  FUNCTION_CASE(entry, SharedRuntime::f2i);
+  FUNCTION_CASE(entry, SharedRuntime::f2l);
+  FUNCTION_CASE(entry, SharedRuntime::frem);
+  FUNCTION_CASE(entry, SharedRuntime::l2d);
+  FUNCTION_CASE(entry, SharedRuntime::l2f);
+  FUNCTION_CASE(entry, SharedRuntime::ldiv);
+  FUNCTION_CASE(entry, SharedRuntime::lmul);
+  FUNCTION_CASE(entry, SharedRuntime::lrem);
+  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
+  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
+#ifdef TRACE_HAVE_INTRINSICS
+  FUNCTION_CASE(entry, TRACE_TIME_METHOD);
+#endif
+
+#undef FUNCTION_CASE
+
+  return "<unknown function>";  // no match found; don't fall off the end of a non-void function
+}
+
+
+JRT_ENTRY(void, GraalRuntime::new_instance(JavaThread* thread, Klass* klass))
+  assert(klass->is_klass(), "not a class");
+  instanceKlassHandle h(thread, klass);
+  h->check_valid_for_instantiation(true, CHECK);
+  // make sure klass is initialized
+  h->initialize(CHECK);
+  // allocate instance and return via TLS
+  oop obj = h->allocate_instance(CHECK);
+  thread->set_vm_result(obj);
+JRT_END
+
+
+JRT_ENTRY(void, GraalRuntime::new_type_array(JavaThread* thread, Klass* klass, jint length))
+  // Note: no handle for klass needed since they are not used
+  //       anymore after new_typeArray() and no GC can happen before.
+  //       (This may have to change if this code changes!)
+  assert(klass->is_klass(), "not a class");
+  BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
+  oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
+  thread->set_vm_result(obj);
+  // This is pretty rare, but deoptimizing here stresses the deoptimization
+  // path, so force a deopt of the caller when DeoptimizeALot is set.
+  if (DeoptimizeALot) {
+    deopt_caller();
+  }
+JRT_END
+
+
+JRT_ENTRY(void, GraalRuntime::new_object_array(JavaThread* thread, Klass* array_klass, jint length))
+  // Note: no handle for klass needed since they are not used
+  //       anymore after new_objArray() and no GC can happen before.
+  //       (This may have to change if this code changes!)
+  assert(array_klass->is_klass(), "not a class");
+  Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
+  objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
+  thread->set_vm_result(obj);
+  // This is pretty rare, but deoptimizing here stresses the deoptimization
+  // path, so force a deopt of the caller when DeoptimizeALot is set.
+  if (DeoptimizeALot) {
+    deopt_caller();
+  }
+JRT_END
+
+
+JRT_ENTRY(void, GraalRuntime::new_multi_array(JavaThread* thread, Klass* klass, int rank, jint* dims))
+  assert(klass->is_klass(), "not a class");
+  assert(rank >= 1, "rank must be at least 1");
+  oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
+  thread->set_vm_result(obj);
+JRT_END
+
+JRT_ENTRY(void, GraalRuntime::unimplemented_entry(JavaThread* thread, StubID id))
+  tty->print_cr("GraalRuntime::entry_for(%d) returned unimplemented entry point", id);
+JRT_END
+
+extern void vm_exit(int code);
+
+// Enter this method from compiled code handler below. This is where we transition
+// to VM mode. This is done as a helper routine so that the method called directly
+// from compiled code does not have to transition to VM. This allows the entry
+// method to see if the nmethod that we have just looked up a handler for has
+// been deoptimized while we were in the vm. This simplifies the assembly code
+// in the cpu directories.
+//
+// We are entering here from exception stub (via the entry method below)
+// If there is a compiled exception handler in this method, we will continue there;
+// otherwise we will unwind the stack and continue at the caller of top frame method
+// Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
+// control the area where we can allow a safepoint. After we exit the safepoint area we can
+// check to see if the handler we are going to return is now in a nmethod that has
+// been deoptimized. If that is the case we return the deopt blob
+// unpack_with_exception entry instead. This makes life for the exception blob easier
+// because making that same check and diverting is painful from assembly language.
+JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, nmethod*& nm))
+  // Reset method handle flag.
+  thread->set_is_method_handle_return(false);
+
+  Handle exception(thread, ex);
+  nm = CodeCache::find_nmethod(pc);
+  assert(nm != NULL, "this is not an nmethod");
+  // Adjust the pc as needed.
+  if (nm->is_deopt_pc(pc)) {
+    RegisterMap map(thread, false);
+    frame exception_frame = thread->last_frame().sender(&map);
+    // if the frame isn't deopted then pc must not correspond to the caller of last_frame
+    assert(exception_frame.is_deoptimized_frame(), "must be deopted");
+    pc = exception_frame.pc();
+  }
+#ifdef ASSERT
+  assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");
+  assert(exception->is_oop(), "just checking");
+  // Check that exception is a subclass of Throwable, otherwise we have a VerifyError
+  if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
+    if (ExitVMOnVerifyError) vm_exit(-1);
+    ShouldNotReachHere();
+  }
+#endif
+
+  // Check the stack guard pages and reenable them if necessary and there is
+  // enough space on the stack to do so.  Use fast exceptions only if the guard
+  // pages are enabled.
+  bool guard_pages_enabled = thread->stack_yellow_zone_enabled();
+  if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
+
+  if (JvmtiExport::can_post_on_exceptions()) {
+    // To ensure correct notification of exception catches and throws
+    // we have to deoptimize here.  If we attempted to notify the
+    // catches and throws during this exception lookup it's possible
+    // we could deoptimize on the way out of the VM and end back in
+    // the interpreter at the throw site.  This would result in double
+    // notifications since the interpreter would also notify about
+    // these same catches and throws as it unwound the frame.
+
+    RegisterMap reg_map(thread);
+    frame stub_frame = thread->last_frame();
+    frame caller_frame = stub_frame.sender(&reg_map);
+
+    // We don't really want to deoptimize the nmethod itself since we
+    // can actually continue in the exception handler ourselves but I
+    // don't see an easy way to have the desired effect.
+    Deoptimization::deoptimize_frame(thread, caller_frame.id(), Deoptimization::Reason_constraint);
+    assert(caller_is_deopted(), "Must be deoptimized");
+
+    return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
+  }
+
+  // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
+  if (guard_pages_enabled) {
+    address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);
+    if (fast_continuation != NULL) {
+      // Set flag if return address is a method handle call site.
+      thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
+      return fast_continuation;
+    }
+  }
+
+  // If the stack guard pages are enabled, check whether there is a handler in
+  // the current method.  Otherwise (guard pages disabled), force an unwind and
+  // skip the exception cache update (i.e., just leave continuation==NULL).
+  address continuation = NULL;
+  if (guard_pages_enabled) {
+
+    // New exception handling mechanism can support inlined methods
+    // with exception handlers since the mappings are from PC to PC
+
+    // debugging support
+    // tracing
+    if (TraceExceptions) {
+      ttyLocker ttyl;
+      ResourceMark rm;
+      int offset = pc - nm->code_begin();
+      tty->print_cr("Exception <%s> (0x%x) thrown in compiled method <%s> at PC " PTR_FORMAT " [" PTR_FORMAT "+%d] for thread 0x%x",
+                    exception->print_value_string(), (address)exception(), nm->method()->print_value_string(), pc, nm->code_begin(), offset, thread);
+    }
+    // for AbortVMOnException flag
+    NOT_PRODUCT(Exceptions::debug_check_abort(exception));
+
+    // Clear out the exception oop and pc since looking up an
+    // exception handler can cause class loading, which might throw an
+    // exception and those fields are expected to be clear during
+    // normal bytecode execution.
+    thread->set_exception_oop(NULL);
+    thread->set_exception_pc(NULL);
+
+    continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false);
+    // If an exception was thrown during exception dispatch, the exception oop may have changed
+    thread->set_exception_oop(exception());
+    thread->set_exception_pc(pc);
+
+    // the exception cache is used only by non-implicit exceptions
+    if (continuation != NULL && !SharedRuntime::deopt_blob()->contains(continuation)) {
+      nm->add_handler_for_exception_and_pc(exception, pc, continuation);
+    }
+  }
+
+  thread->set_vm_result(exception());
+  // Set flag if return address is a method handle call site.
+  thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
+
+  if (TraceExceptions) {
+    ttyLocker ttyl;
+    ResourceMark rm;
+    tty->print_cr("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT " for exception thrown at PC " PTR_FORMAT,
+                  thread, continuation, pc);
+  }
+
+  return continuation;
+JRT_END
+
+// Enter this method from compiled code only if there is a Java exception handler
+// in the method handling the exception.
+// We are entering here from exception stub. We don't do a normal VM transition here.
+// We do it in a helper. This is so we can check to see if the nmethod we have just
+// searched for an exception handler has been deoptimized in the meantime.
+address GraalRuntime::exception_handler_for_pc(JavaThread* thread) {
+  oop exception = thread->exception_oop();
+  address pc = thread->exception_pc();
+  // Still in Java mode
+  DEBUG_ONLY(ResetNoHandleMark rnhm);
+  nmethod* nm = NULL;
+  address continuation = NULL;
+  {
+    // Enter VM mode by calling the helper
+    ResetNoHandleMark rnhm;
+    continuation = exception_handler_for_pc_helper(thread, exception, pc, nm);
+  }
+  // Back in Java: do not use oops and do not safepoint.
+
+  // Now check to see if the nmethod we were called from is now deoptimized.
+  // If so we must return to the deopt blob and deoptimize the nmethod
+  if (nm != NULL && caller_is_deopted()) {
+    continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
+  }
+
+  assert(continuation != NULL, "no handler found");
+  return continuation;
+}
+
+JRT_ENTRY(void, GraalRuntime::graal_create_null_exception(JavaThread* thread))
+  thread->set_vm_result(Exceptions::new_exception(thread, vmSymbols::java_lang_NullPointerException(), NULL)());
+JRT_END
+
+JRT_ENTRY(void, GraalRuntime::graal_create_out_of_bounds_exception(JavaThread* thread, jint index))
+  char message[jintAsStringSize];
+  sprintf(message, "%d", index);
+  thread->set_vm_result(Exceptions::new_exception(thread, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message)());
+JRT_END
+
+JRT_ENTRY_NO_ASYNC(void, GraalRuntime::graal_monitorenter(JavaThread* thread, oopDesc* obj, BasicLock* lock))
+  if (TraceGraal >= 3) {
+    char type[O_BUFLEN];
+    obj->klass()->name()->as_C_string(type, O_BUFLEN);
+    markOop mark = obj->mark();
+    tty->print_cr("%s: entered locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), obj, type, mark, lock);
+    tty->flush();
+  }
+#ifdef ASSERT
+  if (PrintBiasedLockingStatistics) {
+    Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
+  }
+#endif
+  Handle h_obj(thread, obj);
+  assert(h_obj()->is_oop(), "must be an object");
+  if (UseBiasedLocking) {
+    // Retry fast entry if bias is revoked to avoid unnecessary inflation
+    ObjectSynchronizer::fast_enter(h_obj, lock, true, CHECK);
+  } else {
+    if (UseFastLocking) {
+      // When using fast locking, the compiled code has already tried the fast case
+      ObjectSynchronizer::slow_enter(h_obj, lock, THREAD);
+    } else {
+      ObjectSynchronizer::fast_enter(h_obj, lock, false, THREAD);
+    }
+  }
+  if (TraceGraal >= 3) {
+    tty->print_cr("%s: exiting locking slow with obj=" INTPTR_FORMAT, thread->name(), obj);
+  }
+JRT_END
+
+
+JRT_LEAF(void, GraalRuntime::graal_monitorexit(JavaThread* thread, oopDesc* obj, BasicLock* lock))
+  assert(thread == JavaThread::current(), "threads must correspond");
+  assert(thread->last_Java_sp(), "last_Java_sp must be set");
+  // monitorexit is non-blocking (leaf routine) => no exceptions can be thrown
+  EXCEPTION_MARK;
+
+#ifdef ASSERT
+  if (!obj->is_oop()) {
+    ResetNoHandleMark rhm;
+    nmethod* method = thread->last_frame().cb()->as_nmethod_or_null();
+    if (method != NULL) {
+      tty->print_cr("ERROR in monitorexit in method %s wrong obj " INTPTR_FORMAT, method->name(), obj);
+    }
+    thread->print_stack_on(tty);
+    assert(false, "invalid lock object pointer dected");
+  }
+#endif
+
+  if (UseFastLocking) {
+    // When using fast locking, the compiled code has already tried the fast case
+    ObjectSynchronizer::slow_exit(obj, lock, THREAD);
+  } else {
+    ObjectSynchronizer::fast_exit(obj, lock, THREAD);
+  }
+  if (TraceGraal >= 3) {
+    char type[O_BUFLEN];
+    obj->klass()->name()->as_C_string(type, O_BUFLEN);
+    tty->print_cr("%s: exited locking slow case with obj=" INTPTR_FORMAT ", type=%s, mark=" INTPTR_FORMAT ", lock=" INTPTR_FORMAT, thread->name(), obj, type, obj->mark(), lock);
+    tty->flush();
+  }
+JRT_END
+
+JRT_ENTRY(void, GraalRuntime::graal_log_object(JavaThread* thread, oop obj, jint flags))
+  bool string =  mask_bits_are_true(flags, LOG_OBJECT_STRING);
+  bool address = mask_bits_are_true(flags, LOG_OBJECT_ADDRESS);
+  bool newline = mask_bits_are_true(flags, LOG_OBJECT_NEWLINE);
+  if (!string) {
+    if (!address && obj->is_oop_or_null(true)) {
+      char buf[O_BUFLEN];
+      tty->print("%s@%p", obj->klass()->name()->as_C_string(buf, O_BUFLEN), obj);
+    } else {
+      tty->print("%p", obj);
+    }
+  } else {
+    ResourceMark rm;
+    assert(obj != NULL && java_lang_String::is_instance(obj), "must be");
+    char *buf = java_lang_String::as_utf8_string(obj);
+    tty->print("%s", buf);  // do not interpret the String contents as a format
+  }
+  if (newline) {
+    tty->cr();
+  }
+JRT_END
+
+JRT_ENTRY(void, GraalRuntime::graal_vm_error(JavaThread* thread, oop where, oop format, jlong value))
+  ResourceMark rm;
+  assert(where == NULL || java_lang_String::is_instance(where), "must be");
+  const char *error_msg = where == NULL ? "<internal Graal error>" : java_lang_String::as_utf8_string(where);
+  char *detail_msg = NULL;
+  if (format != NULL) {
+    const char* buf = java_lang_String::as_utf8_string(format);
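+    // Assumes the format contains a single %-directive consuming 'value'; twice
+    // the format length leaves room for it, and jio_snprintf bounds the write.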
+    size_t detail_msg_length = strlen(buf) * 2;
+    detail_msg = (char *) NEW_RESOURCE_ARRAY(u_char, detail_msg_length);
+    jio_snprintf(detail_msg, detail_msg_length, buf, value);
+  }
+  report_vm_error(__FILE__, __LINE__, error_msg, detail_msg);
+JRT_END
+
+JRT_ENTRY(void, GraalRuntime::graal_log_printf(JavaThread* thread, oop format, jlong val))
+  ResourceMark rm;
+  assert(format != NULL && java_lang_String::is_instance(format), "must be");
+  char *buf = java_lang_String::as_utf8_string(format);
+  tty->print(buf, val);
+JRT_END
+
+JRT_ENTRY(void, GraalRuntime::graal_log_primitive(JavaThread* thread, jchar typeChar, jlong value, jboolean newline))
+  union {
+      jlong l;
+      jdouble d;
+      jfloat f;
+  } uu;
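+  // The value arrives as raw bits in a jlong; the union reinterprets those
+  // bits as jfloat/jdouble for the 'f' and 'd' cases without numeric conversion.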
+  uu.l = value;
+  switch (typeChar) {
+    case 'z': tty->print(value == 0 ? "false" : "true"); break;
+    case 'b': tty->print("%d", (jbyte) value); break;
+    case 'c': tty->print("%c", (jchar) value); break;
+    case 's': tty->print("%d", (jshort) value); break;
+    case 'i': tty->print("%d", (jint) value); break;
+    case 'f': tty->print("%f", uu.f); break;
+    case 'j': tty->print(INT64_FORMAT, value); break;
+    case 'd': tty->print("%lf", uu.d); break;
+    default: assert(false, "unknown typeChar"); break;
+  }
+  if (newline) {
+    tty->cr();
+  }
+JRT_END
 
 // JVM_InitializeGraalRuntime
 JVM_ENTRY(jobject, JVM_InitializeGraalRuntime(JNIEnv *env, jclass graalclass))
--- a/src/share/vm/graal/graalRuntime.hpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/graal/graalRuntime.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -24,4 +24,149 @@
 #ifndef SHARE_VM_GRAAL_GRAAL_RUNTIME_HPP
 #define SHARE_VM_GRAAL_GRAAL_RUNTIME_HPP
 
+#include "code/stubs.hpp"
+#include "interpreter/interpreter.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/deoptimization.hpp"
+
+// A GraalStubAssembler is a MacroAssembler with extra functionality for
+// runtime stubs. Currently it 'knows' some stub info. Eventually, the
+// information may be set automatically or asserted when using specialized
+// GraalStubAssembler functions.
+
+class GraalStubAssembler: public MacroAssembler {
+ private:
+  const char* _name;
+  bool        _must_gc_arguments;
+  int         _frame_size;
+  int         _num_rt_args;
+  int         _stub_id;
+
+ public:
+  // creation
+  GraalStubAssembler(CodeBuffer* code, const char* name, int stub_id);
+  void set_info(const char* name, bool must_gc_arguments);
+
+  void set_frame_size(int size);
+  void set_num_rt_args(int args);
+
+  // accessors
+  const char* name() const                       { return _name; }
+  bool  must_gc_arguments() const                { return _must_gc_arguments; }
+  int frame_size() const                         { return _frame_size; }
+  int num_rt_args() const                        { return _num_rt_args; }
+  int stub_id() const                            { return _stub_id; }
+
+  void verify_stack_oop(int offset) PRODUCT_RETURN;
+  void verify_not_null_oop(Register r)  PRODUCT_RETURN;
+
+  // runtime calls (return offset of call to be used by GC map)
+  int call_RT(Register oop_result1, Register metadata_result, address entry, int args_size = 0);
+  int call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1);
+  int call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2);
+  int call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3);
+};
+
+// set frame size and return address offset to these values in blobs
+// (if the compiled frame uses ebp as link pointer on ia32; otherwise,
+// the frame size must be fixed)
+enum {
+  no_frame_size            = -1
+};
+
+// Holds all assembly stubs and VM
+// runtime routines needed by code generated
+// by Graal.
+#define GRAAL_STUBS(stub, last_entry) \
+  stub(graal_register_finalizer)      \
+  stub(graal_new_instance)            \
+  stub(graal_new_type_array)          \
+  stub(graal_new_object_array)        \
+  stub(graal_new_multi_array)         \
+  stub(graal_handle_exception_nofpu) /* optimized version that does not preserve fpu registers */ \
+  stub(graal_slow_subtype_check)      \
+  stub(graal_unwind_exception_call)   \
+  stub(graal_OSR_migration_end)       \
+  stub(graal_arithmetic_frem)         \
+  stub(graal_arithmetic_drem)         \
+  stub(graal_monitorenter)            \
+  stub(graal_monitorexit)             \
+  stub(graal_verify_oop)              \
+  stub(graal_vm_error)                \
+  stub(graal_set_deopt_info)          \
+  stub(graal_create_null_pointer_exception) \
+  stub(graal_create_out_of_bounds_exception) \
+  stub(graal_log_object)              \
+  stub(graal_log_printf)              \
+  stub(graal_log_primitive)           \
+  last_entry(number_of_ids)
+
+#define DECLARE_STUB_ID(x)       x ## _id ,
+#define DECLARE_LAST_STUB_ID(x)  x
+#define STUB_NAME(x)             #x " GraalRuntime stub",
+#define LAST_STUB_NAME(x)        #x " GraalRuntime stub"
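+// Applied to GRAAL_STUBS above, DECLARE_STUB_ID turns each entry into an
+// enumerator such as 'graal_new_instance_id', while STUB_NAME produces the
+// matching _blob_names string "graal_new_instance GraalRuntime stub".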
+
+class GraalRuntime: public AllStatic {
+  friend class VMStructs;
+
+ public:
+  enum StubID {
+    GRAAL_STUBS(DECLARE_STUB_ID, DECLARE_LAST_STUB_ID)
+  };
+
+ private:
+  static CodeBlob* _blobs[number_of_ids];
+  static const char* _blob_names[];
+
+  // stub generation
+  static void       generate_blob_for(BufferBlob* blob, StubID id);
+  static OopMapSet* generate_code_for(StubID id, GraalStubAssembler* sasm);
+  static OopMapSet* generate_handle_exception(StubID id, GraalStubAssembler* sasm);
+  static void       generate_unwind_exception(GraalStubAssembler *sasm);
+
+  static OopMapSet* generate_stub_call(GraalStubAssembler* sasm, Register result, address entry,
+                                       Register arg1 = noreg, Register arg2 = noreg, Register arg3 = noreg);
+
+  // runtime entry points
+  static void new_instance    (JavaThread* thread, Klass* klass);
+  static void new_type_array  (JavaThread* thread, Klass* klass, jint length);
+  static void new_object_array(JavaThread* thread, Klass* klass, jint length);
+  static void new_multi_array (JavaThread* thread, Klass* klass, int rank, jint* dims);
+
+  static void unimplemented_entry   (JavaThread* thread, StubID id);
+
+  static address exception_handler_for_pc(JavaThread* thread);
+
+  static void graal_create_null_exception(JavaThread* thread);
+  static void graal_create_out_of_bounds_exception(JavaThread* thread, jint index);
+  static void graal_monitorenter(JavaThread* thread, oopDesc* obj, BasicLock* lock);
+  static void graal_monitorexit (JavaThread* thread, oopDesc* obj, BasicLock* lock);
+  static void graal_vm_error(JavaThread* thread, oop where, oop format, jlong value);
+  static void graal_log_printf(JavaThread* thread, oop format, jlong value);
+  static void graal_log_primitive(JavaThread* thread, jchar typeChar, jlong value, jboolean newline);
+
+  // Note: Must be kept in sync with constants in com.oracle.graal.snippets.Log
+  enum {
+    LOG_OBJECT_NEWLINE = 0x01,
+    LOG_OBJECT_STRING  = 0x02,
+    LOG_OBJECT_ADDRESS = 0x04
+  };
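+  // e.g. flags == (LOG_OBJECT_STRING | LOG_OBJECT_NEWLINE) makes graal_log_object
+  // print 'msg' as a java.lang.String followed by a newline.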
+  static void graal_log_object(JavaThread* thread, oop msg, jint flags);
+
+ public:
+  // initialization
+  static void initialize(BufferBlob* blob);
+
+  // stubs
+  static CodeBlob* blob_for (StubID id);
+  static address   entry_for(StubID id)          { return blob_for(id)->code_begin(); }
+  static const char* name_for (StubID id);
+  static const char* name_for_address(address entry);
+};
+
 #endif // SHARE_VM_GRAAL_GRAAL_RUNTIME_HPP
--- a/src/share/vm/interpreter/interpreter.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/interpreter/interpreter.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -385,7 +385,7 @@
 address AbstractInterpreter::deopt_reexecute_entry(Method* method, address bcp) {
   assert(method->contains(bcp), "just checkin'");
   Bytecodes::Code code   = Bytecodes::java_code_at(method, bcp);
-#ifdef COMPILER1
+#if defined(COMPILER1) || defined(GRAAL)
   if(code == Bytecodes::_athrow ) {
     return Interpreter::rethrow_exception_entry();
   }
@@ -451,7 +451,7 @@
     case Bytecodes::_getstatic :
     case Bytecodes::_putstatic :
     case Bytecodes::_aastore   :
-#ifdef COMPILER1
+#if defined(COMPILER1) || defined(GRAAL)
     //special case of reexecution
     case Bytecodes::_athrow    :
 #endif
--- a/src/share/vm/memory/allocation.hpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/memory/allocation.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -34,6 +34,9 @@
 #ifdef COMPILER2
 #include "opto/c2_globals.hpp"
 #endif
+#ifdef GRAAL
+#include "graal/graalGlobals.hpp"
+#endif
 
 #include <new>
 
--- a/src/share/vm/oops/klass.hpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/oops/klass.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -256,7 +256,7 @@
   void set_java_mirror(oop m) { klass_oop_store(&_java_mirror, m); }
 
 #ifdef GRAAL
-  // graal mirror
+  // Graal mirror
   oop graal_mirror() const               { return _graal_mirror; }
   void set_graal_mirror(oop m)           { oop_store((oop*) &_graal_mirror, m); }
 #endif
--- a/src/share/vm/precompiled/precompiled.hpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/precompiled/precompiled.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -285,6 +285,9 @@
 # include "c1/c1_ValueType.hpp"
 # include "c1/c1_globals.hpp"
 #endif // COMPILER1
+#ifdef GRAAL
+# include "graal/graalGlobals.hpp"
+#endif // GRAAL
 #ifndef SERIALGC
 # include "gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp"
 # include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
--- a/src/share/vm/prims/jni.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/prims/jni.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -5152,8 +5152,8 @@
     *(JNIEnv**)penv = thread->jni_environment();
 
 #ifdef GRAAL
-    GraalCompiler* compiler = GraalCompiler::instance();
-    compiler->initialize();
+    GraalCompiler* graal_compiler = GraalCompiler::instance();
+    graal_compiler->initialize();
 #endif
 
     // Tracks the time application was running before GC
--- a/src/share/vm/prims/jvm.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/prims/jvm.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -372,6 +372,8 @@
     const char* compiler_name = "HotSpot " CSIZE "Client Compiler";
 #elif defined(COMPILER2)
     const char* compiler_name = "HotSpot " CSIZE "Server Compiler";
+#elif defined(GRAAL)
+    const char* compiler_name = "HotSpot " CSIZE "Graal Compiler";
 #else
     const char* compiler_name = "";
 #endif // compilers
--- a/src/share/vm/runtime/arguments.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/runtime/arguments.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -1515,7 +1515,7 @@
 
 void Arguments::set_g1_gc_flags() {
   assert(UseG1GC, "Error");
-#ifdef COMPILER1
+#if defined(COMPILER1) || defined(GRAAL)
   FastTLABRefill = false;
 #endif
   FLAG_SET_DEFAULT(ParallelGCThreads,
@@ -2034,16 +2034,26 @@
     jio_fprintf(defaultStream::error_stream(),
                     "CompressedOops are not supported in Graal at the moment\n");
         status = false;
+  } else {
+    // This prevents the flag being set to true by set_ergonomics_flags()
+    FLAG_SET_CMDLINE(bool, UseCompressedOops, false);
   }
+
   if (UseCompressedKlassPointers) {
     jio_fprintf(defaultStream::error_stream(),
                     "UseCompressedKlassPointers are not supported in Graal at the moment\n");
         status = false;
+  } else {
+    // This prevents the flag being set to true by set_ergonomics_flags()
+    FLAG_SET_CMDLINE(bool, UseCompressedKlassPointers, false);
   }
   if (UseG1GC) {
     jio_fprintf(defaultStream::error_stream(),
                         "G1 is not supported in Graal at the moment\n");
         status = false;
+  } else {
+    // This prevents the flag being set to true by set_ergonomics_flags()
+    FLAG_SET_CMDLINE(bool, UseG1GC, false);
   }
 
   if (!ScavengeRootsInCode) {
@@ -2957,9 +2967,9 @@
     }
 #ifdef GRAAL
     else if (match_option(option, "-G:", &tail)) { // -G:XXX
-      // Option for the graal compiler.
+      // Option for the Graal compiler.
       if (PrintVMOptions) {
-        tty->print_cr("graal option %s", tail);
+        tty->print_cr("Graal option %s", tail);
       }
       Arguments::add_graal_arg(tail);
 
@@ -3432,7 +3442,7 @@
   // which are subtlely different from each other but neither works with
   // biased locking.
   if (UseHeavyMonitors
-#ifdef COMPILER1
+#if defined(COMPILER1) || defined(GRAAL)
       || !UseFastLocking
 #endif // COMPILER1
     ) {
--- a/src/share/vm/runtime/arguments.hpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/runtime/arguments.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -229,7 +229,7 @@
   static char** _jvm_args_array;
   static int    _num_jvm_args;
 #ifdef GRAAL
-  // an array containing all graal arguments specified in the command line
+  // an array containing all Graal arguments specified in the command line
   static char** _graal_args_array;
   static int    _num_graal_args;
 #endif
--- a/src/share/vm/runtime/compilationPolicy.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/runtime/compilationPolicy.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -188,6 +188,7 @@
 #endif
 
 #ifdef COMPILER1
+  GRAAL_ONLY(ShouldNotReachHere();)
   if (is_c1_compile(comp_level)) {
     return _compiler_count;
   } else {
--- a/src/share/vm/runtime/globals.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/runtime/globals.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -36,6 +36,9 @@
 #ifdef COMPILER1
 #include "c1/c1_globals.hpp"
 #endif
+#ifdef GRAAL
+#include "graal/graalGlobals.hpp"
+#endif
 #ifdef COMPILER2
 #include "opto/c2_globals.hpp"
 #endif
@@ -215,6 +218,20 @@
   #define C1_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{C1 notproduct}", DEFAULT },
 #endif
 
+#define GRAAL_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{Graal product}", DEFAULT },
+#define GRAAL_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{Graal pd product}", DEFAULT },
+#ifdef PRODUCT
+  #define GRAAL_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
+  #define GRAAL_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */
+  #define GRAAL_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
+#else
+  #define GRAAL_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{Graal}", DEFAULT },
+  #define GRAAL_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, doc, "{Graal pd}", DEFAULT },
+  #define GRAAL_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{Graal notproduct}", DEFAULT },
+#endif
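+// In a non-product build each Graal flag above expands to a Flag table entry,
+// e.g. { "bool", "BootstrapGraal", &BootstrapGraal, doc, "{Graal product}", DEFAULT },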
+
 #define C2_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C2 product}", DEFAULT },
 #define C2_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C2 pd product}", DEFAULT },
 #define C2_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C2 diagnostic}", DEFAULT },
@@ -262,6 +279,9 @@
 #ifdef COMPILER1
  C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
 #endif
+#ifdef GRAAL
+ GRAAL_FLAGS(GRAAL_DEVELOP_FLAG_STRUCT, GRAAL_PD_DEVELOP_FLAG_STRUCT, GRAAL_PRODUCT_FLAG_STRUCT, GRAAL_PD_PRODUCT_FLAG_STRUCT, GRAAL_NOTPRODUCT_FLAG_STRUCT)
+#endif
 #ifdef COMPILER2
  C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, C2_PD_DEVELOP_FLAG_STRUCT, C2_PRODUCT_FLAG_STRUCT, C2_PD_PRODUCT_FLAG_STRUCT, C2_DIAGNOSTIC_FLAG_STRUCT, C2_EXPERIMENTAL_FLAG_STRUCT, C2_NOTPRODUCT_FLAG_STRUCT)
 #endif
--- a/src/share/vm/runtime/globals.hpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/runtime/globals.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -120,6 +120,20 @@
 # include "c1_globals_bsd.hpp"
 #endif
 #endif
+#ifdef GRAAL
+#ifdef TARGET_ARCH_x86
+# include "graalGlobals_x86.hpp"
+#endif
+#ifdef TARGET_ARCH_sparc
+# include "graalGlobals_sparc.hpp"
+#endif
+#ifdef TARGET_ARCH_arm
+# include "graalGlobals_arm.hpp"
+#endif
+#ifdef TARGET_ARCH_ppc
+# include "graalGlobals_ppc.hpp"
+#endif
+#endif // GRAAL
 #ifdef COMPILER2
 #ifdef TARGET_ARCH_x86
 # include "c2_globals_x86.hpp"
@@ -149,7 +163,7 @@
 #endif
 #endif
 
-#if !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK)
+#if !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK) && !defined(GRAAL)
 define_pd_global(bool, BackgroundCompilation,        false);
 define_pd_global(bool, UseTLAB,                      false);
 define_pd_global(bool, CICompileOSR,                 false);
--- a/src/share/vm/runtime/globals_extension.hpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/runtime/globals_extension.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -66,6 +66,20 @@
   #define C1_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)    FLAG_MEMBER(name),
 #endif
 
+#define GRAAL_PRODUCT_FLAG_MEMBER(type, name, value, doc)         FLAG_MEMBER(name),
+#define GRAAL_PD_PRODUCT_FLAG_MEMBER(type, name, doc)             FLAG_MEMBER(name),
+#ifdef PRODUCT
+  #define GRAAL_DEVELOP_FLAG_MEMBER(type, name, value, doc)       /* flag is constant */
+  #define GRAAL_PD_DEVELOP_FLAG_MEMBER(type, name, doc)           /* flag is constant */
+  #define GRAAL_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)
+#else
+  #define GRAAL_DEVELOP_FLAG_MEMBER(type, name, value, doc)       FLAG_MEMBER(name),
+  #define GRAAL_PD_DEVELOP_FLAG_MEMBER(type, name, doc)           FLAG_MEMBER(name),
+  #define GRAAL_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)    FLAG_MEMBER(name),
+#endif
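+// FLAG_MEMBER(name) expands to Flag_name, so each Graal flag (e.g.
+// BootstrapGraal) contributes an enum member such as Flag_BootstrapGraal.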
+
 #define C2_PRODUCT_FLAG_MEMBER(type, name, value, doc)         FLAG_MEMBER(name),
 #define C2_PD_PRODUCT_FLAG_MEMBER(type, name, doc)             FLAG_MEMBER(name),
 #define C2_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc)      FLAG_MEMBER(name),
@@ -100,6 +114,9 @@
 #ifdef COMPILER1
  C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
 #endif
+#ifdef GRAAL
+ GRAAL_FLAGS(GRAAL_DEVELOP_FLAG_MEMBER, GRAAL_PD_DEVELOP_FLAG_MEMBER, GRAAL_PRODUCT_FLAG_MEMBER, GRAAL_PD_PRODUCT_FLAG_MEMBER, GRAAL_NOTPRODUCT_FLAG_MEMBER)
+#endif
 #ifdef COMPILER2
  C2_FLAGS(C2_DEVELOP_FLAG_MEMBER, C2_PD_DEVELOP_FLAG_MEMBER, C2_PRODUCT_FLAG_MEMBER, C2_PD_PRODUCT_FLAG_MEMBER, C2_DIAGNOSTIC_FLAG_MEMBER, C2_EXPERIMENTAL_FLAG_MEMBER, C2_NOTPRODUCT_FLAG_MEMBER)
 #endif
@@ -139,6 +156,17 @@
   #define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           FLAG_MEMBER_WITH_TYPE(name,type),
   #define C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)    FLAG_MEMBER_WITH_TYPE(name,type),
 #endif
+#define GRAAL_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)         FLAG_MEMBER_WITH_TYPE(name,type),
+#define GRAAL_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc)             FLAG_MEMBER_WITH_TYPE(name,type),
+#ifdef PRODUCT
+  #define GRAAL_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       /* flag is constant */
+  #define GRAAL_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           /* flag is constant */
+  #define GRAAL_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)
+#else
+  #define GRAAL_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       FLAG_MEMBER_WITH_TYPE(name,type),
+  #define GRAAL_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           FLAG_MEMBER_WITH_TYPE(name,type),
+  #define GRAAL_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)    FLAG_MEMBER_WITH_TYPE(name,type),
+#endif
 #ifdef _LP64
 #define RUNTIME_LP64_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)    FLAG_MEMBER_WITH_TYPE(name,type),
 #else
@@ -205,6 +233,13 @@
           C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
           C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
 #endif
+#ifdef GRAAL
+ GRAAL_FLAGS(GRAAL_DEVELOP_FLAG_MEMBER_WITH_TYPE,
+          GRAAL_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
+          GRAAL_PRODUCT_FLAG_MEMBER_WITH_TYPE,
+          GRAAL_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
+          GRAAL_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
+#endif
 #ifdef COMPILER2
  C2_FLAGS(C2_DEVELOP_FLAG_MEMBER_WITH_TYPE,
           C2_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
--- a/src/share/vm/runtime/safepoint.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/runtime/safepoint.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -90,6 +90,9 @@
 #ifdef COMPILER1
 #include "c1/c1_globals.hpp"
 #endif
+#ifdef GRAAL
+#include "graal/graalGlobals.hpp"
+#endif
 
 // --------------------------------------------------------------------------------------------------
 // Implementation of Safepoint begin/end
--- a/src/share/vm/runtime/sharedRuntime.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -892,7 +892,7 @@
 #endif
 #ifdef GRAAL
         if (TraceSignals) {
-          tty->print_cr("graal implicit div0");
+          tty->print_cr("Graal implicit div0");
         }
         target_pc = deoptimize_for_implicit_exception(thread, pc, nm, Deoptimization::Reason_div0_check);
 #else
--- a/src/share/vm/runtime/thread.cpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/runtime/thread.cpp	Tue Dec 04 10:10:02 2012 +0100
@@ -3680,7 +3680,7 @@
   }
 
   // initialize compiler(s)
-#if defined(COMPILER1) || defined(COMPILER2)
+#if defined(COMPILER1) || defined(COMPILER2) || defined(GRAAL)
   CompileBroker::compilation_init();
 #endif
 
--- a/src/share/vm/utilities/globalDefinitions.hpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/utilities/globalDefinitions.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -798,16 +798,16 @@
 
 #if defined(COMPILER2) || defined(SHARK)
   CompLevel_highest_tier      = CompLevel_full_optimization,  // pure C2 and tiered
-#elif defined(COMPILER1)
-  CompLevel_highest_tier      = CompLevel_simple,             // pure C1
+#elif defined(COMPILER1) || defined(GRAAL)
+  CompLevel_highest_tier      = CompLevel_simple,             // pure C1 or Graal
 #else
   CompLevel_highest_tier      = CompLevel_none,
 #endif
 
 #if defined(TIERED)
   CompLevel_initial_compile   = CompLevel_full_profile        // tiered
-#elif defined(COMPILER1)
-  CompLevel_initial_compile   = CompLevel_simple              // pure C1
+#elif defined(COMPILER1) || defined(GRAAL)
+  CompLevel_initial_compile   = CompLevel_simple              // pure C1 or Graal
 #elif defined(COMPILER2) || defined(SHARK)
   CompLevel_initial_compile   = CompLevel_full_optimization   // pure C2
 #else
--- a/src/share/vm/utilities/top.hpp	Tue Dec 04 10:09:25 2012 +0100
+++ b/src/share/vm/utilities/top.hpp	Tue Dec 04 10:10:02 2012 +0100
@@ -39,6 +39,9 @@
 #ifdef COMPILER1
 #include "c1/c1_globals.hpp"
 #endif
+#ifdef GRAAL
+#include "graal/graalGlobals.hpp"
+#endif
 #ifdef COMPILER2
 #include "opto/c2_globals.hpp"
 #endif