changeset 11177:d0aeaf72c7bd

Merge.
author Doug Simon <doug.simon@oracle.com>
date Mon, 05 Aug 2013 11:25:14 +0200
parents 0630959b64e8 (current diff) 19648527ec72 (diff)
children d416f67d6f91 1e6d5dec4a4e
files src/cpu/x86/vm/graalGlobals_x86.hpp src/share/vm/code/nmethod.cpp src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
diffstat 92 files changed, 1642 insertions(+), 790 deletions(-) [+]
line wrap: on
line diff
--- a/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/CodeUtil.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/CodeUtil.java	Mon Aug 05 11:25:14 2013 +0200
@@ -75,7 +75,7 @@
      */
     public static int log2(int val) {
         assert val > 0;
-        return 31 - Integer.numberOfLeadingZeros(val);
+        return (Integer.SIZE - 1) - Integer.numberOfLeadingZeros(val);
     }
 
     /**
@@ -87,7 +87,7 @@
      */
     public static int log2(long val) {
         assert val > 0;
-        return 63 - Long.numberOfLeadingZeros(val);
+        return (Long.SIZE - 1) - Long.numberOfLeadingZeros(val);
     }
 
     /**
--- a/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/TargetDescription.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/TargetDescription.java	Mon Aug 05 11:25:14 2013 +0200
@@ -81,16 +81,4 @@
         this.implicitNullCheckLimit = implicitNullCheckLimit;
         this.inlineObjects = inlineObjects;
     }
-
-    /**
-     * Aligns the given frame size (without return instruction pointer) to the stack alignment size
-     * and return the aligned size (without return instruction pointer).
-     * 
-     * @param frameSize the initial frame size to be aligned
-     * @return the aligned frame size
-     */
-    public int alignFrameSize(int frameSize) {
-        int x = frameSize + arch.getReturnAddressSize() + (stackAlignment - 1);
-        return (x / stackAlignment) * stackAlignment - arch.getReturnAddressSize();
-    }
 }
--- a/graal/com.oracle.graal.compiler.hsail/src/com/oracle/graal/compiler/hsail/HSAILBackend.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler.hsail/src/com/oracle/graal/compiler/hsail/HSAILBackend.java	Mon Aug 05 11:25:14 2013 +0200
@@ -23,7 +23,6 @@
 package com.oracle.graal.compiler.hsail;
 
 import static com.oracle.graal.api.code.CallingConvention.Type.*;
-
 import static com.oracle.graal.api.code.ValueUtil.*;
 
 import com.oracle.graal.api.code.*;
@@ -35,6 +34,7 @@
 import com.oracle.graal.debug.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
+import com.oracle.graal.lir.hsail.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.hsail.*;
 
@@ -60,6 +60,11 @@
     }
 
     @Override
+    public FrameMap newFrameMap() {
+        return new HSAILFrameMap(runtime(), target, runtime().lookupRegisterConfig());
+    }
+
+    @Override
     public LIRGenerator newLIRGenerator(StructuredGraph graph, FrameMap frameMap, CallingConvention cc, LIR lir) {
         return new HSAILLIRGenerator(graph, runtime(), target, frameMap, cc, lir);
     }
--- a/graal/com.oracle.graal.compiler.ptx/src/com/oracle/graal/compiler/ptx/PTXBackend.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler.ptx/src/com/oracle/graal/compiler/ptx/PTXBackend.java	Mon Aug 05 11:25:14 2013 +0200
@@ -30,6 +30,7 @@
 import com.oracle.graal.compiler.target.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
+import com.oracle.graal.lir.ptx.*;
 import com.oracle.graal.nodes.*;
 
 /**
@@ -42,6 +43,11 @@
     }
 
     @Override
+    public FrameMap newFrameMap() {
+        return new PTXFrameMap(runtime(), target, runtime().lookupRegisterConfig());
+    }
+
+    @Override
     public LIRGenerator newLIRGenerator(StructuredGraph graph, FrameMap frameMap, CallingConvention cc, LIR lir) {
         return new PTXLIRGenerator(graph, runtime(), target, frameMap, cc, lir);
     }
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/BoxingEliminationTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/BoxingEliminationTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -307,8 +307,8 @@
     private void processMethod(final String snippet) {
         graph = parse(snippet);
         Assumptions assumptions = new Assumptions(false);
-        HighTierContext context = new HighTierContext(runtime(), assumptions, replacements);
-        new InliningPhase(runtime(), null, replacements, assumptions, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
+        HighTierContext context = new HighTierContext(runtime(), assumptions, replacements, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL);
+        new InliningPhase().apply(graph, context);
         new PartialEscapePhase(false, new CanonicalizerPhase(true)).apply(graph, context);
     }
 
@@ -324,9 +324,9 @@
                 graph = parse(snippet);
 
                 Assumptions assumptions = new Assumptions(false);
-                HighTierContext context = new HighTierContext(runtime(), assumptions, replacements);
+                HighTierContext context = new HighTierContext(runtime(), assumptions, replacements, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL);
                 CanonicalizerPhase canonicalizer = new CanonicalizerPhase(true);
-                new InliningPhase(runtime(), null, replacements, assumptions, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
+                new InliningPhase().apply(graph, context);
                 if (loopPeeling) {
                     new LoopTransformHighPhase().apply(graph);
                 }
@@ -338,7 +338,7 @@
                 canonicalizer.apply(graph, context);
 
                 StructuredGraph referenceGraph = parse(referenceSnippet);
-                new InliningPhase(runtime(), null, replacements, assumptions, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(referenceGraph);
+                new InliningPhase().apply(referenceGraph, context);
                 new DeadCodeEliminationPhase().apply(referenceGraph);
                 new CanonicalizerPhase(true).apply(referenceGraph, context);
 
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/DegeneratedLoopsTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/DegeneratedLoopsTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -29,6 +29,7 @@
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.common.*;
+import com.oracle.graal.phases.tiers.*;
 
 /**
  * In the following tests, the usages of local variable "a" are replaced with the integer constant
@@ -81,8 +82,9 @@
 
             public void run() {
                 StructuredGraph graph = parse(snippet);
-                new InliningPhase(runtime(), null, replacements, new Assumptions(false), null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
-                new CanonicalizerPhase.Instance(runtime(), new Assumptions(false), true).apply(graph);
+                HighTierContext context = new HighTierContext(runtime(), new Assumptions(false), replacements, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL);
+                new InliningPhase().apply(graph, context);
+                new CanonicalizerPhase(true).apply(graph, context);
                 Debug.dump(graph, "Graph");
                 StructuredGraph referenceGraph = parse(REFERENCE_SNIPPET);
                 Debug.dump(referenceGraph, "ReferenceGraph");
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/EliminateNestedCheckCastsTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/EliminateNestedCheckCastsTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -106,11 +106,11 @@
     }
 
     private StructuredGraph compileSnippet(final String snippet, final int checkcasts, final int afterCanon) {
-        return Debug.scope(snippet, new Callable<StructuredGraph>() {
+        final StructuredGraph graph = parse(snippet);
+        return Debug.scope("NestedCheckCastsTest", graph, new Callable<StructuredGraph>() {
 
             @Override
             public StructuredGraph call() throws Exception {
-                StructuredGraph graph = parse(snippet);
                 Debug.dump(graph, "After parsing: " + snippet);
                 Assert.assertEquals(checkcasts, graph.getNodes().filter(CheckCastNode.class).count());
                 new CanonicalizerPhase.Instance(runtime(), new Assumptions(false), true).apply(graph);
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/FinalizableSubclassTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/FinalizableSubclassTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -39,6 +39,7 @@
 import com.oracle.graal.nodes.java.*;
 import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.common.*;
+import com.oracle.graal.phases.tiers.*;
 
 public class FinalizableSubclassTest extends GraalCompilerTest {
 
@@ -67,8 +68,9 @@
 
         GraphBuilderConfiguration conf = GraphBuilderConfiguration.getSnippetDefault();
         new GraphBuilderPhase(runtime, conf, OptimisticOptimizations.ALL).apply(graph);
-        new InliningPhase(runtime(), null, replacements, assumptions, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
-        new CanonicalizerPhase.Instance(runtime(), assumptions, true).apply(graph);
+        HighTierContext context = new HighTierContext(runtime(), assumptions, replacements, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL);
+        new InliningPhase().apply(graph, context);
+        new CanonicalizerPhase(true).apply(graph, context);
         return graph;
     }
 
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/FloatingReadTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/FloatingReadTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -59,7 +59,7 @@
 
             public void run() {
                 StructuredGraph graph = parse(snippet);
-                HighTierContext context = new HighTierContext(runtime(), new Assumptions(false), replacements);
+                PhaseContext context = new PhaseContext(runtime(), new Assumptions(false), replacements);
                 new LoweringPhase(LoweringType.BEFORE_GUARDS).apply(graph, context);
                 new FloatingReadPhase().apply(graph);
 
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/GraalCompilerTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/GraalCompilerTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -403,16 +403,6 @@
     }
 
     /**
-     * Can be overridden to modify the compilation phases applied for a test.
-     * 
-     * @param method the method being compiled
-     * @param graph the graph being compiled
-     * @param phasePlan the phase plan to be edited
-     */
-    protected void editPhasePlan(ResolvedJavaMethod method, StructuredGraph graph, PhasePlan phasePlan) {
-    }
-
-    /**
      * Gets installed code for a given method and graph, compiling it first if necessary.
      * 
      * @param forceCompile specifies whether to ignore any previous code cached for the (method,
@@ -442,7 +432,6 @@
                 PhasePlan phasePlan = new PhasePlan();
                 GraphBuilderPhase graphBuilderPhase = new GraphBuilderPhase(runtime, GraphBuilderConfiguration.getDefault(), OptimisticOptimizations.ALL);
                 phasePlan.addPhase(PhasePosition.AFTER_PARSING, graphBuilderPhase);
-                editPhasePlan(method, graph, phasePlan);
                 CallingConvention cc = getCallingConvention(runtime, Type.JavaCallee, graph.method(), false);
                 final CompilationResult compResult = GraalCompiler.compileGraph(graph, cc, method, runtime, replacements, backend, runtime().getTarget(), null, phasePlan, OptimisticOptimizations.ALL,
                                 new SpeculationLog(), suites, new CompilationResult());
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/InvokeExceptionTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/InvokeExceptionTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -30,6 +30,7 @@
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.common.*;
+import com.oracle.graal.phases.tiers.*;
 
 public class InvokeExceptionTest extends GraalCompilerTest {
 
@@ -65,8 +66,9 @@
             hints.put(invoke, 1000d);
         }
         Assumptions assumptions = new Assumptions(false);
-        new InliningPhase(runtime(), hints, replacements, assumptions, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
-        new CanonicalizerPhase.Instance(runtime(), assumptions, true).apply(graph);
+        HighTierContext context = new HighTierContext(runtime(), assumptions, replacements, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL);
+        new InliningPhase(hints).apply(graph, context);
+        new CanonicalizerPhase(true).apply(graph, context);
         new DeadCodeEliminationPhase().apply(graph);
     }
 }
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/InvokeHintsTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/InvokeHintsTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -30,6 +30,7 @@
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.common.*;
+import com.oracle.graal.phases.tiers.*;
 
 public class InvokeHintsTest extends GraalCompilerTest {
 
@@ -76,8 +77,9 @@
         }
 
         Assumptions assumptions = new Assumptions(false);
-        new InliningPhase(runtime(), hints, replacements, assumptions, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
-        new CanonicalizerPhase.Instance(runtime(), assumptions, true).apply(graph);
+        HighTierContext context = new HighTierContext(runtime(), assumptions, replacements, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL);
+        new InliningPhase(hints).apply(graph, context);
+        new CanonicalizerPhase(true).apply(graph, context);
         new DeadCodeEliminationPhase().apply(graph);
         StructuredGraph referenceGraph = parse(REFERENCE_SNIPPET);
         assertEquals(referenceGraph, graph);
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/LockEliminationTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/LockEliminationTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -91,11 +91,12 @@
         StructuredGraph graph = parse(method);
         PhasePlan phasePlan = getDefaultPhasePlan();
         Assumptions assumptions = new Assumptions(true);
-        new CanonicalizerPhase.Instance(runtime(), assumptions, true).apply(graph);
-        new InliningPhase(runtime(), null, replacements, assumptions, null, phasePlan, OptimisticOptimizations.ALL).apply(graph);
-        new CanonicalizerPhase.Instance(runtime(), assumptions, true).apply(graph);
+        HighTierContext context = new HighTierContext(runtime(), assumptions, replacements, null, phasePlan, OptimisticOptimizations.ALL);
+        new CanonicalizerPhase(true).apply(graph, context);
+        new InliningPhase().apply(graph, context);
+        new CanonicalizerPhase(true).apply(graph, context);
         new DeadCodeEliminationPhase().apply(graph);
-        new LoweringPhase(LoweringType.BEFORE_GUARDS).apply(graph, new PhaseContext(runtime, assumptions, replacements));
+        new LoweringPhase(LoweringType.BEFORE_GUARDS).apply(graph, context);
         new ValueAnchorCleanupPhase().apply(graph);
         new LockEliminationPhase().apply(graph);
         return graph;
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/MemoryScheduleTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/MemoryScheduleTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -85,7 +85,7 @@
     @Test
     public void testSimple() {
         for (TestMode mode : TestMode.values()) {
-            SchedulePhase schedule = getFinalSchedule("testSimpleSnippet", mode, MemoryScheduling.OPTIMAL, false);
+            SchedulePhase schedule = getFinalSchedule("testSimpleSnippet", mode, MemoryScheduling.OPTIMAL);
             StructuredGraph graph = schedule.getCFG().graph;
             assertReadAndWriteInSameBlock(schedule, true);
             assertOrderedAfterSchedule(schedule, graph.getNodes().filter(FloatingReadNode.class).first(), graph.getNodes().filter(WriteNode.class).first());
@@ -110,7 +110,7 @@
     @Test
     public void testSplit1() {
         for (TestMode mode : TestMode.values()) {
-            SchedulePhase schedule = getFinalSchedule("testSplitSnippet1", mode, MemoryScheduling.OPTIMAL, false);
+            SchedulePhase schedule = getFinalSchedule("testSplitSnippet1", mode, MemoryScheduling.OPTIMAL);
             assertReadWithinStartBlock(schedule, true);
             assertReadWithinReturnBlock(schedule, false);
         }
@@ -133,7 +133,7 @@
 
     @Test
     public void testSplit2() {
-        SchedulePhase schedule = getFinalSchedule("testSplit2Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL, false);
+        SchedulePhase schedule = getFinalSchedule("testSplit2Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL);
         assertReadWithinStartBlock(schedule, false);
         assertReadWithinReturnBlock(schedule, true);
     }
@@ -157,7 +157,7 @@
 
     @Test
     public void testLoop1() {
-        SchedulePhase schedule = getFinalSchedule("testLoop1Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL, false);
+        SchedulePhase schedule = getFinalSchedule("testLoop1Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL);
         assertEquals(6, schedule.getCFG().getBlocks().length);
         assertReadWithinStartBlock(schedule, true);
         assertReadWithinReturnBlock(schedule, false);
@@ -182,13 +182,35 @@
 
     @Test
     public void testLoop2() {
-        SchedulePhase schedule = getFinalSchedule("testLoop2Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL, false);
+        SchedulePhase schedule = getFinalSchedule("testLoop2Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL);
         assertEquals(6, schedule.getCFG().getBlocks().length);
         assertReadWithinStartBlock(schedule, false);
         assertReadWithinReturnBlock(schedule, true);
     }
 
     /**
+     * Here the read should float out of the loop.
+     */
+    public static int testLoop3Snippet(int a) {
+        int j = 0;
+        for (int i = 0; i < a; i++) {
+            if (i - container.a == 0) {
+                break;
+            }
+            j++;
+        }
+        return j;
+    }
+
+    @Test
+    public void testLoop3() {
+        SchedulePhase schedule = getFinalSchedule("testLoop3Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL);
+        assertEquals(7, schedule.getCFG().getBlocks().length);
+        assertReadWithinStartBlock(schedule, true);
+        assertReadWithinReturnBlock(schedule, false);
+    }
+
+    /**
      * Here the read should float to the end (into the same block as the return).
      */
     public static int testArrayCopySnippet(Integer intValue, char[] a, char[] b, int len) {
@@ -198,7 +220,7 @@
 
     @Test
     public void testArrayCopy() {
-        SchedulePhase schedule = getFinalSchedule("testArrayCopySnippet", TestMode.INLINED_WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL, false);
+        SchedulePhase schedule = getFinalSchedule("testArrayCopySnippet", TestMode.INLINED_WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL);
         StructuredGraph graph = schedule.getCFG().getStartBlock().getBeginNode().graph();
         ReturnNode ret = graph.getNodes(ReturnNode.class).first();
         assertTrue(ret.result() instanceof FloatingReadNode);
@@ -219,7 +241,7 @@
 
     @Test
     public void testIfRead1() {
-        SchedulePhase schedule = getFinalSchedule("testIfRead1Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL, false);
+        SchedulePhase schedule = getFinalSchedule("testIfRead1Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL);
         assertEquals(4, schedule.getCFG().getBlocks().length);
         assertReadWithinStartBlock(schedule, true);
         assertReadAndWriteInSameBlock(schedule, false);
@@ -240,7 +262,7 @@
 
     @Test
     public void testIfRead2() {
-        SchedulePhase schedule = getFinalSchedule("testIfRead2Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL, false);
+        SchedulePhase schedule = getFinalSchedule("testIfRead2Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL);
         assertEquals(4, schedule.getCFG().getBlocks().length);
         assertEquals(1, schedule.getCFG().graph.getNodes().filter(FloatingReadNode.class).count());
         assertReadWithinStartBlock(schedule, false);
@@ -262,7 +284,7 @@
 
     @Test
     public void testIfRead3() {
-        SchedulePhase schedule = getFinalSchedule("testIfRead3Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL, false);
+        SchedulePhase schedule = getFinalSchedule("testIfRead3Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL);
         assertEquals(4, schedule.getCFG().getBlocks().length);
         assertReadWithinStartBlock(schedule, false);
         assertReadWithinReturnBlock(schedule, true);
@@ -283,7 +305,7 @@
 
     @Test
     public void testIfRead4() {
-        SchedulePhase schedule = getFinalSchedule("testIfRead4Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL, false);
+        SchedulePhase schedule = getFinalSchedule("testIfRead4Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL);
         assertEquals(4, schedule.getCFG().getBlocks().length);
         assertReadWithinStartBlock(schedule, false);
         assertReadWithinReturnBlock(schedule, false);
@@ -309,7 +331,7 @@
 
     @Test
     public void testBlockSchedule() {
-        SchedulePhase schedule = getFinalSchedule("testBlockScheduleSnippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL, false);
+        SchedulePhase schedule = getFinalSchedule("testBlockScheduleSnippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL);
         StructuredGraph graph = schedule.getCFG().graph;
         NodeIterable<WriteNode> writeNodes = graph.getNodes().filter(WriteNode.class);
 
@@ -344,7 +366,7 @@
 
     @Test
     public void testProxy1() {
-        SchedulePhase schedule = getFinalSchedule("testProxy1Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL, false);
+        SchedulePhase schedule = getFinalSchedule("testProxy1Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL);
         assertReadWithinStartBlock(schedule, true); // read of container.a should be in start block
         /*
          * read of container.b for increment operation should be in return block. TODO: not sure
@@ -370,7 +392,62 @@
 
     @Test
     public void testProxy2() {
-        SchedulePhase schedule = getFinalSchedule("testProxy2Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL, false);
+        SchedulePhase schedule = getFinalSchedule("testProxy2Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL);
+        assertReadWithinStartBlock(schedule, false);
+        assertReadWithinReturnBlock(schedule, false);
+    }
+
+    private int hash = 0;
+    private final char[] value = new char[3];
+
+    public int testStringHashCodeSnippet() {
+        int h = hash;
+        if (h == 0 && value.length > 0) {
+            char[] val = value;
+
+            for (int i = 0; i < value.length; i++) {
+                h = 31 * h + val[i];
+            }
+            hash = h;
+        }
+        return h;
+    }
+
+    @Test
+    public void testStringHashCode() {
+        SchedulePhase schedule = getFinalSchedule("testStringHashCodeSnippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL);
+        assertReadWithinStartBlock(schedule, true);
+        assertReadWithinReturnBlock(schedule, false);
+
+        hash = 0x1337;
+        value[0] = 'a';
+        value[1] = 'b';
+        value[2] = 'c';
+        test("testStringHashCodeSnippet");
+    }
+
+    public static int testLoop4Snippet(int count) {
+        int[] a = new int[count];
+
+        for (int i = 0; i < a.length; i++) {
+            a[i] = i;
+        }
+
+        int i = 0;
+        int iwrap = count - 1;
+        int sum = 0;
+
+        while (i < count) {
+            sum += (a[i] + a[iwrap]) / 2;
+            iwrap = i;
+            i++;
+        }
+        return sum;
+    }
+
+    @Test
+    public void testLoop4() {
+        SchedulePhase schedule = getFinalSchedule("testLoop4Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL);
         assertReadWithinStartBlock(schedule, false);
         assertReadWithinReturnBlock(schedule, false);
     }
@@ -415,17 +492,17 @@
         assertTrue(!(inSame ^ schedule.getCFG().blockFor(read) == schedule.getCFG().blockFor(write)));
     }
 
-    private SchedulePhase getFinalSchedule(final String snippet, final TestMode mode, final MemoryScheduling memsched, final boolean printSchedule) {
+    private SchedulePhase getFinalSchedule(final String snippet, final TestMode mode, final MemoryScheduling memsched) {
         final StructuredGraph graph = parse(snippet);
         return Debug.scope("FloatingReadTest", graph, new Callable<SchedulePhase>() {
 
             @Override
             public SchedulePhase call() throws Exception {
                 Assumptions assumptions = new Assumptions(false);
-                HighTierContext context = new HighTierContext(runtime(), assumptions, replacements);
+                HighTierContext context = new HighTierContext(runtime(), assumptions, replacements, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL);
                 new CanonicalizerPhase(true).apply(graph, context);
                 if (mode == TestMode.INLINED_WITHOUT_FRAMESTATES) {
-                    new InliningPhase(runtime(), null, replacements, assumptions, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
+                    new InliningPhase().apply(graph, context);
                 }
                 new LoweringPhase(LoweringType.BEFORE_GUARDS).apply(graph, context);
                 if (mode == TestMode.WITHOUT_FRAMESTATES || mode == TestMode.INLINED_WITHOUT_FRAMESTATES) {
@@ -444,7 +521,12 @@
                 new FloatingReadPhase().apply(graph);
                 new RemoveValueProxyPhase().apply(graph);
 
-                SchedulePhase schedule = new SchedulePhase(SchedulingStrategy.LATEST_OUT_OF_LOOPS, memsched, printSchedule);
+                MidTierContext midContext = new MidTierContext(runtime(), assumptions, replacements, runtime().getTarget(), OptimisticOptimizations.ALL);
+                new GuardLoweringPhase().apply(graph, midContext);
+                new LoweringPhase(LoweringType.AFTER_GUARDS).apply(graph, midContext);
+                new LoweringPhase(LoweringType.AFTER_FSA).apply(graph, midContext);
+
+                SchedulePhase schedule = new SchedulePhase(SchedulingStrategy.LATEST_OUT_OF_LOOPS, memsched);
                 schedule.apply(graph);
                 assertEquals(1, graph.getNodes().filter(StartNode.class).count());
                 return schedule;
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/MonitorGraphTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/MonitorGraphTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -35,6 +35,7 @@
 import com.oracle.graal.nodes.java.*;
 import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.common.*;
+import com.oracle.graal.phases.tiers.*;
 
 /**
  * In the following tests, the usages of local variable "a" are replaced with the integer constant
@@ -93,8 +94,9 @@
             hints.put(invoke, 1000d);
         }
         Assumptions assumptions = new Assumptions(false);
-        new InliningPhase(runtime(), hints, replacements, assumptions, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
-        new CanonicalizerPhase.Instance(runtime(), assumptions, true).apply(graph);
+        HighTierContext context = new HighTierContext(runtime(), assumptions, replacements, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL);
+        new InliningPhase(hints).apply(graph, context);
+        new CanonicalizerPhase(true).apply(graph, context);
         new DeadCodeEliminationPhase().apply(graph);
         return graph;
     }
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/PushNodesThroughPiTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/PushNodesThroughPiTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -90,7 +90,7 @@
 
     private StructuredGraph compileTestSnippet(final String snippet) {
         StructuredGraph graph = parse(snippet);
-        HighTierContext context = new HighTierContext(runtime(), new Assumptions(false), replacements);
+        PhaseContext context = new PhaseContext(runtime(), new Assumptions(false), replacements);
         CanonicalizerPhase canonicalizer = new CanonicalizerPhase(true);
         new LoweringPhase(LoweringType.BEFORE_GUARDS).apply(graph, context);
         canonicalizer.apply(graph, context);
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ReadAfterCheckCastTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ReadAfterCheckCastTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -84,7 +84,7 @@
             // structure changes significantly
             public void run() {
                 StructuredGraph graph = parse(snippet);
-                HighTierContext context = new HighTierContext(runtime(), new Assumptions(false), replacements);
+                PhaseContext context = new PhaseContext(runtime(), new Assumptions(false), replacements);
                 new LoweringPhase(LoweringType.BEFORE_GUARDS).apply(graph, context);
                 new FloatingReadPhase().apply(graph);
                 new EliminatePartiallyRedundantGuardsPhase(true, false).apply(graph);
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ea/EarlyReadEliminationTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ea/EarlyReadEliminationTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -41,8 +41,8 @@
     protected void processMethod(final String snippet) {
         graph = parse(snippet);
         Assumptions assumptions = new Assumptions(false);
-        HighTierContext context = new HighTierContext(runtime(), assumptions, replacements);
-        new InliningPhase(runtime(), null, replacements, assumptions, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
+        HighTierContext context = new HighTierContext(runtime(), assumptions, replacements, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL);
+        new InliningPhase().apply(graph, context);
         CanonicalizerPhase canonicalizer = new CanonicalizerPhase(true);
         new EarlyReadEliminationPhase(canonicalizer).apply(graph, context);
     }
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ea/EscapeAnalysisTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ea/EscapeAnalysisTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -217,8 +217,8 @@
                 new GraphBuilderPhase(runtime, GraphBuilderConfiguration.getEagerDefault(), OptimisticOptimizations.ALL).apply(graph);
 
                 Assumptions assumptions = new Assumptions(false);
-                HighTierContext context = new HighTierContext(runtime(), assumptions, replacements);
-                new InliningPhase(runtime(), null, replacements, assumptions, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
+                HighTierContext context = new HighTierContext(runtime(), assumptions, replacements, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL);
+                new InliningPhase().apply(graph, context);
                 new DeadCodeEliminationPhase().apply(graph);
                 new PartialEscapePhase(iterativeEscapeAnalysis, new CanonicalizerPhase(true)).apply(graph, context);
                 Assert.assertEquals(1, graph.getNodes(ReturnNode.class).count());
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ea/IterativeInliningTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ea/IterativeInliningTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -88,7 +88,7 @@
 
     private void processMethod(final String snippet) {
         graph = parse(snippet);
-        HighTierContext context = new HighTierContext(runtime(), new Assumptions(false), replacements);
-        new IterativeInliningPhase(null, getDefaultPhasePlan(), OptimisticOptimizations.ALL, new CanonicalizerPhase(true)).apply(graph, context);
+        HighTierContext context = new HighTierContext(runtime(), new Assumptions(false), replacements, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL);
+        new IterativeInliningPhase(new CanonicalizerPhase(true)).apply(graph, context);
     }
 }
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ea/PEAReadEliminationTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ea/PEAReadEliminationTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -243,8 +243,8 @@
     protected void processMethod(final String snippet) {
         graph = parse(snippet);
         Assumptions assumptions = new Assumptions(false);
-        HighTierContext context = new HighTierContext(runtime(), assumptions, replacements);
-        new InliningPhase(runtime(), null, replacements, assumptions, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
+        HighTierContext context = new HighTierContext(runtime(), assumptions, replacements, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL);
+        new InliningPhase().apply(graph, context);
         CanonicalizerPhase canonicalizer = new CanonicalizerPhase(true);
         new PartialEscapePhase(false, true, canonicalizer).apply(graph, context);
     }
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ea/PartialEscapeAnalysisTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ea/PartialEscapeAnalysisTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -159,8 +159,8 @@
                 StructuredGraph graph = parse(snippet);
 
                 Assumptions assumptions = new Assumptions(false);
-                HighTierContext context = new HighTierContext(runtime(), assumptions, replacements);
-                new InliningPhase(runtime(), null, replacements, assumptions, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
+                HighTierContext context = new HighTierContext(runtime(), assumptions, replacements, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL);
+                new InliningPhase().apply(graph, context);
                 new DeadCodeEliminationPhase().apply(graph);
                 CanonicalizerPhase canonicalizer = new CanonicalizerPhase(true);
                 canonicalizer.apply(graph, context);
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/inlining/InliningTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/inlining/InliningTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -36,6 +36,7 @@
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.common.*;
+import com.oracle.graal.phases.tiers.*;
 import com.oracle.graal.test.*;
 
 public class InliningTest extends GraalCompilerTest {
@@ -236,11 +237,12 @@
                 StructuredGraph graph = eagerInfopointMode ? parseDebug(method) : parse(method);
                 PhasePlan phasePlan = getDefaultPhasePlan(eagerInfopointMode);
                 Assumptions assumptions = new Assumptions(true);
+                HighTierContext context = new HighTierContext(runtime(), assumptions, replacements, null, phasePlan, OptimisticOptimizations.ALL);
                 Debug.dump(graph, "Graph");
-                new CanonicalizerPhase.Instance(runtime(), assumptions, true).apply(graph);
-                new InliningPhase(runtime(), null, replacements, assumptions, null, phasePlan, OptimisticOptimizations.ALL).apply(graph);
+                new CanonicalizerPhase(true).apply(graph, context);
+                new InliningPhase().apply(graph, context);
                 Debug.dump(graph, "Graph");
-                new CanonicalizerPhase.Instance(runtime(), assumptions, true).apply(graph);
+                new CanonicalizerPhase(true).apply(graph, context);
                 new DeadCodeEliminationPhase().apply(graph);
                 return graph;
             }
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/GraalCompiler.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/GraalCompiler.java	Mon Aug 05 11:25:14 2013 +0200
@@ -40,28 +40,18 @@
 import com.oracle.graal.nodes.cfg.*;
 import com.oracle.graal.nodes.spi.*;
 import com.oracle.graal.nodes.util.*;
-import com.oracle.graal.options.*;
 import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.PhasePlan.PhasePosition;
 import com.oracle.graal.phases.common.*;
 import com.oracle.graal.phases.graph.*;
 import com.oracle.graal.phases.schedule.*;
 import com.oracle.graal.phases.tiers.*;
-import com.oracle.graal.phases.verify.*;
-import com.oracle.graal.virtual.phases.ea.*;
 
 /**
  * Static methods for orchestrating the compilation of a {@linkplain StructuredGraph graph}.
  */
 public class GraalCompiler {
 
-    // @formatter:off
-    @Option(help = "")
-    public static final OptionValue<Boolean> VerifyUsageWithEquals = new OptionValue<>(true);
-    @Option(help = "Enable inlining")
-    public static final OptionValue<Boolean> Inline = new OptionValue<>(true);
-    // @formatter:on
-
     /**
      * Requests compilation of a given graph.
      * 
@@ -138,34 +128,8 @@
         } else {
             Debug.dump(graph, "initial state");
         }
-        if (VerifyUsageWithEquals.getValue()) {
-            new VerifyUsageWithEquals(runtime, Value.class).apply(graph);
-            new VerifyUsageWithEquals(runtime, Register.class).apply(graph);
-        }
 
-        CanonicalizerPhase canonicalizer = new CanonicalizerPhase(!AOTCompilation.getValue());
-        HighTierContext highTierContext = new HighTierContext(runtime, assumptions, replacements);
-
-        if (OptCanonicalizer.getValue()) {
-            canonicalizer.apply(graph, highTierContext);
-        }
-
-        if (Inline.getValue() && !plan.isPhaseDisabled(InliningPhase.class)) {
-            if (IterativeInlining.getValue()) {
-                new IterativeInliningPhase(cache, plan, optimisticOpts, canonicalizer).apply(graph, highTierContext);
-            } else {
-                new InliningPhase(runtime, null, replacements, assumptions, cache, plan, optimisticOpts).apply(graph);
-                new DeadCodeEliminationPhase().apply(graph);
-
-                if (ConditionalElimination.getValue() && OptCanonicalizer.getValue()) {
-                    canonicalizer.apply(graph, highTierContext);
-                    new IterativeConditionalEliminationPhase().apply(graph, highTierContext);
-                }
-            }
-        }
-        TypeProfileProxyNode.cleanFromGraph(graph);
-
-        plan.runPhases(PhasePosition.HIGH_LEVEL, graph);
+        HighTierContext highTierContext = new HighTierContext(runtime, assumptions, replacements, cache, plan, optimisticOpts);
         suites.getHighTier().apply(graph, highTierContext);
 
         MidTierContext midTierContext = new MidTierContext(runtime, assumptions, replacements, target, optimisticOpts);
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScan.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScan.java	Mon Aug 05 11:25:14 2013 +0200
@@ -63,6 +63,8 @@
     final RegisterAttributes[] registerAttributes;
     final Register[] registers;
 
+    boolean callKillsRegisters;
+
     private static final int INITIAL_SPLIT_INTERVALS_CAPACITY = 32;
 
     public static class BlockData {
@@ -1702,7 +1704,7 @@
     }
 
     private void computeDebugInfo(IntervalWalker iw, final LIRInstruction op, LIRFrameState info) {
-        BitSet registerRefMap = op.destroysCallerSavedRegisters() ? null : frameMap.initRegisterRefMap();
+        BitSet registerRefMap = op.destroysCallerSavedRegisters() && callKillsRegisters ? null : frameMap.initRegisterRefMap();
         BitSet frameRefMap = frameMap.initFrameRefMap();
         computeOopMap(iw, op, registerRefMap, frameRefMap);
 
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScanWalker.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScanWalker.java	Mon Aug 05 11:25:14 2013 +0200
@@ -45,8 +45,6 @@
  */
 final class LinearScanWalker extends IntervalWalker {
 
-    private final boolean callKillsRegisters;
-
     private Register[] availableRegs;
 
     private final int[] usePos;
@@ -77,7 +75,7 @@
         // The register allocator can save time not trying to find a register at a call site.
         HashSet<Register> registers = new HashSet<>(Arrays.asList(allocator.frameMap.registerConfig.getAllocatableRegisters()));
         registers.removeAll(Arrays.asList(allocator.frameMap.registerConfig.getCallerSaveRegisters()));
-        callKillsRegisters = registers.size() == 0;
+        allocator.callKillsRegisters = registers.size() == 0;
 
         moveResolver = new MoveResolver(allocator);
         spillIntervals = Util.uncheckedCast(new List[allocator.registers.length]);
@@ -784,8 +782,7 @@
     }
 
     boolean noAllocationPossible(Interval interval) {
-
-        if (callKillsRegisters) {
+        if (allocator.callKillsRegisters) {
             // fast calculation of intervals that can never get a register because the
             // the next instruction is a call that blocks all registers
             // Note: this only works if a call kills all registers
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/phases/HighTier.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/phases/HighTier.java	Mon Aug 05 11:25:14 2013 +0200
@@ -24,18 +24,54 @@
 
 import static com.oracle.graal.phases.GraalOptions.*;
 
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
 import com.oracle.graal.loop.phases.*;
 import com.oracle.graal.nodes.spi.Lowerable.LoweringType;
+import com.oracle.graal.options.*;
 import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.common.*;
 import com.oracle.graal.phases.tiers.*;
+import com.oracle.graal.phases.verify.*;
 import com.oracle.graal.virtual.phases.ea.*;
 
 public class HighTier extends PhaseSuite<HighTierContext> {
 
+    // @formatter:off
+    @Option(help = "")
+    public static final OptionValue<Boolean> VerifyUsageWithEquals = new OptionValue<>(true);
+    @Option(help = "Enable inlining")
+    public static final OptionValue<Boolean> Inline = new OptionValue<>(true);
+    // @formatter:on
+
     public HighTier() {
         CanonicalizerPhase canonicalizer = new CanonicalizerPhase(!AOTCompilation.getValue());
 
+        if (VerifyUsageWithEquals.getValue()) {
+            appendPhase(new VerifyUsageWithEquals(Value.class));
+            appendPhase(new VerifyUsageWithEquals(Register.class));
+        }
+
+        if (OptCanonicalizer.getValue()) {
+            appendPhase(canonicalizer);
+        }
+
+        if (Inline.getValue()) {
+            if (IterativeInlining.getValue()) {
+                appendPhase(new IterativeInliningPhase(canonicalizer));
+            } else {
+                appendPhase(new InliningPhase());
+                appendPhase(new DeadCodeEliminationPhase());
+
+                if (ConditionalElimination.getValue() && OptCanonicalizer.getValue()) {
+                    appendPhase(canonicalizer);
+                    appendPhase(new IterativeConditionalEliminationPhase());
+                }
+            }
+        }
+
+        appendPhase(new CleanTypeProfileProxyPhase());
+
         if (FullUnroll.getValue()) {
             appendPhase(new LoopFullUnrollPhase(!AOTCompilation.getValue()));
         }
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/target/Backend.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/target/Backend.java	Mon Aug 05 11:25:14 2013 +0200
@@ -47,9 +47,7 @@
         return runtime;
     }
 
-    public FrameMap newFrameMap() {
-        return new FrameMap(runtime, target, runtime.lookupRegisterConfig());
-    }
+    public abstract FrameMap newFrameMap();
 
     public abstract LIRGenerator newLIRGenerator(StructuredGraph graph, FrameMap frameMap, CallingConvention cc, LIR lir);
 
--- a/graal/com.oracle.graal.debug/src/com/oracle/graal/debug/Debug.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.debug/src/com/oracle/graal/debug/Debug.java	Mon Aug 05 11:25:14 2013 +0200
@@ -164,6 +164,12 @@
         }
     }
 
+    public static void printf(String msg, Object... args) {
+        if (ENABLED && DebugScope.getInstance().isLogEnabled()) {
+            DebugScope.getInstance().printf(msg, args);
+        }
+    }
+
     public static void dump(Object object, String msg, Object... args) {
         if (ENABLED && DebugScope.getInstance().isDumpEnabled()) {
             DebugScope.getInstance().dump(object, msg, args);
--- a/graal/com.oracle.graal.debug/src/com/oracle/graal/debug/internal/DebugScope.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.debug/src/com/oracle/graal/debug/internal/DebugScope.java	Mon Aug 05 11:25:14 2013 +0200
@@ -114,6 +114,16 @@
         }
     }
 
+    public void printf(String msg, Object... args) {
+        if (isLogEnabled()) {
+            if (lastLogScope.get() == null || !lastLogScope.get().qualifiedName.equals(this.qualifiedName)) {
+                output.println("scope: " + qualifiedName);
+                lastLogScope.set(this);
+            }
+            output.printf(msg, args);
+        }
+    }
+
     public void dump(Object object, String formatString, Object[] args) {
         if (isDumpEnabled()) {
             DebugConfig config = getConfig();
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java	Mon Aug 05 11:25:14 2013 +0200
@@ -63,6 +63,11 @@
     }
 
     @Override
+    public FrameMap newFrameMap() {
+        return new AMD64FrameMap(runtime(), target, runtime().lookupRegisterConfig());
+    }
+
+    @Override
     public LIRGenerator newLIRGenerator(StructuredGraph graph, FrameMap frameMap, CallingConvention cc, LIR lir) {
         return new AMD64HotSpotLIRGenerator(graph, runtime(), target, frameMap, cc, lir);
     }
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotGraalRuntime.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotGraalRuntime.java	Mon Aug 05 11:25:14 2013 +0200
@@ -58,7 +58,8 @@
     protected TargetDescription createTarget() {
         final int stackFrameAlignment = 16;
         final int implicitNullCheckLimit = 4096;
-        return new TargetDescription(createArchitecture(), true, stackFrameAlignment, implicitNullCheckLimit, true);
+        final boolean inlineObjects = true;
+        return new TargetDescription(createArchitecture(), true, stackFrameAlignment, implicitNullCheckLimit, inlineObjects);
     }
 
     @Override
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64SafepointOp.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64SafepointOp.java	Mon Aug 05 11:25:14 2013 +0200
@@ -23,8 +23,6 @@
 package com.oracle.graal.hotspot.amd64;
 
 import static com.oracle.graal.amd64.AMD64.*;
-import static com.oracle.graal.phases.GraalOptions.*;
-import sun.misc.*;
 
 import com.oracle.graal.api.code.*;
 import com.oracle.graal.api.meta.*;
@@ -42,8 +40,6 @@
 @Opcode("SAFEPOINT")
 public class AMD64SafepointOp extends AMD64LIRInstruction {
 
-    private static final Unsafe unsafe = Unsafe.getUnsafe();
-
     @State protected LIRFrameState state;
     @Temp({OperandFlag.REG}) private AllocatableValue temp;
 
@@ -57,11 +53,10 @@
 
     @Override
     public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler asm) {
-        int pos = asm.codeBuffer.position();
-        int offset = SafepointPollOffset.getValue() % unsafe.pageSize();
+        final int pos = asm.codeBuffer.position();
         RegisterValue scratch = (RegisterValue) temp;
         if (config.isPollingPageFar) {
-            asm.movq(scratch.getRegister(), config.safepointPollingAddress + offset);
+            asm.movq(scratch.getRegister(), config.safepointPollingAddress);
             tasm.recordMark(Marks.MARK_POLL_FAR);
             tasm.recordInfopoint(pos, state, InfopointReason.SAFEPOINT);
             asm.movq(scratch.getRegister(), new AMD64Address(scratch.getRegister()));
@@ -70,7 +65,7 @@
             tasm.recordInfopoint(pos, state, InfopointReason.SAFEPOINT);
             // The C++ code transforms the polling page offset into an RIP displacement
             // to the real address at that offset in the polling page.
-            asm.movq(scratch.getRegister(), new AMD64Address(rip, offset));
+            asm.movq(scratch.getRegister(), new AMD64Address(rip, 0));
         }
     }
 }
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotBackend.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotBackend.java	Mon Aug 05 11:25:14 2013 +0200
@@ -60,6 +60,11 @@
     }
 
     @Override
+    public FrameMap newFrameMap() {
+        return new SPARCFrameMap(runtime(), target, runtime().lookupRegisterConfig());
+    }
+
+    @Override
     public LIRGenerator newLIRGenerator(StructuredGraph graph, FrameMap frameMap, CallingConvention cc, LIR lir) {
         return new SPARCHotSpotLIRGenerator(graph, runtime(), target, frameMap, cc, lir);
     }
@@ -73,7 +78,7 @@
     protected static void emitStackOverflowCheck(TargetMethodAssembler tasm, boolean afterFrameInit) {
         if (StackShadowPages.getValue() > 0) {
             SPARCMacroAssembler masm = (SPARCMacroAssembler) tasm.asm;
-            final int frameSize = tasm.frameMap.frameSize();
+            final int frameSize = tasm.frameMap.totalFrameSize();
             if (frameSize > 0) {
                 int lastFramePage = frameSize / unsafe.pageSize();
                 // emit multiple stack bangs for methods with frames larger than a page
@@ -106,9 +111,7 @@
 
         @Override
         public void enter(TargetMethodAssembler tasm) {
-            final int alignment = target.wordSize * 2;
-            final int frameSize = (tasm.frameMap.frameSize() + (alignment - 1)) & ~(alignment - 1);
-            assert frameSize % alignment == 0 : "must preserve 2*wordSize alignment";
+            final int frameSize = tasm.frameMap.totalFrameSize();
 
             SPARCMacroAssembler masm = (SPARCMacroAssembler) tasm.asm;
             if (!isStub) {
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCSafepointOp.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCSafepointOp.java	Mon Aug 05 11:25:14 2013 +0200
@@ -24,8 +24,6 @@
 
 import static com.oracle.graal.asm.sparc.SPARCMacroAssembler.*;
 import static com.oracle.graal.sparc.SPARC.*;
-import static com.oracle.graal.phases.GraalOptions.*;
-import sun.misc.*;
 
 import com.oracle.graal.api.code.*;
 import com.oracle.graal.api.meta.*;
@@ -42,8 +40,6 @@
 @Opcode("SAFEPOINT")
 public class SPARCSafepointOp extends SPARCLIRInstruction {
 
-    private static final Unsafe unsafe = Unsafe.getUnsafe();
-
     @State protected LIRFrameState state;
     @Temp({OperandFlag.REG}) private AllocatableValue temp;
 
@@ -58,9 +54,8 @@
     @Override
     public void emitCode(TargetMethodAssembler tasm, SPARCMacroAssembler masm) {
         final int pos = masm.codeBuffer.position();
-        final int offset = SafepointPollOffset.getValue() % unsafe.pageSize();
         Register scratch = ((RegisterValue) temp).getRegister();
-        new Setx(config.safepointPollingAddress + offset, scratch).emit(masm);
+        new Setx(config.safepointPollingAddress, scratch).emit(masm);
         tasm.recordInfopoint(pos, state, InfopointReason.SAFEPOINT);
         new Ldx(new SPARCAddress(scratch, 0), g0).emit(masm);
     }
--- a/graal/com.oracle.graal.hotspot.test/src/com/oracle/graal/hotspot/test/AheadOfTimeCompilationTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.hotspot.test/src/com/oracle/graal/hotspot/test/AheadOfTimeCompilationTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -202,7 +202,6 @@
         PhasePlan phasePlan = new PhasePlan();
         GraphBuilderPhase graphBuilderPhase = new GraphBuilderPhase(runtime, GraphBuilderConfiguration.getDefault(), OptimisticOptimizations.ALL);
         phasePlan.addPhase(PhasePosition.AFTER_PARSING, graphBuilderPhase);
-        editPhasePlan(method, graph, phasePlan);
         CallingConvention cc = getCallingConvention(runtime, Type.JavaCallee, graph.method(), false);
         // create suites everytime, as we modify options for the compiler
         final Suites suitesLocal = Graal.getRequiredCapability(SuitesProvider.class).createSuites();
--- a/graal/com.oracle.graal.hotspot.test/src/com/oracle/graal/hotspot/test/WriteBarrierAdditionTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.hotspot.test/src/com/oracle/graal/hotspot/test/WriteBarrierAdditionTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -207,6 +207,27 @@
         test2("testUnsafeLoad", wr, new Long(16), new Integer(16));
     }
 
+    static Object[] src = new Object[1];
+    static Object[] dst = new Object[1];
+
+    static {
+        for (int i = 0; i < src.length; i++) {
+            src[i] = new Object();
+        }
+        for (int i = 0; i < dst.length; i++) {
+            dst[i] = new Object();
+        }
+    }
+
+    public static void testArrayCopy(Object a, Object b, Object c) throws Exception {
+        System.arraycopy(a, 0, b, 0, (int) c);
+    }
+
+    @Test
+    public void test11() throws Exception {
+        test2("testArrayCopy", src, dst, dst.length);
+    }
+
     public static Object testUnsafeLoad(Object a, Object b, Object c) throws Exception {
         final int offset = (c == null ? 0 : ((Integer) c).intValue());
         final long displacement = (b == null ? 0 : ((Long) b).longValue());
@@ -225,8 +246,8 @@
 
             public void run() {
                 StructuredGraph graph = parse(snippet);
-                HighTierContext context = new HighTierContext(runtime(), new Assumptions(false), replacements);
-                new InliningPhase(runtime(), replacements, context.getAssumptions(), null, getDefaultPhasePlan(), OptimisticOptimizations.ALL, new InliningPhase.InlineEverythingPolicy()).apply(graph);
+                HighTierContext context = new HighTierContext(runtime(), new Assumptions(false), replacements, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL);
+                new InliningPhase(new InliningPhase.InlineEverythingPolicy()).apply(graph, context);
                 new LoweringPhase(LoweringType.BEFORE_GUARDS).apply(graph, context);
                 new WriteBarrierAdditionPhase().apply(graph);
                 Debug.dump(graph, "After Write Barrier Addition");
--- a/graal/com.oracle.graal.hotspot.test/src/com/oracle/graal/hotspot/test/WriteBarrierVerificationTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.hotspot.test/src/com/oracle/graal/hotspot/test/WriteBarrierVerificationTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -632,8 +632,8 @@
 
             public AssertionError call() {
                 final StructuredGraph graph = parse(snippet);
-                HighTierContext highTierContext = new HighTierContext(runtime(), new Assumptions(false), replacements);
-                new InliningPhase(runtime(), null, replacements, new Assumptions(false), null, getDefaultPhasePlan(), OptimisticOptimizations.ALL).apply(graph);
+                HighTierContext highTierContext = new HighTierContext(runtime(), new Assumptions(false), replacements, null, getDefaultPhasePlan(), OptimisticOptimizations.ALL);
+                new InliningPhase().apply(graph, highTierContext);
 
                 MidTierContext midTierContext = new MidTierContext(runtime(), new Assumptions(false), replacements, runtime().getTarget(), OptimisticOptimizations.ALL);
 
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotVMConfig.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotVMConfig.java	Mon Aug 05 11:25:14 2013 +0200
@@ -506,6 +506,7 @@
     public long vmErrorAddress;
     public long writeBarrierPreAddress;
     public long writeBarrierPostAddress;
+    public long validateObject;
     public long javaTimeMillisAddress;
     public long javaTimeNanosAddress;
     public long arithmeticSinAddress;
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java	Mon Aug 05 11:25:14 2013 +0200
@@ -311,6 +311,8 @@
         linkForeignCall(r, OSR_MIGRATION_END, c.osrMigrationEndAddress, DONT_PREPEND_THREAD, LEAF, NOT_REEXECUTABLE, NO_LOCATIONS);
         linkForeignCall(r, G1WBPRECALL, c.writeBarrierPreAddress, PREPEND_THREAD, LEAF, REEXECUTABLE, NO_LOCATIONS);
         linkForeignCall(r, G1WBPOSTCALL, c.writeBarrierPostAddress, PREPEND_THREAD, LEAF, REEXECUTABLE, NO_LOCATIONS);
+        linkForeignCall(r, VALIDATEOBJECT, c.validateObject, PREPEND_THREAD, LEAF, REEXECUTABLE, NO_LOCATIONS);
+
         if (IntrinsifyObjectMethods.getValue()) {
             r.registerSubstitutions(ObjectSubstitutions.class);
         }
@@ -634,18 +636,20 @@
             graph.replaceFixedWithFixed(storeIndexed, memoryWrite);
 
         } else if (n instanceof UnsafeLoadNode) {
-            UnsafeLoadNode load = (UnsafeLoadNode) n;
-            assert load.kind() != Kind.Illegal;
-            boolean compressible = (!load.object().isNullConstant() && load.accessKind() == Kind.Object);
-            if (addReadBarrier(load, tool)) {
-                unsafeLoadSnippets.lower(load, tool);
-            } else {
-                IndexedLocationNode location = IndexedLocationNode.create(ANY_LOCATION, load.accessKind(), load.displacement(), load.offset(), graph, 1);
-                ReadNode memoryRead = graph.add(new ReadNode(load.object(), location, load.stamp(), BarrierType.NONE, compressible));
-                // An unsafe read must not float outside its block otherwise
-                // it may float above an explicit null check on its object.
-                memoryRead.setGuard(AbstractBeginNode.prevBegin(load));
-                graph.replaceFixedWithFixed(load, memoryRead);
+            if (tool.getLoweringType() != LoweringType.BEFORE_GUARDS) {
+                UnsafeLoadNode load = (UnsafeLoadNode) n;
+                assert load.kind() != Kind.Illegal;
+                boolean compressible = (!load.object().isNullConstant() && load.accessKind() == Kind.Object);
+                if (addReadBarrier(load, tool)) {
+                    unsafeLoadSnippets.lower(load, tool);
+                } else {
+                    IndexedLocationNode location = IndexedLocationNode.create(ANY_LOCATION, load.accessKind(), load.displacement(), load.offset(), graph, 1);
+                    ReadNode memoryRead = graph.add(new ReadNode(load.object(), location, load.stamp(), BarrierType.NONE, compressible));
+                    // An unsafe read must not float outside its block otherwise
+                    // it may float above an explicit null check on its object.
+                    memoryRead.setGuard(AbstractBeginNode.prevBegin(load));
+                    graph.replaceFixedWithFixed(load, memoryRead);
+                }
             }
         } else if (n instanceof UnsafeStoreNode) {
             UnsafeStoreNode store = (UnsafeStoreNode) n;
@@ -885,7 +889,7 @@
 
     private static BarrierType getFieldStoreBarrierType(StoreFieldNode storeField) {
         BarrierType barrierType = BarrierType.NONE;
-        if (storeField.field().getKind() == Kind.Object && !storeField.value().objectStamp().alwaysNull()) {
+        if (storeField.field().getKind() == Kind.Object) {
             barrierType = BarrierType.IMPRECISE;
         }
         return barrierType;
@@ -893,7 +897,7 @@
 
     private static BarrierType getArrayStoreBarrierType(StoreIndexedNode store) {
         BarrierType barrierType = BarrierType.NONE;
-        if (store.elementKind() == Kind.Object && !store.value().objectStamp().alwaysNull()) {
+        if (store.elementKind() == Kind.Object) {
             barrierType = BarrierType.PRECISE;
         }
         return barrierType;
@@ -901,12 +905,12 @@
 
     private static BarrierType getUnsafeStoreBarrierType(UnsafeStoreNode store) {
         BarrierType barrierType = BarrierType.NONE;
-        if (store.value().kind() == Kind.Object && !store.value().objectStamp().alwaysNull()) {
+        if (store.value().kind() == Kind.Object) {
             ResolvedJavaType type = store.object().objectStamp().type();
-            if (type != null && type.isArray()) {
+            if (type != null && !type.isArray()) {
+                barrierType = BarrierType.IMPRECISE;
+            } else {
                 barrierType = BarrierType.PRECISE;
-            } else {
-                barrierType = BarrierType.IMPRECISE;
             }
         }
         return barrierType;
@@ -914,12 +918,12 @@
 
     private static BarrierType getCompareAndSwapBarrier(CompareAndSwapNode cas) {
         BarrierType barrierType = BarrierType.NONE;
-        if (cas.expected().kind() == Kind.Object && !cas.newValue().objectStamp().alwaysNull()) {
+        if (cas.expected().kind() == Kind.Object) {
             ResolvedJavaType type = cas.object().objectStamp().type();
-            if (type != null && type.isArray()) {
+            if (type != null && !type.isArray()) {
+                barrierType = BarrierType.IMPRECISE;
+            } else {
                 barrierType = BarrierType.PRECISE;
-            } else {
-                barrierType = BarrierType.IMPRECISE;
             }
         }
         return barrierType;
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/phases/AheadOfTimeVerificationPhase.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/phases/AheadOfTimeVerificationPhase.java	Mon Aug 05 11:25:14 2013 +0200
@@ -25,6 +25,7 @@
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.phases.*;
+import com.oracle.graal.phases.tiers.*;
 
 /**
  * Checks for illegal object constants in a graph processed for AOT compilation. The only legal
@@ -33,10 +34,10 @@
  * 
  * @see LoadJavaMirrorWithKlassPhase
  */
-public class AheadOfTimeVerificationPhase extends VerifyPhase {
+public class AheadOfTimeVerificationPhase extends VerifyPhase<PhaseContext> {
 
     @Override
-    protected boolean verify(StructuredGraph graph) {
+    protected boolean verify(StructuredGraph graph, PhaseContext context) {
         for (ConstantNode node : graph.getNodes().filter(ConstantNode.class)) {
             assert !isObject(node) || isNullReference(node) || isInternedString(node) : "illegal object constant: " + node;
         }
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/phases/WriteBarrierAdditionPhase.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/phases/WriteBarrierAdditionPhase.java	Mon Aug 05 11:25:14 2013 +0200
@@ -56,7 +56,7 @@
     private static void addReadNodeBarriers(ReadNode node, StructuredGraph graph) {
         if (node.getBarrierType() == BarrierType.PRECISE) {
             assert useG1GC();
-            G1ReferentFieldReadBarrier barrier = graph.add(new G1ReferentFieldReadBarrier(node.object(), node, node.location(), false, false));
+            G1ReferentFieldReadBarrier barrier = graph.add(new G1ReferentFieldReadBarrier(node.object(), node, node.location(), false));
             graph.addAfterFixed(node, barrier);
         } else {
             assert node.getBarrierType() == BarrierType.NONE : "Non precise read barrier has been attached to read node.";
@@ -75,7 +75,7 @@
                 }
                 graph.addAfterFixed(node, graph.add(new G1PostWriteBarrier(node.object(), node.value(), node.location(), true)));
             } else {
-                graph.addAfterFixed(node, graph.add(new SerialWriteBarrier(node.object(), node.location(), true)));
+                graph.addAfterFixed(node, graph.add(new SerialWriteBarrier(node.object(), node.value(), node.location(), true)));
             }
         } else if (barrierType == BarrierType.IMPRECISE) {
             if (useG1GC()) {
@@ -85,7 +85,7 @@
                 graph.addBeforeFixed(node, preBarrier);
                 graph.addAfterFixed(node, graph.add(new G1PostWriteBarrier(node.object(), node.value(), node.location(), false)));
             } else {
-                graph.addAfterFixed(node, graph.add(new SerialWriteBarrier(node.object(), node.location(), false)));
+                graph.addAfterFixed(node, graph.add(new SerialWriteBarrier(node.object(), node.value(), node.location(), false)));
             }
         } else {
             assert barrierType == BarrierType.NONE;
@@ -100,14 +100,14 @@
                 graph.addBeforeFixed(node, graph.add(new G1PreWriteBarrier(node.object(), node.getExpectedValue(), node.getLocation(), false, false)));
                 graph.addAfterFixed(node, graph.add(new G1PostWriteBarrier(node.object(), node.getNewValue(), node.getLocation(), true)));
             } else {
-                graph.addAfterFixed(node, graph.add(new SerialWriteBarrier(node.object(), node.getLocation(), true)));
+                graph.addAfterFixed(node, graph.add(new SerialWriteBarrier(node.object(), node.getNewValue(), node.getLocation(), true)));
             }
         } else if (barrierType == BarrierType.IMPRECISE) {
             if (useG1GC()) {
                 graph.addBeforeFixed(node, graph.add(new G1PreWriteBarrier(node.object(), node.getExpectedValue(), node.getLocation(), false, false)));
                 graph.addAfterFixed(node, graph.add(new G1PostWriteBarrier(node.object(), node.getNewValue(), node.getLocation(), false)));
             } else {
-                graph.addAfterFixed(node, graph.add(new SerialWriteBarrier(node.object(), node.getLocation(), false)));
+                graph.addAfterFixed(node, graph.add(new SerialWriteBarrier(node.object(), node.getNewValue(), node.getLocation(), false)));
             }
         } else {
             assert barrierType == BarrierType.NONE;
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/ArrayCopyNode.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/ArrayCopyNode.java	Mon Aug 05 11:25:14 2013 +0200
@@ -83,7 +83,7 @@
         }
         // the canonicalization before loop unrolling is needed to propagate the length into
         // additions, etc.
-        HighTierContext context = new HighTierContext(tool.getRuntime(), tool.assumptions(), tool.getReplacements());
+        PhaseContext context = new PhaseContext(tool.getRuntime(), tool.assumptions(), tool.getReplacements());
         new CanonicalizerPhase(true).apply(snippetGraph, context);
         new LoopFullUnrollPhase(true).apply(snippetGraph, context);
         new CanonicalizerPhase(true).apply(snippetGraph, context);
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/UnsafeArrayCopySnippets.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/UnsafeArrayCopySnippets.java	Mon Aug 05 11:25:14 2013 +0200
@@ -59,7 +59,7 @@
         long postLoopBytes;
 
         // We can easily vectorize the loop if both offsets have the same alignment.
-        if ((srcOffset % VECTOR_SIZE) == (destOffset % VECTOR_SIZE)) {
+        if (byteLength >= VECTOR_SIZE && (srcOffset % VECTOR_SIZE) == (destOffset % VECTOR_SIZE)) {
             preLoopBytes = NumUtil.roundUp(arrayBaseOffset + srcOffset, VECTOR_SIZE) - (arrayBaseOffset + srcOffset);
             postLoopBytes = (byteLength - preLoopBytes) % VECTOR_SIZE;
             mainLoopBytes = byteLength - preLoopBytes - postLoopBytes;
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/UnsafeLoadSnippets.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/UnsafeLoadSnippets.java	Mon Aug 05 11:25:14 2013 +0200
@@ -26,6 +26,7 @@
 import static com.oracle.graal.replacements.SnippetTemplate.*;
 
 import com.oracle.graal.api.code.*;
+import com.oracle.graal.hotspot.nodes.*;
 import com.oracle.graal.nodes.HeapAccess.BarrierType;
 import com.oracle.graal.nodes.extended.*;
 import com.oracle.graal.nodes.spi.*;
@@ -39,11 +40,12 @@
 
     @Snippet
     public static Object lowerUnsafeLoad(Object object, long offset, int disp) {
+        Object fixedObject = FixedValueAnchorNode.getObject(object);
         long displacement = disp + offset;
         if (object instanceof java.lang.ref.Reference && referentOffset() == displacement) {
-            return Word.fromObject(object).readObject((int) displacement, BarrierType.PRECISE, true);
+            return Word.fromObject(fixedObject).readObject((int) displacement, BarrierType.PRECISE, true);
         } else {
-            return Word.fromObject(object).readObject((int) displacement, BarrierType.NONE, true);
+            return Word.fromObject(fixedObject).readObject((int) displacement, BarrierType.NONE, true);
         }
     }
 
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/WriteBarrierSnippets.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/WriteBarrierSnippets.java	Mon Aug 05 11:25:14 2013 +0200
@@ -50,16 +50,30 @@
     private static final SnippetCounter.Group countersWriteBarriers = SnippetCounters.getValue() ? new SnippetCounter.Group("WriteBarriers") : null;
     private static final SnippetCounter serialFieldWriteBarrierCounter = new SnippetCounter(countersWriteBarriers, "serialFieldWriteBarrier", "Number of Serial Field Write Barriers");
     private static final SnippetCounter serialArrayWriteBarrierCounter = new SnippetCounter(countersWriteBarriers, "serialArrayWriteBarrier", "Number of Serial Array Write Barriers");
+    private static final SnippetCounter g1AttemptedPostWriteBarrierCounter = new SnippetCounter(countersWriteBarriers, "g1AttemptedPostWriteBarrierCounter",
+                    "Number of attempted G1 Post Write Barriers");
+    private static final SnippetCounter g1AttemptedPreWriteBarrierCounter = new SnippetCounter(countersWriteBarriers, "g1AttemptedPreWriteBarrierCounter", "Number of G1 attempted Pre Write Barriers");
+    private static final SnippetCounter g1AttemptedRefFieldBarrierCounter = new SnippetCounter(countersWriteBarriers, "g1AttemptedRefFieldBarrierCounter",
+                    "Number of G1 attempted Ref Field Read Barriers");
+    private static final SnippetCounter g1EffectivePostWriteBarrierCounter = new SnippetCounter(countersWriteBarriers, "g1EffectivePostWriteBarrierCounter",
+                    "Number of effective G1 Post Write Barriers");
+    private static final SnippetCounter g1EffectivePreWriteBarrierCounter = new SnippetCounter(countersWriteBarriers, "g1EffectivePreWriteBarrierCounter", "Number of G1 effective Pre Write Barriers");
+    private static final SnippetCounter g1EffectiveRefFieldBarrierCounter = new SnippetCounter(countersWriteBarriers, "g1EffectiveRefFieldBarrierCounter",
+                    "Number of G1 effective Ref Field Read Barriers");
 
     @Snippet
-    public static void serialArrayWriteBarrier(Object obj, Object location, @ConstantParameter boolean usePrecise) {
-        Object object = FixedValueAnchorNode.getObject(obj);
+    public static void serialWriteBarrier(Object object, Object location, @ConstantParameter boolean usePrecise, @ConstantParameter boolean alwaysNull) {
+        // No barriers are added if we are always storing a null.
+        if (alwaysNull) {
+            return;
+        }
+        Object fixedObject = FixedValueAnchorNode.getObject(object);
         Pointer oop;
         if (usePrecise) {
-            oop = Word.fromArray(object, location);
+            oop = Word.fromArray(fixedObject, location);
             serialArrayWriteBarrierCounter.inc();
         } else {
-            oop = Word.fromObject(object);
+            oop = Word.fromObject(fixedObject);
             serialFieldWriteBarrierCounter.inc();
         }
         Word base = (Word) oop.unsignedShiftRight(cardTableShift());
@@ -75,6 +89,9 @@
 
     @Snippet
     public static void serialArrayRangeWriteBarrier(Object object, int startIndex, int length) {
+        if (length == 0) {
+            return;
+        }
         Object dest = FixedValueAnchorNode.getObject(object);
         int cardShift = cardTableShift();
         long cardStart = cardTableStart();
@@ -124,9 +141,17 @@
                     log(trace, "[%d] G1-Pre Thread %p Previous Object %p\n ", gcCycle, thread.rawValue(), previousOop.rawValue());
                     verifyOop(previousOop.toObject());
                 }
+                g1AttemptedPreWriteBarrierCounter.inc();
+            } else {
+                g1AttemptedRefFieldBarrierCounter.inc();
             }
             // If the previous value is null the barrier should not be issued.
             if (probability(FREQUENT_PROBABILITY, previousOop.notEqual(0))) {
+                if (doLoad) {
+                    g1EffectivePreWriteBarrierCounter.inc();
+                } else {
+                    g1EffectiveRefFieldBarrierCounter.inc();
+                }
                 // If the thread-local SATB buffer is full issue a native call which will
                 // initialize a new one and add the entry.
                 if (probability(FREQUENT_PROBABILITY, indexValue.notEqual(0))) {
@@ -143,24 +168,28 @@
     }
 
     @Snippet
-    public static void g1PostWriteBarrier(Object object, Object value, Object location, @ConstantParameter boolean usePrecise, @ConstantParameter boolean trace) {
+    public static void g1PostWriteBarrier(Object object, Object value, Object location, @ConstantParameter boolean usePrecise, @ConstantParameter boolean alwaysNull, @ConstantParameter boolean trace) {
+        // No barriers are added if we are always storing a null.
+        if (alwaysNull) {
+            return;
+        }
         Word thread = thread();
         Object fixedObject = FixedValueAnchorNode.getObject(object);
         Object fixedValue = FixedValueAnchorNode.getObject(value);
         verifyOop(fixedObject);
         verifyOop(fixedValue);
-        Word oop = (Word) Word.fromObject(fixedObject);
-        Word field;
+        validateObject(fixedObject, fixedValue);
+        Word oop;
         if (usePrecise) {
-            field = (Word) Word.fromArray(fixedObject, location);
+            oop = (Word) Word.fromArray(fixedObject, location);
         } else {
-            field = oop;
+            oop = (Word) Word.fromObject(fixedObject);
         }
         int gcCycle = 0;
         if (trace) {
             gcCycle = (int) Word.unsigned(HotSpotReplacementsUtil.gcTotalCollectionsAddress()).readLong(0);
             log(trace, "[%d] G1-Post Thread: %p Object: %p\n", gcCycle, thread.rawValue(), Word.fromObject(fixedObject).rawValue());
-            log(trace, "[%d] G1-Post Thread: %p Field: %p\n", gcCycle, thread.rawValue(), field.rawValue());
+            log(trace, "[%d] G1-Post Thread: %p Field: %p\n", gcCycle, thread.rawValue(), oop.rawValue());
         }
         Word writtenValue = (Word) Word.fromObject(fixedValue);
         Word bufferAddress = thread.readWord(g1CardQueueBufferOffset());
@@ -168,11 +197,11 @@
         Word indexValue = thread.readWord(g1CardQueueIndexOffset());
         // The result of the xor reveals whether the installed pointer crosses heap regions.
         // In case it does the write barrier has to be issued.
-        Word xorResult = (field.xor(writtenValue)).unsignedShiftRight(logOfHeapRegionGrainBytes());
+        Word xorResult = (oop.xor(writtenValue)).unsignedShiftRight(logOfHeapRegionGrainBytes());
 
         // Calculate the address of the card to be enqueued to the
         // thread local card queue.
-        Word cardBase = field.unsignedShiftRight(cardTableShift());
+        Word cardBase = oop.unsignedShiftRight(cardTableShift());
         long startAddress = cardTableStart();
         int displacement = 0;
         if (((int) startAddress) == startAddress) {
@@ -182,12 +211,14 @@
         }
         Word cardAddress = cardBase.add(displacement);
 
+        g1AttemptedPostWriteBarrierCounter.inc();
         if (probability(LIKELY_PROBABILITY, xorResult.notEqual(0))) {
             // If the written value is not null continue with the barrier addition.
             if (probability(FREQUENT_PROBABILITY, writtenValue.notEqual(0))) {
                 byte cardByte = cardAddress.readByte(0);
                 // If the card is already dirty, (hence already enqueued) skip the insertion.
                 if (probability(LIKELY_PROBABILITY, cardByte != (byte) 0)) {
+                    g1EffectivePostWriteBarrierCounter.inc();
                     log(trace, "[%d] G1-Post Thread: %p Card: %p \n", gcCycle, thread.rawValue(), Word.unsigned(cardByte).rawValue());
                     cardAddress.writeByte(0, (byte) 0);
                     // If the thread local card queue is full, issue a native call which will
@@ -209,31 +240,31 @@
 
     @Snippet
     public static void g1ArrayRangePreWriteBarrier(Object object, int startIndex, int length) {
-        Object dest = FixedValueAnchorNode.getObject(object);
         Word thread = thread();
         byte markingValue = thread.readByte(g1SATBQueueMarkingOffset());
+        // If the concurrent marker is not enabled or the vector length is zero, return.
+        if (markingValue == (byte) 0 || length == 0) {
+            return;
+        }
+        Object dest = FixedValueAnchorNode.getObject(object);
         Word bufferAddress = thread.readWord(g1SATBQueueBufferOffset());
         Word indexAddress = thread.add(g1SATBQueueIndexOffset());
-        Word indexValue = indexAddress.readWord(0);
-
-        // If the concurrent marker is not enabled return.
-        if (markingValue == (byte) 0) {
-            return;
-        }
-        Word oop;
+        long dstAddr = GetObjectAddressNode.get(dest);
+        long indexValue = indexAddress.readWord(0).rawValue();
         final int scale = arrayIndexScale(Kind.Object);
         int header = arrayBaseOffset(Kind.Object);
 
         for (int i = startIndex; i < length; i++) {
-            Word address = (Word) Word.fromObject(dest).add(header).add(Word.unsigned(i * (long) scale));
-            oop = (Word) Word.fromObject(address.readObject(0, BarrierType.NONE, true));
+            long address = dstAddr + header + (i * scale);
+            Pointer oop = Word.fromObject(Word.unsigned(address).readObject(0, BarrierType.NONE, true));
+            verifyOop(oop.toObject());
             if (oop.notEqual(0)) {
-                if (indexValue.notEqual(0)) {
-                    Word nextIndex = indexValue.subtract(wordSize());
-                    Word logAddress = bufferAddress.add(nextIndex);
+                if (indexValue != 0) {
+                    indexValue = indexValue - wordSize();
+                    Word logAddress = bufferAddress.add(Word.unsigned(indexValue));
                     // Log the object to be marked as well as update the SATB's buffer next index.
                     logAddress.writeWord(0, oop);
-                    indexAddress.writeWord(0, nextIndex);
+                    indexAddress.writeWord(0, Word.unsigned(indexValue));
                 } else {
                     g1PreBarrierStub(G1WBPRECALL, oop.toObject());
                 }
@@ -243,11 +274,14 @@
 
     @Snippet
     public static void g1ArrayRangePostWriteBarrier(Object object, int startIndex, int length) {
+        if (length == 0) {
+            return;
+        }
         Object dest = FixedValueAnchorNode.getObject(object);
         Word thread = thread();
         Word bufferAddress = thread.readWord(g1CardQueueBufferOffset());
         Word indexAddress = thread.add(g1CardQueueIndexOffset());
-        Word indexValue = thread.readWord(g1CardQueueIndexOffset());
+        long indexValue = thread.readWord(g1CardQueueIndexOffset()).rawValue();
 
         int cardShift = cardTableShift();
         long cardStart = cardTableStart();
@@ -266,13 +300,13 @@
                 cardAddress.writeByte(0, (byte) 0);
                 // If the thread local card queue is full, issue a native call which will
                 // initialize a new one and add the card entry.
-                if (indexValue.notEqual(0)) {
-                    Word nextIndex = indexValue.subtract(wordSize());
-                    Word logAddress = bufferAddress.add(nextIndex);
+                if (indexValue != 0) {
+                    indexValue = indexValue - wordSize();
+                    Word logAddress = bufferAddress.add(Word.unsigned(indexValue));
                     // Log the object to be scanned as well as update
                     // the card queue's next index.
                     logAddress.writeWord(0, cardAddress);
-                    indexAddress.writeWord(0, nextIndex);
+                    indexAddress.writeWord(0, Word.unsigned(indexValue));
                 } else {
                     g1PostBarrierStub(G1WBPOSTCALL, cardAddress);
                 }
@@ -292,10 +326,9 @@
 
     public static class Templates extends AbstractTemplates {
 
-        private final SnippetInfo serialArrayWriteBarrier = snippet(WriteBarrierSnippets.class, "serialArrayWriteBarrier");
+        private final SnippetInfo serialWriteBarrier = snippet(WriteBarrierSnippets.class, "serialWriteBarrier");
         private final SnippetInfo serialArrayRangeWriteBarrier = snippet(WriteBarrierSnippets.class, "serialArrayRangeWriteBarrier");
         private final SnippetInfo g1PreWriteBarrier = snippet(WriteBarrierSnippets.class, "g1PreWriteBarrier");
-
         private final SnippetInfo g1ReferentReadBarrier = snippet(WriteBarrierSnippets.class, "g1PreWriteBarrier");
         private final SnippetInfo g1PostWriteBarrier = snippet(WriteBarrierSnippets.class, "g1PostWriteBarrier");
         private final SnippetInfo g1ArrayRangePreWriteBarrier = snippet(WriteBarrierSnippets.class, "g1ArrayRangePreWriteBarrier");
@@ -305,12 +338,13 @@
             super(runtime, replacements, target);
         }
 
-        public void lower(SerialWriteBarrier arrayWriteBarrier, @SuppressWarnings("unused") LoweringTool tool) {
-            Arguments args = new Arguments(serialArrayWriteBarrier);
-            args.add("obj", arrayWriteBarrier.getObject());
-            args.add("location", arrayWriteBarrier.getLocation());
-            args.addConst("usePrecise", arrayWriteBarrier.usePrecise());
-            template(args).instantiate(runtime, arrayWriteBarrier, DEFAULT_REPLACER, args);
+        public void lower(SerialWriteBarrier writeBarrier, @SuppressWarnings("unused") LoweringTool tool) {
+            Arguments args = new Arguments(serialWriteBarrier);
+            args.add("object", writeBarrier.getObject());
+            args.add("location", writeBarrier.getLocation());
+            args.addConst("usePrecise", writeBarrier.usePrecise());
+            args.addConst("alwaysNull", writeBarrier.getValue().objectStamp().alwaysNull());
+            template(args).instantiate(runtime, writeBarrier, DEFAULT_REPLACER, args);
         }
 
         public void lower(SerialArrayRangeWriteBarrier arrayRangeWriteBarrier, @SuppressWarnings("unused") LoweringTool tool) {
@@ -338,7 +372,7 @@
             args.add("expectedObject", readBarrier.getExpectedObject());
             args.add("location", readBarrier.getLocation());
             args.addConst("doLoad", readBarrier.doLoad());
-            args.addConst("nullCheck", readBarrier.getNullCheck());
+            args.addConst("nullCheck", false);
             args.addConst("trace", traceBarrier());
             template(args).instantiate(runtime, readBarrier, DEFAULT_REPLACER, args);
         }
@@ -349,6 +383,7 @@
             args.add("value", writeBarrierPost.getValue());
             args.add("location", writeBarrierPost.getLocation());
             args.addConst("usePrecise", writeBarrierPost.usePrecise());
+            args.addConst("alwaysNull", writeBarrierPost.getValue().objectStamp().alwaysNull());
             args.addConst("trace", traceBarrier());
             template(args).instantiate(runtime, writeBarrierPost, DEFAULT_REPLACER, args);
         }
@@ -394,4 +429,23 @@
     private static boolean traceBarrier() {
         return GraalOptions.GCDebugStartCycle.getValue() > 0 && ((int) Word.unsigned(HotSpotReplacementsUtil.gcTotalCollectionsAddress()).readLong(0) > GraalOptions.GCDebugStartCycle.getValue());
     }
+
+    /**
+     * Validation helper method which performs sanity checks on write operations. The addresses of
+     * both the object and the value being written are checked in order to determine if they reside
+     * in a valid heap region. If an object is stale, an invalid access is performed in order to
+     * prematurely crash the VM and debug the stack trace of the faulty method.
+     */
+    private static void validateObject(Object parent, Object child) {
+        if (verifyOops() && child != null && !validateOop(VALIDATEOBJECT, parent, child)) {
+            log(true, "Verification ERROR, Parent: %p Child: %p\n", Word.fromObject(parent).rawValue(), Word.fromObject(child).rawValue());
+            DirectObjectStoreNode.storeWord(null, 0, 0, Word.zero());
+        }
+    }
+
+    public static final ForeignCallDescriptor VALIDATEOBJECT = new ForeignCallDescriptor("validate_object", boolean.class, Word.class, Word.class);
+
+    @NodeIntrinsic(ForeignCallNode.class)
+    private static native boolean validateOop(@ConstantNodeParameter ForeignCallDescriptor descriptor, Object parent, Object object);
+
 }
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Arithmetic.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Arithmetic.java	Mon Aug 05 11:25:14 2013 +0200
@@ -34,8 +34,10 @@
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 
-// @formatter:off
 public enum AMD64Arithmetic {
+
+    // @formatter:off
+
     IADD, ISUB, IMUL, IDIV, IDIVREM, IREM, IUDIV, IUREM, IAND, IOR, IXOR, ISHL, ISHR, IUSHR,
     LADD, LSUB, LMUL, LDIV, LDIVREM, LREM, LUDIV, LUREM, LAND, LOR, LXOR, LSHL, LSHR, LUSHR,
     FADD, FSUB, FMUL, FDIV, FREM, FAND, FOR, FXOR,
@@ -53,10 +55,13 @@
      */
     F2I, D2I, F2L, D2L;
 
+    // @formatter:on
+
     /**
-     * Unary operation with separate source and destination operand. 
+     * Unary operation with separate source and destination operand.
      */
     public static class Unary2Op extends AMD64LIRInstruction {
+
         @Opcode private final AMD64Arithmetic opcode;
         @Def({REG}) protected AllocatableValue result;
         @Use({REG, STACK}) protected AllocatableValue x;
@@ -74,9 +79,10 @@
     }
 
     /**
-     * Unary operation with single operand for source and destination. 
+     * Unary operation with single operand for source and destination.
      */
     public static class Unary1Op extends AMD64LIRInstruction {
+
         @Opcode private final AMD64Arithmetic opcode;
         @Def({REG, HINT}) protected AllocatableValue result;
         @Use({REG, STACK}) protected AllocatableValue x;
@@ -95,10 +101,11 @@
     }
 
     /**
-     * Binary operation with two operands. The first source operand is combined with the destination.
-     * The second source operand may be a stack slot. 
+     * Binary operation with two operands. The first source operand is combined with the
+     * destination. The second source operand may be a stack slot.
      */
     public static class BinaryRegStack extends AMD64LIRInstruction {
+
         @Opcode private final AMD64Arithmetic opcode;
         @Def({REG, HINT}) protected AllocatableValue result;
         @Use({REG, STACK}) protected AllocatableValue x;
@@ -126,10 +133,11 @@
     }
 
     /**
-     * Binary operation with two operands. The first source operand is combined with the destination.
-     * The second source operand must be a register. 
+     * Binary operation with two operands. The first source operand is combined with the
+     * destination. The second source operand must be a register.
      */
     public static class BinaryRegReg extends AMD64LIRInstruction {
+
         @Opcode private final AMD64Arithmetic opcode;
         @Def({REG, HINT}) protected AllocatableValue result;
         @Use({REG, STACK}) protected AllocatableValue x;
@@ -160,6 +168,7 @@
      * Binary operation with single source/destination operand and one constant.
      */
     public static class BinaryRegConst extends AMD64LIRInstruction {
+
         @Opcode private final AMD64Arithmetic opcode;
         @Def({REG, HINT}) protected AllocatableValue result;
         @Use({REG, STACK}) protected AllocatableValue x;
@@ -186,9 +195,11 @@
     }
 
     /**
-     * Commutative binary operation with two operands. One of the operands is combined with the result.
+     * Commutative binary operation with two operands. One of the operands is combined with the
+     * result.
      */
     public static class BinaryCommutative extends AMD64LIRInstruction {
+
         @Opcode private final AMD64Arithmetic opcode;
         @Def({REG, HINT}) protected AllocatableValue result;
         @Use({REG, STACK}) protected AllocatableValue x;
@@ -222,6 +233,7 @@
      * Binary operation with separate source and destination and one constant operand.
      */
     public static class BinaryRegStackConst extends AMD64LIRInstruction {
+
         @Opcode private final AMD64Arithmetic opcode;
         @Def({REG}) protected AllocatableValue result;
         @Use({REG, STACK}) protected AllocatableValue x;
@@ -248,6 +260,7 @@
     }
 
     public static class DivRemOp extends AMD64LIRInstruction {
+
         @Opcode private final AMD64Arithmetic opcode;
         @Def protected AllocatableValue divResult;
         @Def protected AllocatableValue remResult;
@@ -272,7 +285,8 @@
         @Override
         protected void verify() {
             super.verify();
-            // left input in rax, right input in any register but rax and rdx, result quotient in rax, result remainder in rdx
+            // left input in rax, right input in any register but rax and rdx, result quotient in
+            // rax, result remainder in rdx
             assert asRegister(x).equals(AMD64.rax);
             assert differentRegisters(y, AMD64.rax.asValue(), AMD64.rdx.asValue());
             verifyKind(opcode, divResult, x, y);
@@ -281,6 +295,7 @@
     }
 
     public static class FPDivRemOp extends AMD64LIRInstruction {
+
         @Opcode private final AMD64Arithmetic opcode;
         @Def protected AllocatableValue result;
         @Use protected AllocatableValue x;
@@ -342,11 +357,20 @@
     @SuppressWarnings("unused")
     protected static void emit(TargetMethodAssembler tasm, AMD64MacroAssembler masm, AMD64Arithmetic opcode, AllocatableValue result) {
         switch (opcode) {
-            case INEG: masm.negl(asIntReg(result)); break;
-            case LNEG: masm.negq(asLongReg(result)); break;
-            case L2I:  masm.andl(asIntReg(result), 0xFFFFFFFF); break;
-            case I2C:  masm.andl(asIntReg(result), 0xFFFF); break;
-            default:   throw GraalInternalError.shouldNotReachHere();
+            case INEG:
+                masm.negl(asIntReg(result));
+                break;
+            case LNEG:
+                masm.negq(asLongReg(result));
+                break;
+            case L2I:
+                masm.andl(asIntReg(result), 0xFFFFFFFF);
+                break;
+            case I2C:
+                masm.andl(asIntReg(result), 0xFFFF);
+                break;
+            default:
+                throw GraalInternalError.shouldNotReachHere();
         }
     }
 
@@ -354,51 +378,139 @@
         int exceptionOffset = -1;
         if (isRegister(src)) {
             switch (opcode) {
-                case IADD: masm.addl(asIntReg(dst),  asIntReg(src)); break;
-                case ISUB: masm.subl(asIntReg(dst),  asIntReg(src)); break;
-                case IAND: masm.andl(asIntReg(dst),  asIntReg(src)); break;
-                case IMUL: masm.imull(asIntReg(dst), asIntReg(src)); break;
-                case IOR:  masm.orl(asIntReg(dst),   asIntReg(src)); break;
-                case IXOR: masm.xorl(asIntReg(dst),  asIntReg(src)); break;
-                case ISHL: assert asIntReg(src).equals(AMD64.rcx); masm.shll(asIntReg(dst)); break;
-                case ISHR: assert asIntReg(src).equals(AMD64.rcx); masm.sarl(asIntReg(dst)); break;
-                case IUSHR: assert asIntReg(src).equals(AMD64.rcx); masm.shrl(asIntReg(dst)); break;
+                case IADD:
+                    masm.addl(asIntReg(dst), asIntReg(src));
+                    break;
+                case ISUB:
+                    masm.subl(asIntReg(dst), asIntReg(src));
+                    break;
+                case IAND:
+                    masm.andl(asIntReg(dst), asIntReg(src));
+                    break;
+                case IMUL:
+                    masm.imull(asIntReg(dst), asIntReg(src));
+                    break;
+                case IOR:
+                    masm.orl(asIntReg(dst), asIntReg(src));
+                    break;
+                case IXOR:
+                    masm.xorl(asIntReg(dst), asIntReg(src));
+                    break;
+                case ISHL:
+                    assert asIntReg(src).equals(AMD64.rcx);
+                    masm.shll(asIntReg(dst));
+                    break;
+                case ISHR:
+                    assert asIntReg(src).equals(AMD64.rcx);
+                    masm.sarl(asIntReg(dst));
+                    break;
+                case IUSHR:
+                    assert asIntReg(src).equals(AMD64.rcx);
+                    masm.shrl(asIntReg(dst));
+                    break;
 
-                case LADD: masm.addq(asLongReg(dst),  asLongReg(src)); break;
-                case LSUB: masm.subq(asLongReg(dst),  asLongReg(src)); break;
-                case LMUL: masm.imulq(asLongReg(dst), asLongReg(src)); break;
-                case LAND: masm.andq(asLongReg(dst),  asLongReg(src)); break;
-                case LOR:  masm.orq(asLongReg(dst),   asLongReg(src)); break;
-                case LXOR: masm.xorq(asLongReg(dst),  asLongReg(src)); break;
-                case LSHL: assert asIntReg(src).equals(AMD64.rcx); masm.shlq(asLongReg(dst)); break;
-                case LSHR: assert asIntReg(src).equals(AMD64.rcx); masm.sarq(asLongReg(dst)); break;
-                case LUSHR: assert asIntReg(src).equals(AMD64.rcx); masm.shrq(asLongReg(dst)); break;
+                case LADD:
+                    masm.addq(asLongReg(dst), asLongReg(src));
+                    break;
+                case LSUB:
+                    masm.subq(asLongReg(dst), asLongReg(src));
+                    break;
+                case LMUL:
+                    masm.imulq(asLongReg(dst), asLongReg(src));
+                    break;
+                case LAND:
+                    masm.andq(asLongReg(dst), asLongReg(src));
+                    break;
+                case LOR:
+                    masm.orq(asLongReg(dst), asLongReg(src));
+                    break;
+                case LXOR:
+                    masm.xorq(asLongReg(dst), asLongReg(src));
+                    break;
+                case LSHL:
+                    assert asIntReg(src).equals(AMD64.rcx);
+                    masm.shlq(asLongReg(dst));
+                    break;
+                case LSHR:
+                    assert asIntReg(src).equals(AMD64.rcx);
+                    masm.sarq(asLongReg(dst));
+                    break;
+                case LUSHR:
+                    assert asIntReg(src).equals(AMD64.rcx);
+                    masm.shrq(asLongReg(dst));
+                    break;
 
-                case FADD: masm.addss(asFloatReg(dst), asFloatReg(src)); break;
-                case FSUB: masm.subss(asFloatReg(dst), asFloatReg(src)); break;
-                case FMUL: masm.mulss(asFloatReg(dst), asFloatReg(src)); break;
-                case FDIV: masm.divss(asFloatReg(dst), asFloatReg(src)); break;
-                case FAND: masm.andps(asFloatReg(dst), asFloatReg(src)); break;
-                case FOR:  masm.orps(asFloatReg(dst),  asFloatReg(src)); break;
-                case FXOR: masm.xorps(asFloatReg(dst), asFloatReg(src)); break;
+                case FADD:
+                    masm.addss(asFloatReg(dst), asFloatReg(src));
+                    break;
+                case FSUB:
+                    masm.subss(asFloatReg(dst), asFloatReg(src));
+                    break;
+                case FMUL:
+                    masm.mulss(asFloatReg(dst), asFloatReg(src));
+                    break;
+                case FDIV:
+                    masm.divss(asFloatReg(dst), asFloatReg(src));
+                    break;
+                case FAND:
+                    masm.andps(asFloatReg(dst), asFloatReg(src));
+                    break;
+                case FOR:
+                    masm.orps(asFloatReg(dst), asFloatReg(src));
+                    break;
+                case FXOR:
+                    masm.xorps(asFloatReg(dst), asFloatReg(src));
+                    break;
 
-                case DADD: masm.addsd(asDoubleReg(dst), asDoubleReg(src)); break;
-                case DSUB: masm.subsd(asDoubleReg(dst), asDoubleReg(src)); break;
-                case DMUL: masm.mulsd(asDoubleReg(dst), asDoubleReg(src)); break;
-                case DDIV: masm.divsd(asDoubleReg(dst), asDoubleReg(src)); break;
-                case DAND: masm.andpd(asDoubleReg(dst), asDoubleReg(src)); break;
-                case DOR:  masm.orpd(asDoubleReg(dst),  asDoubleReg(src)); break;
-                case DXOR: masm.xorpd(asDoubleReg(dst), asDoubleReg(src)); break;
+                case DADD:
+                    masm.addsd(asDoubleReg(dst), asDoubleReg(src));
+                    break;
+                case DSUB:
+                    masm.subsd(asDoubleReg(dst), asDoubleReg(src));
+                    break;
+                case DMUL:
+                    masm.mulsd(asDoubleReg(dst), asDoubleReg(src));
+                    break;
+                case DDIV:
+                    masm.divsd(asDoubleReg(dst), asDoubleReg(src));
+                    break;
+                case DAND:
+                    masm.andpd(asDoubleReg(dst), asDoubleReg(src));
+                    break;
+                case DOR:
+                    masm.orpd(asDoubleReg(dst), asDoubleReg(src));
+                    break;
+                case DXOR:
+                    masm.xorpd(asDoubleReg(dst), asDoubleReg(src));
+                    break;
 
-                case I2B: masm.movsxb(asIntReg(dst), asIntReg(src)); break;
-                case I2S: masm.movsxw(asIntReg(dst), asIntReg(src)); break;
-                case I2L: masm.movslq(asLongReg(dst), asIntReg(src)); break;
-                case F2D: masm.cvtss2sd(asDoubleReg(dst), asFloatReg(src)); break;
-                case D2F: masm.cvtsd2ss(asFloatReg(dst), asDoubleReg(src)); break;
-                case I2F: masm.cvtsi2ssl(asFloatReg(dst), asIntReg(src)); break;
-                case I2D: masm.cvtsi2sdl(asDoubleReg(dst), asIntReg(src)); break;
-                case L2F: masm.cvtsi2ssq(asFloatReg(dst), asLongReg(src)); break;
-                case L2D: masm.cvtsi2sdq(asDoubleReg(dst), asLongReg(src)); break;
+                case I2B:
+                    masm.movsxb(asIntReg(dst), asIntReg(src));
+                    break;
+                case I2S:
+                    masm.movsxw(asIntReg(dst), asIntReg(src));
+                    break;
+                case I2L:
+                    masm.movslq(asLongReg(dst), asIntReg(src));
+                    break;
+                case F2D:
+                    masm.cvtss2sd(asDoubleReg(dst), asFloatReg(src));
+                    break;
+                case D2F:
+                    masm.cvtsd2ss(asFloatReg(dst), asDoubleReg(src));
+                    break;
+                case I2F:
+                    masm.cvtsi2ssl(asFloatReg(dst), asIntReg(src));
+                    break;
+                case I2D:
+                    masm.cvtsi2sdl(asDoubleReg(dst), asIntReg(src));
+                    break;
+                case L2F:
+                    masm.cvtsi2ssq(asFloatReg(dst), asLongReg(src));
+                    break;
+                case L2D:
+                    masm.cvtsi2sdq(asDoubleReg(dst), asLongReg(src));
+                    break;
                 case F2I:
                     masm.cvttss2sil(asIntReg(dst), asFloatReg(src));
                     break;
@@ -411,10 +523,18 @@
                 case D2L:
                     masm.cvttsd2siq(asLongReg(dst), asDoubleReg(src));
                     break;
-                case MOV_I2F: masm.movdl(asFloatReg(dst), asIntReg(src)); break;
-                case MOV_L2D: masm.movdq(asDoubleReg(dst), asLongReg(src)); break;
-                case MOV_F2I: masm.movdl(asIntReg(dst), asFloatReg(src)); break;
-                case MOV_D2L: masm.movdq(asLongReg(dst), asDoubleReg(src)); break;
+                case MOV_I2F:
+                    masm.movdl(asFloatReg(dst), asIntReg(src));
+                    break;
+                case MOV_L2D:
+                    masm.movdq(asDoubleReg(dst), asLongReg(src));
+                    break;
+                case MOV_F2I:
+                    masm.movdl(asIntReg(dst), asFloatReg(src));
+                    break;
+                case MOV_D2L:
+                    masm.movdq(asLongReg(dst), asDoubleReg(src));
+                    break;
 
                 case IDIVREM:
                 case IDIV:
@@ -452,78 +572,201 @@
             }
         } else if (isConstant(src)) {
             switch (opcode) {
-                case IADD: masm.incrementl(asIntReg(dst), tasm.asIntConst(src)); break;
-                case ISUB: masm.decrementl(asIntReg(dst), tasm.asIntConst(src)); break;
-                case IMUL: masm.imull(asIntReg(dst), asIntReg(dst), tasm.asIntConst(src)); break;
-                case IAND: masm.andl(asIntReg(dst), tasm.asIntConst(src)); break;
-                case IOR:  masm.orl(asIntReg(dst),  tasm.asIntConst(src)); break;
-                case IXOR: masm.xorl(asIntReg(dst), tasm.asIntConst(src)); break;
-                case ISHL: masm.shll(asIntReg(dst), tasm.asIntConst(src) & 31); break;
-                case ISHR: masm.sarl(asIntReg(dst), tasm.asIntConst(src) & 31); break;
-                case IUSHR:masm.shrl(asIntReg(dst), tasm.asIntConst(src) & 31); break;
+                case IADD:
+                    masm.incrementl(asIntReg(dst), tasm.asIntConst(src));
+                    break;
+                case ISUB:
+                    masm.decrementl(asIntReg(dst), tasm.asIntConst(src));
+                    break;
+                case IMUL:
+                    masm.imull(asIntReg(dst), asIntReg(dst), tasm.asIntConst(src));
+                    break;
+                case IAND:
+                    masm.andl(asIntReg(dst), tasm.asIntConst(src));
+                    break;
+                case IOR:
+                    masm.orl(asIntReg(dst), tasm.asIntConst(src));
+                    break;
+                case IXOR:
+                    masm.xorl(asIntReg(dst), tasm.asIntConst(src));
+                    break;
+                case ISHL:
+                    masm.shll(asIntReg(dst), tasm.asIntConst(src) & 31);
+                    break;
+                case ISHR:
+                    masm.sarl(asIntReg(dst), tasm.asIntConst(src) & 31);
+                    break;
+                case IUSHR:
+                    masm.shrl(asIntReg(dst), tasm.asIntConst(src) & 31);
+                    break;
 
-                case LADD: masm.addq(asLongReg(dst), tasm.asIntConst(src)); break;
-                case LSUB: masm.subq(asLongReg(dst), tasm.asIntConst(src)); break;
-                case LMUL: masm.imulq(asLongReg(dst), asLongReg(dst), tasm.asIntConst(src)); break;
-                case LAND: masm.andq(asLongReg(dst), tasm.asIntConst(src)); break;
-                case LOR:  masm.orq(asLongReg(dst),  tasm.asIntConst(src)); break;
-                case LXOR: masm.xorq(asLongReg(dst), tasm.asIntConst(src)); break;
-                case LSHL: masm.shlq(asLongReg(dst), tasm.asIntConst(src) & 63); break;
-                case LSHR: masm.sarq(asLongReg(dst), tasm.asIntConst(src) & 63); break;
-                case LUSHR:masm.shrq(asLongReg(dst), tasm.asIntConst(src) & 63); break;
+                case LADD:
+                    masm.addq(asLongReg(dst), tasm.asIntConst(src));
+                    break;
+                case LSUB:
+                    masm.subq(asLongReg(dst), tasm.asIntConst(src));
+                    break;
+                case LMUL:
+                    masm.imulq(asLongReg(dst), asLongReg(dst), tasm.asIntConst(src));
+                    break;
+                case LAND:
+                    masm.andq(asLongReg(dst), tasm.asIntConst(src));
+                    break;
+                case LOR:
+                    masm.orq(asLongReg(dst), tasm.asIntConst(src));
+                    break;
+                case LXOR:
+                    masm.xorq(asLongReg(dst), tasm.asIntConst(src));
+                    break;
+                case LSHL:
+                    masm.shlq(asLongReg(dst), tasm.asIntConst(src) & 63);
+                    break;
+                case LSHR:
+                    masm.sarq(asLongReg(dst), tasm.asIntConst(src) & 63);
+                    break;
+                case LUSHR:
+                    masm.shrq(asLongReg(dst), tasm.asIntConst(src) & 63);
+                    break;
 
-                case FADD: masm.addss(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src)); break;
-                case FSUB: masm.subss(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src)); break;
-                case FMUL: masm.mulss(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src)); break;
-                case FAND: masm.andps(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src, 16)); break;
-                case FOR:  masm.orps(asFloatReg(dst),  (AMD64Address) tasm.asFloatConstRef(src, 16)); break;
-                case FXOR: masm.xorps(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src, 16)); break;
-                case FDIV: masm.divss(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src)); break;
+                case FADD:
+                    masm.addss(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src));
+                    break;
+                case FSUB:
+                    masm.subss(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src));
+                    break;
+                case FMUL:
+                    masm.mulss(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src));
+                    break;
+                case FAND:
+                    masm.andps(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src, 16));
+                    break;
+                case FOR:
+                    masm.orps(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src, 16));
+                    break;
+                case FXOR:
+                    masm.xorps(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src, 16));
+                    break;
+                case FDIV:
+                    masm.divss(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src));
+                    break;
 
-                case DADD: masm.addsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src)); break;
-                case DSUB: masm.subsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src)); break;
-                case DMUL: masm.mulsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src)); break;
-                case DDIV: masm.divsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src)); break;
-                case DAND: masm.andpd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src, 16)); break;
-                case DOR:  masm.orpd(asDoubleReg(dst),  (AMD64Address) tasm.asDoubleConstRef(src, 16)); break;
-                case DXOR: masm.xorpd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src, 16)); break;
-                default:   throw GraalInternalError.shouldNotReachHere();
+                case DADD:
+                    masm.addsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src));
+                    break;
+                case DSUB:
+                    masm.subsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src));
+                    break;
+                case DMUL:
+                    masm.mulsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src));
+                    break;
+                case DDIV:
+                    masm.divsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src));
+                    break;
+                case DAND:
+                    masm.andpd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src, 16));
+                    break;
+                case DOR:
+                    masm.orpd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src, 16));
+                    break;
+                case DXOR:
+                    masm.xorpd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src, 16));
+                    break;
+                default:
+                    throw GraalInternalError.shouldNotReachHere();
             }
         } else {
             switch (opcode) {
-                case IADD: masm.addl(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); break;
-                case ISUB: masm.subl(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); break;
-                case IAND: masm.andl(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); break;
-                case IMUL: masm.imull(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); break;
-                case IOR:  masm.orl(asIntReg(dst),  (AMD64Address) tasm.asIntAddr(src)); break;
-                case IXOR: masm.xorl(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); break;
+                case IADD:
+                    masm.addl(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src));
+                    break;
+                case ISUB:
+                    masm.subl(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src));
+                    break;
+                case IAND:
+                    masm.andl(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src));
+                    break;
+                case IMUL:
+                    masm.imull(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src));
+                    break;
+                case IOR:
+                    masm.orl(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src));
+                    break;
+                case IXOR:
+                    masm.xorl(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src));
+                    break;
 
-                case LADD: masm.addq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src)); break;
-                case LSUB: masm.subq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src)); break;
-                case LMUL: masm.imulq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src)); break;
-                case LAND: masm.andq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src)); break;
-                case LOR:  masm.orq(asLongReg(dst),  (AMD64Address) tasm.asLongAddr(src)); break;
-                case LXOR: masm.xorq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src)); break;
+                case LADD:
+                    masm.addq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src));
+                    break;
+                case LSUB:
+                    masm.subq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src));
+                    break;
+                case LMUL:
+                    masm.imulq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src));
+                    break;
+                case LAND:
+                    masm.andq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src));
+                    break;
+                case LOR:
+                    masm.orq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src));
+                    break;
+                case LXOR:
+                    masm.xorq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src));
+                    break;
 
-                case FADD: masm.addss(asFloatReg(dst), (AMD64Address) tasm.asFloatAddr(src)); break;
-                case FSUB: masm.subss(asFloatReg(dst), (AMD64Address) tasm.asFloatAddr(src)); break;
-                case FMUL: masm.mulss(asFloatReg(dst), (AMD64Address) tasm.asFloatAddr(src)); break;
-                case FDIV: masm.divss(asFloatReg(dst), (AMD64Address) tasm.asFloatAddr(src)); break;
+                case FADD:
+                    masm.addss(asFloatReg(dst), (AMD64Address) tasm.asFloatAddr(src));
+                    break;
+                case FSUB:
+                    masm.subss(asFloatReg(dst), (AMD64Address) tasm.asFloatAddr(src));
+                    break;
+                case FMUL:
+                    masm.mulss(asFloatReg(dst), (AMD64Address) tasm.asFloatAddr(src));
+                    break;
+                case FDIV:
+                    masm.divss(asFloatReg(dst), (AMD64Address) tasm.asFloatAddr(src));
+                    break;
 
-                case DADD: masm.addsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleAddr(src)); break;
-                case DSUB: masm.subsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleAddr(src)); break;
-                case DMUL: masm.mulsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleAddr(src)); break;
-                case DDIV: masm.divsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleAddr(src)); break;
+                case DADD:
+                    masm.addsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleAddr(src));
+                    break;
+                case DSUB:
+                    masm.subsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleAddr(src));
+                    break;
+                case DMUL:
+                    masm.mulsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleAddr(src));
+                    break;
+                case DDIV:
+                    masm.divsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleAddr(src));
+                    break;
 
-                case I2B: masm.movsxb(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); break;
-                case I2S: masm.movsxw(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); break;
-                case I2L: masm.movslq(asLongReg(dst), (AMD64Address) tasm.asIntAddr(src)); break;
-                case F2D: masm.cvtss2sd(asDoubleReg(dst), (AMD64Address) tasm.asFloatAddr(src)); break;
-                case D2F: masm.cvtsd2ss(asFloatReg(dst), (AMD64Address) tasm.asDoubleAddr(src)); break;
-                case I2F: masm.cvtsi2ssl(asFloatReg(dst), (AMD64Address) tasm.asIntAddr(src)); break;
-                case I2D: masm.cvtsi2sdl(asDoubleReg(dst), (AMD64Address) tasm.asIntAddr(src)); break;
-                case L2F: masm.cvtsi2ssq(asFloatReg(dst), (AMD64Address) tasm.asLongAddr(src)); break;
-                case L2D: masm.cvtsi2sdq(asDoubleReg(dst), (AMD64Address) tasm.asLongAddr(src)); break;
+                case I2B:
+                    masm.movsxb(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src));
+                    break;
+                case I2S:
+                    masm.movsxw(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src));
+                    break;
+                case I2L:
+                    masm.movslq(asLongReg(dst), (AMD64Address) tasm.asIntAddr(src));
+                    break;
+                case F2D:
+                    masm.cvtss2sd(asDoubleReg(dst), (AMD64Address) tasm.asFloatAddr(src));
+                    break;
+                case D2F:
+                    masm.cvtsd2ss(asFloatReg(dst), (AMD64Address) tasm.asDoubleAddr(src));
+                    break;
+                case I2F:
+                    masm.cvtsi2ssl(asFloatReg(dst), (AMD64Address) tasm.asIntAddr(src));
+                    break;
+                case I2D:
+                    masm.cvtsi2sdl(asDoubleReg(dst), (AMD64Address) tasm.asIntAddr(src));
+                    break;
+                case L2F:
+                    masm.cvtsi2ssq(asFloatReg(dst), (AMD64Address) tasm.asLongAddr(src));
+                    break;
+                case L2D:
+                    masm.cvtsi2sdq(asDoubleReg(dst), (AMD64Address) tasm.asLongAddr(src));
+                    break;
                 case F2I:
                     masm.cvttss2sil(asIntReg(dst), (AMD64Address) tasm.asFloatAddr(src));
                     break;
@@ -536,12 +779,21 @@
                 case D2L:
                     masm.cvttsd2siq(asLongReg(dst), (AMD64Address) tasm.asDoubleAddr(src));
                     break;
-                case MOV_I2F: masm.movss(asFloatReg(dst), (AMD64Address) tasm.asIntAddr(src)); break;
-                case MOV_L2D: masm.movsd(asDoubleReg(dst), (AMD64Address) tasm.asLongAddr(src)); break;
-                case MOV_F2I: masm.movl(asIntReg(dst), (AMD64Address) tasm.asFloatAddr(src)); break;
-                case MOV_D2L: masm.movq(asLongReg(dst), (AMD64Address) tasm.asDoubleAddr(src)); break;
+                case MOV_I2F:
+                    masm.movss(asFloatReg(dst), (AMD64Address) tasm.asIntAddr(src));
+                    break;
+                case MOV_L2D:
+                    masm.movsd(asDoubleReg(dst), (AMD64Address) tasm.asLongAddr(src));
+                    break;
+                case MOV_F2I:
+                    masm.movl(asIntReg(dst), (AMD64Address) tasm.asFloatAddr(src));
+                    break;
+                case MOV_D2L:
+                    masm.movq(asLongReg(dst), (AMD64Address) tasm.asDoubleAddr(src));
+                    break;
 
-                default:   throw GraalInternalError.shouldNotReachHere();
+                default:
+                    throw GraalInternalError.shouldNotReachHere();
             }
         }
 
@@ -552,10 +804,10 @@
     }
 
     private static void verifyKind(AMD64Arithmetic opcode, Value result, Value x, Value y) {
-        assert (opcode.name().startsWith("I") && result.getKind() == Kind.Int && x.getKind().getStackKind() == Kind.Int && y.getKind().getStackKind() == Kind.Int)
-            || (opcode.name().startsWith("L") && result.getKind() == Kind.Long && x.getKind() == Kind.Long && y.getKind() == Kind.Long)
-            || (opcode.name().startsWith("F") && result.getKind() == Kind.Float && x.getKind() == Kind.Float && y.getKind() == Kind.Float)
-            || (opcode.name().startsWith("D") && result.getKind() == Kind.Double && x.getKind() == Kind.Double && y.getKind() == Kind.Double)
-            || (opcode.name().matches(".U?SH.") && result.getKind() == x.getKind() && y.getKind() == Kind.Int && (isConstant(y) || asRegister(y).equals(AMD64.rcx)));
+        assert (opcode.name().startsWith("I") && result.getKind() == Kind.Int && x.getKind().getStackKind() == Kind.Int && y.getKind().getStackKind() == Kind.Int) ||
+                        (opcode.name().startsWith("L") && result.getKind() == Kind.Long && x.getKind() == Kind.Long && y.getKind() == Kind.Long) ||
+                        (opcode.name().startsWith("F") && result.getKind() == Kind.Float && x.getKind() == Kind.Float && y.getKind() == Kind.Float) ||
+                        (opcode.name().startsWith("D") && result.getKind() == Kind.Double && x.getKind() == Kind.Double && y.getKind() == Kind.Double) ||
+                        (opcode.name().matches(".U?SH.") && result.getKind() == x.getKind() && y.getKind() == Kind.Int && (isConstant(y) || asRegister(y).equals(AMD64.rcx)));
     }
 }
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ControlFlow.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ControlFlow.java	Mon Aug 05 11:25:14 2013 +0200
@@ -168,11 +168,11 @@
                     masm.jcc(ConditionFlag.Equal, keyTargets[i].label());
                 }
             } else if (key.getKind() == Kind.Object) {
-                Register intKey = asObjectReg(key);
+                Register objectKey = asObjectReg(key);
                 Register temp = asObjectReg(scratch);
                 for (int i = 0; i < keyConstants.length; i++) {
                     AMD64Move.move(tasm, masm, temp.asValue(Kind.Object), keyConstants[i]);
-                    masm.cmpptr(intKey, temp);
+                    masm.cmpptr(objectKey, temp);
                     masm.jcc(ConditionFlag.Equal, keyTargets[i].label());
                 }
             } else {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64FrameMap.java	Mon Aug 05 11:25:14 2013 +0200
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.lir.*;
+
+/**
+ * AMD64 specific frame map.
+ * 
+ * This is the format of an AMD64 stack frame:
+ * 
+ * <pre>
+ *   Base       Contents
+ * 
+ *            :                                :  -----
+ *   caller   | incoming overflow argument n   |    ^
+ *   frame    :     ...                        :    | positive
+ *            | incoming overflow argument 0   |    | offsets
+ *   ---------+--------------------------------+---------------------
+ *            | return address                 |    |            ^
+ *   current  +--------------------------------+    |            |    -----
+ *   frame    |                                |    |            |      ^
+ *            : callee save area               :    |            |      |
+ *            |                                |    |            |      |
+ *            +--------------------------------+    |            |      |
+ *            | spill slot 0                   |    | negative   |      |
+ *            :     ...                        :    v offsets    |      |
+ *            | spill slot n                   |  -----        total  frame
+ *            +--------------------------------+               frame  size
+ *            | alignment padding              |               size     |
+ *            +--------------------------------+  -----          |      |
+ *            | outgoing overflow argument n   |    ^            |      |
+ *            :     ...                        :    | positive   |      |
+ *            | outgoing overflow argument 0   |    | offsets    v      v
+ *    %sp-->  +--------------------------------+---------------------------
+ * 
+ * </pre>
+ * 
+ * The spill slot area also includes stack allocated memory blocks (ALLOCA blocks). The size of such
+ * a block may be greater than the size of a normal spill slot or the word size.
+ * <p>
+ * A runtime can reserve space at the beginning of the overflow argument area. The calling
+ * convention can specify that the first overflow stack argument is not at offset 0, but at a
+ * specified offset. Use {@link CodeCacheProvider#getMinimumOutgoingSize()} to make sure that
+ * call-free methods also have this space reserved. Then the VM can use the memory at offset 0
+ * relative to the stack pointer.
+ */
+public final class AMD64FrameMap extends FrameMap {
+
+    public AMD64FrameMap(CodeCacheProvider runtime, TargetDescription target, RegisterConfig registerConfig) {
+        super(runtime, target, registerConfig);
+        // (negative) offset relative to sp + total frame size
+        initialSpillSize = returnAddressSize() + calleeSaveAreaSize();
+        spillSize = initialSpillSize;
+    }
+
+    @Override
+    public int totalFrameSize() {
+        return frameSize() + returnAddressSize();
+    }
+
+    @Override
+    public int currentFrameSize() {
+        return alignFrameSize(outgoingSize + spillSize - returnAddressSize());
+    }
+
+    @Override
+    protected int alignFrameSize(int size) {
+        int x = size + returnAddressSize() + (target.stackAlignment - 1);
+        return (x / target.stackAlignment) * target.stackAlignment - returnAddressSize();
+    }
+
+    @Override
+    public int offsetToCalleeSaveArea() {
+        return frameSize() - calleeSaveAreaSize();
+    }
+
+    @Override
+    protected StackSlot allocateNewSpillSlot(PlatformKind kind, int additionalOffset) {
+        return StackSlot.get(kind, -spillSize + additionalOffset, true);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.hsail/src/com/oracle/graal/lir/hsail/HSAILFrameMap.java	Mon Aug 05 11:25:14 2013 +0200
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.hsail;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.lir.*;
+
+/**
+ * HSAIL specific frame map.
+ * 
+ * This is the format of a HSAIL stack frame:
+ * 
+ * <pre>
+ * TODO stack frame layout
+ * </pre>
+ */
+public final class HSAILFrameMap extends FrameMap {
+
+    public HSAILFrameMap(CodeCacheProvider runtime, TargetDescription target, RegisterConfig registerConfig) {
+        super(runtime, target, registerConfig);
+    }
+
+    @Override
+    public int totalFrameSize() {
+        // FIXME return some sane values
+        return frameSize();
+    }
+
+    @Override
+    public int currentFrameSize() {
+        // FIXME return some sane values
+        return alignFrameSize(outgoingSize + spillSize);
+    }
+
+    @Override
+    protected int alignFrameSize(int size) {
+        // FIXME return some sane values
+        int x = size + (target.stackAlignment - 1);
+        return (x / target.stackAlignment) * target.stackAlignment;
+    }
+
+    @Override
+    public int offsetToCalleeSaveArea() {
+        return frameSize() - calleeSaveAreaSize();
+    }
+
+    @Override
+    protected StackSlot allocateNewSpillSlot(PlatformKind kind, int additionalOffset) {
+        return StackSlot.get(kind, -spillSize + additionalOffset, true);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.ptx/src/com/oracle/graal/lir/ptx/PTXFrameMap.java	Mon Aug 05 11:25:14 2013 +0200
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.ptx;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.lir.*;
+
+/**
+ * PTX specific frame map.
+ * 
+ * This is the format of a PTX stack frame:
+ * 
+ * <pre>
+ * TODO stack frame layout
+ * </pre>
+ */
+public final class PTXFrameMap extends FrameMap {
+
+    public PTXFrameMap(CodeCacheProvider runtime, TargetDescription target, RegisterConfig registerConfig) {
+        super(runtime, target, registerConfig);
+    }
+
+    @Override
+    public int totalFrameSize() {
+        // FIXME return some sane values
+        return frameSize();
+    }
+
+    @Override
+    public int currentFrameSize() {
+        // FIXME return some sane values
+        return alignFrameSize(outgoingSize + spillSize);
+    }
+
+    @Override
+    protected int alignFrameSize(int size) {
+        // FIXME return some sane values
+        int x = size + (target.stackAlignment - 1);
+        return (x / target.stackAlignment) * target.stackAlignment;
+    }
+
+    @Override
+    public int offsetToCalleeSaveArea() {
+        return frameSize() - calleeSaveAreaSize();
+    }
+
+    @Override
+    protected StackSlot allocateNewSpillSlot(PlatformKind kind, int additionalOffset) {
+        return StackSlot.get(kind, -spillSize + additionalOffset, true);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCFrameMap.java	Mon Aug 05 11:25:14 2013 +0200
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.sparc;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.lir.*;
+
+/**
+ * SPARC specific frame map.
+ * 
+ * This is the format of a SPARC stack frame:
+ * 
+ * <pre>
+ *   Base       Contents
+ * 
+ *            :                                :  -----
+ *   caller   | incoming overflow argument n   |    ^
+ *   frame    :     ...                        :    | positive
+ *            | incoming overflow argument 0   |    | offsets
+ *   ---------+--------------------------------+---------------------------
+ *            | spill slot 0                   |    | negative   ^      ^
+ *            :     ...                        :    v offsets    |      |
+ *            | spill slot n                   |  -----        total    |
+ *            +--------------------------------+               frame    |
+ *   current  | alignment padding              |               size     |
+ *   frame    +--------------------------------+  -----          |      |
+ *            | outgoing overflow argument n   |    ^            |    frame
+ *            :     ...                        :    | positive   |    size
+ *            | outgoing overflow argument 0   |    | offsets    |      |
+ *            +--------------------------------+    |            |      |
+ *            | return address                 |    |            |      |
+ *            +--------------------------------+    |            |      |
+ *            |                                |    |            |      |
+ *            : callee save area               :    |            |      |
+ *            |                                |    |            v      v
+ *    %sp-->  +--------------------------------+---------------------------
+ * 
+ * </pre>
+ * 
+ * The spill slot area also includes stack allocated memory blocks (ALLOCA blocks). The size of such
+ * a block may be greater than the size of a normal spill slot or the word size.
+ * <p>
+ * A runtime can reserve space at the beginning of the overflow argument area. The calling
+ * convention can specify that the first overflow stack argument is not at offset 0, but at a
+ * specified offset. Use {@link CodeCacheProvider#getMinimumOutgoingSize()} to make sure that
+ * call-free methods also have this space reserved. Then the VM can use the memory at offset 0
+ * relative to the stack pointer.
+ */
+public final class SPARCFrameMap extends FrameMap {
+
+    public SPARCFrameMap(CodeCacheProvider runtime, TargetDescription target, RegisterConfig registerConfig) {
+        super(runtime, target, registerConfig);
+        // offset relative to sp + total frame size
+        initialSpillSize = 0;
+        spillSize = initialSpillSize;
+    }
+
+    @Override
+    public int totalFrameSize() {
+        return frameSize();
+    }
+
+    @Override
+    public int currentFrameSize() {
+        return alignFrameSize(calleeSaveAreaSize() + returnAddressSize() + outgoingSize + spillSize);
+    }
+
+    @Override
+    protected int alignFrameSize(int size) {
+        int x = size + (target.stackAlignment - 1);
+        return (x / target.stackAlignment) * target.stackAlignment;
+    }
+
+    @Override
+    public int offsetToCalleeSaveArea() {
+        return 0;
+    }
+
+    @Override
+    protected StackSlot allocateNewSpillSlot(PlatformKind kind, int additionalOffset) {
+        return StackSlot.get(kind, -spillSize + additionalOffset, true);
+    }
+}
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/FrameMap.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/FrameMap.java	Mon Aug 05 11:25:14 2013 +0200
@@ -37,46 +37,8 @@
  * area and the spill are can grow until then. Therefore, outgoing arguments are indexed from the
  * stack pointer, while spill slots are indexed from the beginning of the frame (and the total frame
  * size has to be added to get the actual offset from the stack pointer).
- * <p>
- * This is the format of a stack frame:
- * 
- * <pre>
- *   Base       Contents
- * 
- *            :                                :  -----
- *   caller   | incoming overflow argument n   |    ^
- *   frame    :     ...                        :    | positive
- *            | incoming overflow argument 0   |    | offsets
- *   ---------+--------------------------------+---------------------
- *            | return address                 |    |            ^
- *   current  +--------------------------------+    |            |    -----
- *   frame    |                                |    |            |      ^
- *            : callee save area               :    |            |      |
- *            |                                |    |            |      |
- *            +--------------------------------+    |            |      |
- *            | spill slot 0                   |    | negative   |      |
- *            :     ...                        :    v offsets    |      |
- *            | spill slot n                   |  -----        total  frame
- *            +--------------------------------+               frame  size
- *            | alignment padding              |               size     |
- *            +--------------------------------+  -----          |      |
- *            | outgoing overflow argument n   |    ^            |      |
- *            :     ...                        :    | positive   |      |
- *            | outgoing overflow argument 0   |    | offsets    v      v
- *    %sp-->  +--------------------------------+---------------------------
- * 
- * </pre>
- * 
- * The spill slot area also includes stack allocated memory blocks (ALLOCA blocks). The size of such
- * a block may be greater than the size of a normal spill slot or the word size.
- * <p>
- * A runtime can reserve space at the beginning of the overflow argument area. The calling
- * convention can specify that the first overflow stack argument is not at offset 0, but at a
- * specified offset. Use {@link CodeCacheProvider#getMinimumOutgoingSize()} to make sure that
- * call-free methods also have this space reserved. Then the VM can use the memory at offset 0
- * relative to the stack pointer.
  */
-public final class FrameMap {
+public abstract class FrameMap {
 
     public final CodeCacheProvider runtime;
     public final TargetDescription target;
@@ -90,16 +52,21 @@
     private int frameSize;
 
     /**
+     * Initial size of the area occupied by spill slots and other stack-allocated memory blocks.
+     */
+    protected int initialSpillSize;
+
+    /**
      * Size of the area occupied by spill slots and other stack-allocated memory blocks.
      */
-    private int spillSize;
+    protected int spillSize;
 
     /**
      * Size of the area occupied by outgoing overflow arguments. This value is adjusted as calling
      * conventions for outgoing calls are retrieved. On some platforms, there is a minimum outgoing
      * size even if no overflow arguments are on the stack.
      */
-    private int outgoingSize;
+    protected int outgoingSize;
 
     /**
      * Determines if this frame has values on the stack for outgoing calls.
@@ -125,16 +92,15 @@
         this.target = target;
         this.registerConfig = registerConfig;
         this.frameSize = -1;
-        this.spillSize = returnAddressSize() + calleeSaveAreaSize();
         this.outgoingSize = runtime.getMinimumOutgoingSize();
         this.objectStackBlocks = new ArrayList<>();
     }
 
-    private int returnAddressSize() {
+    protected int returnAddressSize() {
         return target.arch.getReturnAddressSize();
     }
 
-    private int calleeSaveAreaSize() {
+    protected int calleeSaveAreaSize() {
         CalleeSaveLayout csl = registerConfig.getCalleeSaveLayout();
         return csl != null ? csl.size : 0;
     }
@@ -173,17 +139,21 @@
      * 
      * @return The total size of the frame (in bytes).
      */
-    public int totalFrameSize() {
-        return frameSize() + returnAddressSize();
-    }
+    public abstract int totalFrameSize();
 
     /**
      * Gets the current size of this frame. This is the size that would be returned by
      * {@link #frameSize()} if {@link #finish()} were called now.
      */
-    public int currentFrameSize() {
-        return target.alignFrameSize(outgoingSize + spillSize - returnAddressSize());
-    }
+    public abstract int currentFrameSize();
+
+    /**
+     * Aligns the given frame size to the stack alignment size and returns the aligned size.
+     * 
+     * @param size the initial frame size to be aligned
+     * @return the aligned frame size
+     */
+    protected abstract int alignFrameSize(int size);
 
     /**
      * Computes the final size of this frame. After this method has been called, methods that change
@@ -200,7 +170,6 @@
             for (StackSlot s : freedSlots) {
                 total += target.arch.getSizeInBytes(s.getKind());
             }
-            int initialSpillSize = returnAddressSize() + calleeSaveAreaSize();
             if (total == spillSize - initialSpillSize) {
                 // reset spill area size
                 spillSize = initialSpillSize;
@@ -238,13 +207,12 @@
     }
 
     /**
-     * Gets the offset to the stack area where callee-saved registers are stored.
+     * Gets the offset from the stack pointer to the stack area where callee-saved registers are
+     * stored.
      * 
      * @return The offset to the callee save area (in bytes).
      */
-    public int offsetToCalleeSaveArea() {
-        return frameSize() - calleeSaveAreaSize();
-    }
+    public abstract int offsetToCalleeSaveArea();
 
     /**
      * Informs the frame map that the compiled code calls a particular method, which may need stack
@@ -267,9 +235,16 @@
         hasOutgoingStackArguments = hasOutgoingStackArguments || argsSize > 0;
     }
 
-    private StackSlot getSlot(PlatformKind kind, int additionalOffset) {
-        return StackSlot.get(kind, -spillSize + additionalOffset, true);
-    }
+    /**
+     * Reserves a new spill slot in the frame of the method being compiled. The returned slot is
+     * aligned on its natural alignment, i.e., an 8-byte spill slot is aligned at an 8-byte
+     * boundary.
+     * 
+     * @param kind The kind of the spill slot to be reserved.
+     * @param additionalOffset additional offset (in bytes) added to the slot's base stack offset
+     * @return A spill slot denoting the reserved memory area.
+     */
+    protected abstract StackSlot allocateNewSpillSlot(PlatformKind kind, int additionalOffset);
 
     /**
      * Reserves a spill slot in the frame of the method being compiled. The returned slot is aligned
@@ -294,7 +269,7 @@
         }
         int size = target.arch.getSizeInBytes(kind);
         spillSize = NumUtil.roundUp(spillSize + size, size);
-        return getSlot(kind, 0);
+        return allocateNewSpillSlot(kind, 0);
     }
 
     private Set<StackSlot> freedSlots;
@@ -329,15 +304,15 @@
 
         if (refs) {
             assert size % target.wordSize == 0;
-            StackSlot result = getSlot(Kind.Object, 0);
+            StackSlot result = allocateNewSpillSlot(Kind.Object, 0);
             objectStackBlocks.add(result);
             for (int i = target.wordSize; i < size; i += target.wordSize) {
-                objectStackBlocks.add(getSlot(Kind.Object, i));
+                objectStackBlocks.add(allocateNewSpillSlot(Kind.Object, i));
             }
             return result;
 
         } else {
-            return getSlot(target.wordKind, 0);
+            return allocateNewSpillSlot(target.wordKind, 0);
         }
     }
 
--- a/graal/com.oracle.graal.loop/src/com/oracle/graal/loop/phases/LoopFullUnrollPhase.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.loop/src/com/oracle/graal/loop/phases/LoopFullUnrollPhase.java	Mon Aug 05 11:25:14 2013 +0200
@@ -28,7 +28,7 @@
 import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.tiers.*;
 
-public class LoopFullUnrollPhase extends BasePhase<HighTierContext> {
+public class LoopFullUnrollPhase extends BasePhase<PhaseContext> {
 
     private static final DebugMetric FULLY_UNROLLED_LOOPS = Debug.metric("FullUnrolls");
     private final boolean canonicalizeReads;
@@ -38,7 +38,7 @@
     }
 
     @Override
-    protected void run(StructuredGraph graph, HighTierContext context) {
+    protected void run(StructuredGraph graph, PhaseContext context) {
         if (graph.hasLoops()) {
             boolean peeled;
             do {
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/FixedGuardNode.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/FixedGuardNode.java	Mon Aug 05 11:25:14 2013 +0200
@@ -84,16 +84,18 @@
     public void simplify(SimplifierTool tool) {
         if (condition instanceof LogicConstantNode) {
             LogicConstantNode c = (LogicConstantNode) condition;
-            if (c.getValue() != negated) {
-                this.replaceAtUsages(BeginNode.prevBegin(this));
-                graph().removeFixed(this);
-            } else {
-                FixedWithNextNode predecessor = (FixedWithNextNode) predecessor();
+            if (c.getValue() == negated) {
+                FixedNode next = this.next();
+                if (next != null) {
+                    tool.deleteBranch(next);
+                }
+
                 DeoptimizeNode deopt = graph().add(new DeoptimizeNode(DeoptimizationAction.InvalidateRecompile, reason));
                 deopt.setDeoptimizationState(getDeoptimizationState());
-                tool.deleteBranch(this);
-                predecessor.setNext(deopt);
+                setNext(deopt);
             }
+            this.replaceAtUsages(BeginNode.prevBegin(this));
+            graph().removeFixed(this);
         }
     }
 
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/G1ReferentFieldReadBarrier.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/G1ReferentFieldReadBarrier.java	Mon Aug 05 11:25:14 2013 +0200
@@ -22,7 +22,6 @@
  */
 package com.oracle.graal.nodes;
 
-import com.oracle.graal.api.meta.*;
 import com.oracle.graal.nodes.extended.*;
 
 /**
@@ -31,14 +30,11 @@
  * {@code UnsafeLoadNode}). The return value of the read is passed to the snippet implementing the
  * read barrier and consequently is added to the SATB queue if the concurrent marker is enabled.
  */
-public class G1ReferentFieldReadBarrier extends WriteBarrier implements DeoptimizingNode {
+public class G1ReferentFieldReadBarrier extends WriteBarrier {
 
     @Input private ValueNode expectedObject;
     private final boolean doLoad;
 
-    @Input private FrameState deoptimizationState;
-    private final boolean nullCheck;
-
     public ValueNode getExpectedObject() {
         return expectedObject;
     }
@@ -47,35 +43,9 @@
         return doLoad;
     }
 
-    public boolean getNullCheck() {
-        return nullCheck;
-    }
-
-    public G1ReferentFieldReadBarrier(ValueNode object, ValueNode expectedObject, LocationNode location, boolean doLoad, boolean nullCheck) {
+    public G1ReferentFieldReadBarrier(ValueNode object, ValueNode expectedObject, LocationNode location, boolean doLoad) {
         super(object, location, true);
         this.doLoad = doLoad;
-        this.nullCheck = nullCheck;
         this.expectedObject = expectedObject;
     }
-
-    @Override
-    public boolean canDeoptimize() {
-        return nullCheck;
-    }
-
-    @Override
-    public FrameState getDeoptimizationState() {
-        return deoptimizationState;
-    }
-
-    @Override
-    public void setDeoptimizationState(FrameState state) {
-        updateUsages(deoptimizationState, state);
-        deoptimizationState = state;
-    }
-
-    @Override
-    public DeoptimizationReason getDeoptimizationReason() {
-        return DeoptimizationReason.NullCheckException;
-    }
 }
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/SerialWriteBarrier.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/SerialWriteBarrier.java	Mon Aug 05 11:25:14 2013 +0200
@@ -26,7 +26,14 @@
 
 public class SerialWriteBarrier extends WriteBarrier {
 
-    public SerialWriteBarrier(ValueNode object, LocationNode location, boolean precise) {
+    @Input private ValueNode value;
+
+    public ValueNode getValue() {
+        return value;
+    }
+
+    public SerialWriteBarrier(ValueNode object, ValueNode value, LocationNode location, boolean precise) {
         super(object, location, precise);
+        this.value = value;
     }
 }
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/TypeProfileProxyNode.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/TypeProfileProxyNode.java	Mon Aug 05 11:25:14 2013 +0200
@@ -118,13 +118,6 @@
         return this;
     }
 
-    public static void cleanFromGraph(StructuredGraph graph) {
-        for (TypeProfileProxyNode proxy : graph.getNodes(TypeProfileProxyNode.class)) {
-            graph.replaceFloating(proxy, proxy.getObject());
-        }
-        assert graph.getNodes(TypeProfileProxyNode.class).count() == 0;
-    }
-
     @Override
     public ValueNode getOriginalValue() {
         return object;
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/java/CheckCastNode.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/java/CheckCastNode.java	Mon Aug 05 11:25:14 2013 +0200
@@ -146,7 +146,8 @@
             return object();
         }
 
-        // remove checkcast if next node is a more specific checkcast
+        // if the previous node is also a checkcast, with a less precise and compatible type,
+        // replace both with one checkcast checking the more specific type.
         if (predecessor() instanceof CheckCastNode) {
             CheckCastNode ccn = (CheckCastNode) predecessor();
             if (ccn != null && ccn.type != null && ccn == object && ccn.forStoreCheck == forStoreCheck && ccn.type.isAssignableFrom(type)) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/AbstractInliningPhase.java	Mon Aug 05 11:25:14 2013 +0200
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.phases.common;
+
+import com.oracle.graal.phases.*;
+import com.oracle.graal.phases.tiers.*;
+
+/**
+ * Common superclass for phases that perform inlining.
+ */
+public abstract class AbstractInliningPhase extends BasePhase<HighTierContext> {
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/CleanTypeProfileProxyPhase.java	Mon Aug 05 11:25:14 2013 +0200
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.phases.common;
+
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.phases.*;
+
+public class CleanTypeProfileProxyPhase extends Phase {
+
+    @Override
+    protected void run(StructuredGraph graph) {
+        for (TypeProfileProxyNode proxy : graph.getNodes(TypeProfileProxyNode.class)) {
+            graph.replaceFloating(proxy, proxy.getObject());
+        }
+        assert graph.getNodes(TypeProfileProxyNode.class).count() == 0;
+    }
+}
--- a/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/InliningPhase.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/InliningPhase.java	Mon Aug 05 11:25:14 2013 +0200
@@ -38,17 +38,17 @@
 import com.oracle.graal.nodes.type.*;
 import com.oracle.graal.nodes.util.*;
 import com.oracle.graal.options.*;
-import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.PhasePlan.PhasePosition;
 import com.oracle.graal.phases.common.CanonicalizerPhase.CustomCanonicalizer;
 import com.oracle.graal.phases.common.InliningUtil.InlineInfo;
 import com.oracle.graal.phases.common.InliningUtil.Inlineable;
+import com.oracle.graal.phases.common.InliningUtil.InlineableGraph;
 import com.oracle.graal.phases.common.InliningUtil.InlineableMacroNode;
-import com.oracle.graal.phases.common.InliningUtil.InlineableGraph;
 import com.oracle.graal.phases.common.InliningUtil.InliningPolicy;
 import com.oracle.graal.phases.graph.*;
+import com.oracle.graal.phases.tiers.*;
 
-public class InliningPhase extends Phase {
+public class InliningPhase extends AbstractInliningPhase {
 
     static class Options {
 
@@ -58,15 +58,9 @@
         // @formatter:on
     }
 
-    private final PhasePlan plan;
-    private final MetaAccessProvider runtime;
-    private final Assumptions compilationAssumptions;
-    private final Replacements replacements;
-    private final GraphCache cache;
     private final InliningPolicy inliningPolicy;
-    private final OptimisticOptimizations optimisticOpts;
+    private final CustomCanonicalizer customCanonicalizer;
 
-    private CustomCanonicalizer customCanonicalizer;
     private int inliningCount;
     private int maxMethodPerInlining = Integer.MAX_VALUE;
 
@@ -76,33 +70,24 @@
     private static final DebugMetric metricInliningStoppedByMaxDesiredSize = Debug.metric("InliningStoppedByMaxDesiredSize");
     private static final DebugMetric metricInliningRuns = Debug.metric("Runs");
 
-    public InliningPhase(MetaAccessProvider runtime, Map<Invoke, Double> hints, Replacements replacements, Assumptions assumptions, GraphCache cache, PhasePlan plan,
-                    OptimisticOptimizations optimisticOpts) {
-        this(runtime, replacements, assumptions, cache, plan, optimisticOpts, hints);
+    public InliningPhase() {
+        this(new GreedyInliningPolicy(null), null);
+    }
+
+    public InliningPhase(CustomCanonicalizer canonicalizer) {
+        this(new GreedyInliningPolicy(null), canonicalizer);
     }
 
-    private InliningPhase(MetaAccessProvider runtime, Replacements replacements, Assumptions assumptions, GraphCache cache, PhasePlan plan, OptimisticOptimizations optimisticOpts,
-                    Map<Invoke, Double> hints) {
-        this.runtime = runtime;
-        this.replacements = replacements;
-        this.compilationAssumptions = assumptions;
-        this.cache = cache;
-        this.plan = plan;
-        this.inliningPolicy = new GreedyInliningPolicy(replacements, hints);
-        this.optimisticOpts = optimisticOpts;
+    public InliningPhase(Map<Invoke, Double> hints) {
+        this(new GreedyInliningPolicy(hints), null);
     }
 
-    public InliningPhase(MetaAccessProvider runtime, Replacements replacements, Assumptions assumptions, GraphCache cache, PhasePlan plan, OptimisticOptimizations optimisticOpts, InliningPolicy policy) {
-        this.runtime = runtime;
-        this.replacements = replacements;
-        this.compilationAssumptions = assumptions;
-        this.cache = cache;
-        this.plan = plan;
-        this.inliningPolicy = policy;
-        this.optimisticOpts = optimisticOpts;
+    public InliningPhase(InliningPolicy policy) {
+        this(policy, null);
     }
 
-    public void setCustomCanonicalizer(CustomCanonicalizer customCanonicalizer) {
+    private InliningPhase(InliningPolicy policy, CustomCanonicalizer customCanonicalizer) {
+        this.inliningPolicy = policy;
         this.customCanonicalizer = customCanonicalizer;
     }
 
@@ -123,19 +108,21 @@
     }
 
     @Override
-    protected void run(final StructuredGraph graph) {
-        final InliningData data = new InliningData(graph, compilationAssumptions);
+    protected void run(final StructuredGraph graph, final HighTierContext context) {
+        final InliningData data = new InliningData(graph, context.getAssumptions());
 
         while (data.hasUnprocessedGraphs()) {
             final MethodInvocation currentInvocation = data.currentInvocation();
             GraphInfo graphInfo = data.currentGraph();
-            if (!currentInvocation.isRoot() && !inliningPolicy.isWorthInlining(currentInvocation.callee(), data.inliningDepth(), currentInvocation.probability(), currentInvocation.relevance(), false)) {
+            if (!currentInvocation.isRoot() &&
+                            !inliningPolicy.isWorthInlining(context.getReplacements(), currentInvocation.callee(), data.inliningDepth(), currentInvocation.probability(),
+                                            currentInvocation.relevance(), false)) {
                 int remainingGraphs = currentInvocation.totalGraphs() - currentInvocation.processedGraphs();
                 assert remainingGraphs > 0;
                 data.popGraphs(remainingGraphs);
                 data.popInvocation();
             } else if (graphInfo.hasRemainingInvokes() && inliningPolicy.continueInlining(graphInfo.graph())) {
-                processNextInvoke(data, graphInfo);
+                processNextInvoke(data, graphInfo, context);
             } else {
                 data.popGraph();
                 if (!currentInvocation.isRoot()) {
@@ -148,7 +135,7 @@
 
                             @Override
                             public void run() {
-                                tryToInline(data.currentGraph(), currentInvocation, parentInvoke, data.inliningDepth() + 1);
+                                tryToInline(data.currentGraph(), currentInvocation, parentInvoke, data.inliningDepth() + 1, context);
                             }
                         });
 
@@ -164,11 +151,11 @@
     /**
      * Process the next invoke and enqueue all its graphs for processing.
      */
-    private void processNextInvoke(InliningData data, GraphInfo graphInfo) {
+    private void processNextInvoke(InliningData data, GraphInfo graphInfo, HighTierContext context) {
         Invoke invoke = graphInfo.popInvoke();
         MethodInvocation callerInvocation = data.currentInvocation();
         Assumptions parentAssumptions = callerInvocation.assumptions();
-        InlineInfo info = InliningUtil.getInlineInfo(data, invoke, maxMethodPerInlining, replacements, parentAssumptions, optimisticOpts);
+        InlineInfo info = InliningUtil.getInlineInfo(data, invoke, maxMethodPerInlining, context.getReplacements(), parentAssumptions, context.getOptimisticOptimizations());
 
         if (info != null) {
             double invokeProbability = graphInfo.invokeProbability(invoke);
@@ -176,7 +163,7 @@
             MethodInvocation calleeInvocation = data.pushInvocation(info, parentAssumptions, invokeProbability, invokeRelevance);
 
             for (int i = 0; i < info.numberOfMethods(); i++) {
-                Inlineable elem = getInlineableElement(info.methodAt(i), info.invoke(), calleeInvocation.assumptions());
+                Inlineable elem = getInlineableElement(info.methodAt(i), info.invoke(), calleeInvocation.assumptions(), context);
                 info.setInlinableElement(i, elem);
                 if (elem instanceof InlineableGraph) {
                     data.pushGraph(((InlineableGraph) elem).getGraph(), invokeProbability * info.probabilityAt(i), invokeRelevance * info.relevanceAt(i));
@@ -188,32 +175,32 @@
         }
     }
 
-    private void tryToInline(GraphInfo callerGraphInfo, MethodInvocation calleeInfo, MethodInvocation parentInvocation, int inliningDepth) {
+    private void tryToInline(GraphInfo callerGraphInfo, MethodInvocation calleeInfo, MethodInvocation parentInvocation, int inliningDepth, HighTierContext context) {
         InlineInfo callee = calleeInfo.callee();
         Assumptions callerAssumptions = parentInvocation.assumptions();
 
-        if (inliningPolicy.isWorthInlining(callee, inliningDepth, calleeInfo.probability(), calleeInfo.relevance(), true)) {
-            doInline(callerGraphInfo, calleeInfo, callerAssumptions);
-        } else if (optimisticOpts.devirtualizeInvokes()) {
-            callee.tryToDevirtualizeInvoke(runtime, callerAssumptions);
+        if (inliningPolicy.isWorthInlining(context.getReplacements(), callee, inliningDepth, calleeInfo.probability(), calleeInfo.relevance(), true)) {
+            doInline(callerGraphInfo, calleeInfo, callerAssumptions, context);
+        } else if (context.getOptimisticOptimizations().devirtualizeInvokes()) {
+            callee.tryToDevirtualizeInvoke(context.getRuntime(), callerAssumptions);
         }
         metricInliningConsidered.increment();
     }
 
-    private void doInline(GraphInfo callerGraphInfo, MethodInvocation calleeInfo, Assumptions callerAssumptions) {
+    private void doInline(GraphInfo callerGraphInfo, MethodInvocation calleeInfo, Assumptions callerAssumptions, HighTierContext context) {
         StructuredGraph callerGraph = callerGraphInfo.graph();
         int markBeforeInlining = callerGraph.getMark();
         InlineInfo callee = calleeInfo.callee();
         try {
             List<Node> invokeUsages = callee.invoke().asNode().usages().snapshot();
-            callee.inline(runtime, callerAssumptions, replacements);
+            callee.inline(context.getRuntime(), callerAssumptions, context.getReplacements());
             callerAssumptions.record(calleeInfo.assumptions());
             metricInliningRuns.increment();
             Debug.dump(callerGraph, "after %s", callee);
 
             if (OptCanonicalizer.getValue()) {
                 int markBeforeCanonicalization = callerGraph.getMark();
-                new CanonicalizerPhase.Instance(runtime, callerAssumptions, !AOTCompilation.getValue(), invokeUsages, markBeforeInlining, customCanonicalizer).apply(callerGraph);
+                new CanonicalizerPhase.Instance(context.getRuntime(), callerAssumptions, !AOTCompilation.getValue(), invokeUsages, markBeforeInlining, customCanonicalizer).apply(callerGraph);
 
                 // process invokes that are possibly created during canonicalization
                 for (Node newNode : callerGraph.getNewNodes(markBeforeCanonicalization)) {
@@ -236,27 +223,27 @@
         }
     }
 
-    private Inlineable getInlineableElement(final ResolvedJavaMethod method, Invoke invoke, Assumptions assumptions) {
-        Class<? extends FixedWithNextNode> macroNodeClass = InliningUtil.getMacroNodeClass(replacements, method);
+    private static Inlineable getInlineableElement(final ResolvedJavaMethod method, Invoke invoke, Assumptions assumptions, HighTierContext context) {
+        Class<? extends FixedWithNextNode> macroNodeClass = InliningUtil.getMacroNodeClass(context.getReplacements(), method);
         if (macroNodeClass != null) {
             return new InlineableMacroNode(macroNodeClass);
         } else {
-            return new InlineableGraph(buildGraph(method, invoke, assumptions));
+            return new InlineableGraph(buildGraph(method, invoke, assumptions, context));
         }
     }
 
-    private StructuredGraph buildGraph(final ResolvedJavaMethod method, final Invoke invoke, final Assumptions assumptions) {
+    private static StructuredGraph buildGraph(final ResolvedJavaMethod method, final Invoke invoke, final Assumptions assumptions, final HighTierContext context) {
         final StructuredGraph newGraph;
         final boolean parseBytecodes;
 
         // TODO (chaeubl): copying the graph is only necessary if it is modified or if it contains
         // any invokes
-        StructuredGraph intrinsicGraph = InliningUtil.getIntrinsicGraph(replacements, method);
+        StructuredGraph intrinsicGraph = InliningUtil.getIntrinsicGraph(context.getReplacements(), method);
         if (intrinsicGraph != null) {
             newGraph = intrinsicGraph.copy();
             parseBytecodes = false;
         } else {
-            StructuredGraph cachedGraph = getCachedGraph(method);
+            StructuredGraph cachedGraph = getCachedGraph(method, context);
             if (cachedGraph != null) {
                 newGraph = cachedGraph.copy();
                 parseBytecodes = false;
@@ -271,7 +258,7 @@
             @Override
             public StructuredGraph call() throws Exception {
                 if (parseBytecodes) {
-                    parseBytecodes(newGraph, assumptions);
+                    parseBytecodes(newGraph, assumptions, context);
                 }
 
                 boolean callerHasMoreInformationAboutArguments = false;
@@ -280,7 +267,7 @@
                     ValueNode arg = args.get(localNode.index());
                     if (arg.isConstant()) {
                         Constant constant = arg.asConstant();
-                        newGraph.replaceFloating(localNode, ConstantNode.forConstant(constant, runtime, newGraph));
+                        newGraph.replaceFloating(localNode, ConstantNode.forConstant(constant, context.getRuntime(), newGraph));
                         callerHasMoreInformationAboutArguments = true;
                     } else {
                         Stamp joinedStamp = localNode.stamp().join(arg.stamp());
@@ -298,7 +285,7 @@
                 }
 
                 if (OptCanonicalizer.getValue()) {
-                    new CanonicalizerPhase.Instance(runtime, assumptions, !AOTCompilation.getValue()).apply(newGraph);
+                    new CanonicalizerPhase.Instance(context.getRuntime(), assumptions, !AOTCompilation.getValue()).apply(newGraph);
                 }
 
                 return newGraph;
@@ -306,9 +293,9 @@
         });
     }
 
-    private StructuredGraph getCachedGraph(ResolvedJavaMethod method) {
-        if (CacheGraphs.getValue() && cache != null) {
-            StructuredGraph cachedGraph = cache.get(method);
+    private static StructuredGraph getCachedGraph(ResolvedJavaMethod method, HighTierContext context) {
+        if (CacheGraphs.getValue() && context.getGraphCache() != null) {
+            StructuredGraph cachedGraph = context.getGraphCache().get(method);
             if (cachedGraph != null) {
                 return cachedGraph;
             }
@@ -316,22 +303,22 @@
         return null;
     }
 
-    private StructuredGraph parseBytecodes(StructuredGraph newGraph, Assumptions assumptions) {
+    private static StructuredGraph parseBytecodes(StructuredGraph newGraph, Assumptions assumptions, HighTierContext context) {
         boolean hasMatureProfilingInfo = newGraph.method().getProfilingInfo().isMature();
 
-        if (plan != null) {
-            plan.runPhases(PhasePosition.AFTER_PARSING, newGraph);
+        if (context.getPhasePlan() != null) {
+            context.getPhasePlan().runPhases(PhasePosition.AFTER_PARSING, newGraph);
         }
         assert newGraph.start().next() != null : "graph needs to be populated during PhasePosition.AFTER_PARSING";
 
         new DeadCodeEliminationPhase().apply(newGraph);
 
         if (OptCanonicalizer.getValue()) {
-            new CanonicalizerPhase.Instance(runtime, assumptions, !AOTCompilation.getValue()).apply(newGraph);
+            new CanonicalizerPhase.Instance(context.getRuntime(), assumptions, !AOTCompilation.getValue()).apply(newGraph);
         }
 
-        if (CacheGraphs.getValue() && cache != null) {
-            cache.put(newGraph.copy(), hasMatureProfilingInfo);
+        if (CacheGraphs.getValue() && context.getGraphCache() != null) {
+            context.getGraphCache().put(newGraph.copy(), hasMatureProfilingInfo);
         }
         return newGraph;
     }
@@ -347,11 +334,9 @@
 
     private abstract static class AbstractInliningPolicy implements InliningPolicy {
 
-        protected final Replacements replacements;
         protected final Map<Invoke, Double> hints;
 
-        public AbstractInliningPolicy(Replacements replacements, Map<Invoke, Double> hints) {
-            this.replacements = replacements;
+        public AbstractInliningPolicy(Map<Invoke, Double> hints) {
             this.hints = hints;
         }
 
@@ -367,15 +352,15 @@
             return 1;
         }
 
-        protected boolean isIntrinsic(InlineInfo info) {
+        protected boolean isIntrinsic(Replacements replacements, InlineInfo info) {
             if (AlwaysInlineIntrinsics.getValue()) {
-                return onlyIntrinsics(info);
+                return onlyIntrinsics(replacements, info);
             } else {
-                return onlyForcedIntrinsics(info);
+                return onlyForcedIntrinsics(replacements, info);
             }
         }
 
-        private boolean onlyIntrinsics(InlineInfo info) {
+        private static boolean onlyIntrinsics(Replacements replacements, InlineInfo info) {
             for (int i = 0; i < info.numberOfMethods(); i++) {
                 if (!InliningUtil.canIntrinsify(replacements, info.methodAt(i))) {
                     return false;
@@ -384,7 +369,7 @@
             return true;
         }
 
-        private boolean onlyForcedIntrinsics(InlineInfo info) {
+        private static boolean onlyForcedIntrinsics(Replacements replacements, InlineInfo info) {
             for (int i = 0; i < info.numberOfMethods(); i++) {
                 if (!InliningUtil.canIntrinsify(replacements, info.methodAt(i))) {
                     return false;
@@ -433,8 +418,8 @@
 
     private static final class GreedyInliningPolicy extends AbstractInliningPolicy {
 
-        public GreedyInliningPolicy(Replacements replacements, Map<Invoke, Double> hints) {
-            super(replacements, hints);
+        public GreedyInliningPolicy(Map<Invoke, Double> hints) {
+            super(hints);
         }
 
         public boolean continueInlining(StructuredGraph currentGraph) {
@@ -447,12 +432,12 @@
         }
 
         @Override
-        public boolean isWorthInlining(InlineInfo info, int inliningDepth, double probability, double relevance, boolean fullyProcessed) {
+        public boolean isWorthInlining(Replacements replacements, InlineInfo info, int inliningDepth, double probability, double relevance, boolean fullyProcessed) {
             if (InlineEverything.getValue()) {
                 return InliningUtil.logInlinedMethod(info, inliningDepth, fullyProcessed, "inline everything");
             }
 
-            if (isIntrinsic(info)) {
+            if (isIntrinsic(replacements, info)) {
                 return InliningUtil.logInlinedMethod(info, inliningDepth, fullyProcessed, "intrinsic");
             }
 
@@ -501,7 +486,7 @@
             return true;
         }
 
-        public boolean isWorthInlining(InlineInfo info, int inliningDepth, double probability, double relevance, boolean fullyProcessed) {
+        public boolean isWorthInlining(Replacements replacements, InlineInfo info, int inliningDepth, double probability, double relevance, boolean fullyProcessed) {
             return true;
         }
     }
--- a/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/InliningUtil.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/InliningUtil.java	Mon Aug 05 11:25:14 2013 +0200
@@ -66,7 +66,7 @@
 
         boolean continueInlining(StructuredGraph graph);
 
-        boolean isWorthInlining(InlineInfo info, int inliningDepth, double probability, double relevance, boolean fullyProcessed);
+        boolean isWorthInlining(Replacements replacements, InlineInfo info, int inliningDepth, double probability, double relevance, boolean fullyProcessed);
     }
 
     public interface Inlineable {
@@ -708,7 +708,7 @@
                 if (opportunities > 0) {
                     metricInliningTailDuplication.increment();
                     Debug.log("MultiTypeGuardInlineInfo starting tail duplication (%d opportunities)", opportunities);
-                    TailDuplicationPhase.tailDuplicate(returnMerge, TailDuplicationPhase.TRUE_DECISION, replacementNodes, new HighTierContext(runtime, assumptions, replacements));
+                    TailDuplicationPhase.tailDuplicate(returnMerge, TailDuplicationPhase.TRUE_DECISION, replacementNodes, new PhaseContext(runtime, assumptions, replacements));
                 }
             }
         }
--- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/GraalOptions.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/GraalOptions.java	Mon Aug 05 11:25:14 2013 +0200
@@ -230,8 +230,6 @@
     public static final OptionValue<Boolean> ResolveClassBeforeStaticInvoke = new OptionValue<>(false);
     @Option(help = "")
     public static final OptionValue<Boolean> CanOmitFrame = new OptionValue<>(true);
-    @Option(help = "")
-    public static final OptionValue<Integer> SafepointPollOffset = new OptionValue<>(256);
 
     @Option(help = "")
     public static final OptionValue<Boolean> MemoryAwareScheduling = new OptionValue<>(true);
--- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/PhasePlan.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/PhasePlan.java	Mon Aug 05 11:25:14 2013 +0200
@@ -49,13 +49,11 @@
      * A compiler extension phase can chose to run at the end of periods 1-3.
      */
     public static enum PhasePosition {
-        AFTER_PARSING,
-        HIGH_LEVEL
+        AFTER_PARSING
     }
     // @formatter:on
 
     @SuppressWarnings("unchecked") private final ArrayList<Phase>[] phases = new ArrayList[PhasePosition.values().length];
-    private final Set<Class<? extends Phase>> disabledPhases = new HashSet<>();
 
     public void addPhase(PhasePosition pos, Phase phase) {
         if (phases[pos.ordinal()] == null) {
@@ -71,12 +69,4 @@
             }
         }
     }
-
-    public void disablePhase(Class<? extends Phase> clazz) {
-        disabledPhases.add(clazz);
-    }
-
-    public boolean isPhaseDisabled(Class<? extends Phase> clazz) {
-        return disabledPhases.contains(clazz);
-    }
 }
--- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/VerifyPhase.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/VerifyPhase.java	Mon Aug 05 11:25:14 2013 +0200
@@ -26,16 +26,16 @@
 
 /***
  * This phase serves as a verification, in order to check the graph for certain properties. The
- * {@link #verify(StructuredGraph)} method will be used as an assertion, and implements the actual
- * check. Instead of returning false, it is also valid to throw an {@link AssertionError} in the
- * implemented {@link #verify(StructuredGraph)} method.
+ * {@link #verify(StructuredGraph, Object)} method will be used as an assertion, and implements the
+ * actual check. Instead of returning false, it is also valid to throw an {@link AssertionError} in
+ * the implemented {@link #verify(StructuredGraph, Object)} method.
  */
-public abstract class VerifyPhase extends Phase {
+public abstract class VerifyPhase<C> extends BasePhase<C> {
 
     @Override
-    protected final void run(StructuredGraph graph) {
-        assert verify(graph);
+    protected final void run(StructuredGraph graph, C context) {
+        assert verify(graph, context);
     }
 
-    protected abstract boolean verify(StructuredGraph graph);
+    protected abstract boolean verify(StructuredGraph graph, C context);
 }
--- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/schedule/SchedulePhase.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/schedule/SchedulePhase.java	Mon Aug 05 11:25:14 2013 +0200
@@ -202,10 +202,17 @@
 
         @Override
         protected Map<LocationIdentity, Node> merge(Block merge, List<Map<LocationIdentity, Node>> states) {
+            return merge(merge, states, false);
+        }
+
+        protected Map<LocationIdentity, Node> merge(Block merge, List<Map<LocationIdentity, Node>> states, boolean loopbegin) {
             assert merge.getBeginNode() instanceof MergeNode;
             MergeNode mergeNode = (MergeNode) merge.getBeginNode();
 
-            Map<LocationIdentity, Node> initKillMap = cloneState(getBlockToKillMap().get(merge));
+            Map<LocationIdentity, Node> initKillMap = new HashMap<>();
+            if (loopbegin) {
+                initKillMap.putAll(getBlockToKillMap().get(merge));
+            }
             for (Map<LocationIdentity, Node> state : states) {
                 for (LocationIdentity locid : state.keySet()) {
                     if (initKillMap.containsKey(locid)) {
@@ -219,6 +226,9 @@
             }
 
             getMergeToKillMap().set(mergeNode, cloneState(initKillMap));
+            if (!loopbegin) {
+                initKillMap.putAll(getBlockToKillMap().get(merge));
+            }
             return initKillMap;
         }
 
@@ -232,7 +242,7 @@
             LoopInfo<Map<LocationIdentity, Node>> info = ReentrantBlockIterator.processLoop(this, loop, new HashMap<>(state));
 
             assert loop.header.getBeginNode() instanceof LoopBeginNode;
-            Map<LocationIdentity, Node> headerState = merge(loop.header, info.endStates);
+            Map<LocationIdentity, Node> headerState = merge(loop.header, info.endStates, true);
             getBlockToKillMap().put(loop.header, headerState);
 
             for (Map<LocationIdentity, Node> exitState : info.exitStates) {
@@ -258,7 +268,6 @@
     private final Map<FixedNode, List<FloatingNode>> phantomInputs = new IdentityHashMap<>();
     private final SchedulingStrategy selectedStrategy;
     private final MemoryScheduling memsched;
-    private final boolean dumpSchedule;
 
     public SchedulePhase() {
         this(OptScheduleOutOfLoops.getValue() ? SchedulingStrategy.LATEST_OUT_OF_LOOPS : SchedulingStrategy.LATEST);
@@ -276,13 +285,11 @@
             this.memsched = MemoryScheduling.NONE;
         }
         this.selectedStrategy = strategy;
-        this.dumpSchedule = false;
     }
 
-    public SchedulePhase(SchedulingStrategy strategy, MemoryScheduling memsched, boolean dumpSchedule) {
+    public SchedulePhase(SchedulingStrategy strategy, MemoryScheduling memsched) {
         this.selectedStrategy = strategy;
         this.memsched = memsched;
-        this.dumpSchedule = dumpSchedule;
     }
 
     @Override
@@ -347,52 +354,52 @@
     }
 
     private void printSchedule(String desc) {
-        if (dumpSchedule) {
-            TTY.print("=== %s / %s / %s (%s) ===\n", getCFG().getStartBlock().getBeginNode().graph(), selectedStrategy, memsched, desc);
-            for (Block b : getCFG().getBlocks()) {
-                TTY.print("==== b: %s. ", b);
-                TTY.print("dom: %s. ", b.getDominator());
-                TTY.print("post-dom: %s. ", b.getPostdominator());
-                TTY.print("preds: %s. ", b.getPredecessors());
-                TTY.print("succs: %s ====\n", b.getSuccessors());
-                BlockMap<Map<LocationIdentity, Node>> killMaps = getBlockToKillMap();
-                if (killMaps != null) {
-                    TTY.print("X block kills: \n");
-                    for (LocationIdentity locId : killMaps.get(b).keySet()) {
-                        TTY.print("X %s killed by %s\n", locId, killMaps.get(b).get(locId));
-                    }
-                }
-
-                if (getBlockToNodesMap().get(b) != null) {
-                    for (Node n : nodesFor(b)) {
-                        printNode(n);
-                    }
-                } else {
-                    for (Node n : b.getNodes()) {
-                        printNode(n);
-                    }
+        Debug.printf("=== %s / %s / %s (%s) ===\n", getCFG().getStartBlock().getBeginNode().graph(), selectedStrategy, memsched, desc);
+        for (Block b : getCFG().getBlocks()) {
+            Debug.printf("==== b: %s. ", b);
+            Debug.printf("dom: %s. ", b.getDominator());
+            Debug.printf("post-dom: %s. ", b.getPostdominator());
+            Debug.printf("preds: %s. ", b.getPredecessors());
+            Debug.printf("succs: %s ====\n", b.getSuccessors());
+            BlockMap<Map<LocationIdentity, Node>> killMaps = getBlockToKillMap();
+            if (killMaps != null) {
+                Debug.printf("X block kills: \n");
+                for (LocationIdentity locId : killMaps.get(b).keySet()) {
+                    Debug.printf("X %s killed by %s\n", locId, killMaps.get(b).get(locId));
                 }
             }
-            TTY.print("\n\n");
+
+            if (getBlockToNodesMap().get(b) != null) {
+                for (Node n : nodesFor(b)) {
+                    printNode(n);
+                }
+            } else {
+                for (Node n : b.getNodes()) {
+                    printNode(n);
+                }
+            }
         }
+        Debug.printf("\n\n");
     }
 
     private static void printNode(Node n) {
-        TTY.print("%s", n);
+        Debug.printf("%s", n);
         if (n instanceof MemoryCheckpoint.Single) {
-            TTY.print(" // kills %s", ((MemoryCheckpoint.Single) n).getLocationIdentity());
+            Debug.printf(" // kills %s", ((MemoryCheckpoint.Single) n).getLocationIdentity());
         } else if (n instanceof MemoryCheckpoint.Multi) {
-            TTY.print(" // kills ");
+            Debug.printf(" // kills ");
             for (LocationIdentity locid : ((MemoryCheckpoint.Multi) n).getLocationIdentities()) {
-                TTY.print("%s, ", locid);
+                Debug.printf("%s, ", locid);
             }
         } else if (n instanceof FloatingReadNode) {
             FloatingReadNode frn = (FloatingReadNode) n;
-            TTY.print(" // from %s", frn.location().getLocationIdentity());
-            TTY.print(", lastAccess: %s", frn.lastLocationAccess());
-            TTY.print(", object: %s", frn.object());
+            Debug.printf(" // from %s", frn.location().getLocationIdentity());
+            Debug.printf(", lastAccess: %s", frn.lastLocationAccess());
+            Debug.printf(", object: %s", frn.object());
+        } else if (n instanceof GuardNode) {
+            Debug.printf(", guard: %s", ((GuardNode) n).getGuard());
         }
-        TTY.print("\n");
+        Debug.printf("\n");
     }
 
     public ControlFlowGraph getCFG() {
@@ -464,39 +471,60 @@
                 break;
             case LATEST:
             case LATEST_OUT_OF_LOOPS:
-                if (memsched == MemoryScheduling.OPTIMAL && node instanceof FloatingReadNode) {
+                if (memsched == MemoryScheduling.OPTIMAL && node instanceof FloatingReadNode && ((FloatingReadNode) node).location().getLocationIdentity() != FINAL_LOCATION) {
                     block = optimalBlock((FloatingReadNode) node, strategy);
                 } else {
                     block = latestBlock(node, strategy);
-                }
-                if (block == null) {
-                    block = earliestBlock(node);
-                } else if (strategy == SchedulingStrategy.LATEST_OUT_OF_LOOPS && !(node instanceof VirtualObjectNode)) {
-                    // schedule at the latest position possible in the outermost loop possible
-                    Block earliestBlock = earliestBlock(node);
-                    Block before = block;
-                    block = scheduleOutOfLoops(node, block, earliestBlock);
-                    if (!earliestBlock.dominates(block)) {
-                        throw new SchedulingError("%s: Graph cannot be scheduled : inconsistent for %s, %d usages, (%s needs to dominate %s (before %s))", node.graph(), node, node.usages().count(),
-                                        earliestBlock, block, before);
+                    if (block == null) {
+                        block = earliestBlock(node);
+                    } else if (strategy == SchedulingStrategy.LATEST_OUT_OF_LOOPS && !(node instanceof VirtualObjectNode)) {
+                        // schedule at the latest position possible in the outermost loop possible
+                        Block earliestBlock = earliestBlock(node);
+                        Block before = block;
+                        block = scheduleOutOfLoops(node, block, earliestBlock);
+                        if (!earliestBlock.dominates(block)) {
+                            throw new SchedulingError("%s: Graph cannot be scheduled : inconsistent for %s, %d usages, (%s needs to dominate %s (before %s))", node.graph(), node,
+                                            node.usages().count(), earliestBlock, block, before);
+                        }
                     }
                 }
                 break;
             default:
                 throw new GraalInternalError("unknown scheduling strategy");
         }
+        assert earliestBlock(node).dominates(block) : "node " + node + " in block " + block + " is not dominated by earliest " + earliestBlock(node);
         cfg.getNodeToBlock().set(node, block);
         blockToNodesMap.get(block).add(node);
     }
 
-    // assign floating read to block according to kill maps
+    /**
+     * This method tries to find the latest position for a read, by taking the information gathered
+     * by {@link NewMemoryScheduleClosure} into account.
+     * 
+     * The idea is to iterate the dominator tree starting with the latest schedule of the read.
+     * 
+     * <pre>
+     *    U      upperbound block, defined by last access location of the floating read
+     *    &#9650;
+     *    E      earliest block
+     *    &#9650;
+     *    O      optimal block, first block that contains a kill of the read's location
+     *    &#9650;
+     *    L      latest block
+     * </pre>
+     * 
+     * i.e. <code>upperbound `dom` earliest `dom` optimal `dom` latest</code>. However, there are
+     * cases where <code>earliest `dom` optimal</code> is not true, because the position is
+     * (implicitly) bounded by an anchor of the read's guard. In such cases, the earliest schedule
+     * is taken.
+     * 
+     */
     private Block optimalBlock(FloatingReadNode n, SchedulingStrategy strategy) {
         assert memsched == MemoryScheduling.OPTIMAL;
 
         LocationIdentity locid = n.location().getLocationIdentity();
-        if (locid == FINAL_LOCATION) {
-            return latestBlock(n, strategy);
-        }
+        assert locid != FINAL_LOCATION;
+
         Node upperBound = n.lastLocationAccess();
         Block upperBoundBlock = forKillLocation(upperBound);
         Block earliestBlock = earliestBlock(n);
@@ -506,35 +534,68 @@
         assert currentBlock != null && earliestBlock.dominates(currentBlock) : "earliest (" + earliestBlock + ") should dominate latest block (" + currentBlock + ")";
         Block previousBlock = currentBlock;
 
+        Debug.printf("processing %s (accessing %s): latest %s, earliest %s, upper bound %s (%s)\n", n, locid, currentBlock, earliestBlock, upperBoundBlock, upperBound);
+
         int iterations = 0;
-        // iterate the dominator tree until the last kill matches or reaching a upper bound
+        // iterate the dominator tree
         while (true) {
             iterations++;
+            assert earliestBlock.dominates(previousBlock) : "iterations: " + iterations;
             Node lastKill = blockToKillMap.get(currentBlock).get(locid);
+            boolean isAtEarliest = earliestBlock == previousBlock && previousBlock != currentBlock;
 
             if (lastKill.equals(upperBound)) {
                 // assign node to the block which kills the location
 
-                if (previousBlock.getBeginNode() instanceof MergeNode) {
-                    // merges kill locations right at the beginning of a block
+                boolean outOfLoop = false;
+
+                // schedule read out of the loop if possible, in terms of killMaps and earliest
+                // schedule
+                if (currentBlock != earliestBlock && previousBlock != earliestBlock) {
+                    assert earliestBlock.dominates(currentBlock);
+                    Block t = currentBlock;
+                    while (t.getLoop() != null && t.getDominator() != null && earliestBlock.dominates(t)) {
+                        Block dom = t.getDominator();
+                        if (dom.getLoopDepth() < currentBlock.getLoopDepth() && blockToKillMap.get(dom).get(locid) == upperBound && earliestBlock.dominates(dom)) {
+                            printIterations(iterations, "moved out of loop, from " + currentBlock + " to " + dom);
+                            previousBlock = currentBlock = dom;
+                            outOfLoop = true;
+                        }
+                        t = dom;
+                    }
+                }
+
+                if (!outOfLoop && previousBlock.getBeginNode() instanceof MergeNode) {
+                    // merges kill locations right at the beginning of a block. if a merge is the
+                    // killing node, we assign it to the dominating node.
+
                     MergeNode merge = (MergeNode) previousBlock.getBeginNode();
-                    if (getMergeToKillMap().get(merge).containsKey(locid)) {
-                        printIterations(iterations, "kill by merge");
+                    Node killer = getMergeToKillMap().get(merge).get(locid);
+
+                    if (killer != null && killer == merge) {
+                        // check if we violate earliest schedule condition
+                        if (isAtEarliest) {
+                            printIterations(iterations, "earliest bound in merge: " + earliestBlock);
+                            return earliestBlock;
+                        }
+                        printIterations(iterations, "kill by merge: " + currentBlock);
                         return currentBlock;
                     }
                 }
 
-                printIterations(iterations, "regular kill");
+                // current block matches last access, that means the previous (dominated) block
+                // kills the location, therefore schedule read to previous block.
+                printIterations(iterations, "regular kill: " + previousBlock);
                 return previousBlock;
             }
 
-            if (upperBoundBlock == currentBlock) { // reached upper bound
-                printIterations(iterations, "upper bound");
-                return currentBlock;
+            if (isAtEarliest) {
+                printIterations(iterations, "earliest bound: " + earliestBlock);
+                return earliestBlock;
             }
 
-            if (earliestBlock == currentBlock) { // reached earliest block
-                printIterations(iterations, "earliest block");
+            if (upperBoundBlock == currentBlock) {
+                printIterations(iterations, "upper bound: " + currentBlock + ", previous: " + previousBlock);
                 return currentBlock;
             }
 
@@ -544,10 +605,8 @@
         }
     }
 
-    private void printIterations(int iterations, String desc) {
-        if (dumpSchedule) {
-            TTY.print("\niterations: %d,  %s\n\n", iterations, desc);
-        }
+    private static void printIterations(int iterations, String desc) {
+        Debug.printf("iterations: %d,  %s\n", iterations, desc);
     }
 
     /**
@@ -578,7 +637,7 @@
             }
         }
 
-        assert cdbc.block == null || earliestBlock(node).dominates(cdbc.block) : "failed to find correct latest schedule for " + node;
+        assert cdbc.block == null || earliestBlock(node).dominates(cdbc.block) : "failed to find correct latest schedule for " + node + ". cdbc: " + cdbc.block + ", earliest: " + earliestBlock(node);
         return cdbc.block;
     }
 
@@ -750,8 +809,9 @@
 
     private void sortNodesWithinBlocks(StructuredGraph graph, SchedulingStrategy strategy) {
         NodeBitMap visited = graph.createNodeBitMap();
+        NodeBitMap beforeLastLocation = graph.createNodeBitMap();
         for (Block b : cfg.getBlocks()) {
-            sortNodesWithinBlock(b, visited, strategy);
+            sortNodesWithinBlock(b, visited, beforeLastLocation, strategy);
             assert noDuplicatedNodesInBlock(b) : "duplicated nodes in " + b;
         }
     }
@@ -762,7 +822,7 @@
         return list.size() == hashset.size();
     }
 
-    private void sortNodesWithinBlock(Block b, NodeBitMap visited, SchedulingStrategy strategy) {
+    private void sortNodesWithinBlock(Block b, NodeBitMap visited, NodeBitMap beforeLastLocation, SchedulingStrategy strategy) {
         if (visited.isMarked(b.getBeginNode()) || cfg.blockFor(b.getBeginNode()) != b) {
             throw new SchedulingError();
         }
@@ -777,15 +837,37 @@
                 break;
             case LATEST:
             case LATEST_OUT_OF_LOOPS:
-                sortedInstructions = sortNodesWithinBlockLatest(b, visited);
+                sortedInstructions = sortNodesWithinBlockLatest(b, visited, beforeLastLocation);
                 break;
             default:
                 throw new GraalInternalError("unknown scheduling strategy");
         }
+        assert filterSchedulableNodes(blockToNodesMap.get(b)).size() == removeProxies(sortedInstructions).size() : "sorted block does not contain the same amount of nodes: " +
+                        filterSchedulableNodes(blockToNodesMap.get(b)) + " vs. " + removeProxies(sortedInstructions);
         assert sameOrderForFixedNodes(blockToNodesMap.get(b), sortedInstructions) : "fixed nodes in sorted block are not in the same order";
         blockToNodesMap.put(b, sortedInstructions);
     }
 
+    private static List<ScheduledNode> removeProxies(List<ScheduledNode> list) {
+        List<ScheduledNode> result = new ArrayList<>();
+        for (ScheduledNode n : list) {
+            if (!(n instanceof ProxyNode)) {
+                result.add(n);
+            }
+        }
+        return result;
+    }
+
+    private static List<ScheduledNode> filterSchedulableNodes(List<ScheduledNode> list) {
+        List<ScheduledNode> result = new ArrayList<>();
+        for (ScheduledNode n : list) {
+            if (!(n instanceof LocalNode) && !(n instanceof PhiNode)) {
+                result.add(n);
+            }
+        }
+        return result;
+    }
+
     private static boolean sameOrderForFixedNodes(List<ScheduledNode> fixed, List<ScheduledNode> sorted) {
         Iterator<ScheduledNode> fixedIterator = fixed.iterator();
         Iterator<ScheduledNode> sortedIterator = sorted.iterator();
@@ -813,12 +895,10 @@
      * all inputs. This means that a node is added to the list after all its inputs have been
      * processed.
      */
-    private List<ScheduledNode> sortNodesWithinBlockLatest(Block b, NodeBitMap visited) {
+    private List<ScheduledNode> sortNodesWithinBlockLatest(Block b, NodeBitMap visited, NodeBitMap beforeLastLocation) {
         List<ScheduledNode> instructions = blockToNodesMap.get(b);
         List<ScheduledNode> sortedInstructions = new ArrayList<>(blockToNodesMap.get(b).size() + 2);
         List<FloatingReadNode> reads = new ArrayList<>();
-        // TODO: need bitmap for just within a block
-        NodeBitMap beforeLastLocation = cfg.graph.createNodeBitMap();
 
         if (memsched == MemoryScheduling.OPTIMAL) {
             /*
--- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/tiers/HighTierContext.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/tiers/HighTierContext.java	Mon Aug 05 11:25:14 2013 +0200
@@ -25,10 +25,31 @@
 import com.oracle.graal.api.code.*;
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.nodes.spi.*;
+import com.oracle.graal.phases.*;
 
 public class HighTierContext extends PhaseContext {
 
-    public HighTierContext(MetaAccessProvider runtime, Assumptions assumptions, Replacements replacements) {
+    private final PhasePlan plan;
+
+    private final GraphCache cache;
+    private final OptimisticOptimizations optimisticOpts;
+
+    public HighTierContext(MetaAccessProvider runtime, Assumptions assumptions, Replacements replacements, GraphCache cache, PhasePlan plan, OptimisticOptimizations optimisticOpts) {
         super(runtime, assumptions, replacements);
+        this.plan = plan;
+        this.cache = cache;
+        this.optimisticOpts = optimisticOpts;
+    }
+
+    public PhasePlan getPhasePlan() {
+        return plan;
+    }
+
+    public GraphCache getGraphCache() {
+        return cache;
+    }
+
+    public OptimisticOptimizations getOptimisticOptimizations() {
+        return optimisticOpts;
     }
 }
--- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/verify/VerifyUsageWithEquals.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/verify/VerifyUsageWithEquals.java	Mon Aug 05 11:25:14 2013 +0200
@@ -27,23 +27,22 @@
 import com.oracle.graal.nodes.calc.*;
 import com.oracle.graal.nodes.type.*;
 import com.oracle.graal.phases.*;
+import com.oracle.graal.phases.tiers.*;
 
 /**
  * For certain types object identity should not be used for object equality check. This phase checks
  * the correct usage of the given type. Equality checks with == or != (except null checks) results
  * in an {@link AssertionError}.
  */
-public class VerifyUsageWithEquals extends VerifyPhase {
+public class VerifyUsageWithEquals extends VerifyPhase<PhaseContext> {
 
-    private MetaAccessProvider runtime;
-    private Class<?> klass;
+    private final Class<?> klass;
 
-    public VerifyUsageWithEquals(MetaAccessProvider runtime, Class<?> klass) {
-        this.runtime = runtime;
+    public VerifyUsageWithEquals(Class<?> klass) {
         this.klass = klass;
     }
 
-    private boolean isAssignableType(ValueNode node) {
+    private boolean isAssignableType(ValueNode node, MetaAccessProvider runtime) {
         if (node.stamp() instanceof ObjectStamp) {
             ResolvedJavaType valueType = runtime.lookupJavaType(klass);
             ResolvedJavaType nodeType = node.objectStamp().type();
@@ -59,8 +58,8 @@
         return node.isConstant() && node.asConstant().isNull();
     }
 
-    private boolean checkUsage(ValueNode x, ValueNode y) {
-        return isAssignableType(x) && !isNullConstant(y);
+    private boolean checkUsage(ValueNode x, ValueNode y, MetaAccessProvider runtime) {
+        return isAssignableType(x, runtime) && !isNullConstant(y);
     }
 
     private static boolean isEqualsMethod(StructuredGraph graph) {
@@ -69,12 +68,12 @@
     }
 
     @Override
-    protected boolean verify(StructuredGraph graph) {
+    protected boolean verify(StructuredGraph graph, PhaseContext context) {
         for (ObjectEqualsNode cn : graph.getNodes().filter(ObjectEqualsNode.class)) {
             if (!isEqualsMethod(graph)) {
                 // bail out if we compare an object of type klass with == or != (except null checks)
-                assert !(checkUsage(cn.x(), cn.y()) && checkUsage(cn.y(), cn.x())) : "Verifcation of " + klass.getName() + " usage failed: Comparing " + cn.x() + " and" + cn.y() + " in " +
-                                graph.method() + " must use .equals() for object equality, not '==' or '!='";
+                assert !(checkUsage(cn.x(), cn.y(), context.getRuntime()) && checkUsage(cn.y(), cn.x(), context.getRuntime())) : "Verifcation of " + klass.getName() + " usage failed: Comparing " +
+                                cn.x() + " and" + cn.y() + " in " + graph.method() + " must use .equals() for object equality, not '==' or '!='";
             }
         }
         return true;
--- a/graal/com.oracle.graal.replacements.test/src/com/oracle/graal/replacements/test/CompiledExceptionHandlerTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.replacements.test/src/com/oracle/graal/replacements/test/CompiledExceptionHandlerTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -26,11 +26,9 @@
 
 import org.junit.*;
 
-import com.oracle.graal.api.meta.*;
 import com.oracle.graal.compiler.test.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.nodes.java.*;
-import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.common.*;
 
 /**
@@ -38,9 +36,8 @@
  */
 public class CompiledExceptionHandlerTest extends GraalCompilerTest {
 
-    @Override
-    protected void editPhasePlan(ResolvedJavaMethod method, StructuredGraph graph, PhasePlan phasePlan) {
-        phasePlan.disablePhase(InliningPhase.class);
+    public CompiledExceptionHandlerTest() {
+        suites.getHighTier().findPhase(AbstractInliningPhase.class).remove();
     }
 
     @Override
--- a/graal/com.oracle.graal.replacements.test/src/com/oracle/graal/replacements/test/DeoptimizeOnExceptionTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.replacements.test/src/com/oracle/graal/replacements/test/DeoptimizeOnExceptionTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -24,10 +24,7 @@
 
 import org.junit.*;
 
-import com.oracle.graal.api.meta.*;
 import com.oracle.graal.compiler.test.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.common.*;
 
 /**
@@ -35,9 +32,8 @@
  */
 public class DeoptimizeOnExceptionTest extends GraalCompilerTest {
 
-    @Override
-    protected void editPhasePlan(ResolvedJavaMethod method, StructuredGraph graph, PhasePlan phasePlan) {
-        phasePlan.disablePhase(InliningPhase.class);
+    public DeoptimizeOnExceptionTest() {
+        suites.getHighTier().findPhase(AbstractInliningPhase.class).remove();
     }
 
     private static void raiseException(String m1, String m2, String m3, String m4, String m5) {
--- a/graal/com.oracle.graal.replacements.test/src/com/oracle/graal/replacements/test/InstanceOfTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.replacements.test/src/com/oracle/graal/replacements/test/InstanceOfTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -30,7 +30,6 @@
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.nodes.java.*;
-import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.common.*;
 import com.oracle.graal.replacements.test.CheckCastTest.Depth12;
 import com.oracle.graal.replacements.test.CheckCastTest.Depth13;
@@ -42,9 +41,8 @@
  */
 public class InstanceOfTest extends TypeCheckTest {
 
-    @Override
-    protected void editPhasePlan(ResolvedJavaMethod method, StructuredGraph graph, PhasePlan phasePlan) {
-        phasePlan.disablePhase(InliningPhase.class);
+    public InstanceOfTest() {
+        suites.getHighTier().findPhase(AbstractInliningPhase.class).remove();
     }
 
     @Override
--- a/graal/com.oracle.graal.replacements.test/src/com/oracle/graal/replacements/test/InvokeTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.replacements.test/src/com/oracle/graal/replacements/test/InvokeTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -24,10 +24,7 @@
 
 import org.junit.*;
 
-import com.oracle.graal.api.meta.*;
 import com.oracle.graal.compiler.test.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.common.*;
 
 /**
@@ -35,9 +32,8 @@
  */
 public class InvokeTest extends GraalCompilerTest {
 
-    @Override
-    protected void editPhasePlan(ResolvedJavaMethod method, StructuredGraph graph, PhasePlan phasePlan) {
-        phasePlan.disablePhase(InliningPhase.class);
+    public InvokeTest() {
+        suites.getHighTier().findPhase(AbstractInliningPhase.class).remove();
     }
 
     public interface I {
--- a/graal/com.oracle.graal.replacements.test/src/com/oracle/graal/replacements/test/MethodSubstitutionTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.replacements.test/src/com/oracle/graal/replacements/test/MethodSubstitutionTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -34,6 +34,7 @@
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.common.*;
+import com.oracle.graal.phases.tiers.*;
 
 /**
  * Tests if {@link MethodSubstitution}s are inlined correctly. Most test cases only assert that
@@ -50,10 +51,11 @@
                 StructuredGraph graph = parse(snippet);
                 PhasePlan phasePlan = getDefaultPhasePlan();
                 Assumptions assumptions = new Assumptions(true);
+                HighTierContext context = new HighTierContext(runtime(), assumptions, replacements, null, phasePlan, OptimisticOptimizations.ALL);
                 Debug.dump(graph, "Graph");
-                new InliningPhase(runtime(), null, replacements, assumptions, null, phasePlan, OptimisticOptimizations.ALL).apply(graph);
+                new InliningPhase().apply(graph, context);
                 Debug.dump(graph, "Graph");
-                new CanonicalizerPhase.Instance(runtime(), assumptions, true).apply(graph);
+                new CanonicalizerPhase(true).apply(graph, context);
                 new DeadCodeEliminationPhase().apply(graph);
 
                 assertNotInGraph(graph, Invoke.class);
--- a/graal/com.oracle.graal.truffle.test/src/com/oracle/graal/truffle/test/PartialEvaluationTest.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.truffle.test/src/com/oracle/graal/truffle/test/PartialEvaluationTest.java	Mon Aug 05 11:25:14 2013 +0200
@@ -106,7 +106,7 @@
             public StructuredGraph call() {
                 StructuredGraph resultGraph = partialEvaluator.createGraph(compilable, assumptions);
                 CanonicalizerPhase canonicalizer = new CanonicalizerPhase(canonicalizeReads);
-                HighTierContext context = new HighTierContext(runtime, assumptions, replacements);
+                PhaseContext context = new PhaseContext(runtime, assumptions, replacements);
 
                 if (resultGraph.hasLoops()) {
                     boolean unrolled;
@@ -186,15 +186,16 @@
                 canonicalizerPhase.apply(graph);
                 new DeadCodeEliminationPhase().apply(graph);
 
-                InliningPhase inliningPhase = new InliningPhase(runtime, null, replacements, assumptions, null, plan, OptimisticOptimizations.NONE);
-                inliningPhase.apply(graph);
+                HighTierContext context = new HighTierContext(runtime, assumptions, replacements, null, plan, OptimisticOptimizations.NONE);
+                InliningPhase inliningPhase = new InliningPhase();
+                inliningPhase.apply(graph, context);
                 removeFrameStates(graph);
 
                 new ConvertDeoptimizeToGuardPhase().apply(graph);
                 canonicalizerPhase.apply(graph);
                 new DeadCodeEliminationPhase().apply(graph);
 
-                new LoweringPhase(LoweringType.BEFORE_GUARDS).apply(graph, new PhaseContext(runtime, assumptions, replacements));
+                new LoweringPhase(LoweringType.BEFORE_GUARDS).apply(graph, context);
                 canonicalizerPhase.apply(graph);
                 new DeadCodeEliminationPhase().apply(graph);
                 return graph;
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/PartialEvaluator.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/PartialEvaluator.java	Mon Aug 05 11:25:14 2013 +0200
@@ -160,9 +160,9 @@
                 canonicalizerPhase.apply(graph);
                 new DeadCodeEliminationPhase().apply(graph);
 
-                InliningPhase inliningPhase = new InliningPhase(metaAccessProvider, null, replacements, assumptions, cache, plan, OptimisticOptimizations.NONE);
-                inliningPhase.setCustomCanonicalizer(customCanonicalizer);
-                inliningPhase.apply(graph);
+                HighTierContext context = new HighTierContext(metaAccessProvider, assumptions, replacements, cache, plan, OptimisticOptimizations.NONE);
+                InliningPhase inliningPhase = new InliningPhase(customCanonicalizer);
+                inliningPhase.apply(graph, context);
 
                 // Convert deopt to guards.
                 new ConvertDeoptimizeToGuardPhase().apply(graph);
@@ -177,7 +177,7 @@
 
                 // EA frame and clean up.
                 new VerifyFrameDoesNotEscapePhase().apply(graph, false);
-                new PartialEscapePhase(false, new CanonicalizerPhase(!AOTCompilation.getValue())).apply(graph, new HighTierContext(metaAccessProvider, assumptions, replacements));
+                new PartialEscapePhase(false, new CanonicalizerPhase(!AOTCompilation.getValue())).apply(graph, new PhaseContext(metaAccessProvider, assumptions, replacements));
                 new VerifyNoIntrinsicsLeftPhase().apply(graph, false);
                 for (MaterializeFrameNode materializeNode : graph.getNodes(MaterializeFrameNode.class).snapshot()) {
                     materializeNode.replaceAtUsages(materializeNode.getFrame());
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/TruffleCache.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/TruffleCache.java	Mon Aug 05 11:25:14 2013 +0200
@@ -110,7 +110,7 @@
 
                     optimizeGraph(newGraph, tmpAssumptions);
 
-                    HighTierContext context = new HighTierContext(metaAccessProvider, tmpAssumptions, replacements);
+                    PhaseContext context = new PhaseContext(metaAccessProvider, tmpAssumptions, replacements);
                     PartialEscapePhase partialEscapePhase = new PartialEscapePhase(false, new CanonicalizerPhase(true));
                     partialEscapePhase.apply(newGraph, context);
 
@@ -155,7 +155,7 @@
         CanonicalizerPhase.Instance canonicalizerPhase = new CanonicalizerPhase.Instance(metaAccessProvider, assumptions, !AOTCompilation.getValue(), null, null);
 
         EarlyReadEliminationPhase earlyRead = new EarlyReadEliminationPhase(new CanonicalizerPhase(true));
-        HighTierContext context = new HighTierContext(metaAccessProvider, assumptions, replacements);
+        PhaseContext context = new PhaseContext(metaAccessProvider, assumptions, replacements);
         Integer maxNodes = TruffleCompilerOptions.TruffleOperationCacheMaxNodes.getValue();
 
         contractGraph(newGraph, eliminate, convertDeoptimizeToGuardPhase, canonicalizerPhase, earlyRead, context);
@@ -180,7 +180,7 @@
     }
 
     private static void contractGraph(StructuredGraph newGraph, ConditionalEliminationPhase eliminate, ConvertDeoptimizeToGuardPhase convertDeoptimizeToGuardPhase,
-                    CanonicalizerPhase.Instance canonicalizerPhase, EarlyReadEliminationPhase earlyRead, HighTierContext context) {
+                    CanonicalizerPhase.Instance canonicalizerPhase, EarlyReadEliminationPhase earlyRead, PhaseContext context) {
         // Canonicalize / constant propagate.
         canonicalizerPhase.apply(newGraph);
 
--- a/graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/IterativeInliningPhase.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/IterativeInliningPhase.java	Mon Aug 05 11:25:14 2013 +0200
@@ -29,23 +29,14 @@
 
 import com.oracle.graal.debug.*;
 import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.spi.*;
-import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.common.*;
 import com.oracle.graal.phases.tiers.*;
 
-public class IterativeInliningPhase extends BasePhase<HighTierContext> {
+public class IterativeInliningPhase extends AbstractInliningPhase {
 
-    private final PhasePlan plan;
-
-    private final GraphCache cache;
-    private final OptimisticOptimizations optimisticOpts;
     private final CanonicalizerPhase canonicalizer;
 
-    public IterativeInliningPhase(GraphCache cache, PhasePlan plan, OptimisticOptimizations optimisticOpts, CanonicalizerPhase canonicalizer) {
-        this.cache = cache;
-        this.plan = plan;
-        this.optimisticOpts = optimisticOpts;
+    public IterativeInliningPhase(CanonicalizerPhase canonicalizer) {
         this.canonicalizer = canonicalizer;
     }
 
@@ -75,9 +66,9 @@
 
                     Map<Invoke, Double> hints = PEAInliningHints.getValue() ? PartialEscapePhase.getHints(graph) : null;
 
-                    InliningPhase inlining = new InliningPhase(context.getRuntime(), hints, context.getReplacements(), context.getAssumptions(), cache, plan, optimisticOpts);
+                    InliningPhase inlining = new InliningPhase(hints);
                     inlining.setMaxMethodsPerInlining(simple ? 1 : Integer.MAX_VALUE);
-                    inlining.apply(graph);
+                    inlining.apply(graph, context);
                     progress |= inlining.getInliningCount() > 0;
 
                     new DeadCodeEliminationPhase().apply(graph);
--- a/graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/PEReadEliminationClosure.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/PEReadEliminationClosure.java	Mon Aug 05 11:25:14 2013 +0200
@@ -55,27 +55,35 @@
         if (!deleted) {
             if (node instanceof LoadFieldNode) {
                 LoadFieldNode load = (LoadFieldNode) node;
-                ValueNode object = GraphUtil.unproxify(load.object());
-                ValueNode cachedValue = state.getReadCache(object, load.field());
-                if (cachedValue != null) {
-                    effects.replaceAtUsages(load, cachedValue);
-                    state.addScalarAlias(load, cachedValue);
-                    deleted = true;
+                if (!load.isVolatile()) {
+                    ValueNode object = GraphUtil.unproxify(load.object());
+                    ValueNode cachedValue = state.getReadCache(object, load.field());
+                    if (cachedValue != null) {
+                        effects.replaceAtUsages(load, cachedValue);
+                        state.addScalarAlias(load, cachedValue);
+                        deleted = true;
+                    } else {
+                        state.addReadCache(object, load.field(), load);
+                    }
                 } else {
-                    state.addReadCache(object, load.field(), load);
+                    processIdentity(state, ANY_LOCATION);
                 }
             } else if (node instanceof StoreFieldNode) {
                 StoreFieldNode store = (StoreFieldNode) node;
-                ValueNode object = GraphUtil.unproxify(store.object());
-                ValueNode cachedValue = state.getReadCache(object, store.field());
+                if (!store.isVolatile()) {
+                    ValueNode object = GraphUtil.unproxify(store.object());
+                    ValueNode cachedValue = state.getReadCache(object, store.field());
 
-                ValueNode value = state.getScalarAlias(store.value());
-                if (GraphUtil.unproxify(value) == GraphUtil.unproxify(cachedValue)) {
-                    effects.deleteFixedNode(store);
-                    deleted = true;
+                    ValueNode value = state.getScalarAlias(store.value());
+                    if (GraphUtil.unproxify(value) == GraphUtil.unproxify(cachedValue)) {
+                        effects.deleteFixedNode(store);
+                        deleted = true;
+                    }
+                    state.killReadCache(store.field());
+                    state.addReadCache(object, store.field(), value);
+                } else {
+                    processIdentity(state, ANY_LOCATION);
                 }
-                state.killReadCache(store.field());
-                state.addReadCache(object, store.field(), value);
             } else if (node instanceof MemoryCheckpoint.Single) {
                 METRIC_MEMORYCHECKOINT.increment();
                 LocationIdentity identity = ((MemoryCheckpoint.Single) node).getLocationIdentity();
--- a/graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/ReadEliminationClosure.java	Thu Aug 01 00:57:27 2013 +0200
+++ b/graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/ReadEliminationClosure.java	Mon Aug 05 11:25:14 2013 +0200
@@ -55,29 +55,37 @@
         boolean deleted = false;
         if (node instanceof LoadFieldNode) {
             LoadFieldNode load = (LoadFieldNode) node;
-            ValueNode object = GraphUtil.unproxify(load.object());
-            LoadCacheEntry identifier = new LoadCacheEntry(object, load.field());
-            ValueNode cachedValue = state.getCacheEntry(identifier);
-            if (cachedValue != null) {
-                effects.replaceAtUsages(load, cachedValue);
-                state.addScalarAlias(load, cachedValue);
-                deleted = true;
+            if (!load.isVolatile()) {
+                ValueNode object = GraphUtil.unproxify(load.object());
+                LoadCacheEntry identifier = new LoadCacheEntry(object, load.field());
+                ValueNode cachedValue = state.getCacheEntry(identifier);
+                if (cachedValue != null) {
+                    effects.replaceAtUsages(load, cachedValue);
+                    state.addScalarAlias(load, cachedValue);
+                    deleted = true;
+                } else {
+                    state.addCacheEntry(identifier, load);
+                }
             } else {
-                state.addCacheEntry(identifier, load);
+                processIdentity(state, ANY_LOCATION);
             }
         } else if (node instanceof StoreFieldNode) {
             StoreFieldNode store = (StoreFieldNode) node;
-            ValueNode object = GraphUtil.unproxify(store.object());
-            LoadCacheEntry identifier = new LoadCacheEntry(object, store.field());
-            ValueNode cachedValue = state.getCacheEntry(identifier);
+            if (!store.isVolatile()) {
+                ValueNode object = GraphUtil.unproxify(store.object());
+                LoadCacheEntry identifier = new LoadCacheEntry(object, store.field());
+                ValueNode cachedValue = state.getCacheEntry(identifier);
 
-            ValueNode value = state.getScalarAlias(store.value());
-            if (GraphUtil.unproxify(value) == GraphUtil.unproxify(cachedValue)) {
-                effects.deleteFixedNode(store);
-                deleted = true;
+                ValueNode value = state.getScalarAlias(store.value());
+                if (GraphUtil.unproxify(value) == GraphUtil.unproxify(cachedValue)) {
+                    effects.deleteFixedNode(store);
+                    deleted = true;
+                }
+                state.killReadCache((LocationIdentity) store.field());
+                state.addCacheEntry(identifier, value);
+            } else {
+                processIdentity(state, ANY_LOCATION);
             }
-            state.killReadCache((LocationIdentity) store.field());
-            state.addCacheEntry(identifier, value);
         } else if (node instanceof ReadNode) {
             ReadNode read = (ReadNode) node;
             if (read.location() instanceof ConstantLocationNode) {
@@ -104,13 +112,14 @@
                     effects.deleteFixedNode(write);
                     deleted = true;
                 }
-                state.killReadCache(write.location().getLocationIdentity());
+                processIdentity(state, write.location().getLocationIdentity());
                 state.addCacheEntry(identifier, value);
             } else {
-                state.killReadCache(write.location().getLocationIdentity());
+                processIdentity(state, write.location().getLocationIdentity());
             }
         } else if (node instanceof MemoryCheckpoint.Single) {
-            processIdentity(state, ((MemoryCheckpoint.Single) node).getLocationIdentity());
+            LocationIdentity identity = ((MemoryCheckpoint.Single) node).getLocationIdentity();
+            processIdentity(state, identity);
         } else if (node instanceof MemoryCheckpoint.Multi) {
             for (LocationIdentity identity : ((MemoryCheckpoint.Multi) node).getLocationIdentities()) {
                 processIdentity(state, identity);
--- a/mx/commands.py	Thu Aug 01 00:57:27 2013 +0200
+++ b/mx/commands.py	Mon Aug 05 11:25:14 2013 +0200
@@ -954,6 +954,11 @@
         t = Task('BootstrapWithGCVerification:product')
         vm(['-XX:+UnlockDiagnosticVMOptions', '-XX:+VerifyBeforeGC', '-XX:+VerifyAfterGC', '-version'])
         tasks.append(t.stop())
+    
+        _vmbuild = 'product'
+        t = Task('BootstrapWithG1GCVerification:product')
+        vm(['-XX:+UnlockDiagnosticVMOptions', '-XX:-UseSerialGC','-XX:+UseG1GC','-XX:+UseNewCode','-XX:+VerifyBeforeGC', '-XX:+VerifyAfterGC', '-version'])
+        tasks.append(t.stop())
 
         _vmbuild = 'product'
         t = Task('BootstrapWithRegisterPressure:product')
--- a/src/cpu/x86/vm/graalGlobals_x86.hpp	Thu Aug 01 00:57:27 2013 +0200
+++ b/src/cpu/x86/vm/graalGlobals_x86.hpp	Mon Aug 05 11:25:14 2013 +0200
@@ -31,8 +31,6 @@
 // Sets the default values for platform dependent flags used by the Graal compiler.
 // (see graalGlobals.hpp)
 
-define_pd_global(intx, GraalSafepointPollOffset,     256  );
-
 #if !defined(COMPILER1) && !defined(COMPILER2)
 define_pd_global(bool, BackgroundCompilation,        true );
 define_pd_global(bool, UseTLAB,                      true );
--- a/src/share/vm/code/codeCache.cpp	Thu Aug 01 00:57:27 2013 +0200
+++ b/src/share/vm/code/codeCache.cpp	Mon Aug 05 11:25:14 2013 +0200
@@ -304,15 +304,6 @@
   }
 }
 
-#ifdef GRAAL
-void CodeCache::alive_nmethods_do_graal_methods(OopClosure* closure) {
-  assert_locked_or_safepoint(CodeCache_lock);
-  FOR_ALL_ALIVE_NMETHODS(nm) {
-    nm->mark_graal_reference(closure);
-  }
-}
-#endif
-
 int CodeCache::alignment_unit() {
   return (int)_heap->alignment_unit();
 }
--- a/src/share/vm/code/codeCache.hpp	Thu Aug 01 00:57:27 2013 +0200
+++ b/src/share/vm/code/codeCache.hpp	Mon Aug 05 11:25:14 2013 +0200
@@ -85,12 +85,7 @@
   static void blobs_do(CodeBlobClosure* f);         // iterates over all CodeBlobs
   static void nmethods_do(void f(nmethod* nm));     // iterates over all nmethods
   static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods
-#ifdef GRAAL
-  //Special method iterating and marking all HotSpotNMethods which are weakly referenced by nmethods.
-  //This has to be done since the HotSpotNMethods are only referenced from within the nmethods and the GC
-  //believes they are dead since they are not marked.
-  static void alive_nmethods_do_graal_methods(OopClosure* closure);
-#endif
+
   // Lookup
   static CodeBlob* find_blob(void* start);
   static nmethod*  find_nmethod(void* start);
--- a/src/share/vm/code/nmethod.cpp	Thu Aug 01 00:57:27 2013 +0200
+++ b/src/share/vm/code/nmethod.cpp	Mon Aug 05 11:25:14 2013 +0200
@@ -1856,14 +1856,6 @@
 #endif
 }
 
-#ifdef GRAAL
-void nmethod::mark_graal_reference(OopClosure* f) {
-  if (_graal_installed_code != NULL) {
-    f->do_oop((oop*) &_graal_installed_code);
-  }
-}
-#endif
-
 // Iterate over metadata calling this function.   Used by RedefineClasses
 void nmethod::metadata_do(void f(Metadata*)) {
   address low_boundary = verified_entry_point();
--- a/src/share/vm/code/nmethod.hpp	Thu Aug 01 00:57:27 2013 +0200
+++ b/src/share/vm/code/nmethod.hpp	Mon Aug 05 11:25:14 2013 +0200
@@ -747,10 +747,6 @@
     nm->metadata_do(Metadata::mark_on_stack);
   }
   void metadata_do(void f(Metadata*));
-
-#ifdef GRAAL
-  void mark_graal_reference(OopClosure* f);
-#endif
 };
 
 // Locks an nmethod so its code will not get removed and it will not
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Aug 01 00:57:27 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Aug 05 11:25:14 2013 +0200
@@ -5119,12 +5119,20 @@
 
   // Walk the code cache w/o buffering, because StarTask cannot handle
   // unaligned oop locations.
-  G1FilteredCodeBlobToOopClosure eager_scan_code_roots(this, scan_non_heap_roots);
+  G1FilteredCodeBlobToOopClosure eager_scan_cs_code_roots(this, scan_non_heap_roots);
+
+  // Scan all code roots from stack
+  CodeBlobToOopClosure eager_scan_all_code_roots(scan_non_heap_roots, true);
+  CodeBlobToOopClosure* blobs = &eager_scan_cs_code_roots;
+  if (UseNewCode && g1_policy()->during_initial_mark_pause()) {
+    // during initial-mark we need to take care to follow all code roots
+    blobs = &eager_scan_all_code_roots;
+  }
 
   process_strong_roots(false, // no scoping; this is parallel code
                        is_scavenging, so,
                        &buf_scan_non_heap_roots,
-                       &eager_scan_code_roots,
+                       blobs,
                        scan_klasses
                        );
 
--- a/src/share/vm/graal/graalCompilerToVM.cpp	Thu Aug 01 00:57:27 2013 +0200
+++ b/src/share/vm/graal/graalCompilerToVM.cpp	Mon Aug 05 11:25:14 2013 +0200
@@ -702,6 +702,7 @@
   set_int("klassHasFinalizerFlag", JVM_ACC_HAS_FINALIZER);
   set_int("threadExceptionOopOffset", in_bytes(JavaThread::exception_oop_offset()));
   set_int("threadExceptionPcOffset", in_bytes(JavaThread::exception_pc_offset()));
+  set_long("safepointPollingAddress", (jlong)(os::get_polling_page()));
 #ifdef TARGET_ARCH_x86
   set_boolean("isPollingPageFar", Assembler::is_polling_page_far());
   set_int("runtimeCallStackSize", (jint)frame::arg_reg_save_area_bytes);
@@ -858,6 +859,7 @@
   set_address("writeBarrierPreAddress", GraalRuntime::write_barrier_pre);
   set_address("writeBarrierPostAddress", GraalRuntime::write_barrier_post);
   set_address("gcTotalCollectionsAddress", (jlong)(address)(Universe::heap()->total_collections_address()));
+  set_address("validateObject", GraalRuntime::validate_object);
 
   BarrierSet* bs = Universe::heap()->barrier_set();
   switch (bs->kind()) {
--- a/src/share/vm/graal/graalRuntime.cpp	Thu Aug 01 00:57:27 2013 +0200
+++ b/src/share/vm/graal/graalRuntime.cpp	Mon Aug 05 11:25:14 2013 +0200
@@ -372,6 +372,21 @@
   thread->dirty_card_queue().enqueue(card_addr);
 JRT_END
 
+JRT_LEAF(jboolean, GraalRuntime::validate_object(JavaThread* thread,oopDesc* parent, oopDesc* child))
+  bool ret = true;
+  if(!Universe::heap()->is_in_closed_subset(parent)) {
+    tty->print_cr("Parent Object "INTPTR_FORMAT" not in heap", parent);
+    parent->print();
+    ret=false;
+  }
+  if(!Universe::heap()->is_in_closed_subset(child)) {
+    tty->print_cr("Child Object "INTPTR_FORMAT" not in heap", child);
+    child->print();
+    ret=false;
+  }
+  return (jint)ret;
+JRT_END
+
 JRT_ENTRY(void, GraalRuntime::vm_error(JavaThread* thread, oop where, oop format, jlong value))
   ResourceMark rm;
   assert(where == NULL || java_lang_String::is_instance(where), "must be");
--- a/src/share/vm/graal/graalRuntime.hpp	Thu Aug 01 00:57:27 2013 +0200
+++ b/src/share/vm/graal/graalRuntime.hpp	Mon Aug 05 11:25:14 2013 +0200
@@ -55,6 +55,7 @@
   static void log_object(JavaThread* thread, oop msg, jint flags);
   static void write_barrier_pre(JavaThread* thread, oopDesc* obj);
   static void write_barrier_post(JavaThread* thread, void* card);
+  static jboolean validate_object(JavaThread* thread, oopDesc* parent, oopDesc* child);
 };
 
 #endif // SHARE_VM_GRAAL_GRAAL_RUNTIME_HPP
--- a/src/share/vm/memory/referenceProcessor.cpp	Thu Aug 01 00:57:27 2013 +0200
+++ b/src/share/vm/memory/referenceProcessor.cpp	Mon Aug 05 11:25:14 2013 +0200
@@ -34,10 +34,6 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/jniHandles.hpp"
-#ifdef GRAAL
-#include "code/codeCache.hpp"
-#include "code/nmethod.hpp"
-#endif
 
 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
@@ -267,9 +263,6 @@
       task_executor->set_single_threaded_mode();
     }
     process_phaseJNI(is_alive, keep_alive, complete_gc);
-#ifdef GRAAL
-    process_phaseGraalNMethods(keep_alive, complete_gc);
-#endif
   }
 
   return ReferenceProcessorStats(soft_count, weak_count, final_count, phantom_count);
@@ -312,15 +305,6 @@
   complete_gc->do_void();
 }
 
-#ifdef GRAAL
-void ReferenceProcessor::process_phaseGraalNMethods(OopClosure*  keep_alive,
-                                                    VoidClosure* complete_gc) {
-  CodeCache::alive_nmethods_do_graal_methods(keep_alive);
-  complete_gc->do_void();
-}
-
-#endif
-
 template <class T>
 bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
                                    AbstractRefProcTaskExecutor* task_executor) {
--- a/src/share/vm/memory/referenceProcessor.hpp	Thu Aug 01 00:57:27 2013 +0200
+++ b/src/share/vm/memory/referenceProcessor.hpp	Mon Aug 05 11:25:14 2013 +0200
@@ -301,10 +301,6 @@
   void process_phaseJNI(BoolObjectClosure* is_alive,
                         OopClosure*        keep_alive,
                         VoidClosure*       complete_gc);
-#ifdef GRAAL
-  void process_phaseGraalNMethods(OopClosure*        keep_alive,
-                        VoidClosure*       complete_gc);
-#endif
 
   // Work methods used by the method process_discovered_reflist
   // Phase1: keep alive all those referents that are otherwise