changeset 11869:18824519c172

Merge.
author Chris Seaton <chris.seaton@oracle.com>
date Wed, 02 Oct 2013 15:37:06 +0100
parents 44257a9160f1 (current diff) 2111551cfb34 (diff)
children acfff1de2aa7
diffstat 6 files changed, 207 insertions(+), 73 deletions(-)
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/MemoryScheduleTest.java	Wed Oct 02 14:46:30 2013 +0100
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/MemoryScheduleTest.java	Wed Oct 02 15:37:06 2013 +0100
@@ -94,7 +94,7 @@
     /**
      * In this case the read should be scheduled in the first block.
      */
-    public static int testSplitSnippet1(int a) {
+    public static int testSplit1Snippet(int a) {
         try {
             return container.a;
         } finally {
@@ -109,7 +109,7 @@
     @Test
     public void testSplit1() {
         for (TestMode mode : TestMode.values()) {
-            SchedulePhase schedule = getFinalSchedule("testSplitSnippet1", mode, MemoryScheduling.OPTIMAL);
+            SchedulePhase schedule = getFinalSchedule("testSplit1Snippet", mode, MemoryScheduling.OPTIMAL);
             assertReadWithinStartBlock(schedule, true);
             assertReadWithinReturnBlock(schedule, false);
         }
@@ -209,6 +209,42 @@
         assertReadWithinReturnBlock(schedule, false);
     }
 
+    public String testStringReplaceSnippet(String input) {
+        return input.replace('a', 'b');
+    }
+
+    @Test
+    public void testStringReplace() {
+        getFinalSchedule("testStringReplaceSnippet", TestMode.INLINED_WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL);
+        test("testStringReplaceSnippet", "acbaaa");
+    }
+
+    /**
+     * Here the read should float out of the loop.
+     */
+    public static int testLoop5Snippet(int a, int b, MemoryScheduleTest obj) {
+        int ret = 0;
+        int bb = b;
+        for (int i = 0; i < a; i++) {
+            ret = obj.hash;
+            if (a > 10) {
+                bb++;
+            } else {
+                bb--;
+            }
+            ret = ret / 10;
+        }
+        return ret + bb;
+    }
+
+    @Test
+    public void testLoop5() {
+        SchedulePhase schedule = getFinalSchedule("testLoop5Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL);
+        assertEquals(7, schedule.getCFG().getBlocks().length);
+        assertReadWithinStartBlock(schedule, false);
+        assertReadWithinReturnBlock(schedule, false);
+    }
+
     /**
      * Here the read should float to the end (into the same block as the return).
      */
@@ -312,6 +348,25 @@
     }
 
     /**
+     * Here the read should float to the end.
+     */
+    public static int testIfRead5Snippet(int a) {
+        if (a < 0) {
+            container.a = 10;
+        }
+        return container.a;
+    }
+
+    @Test
+    public void testIfRead5() {
+        SchedulePhase schedule = getFinalSchedule("testIfRead5Snippet", TestMode.WITHOUT_FRAMESTATES, MemoryScheduling.OPTIMAL);
+        assertEquals(4, schedule.getCFG().getBlocks().length);
+        assertReadWithinStartBlock(schedule, false);
+        assertReadWithinReturnBlock(schedule, true);
+        assertReadAndWriteInSameBlock(schedule, false);
+    }
+
+    /**
      * testing scheduling within a block.
      */
     public static int testBlockScheduleSnippet() {
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java	Wed Oct 02 14:46:30 2013 +0100
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java	Wed Oct 02 15:37:06 2013 +0100
@@ -556,7 +556,7 @@
             ValueNode object = loadField.isStatic() ? ConstantNode.forObject(field.getDeclaringClass().mirror(), this, graph) : loadField.object();
             assert loadField.kind() != Kind.Illegal;
             BarrierType barrierType = getFieldLoadBarrierType(field);
-            ReadNode memoryRead = graph.add(new ReadNode(object, createFieldLocation(graph, field), loadField.stamp(), barrierType, (loadField.kind() == Kind.Object)));
+            ReadNode memoryRead = graph.add(new ReadNode(object, createFieldLocation(graph, field, false), loadField.stamp(), barrierType, (loadField.kind() == Kind.Object)));
             graph.replaceFixedWithFixed(loadField, memoryRead);
             tool.createNullCheckGuard(memoryRead, object);
 
@@ -571,7 +571,7 @@
             HotSpotResolvedJavaField field = (HotSpotResolvedJavaField) storeField.field();
             ValueNode object = storeField.isStatic() ? ConstantNode.forObject(field.getDeclaringClass().mirror(), this, graph) : storeField.object();
             BarrierType barrierType = getFieldStoreBarrierType(storeField);
-            WriteNode memoryWrite = graph.add(new WriteNode(object, storeField.value(), createFieldLocation(graph, field), barrierType, storeField.field().getKind() == Kind.Object));
+            WriteNode memoryWrite = graph.add(new WriteNode(object, storeField.value(), createFieldLocation(graph, field, false), barrierType, storeField.field().getKind() == Kind.Object));
             tool.createNullCheckGuard(memoryWrite, object);
             memoryWrite.setStateAfter(storeField.stateAfter());
             graph.replaceFixedWithFixed(storeField, memoryWrite);
@@ -596,7 +596,7 @@
             LoadIndexedNode loadIndexed = (LoadIndexedNode) n;
             GuardingNode boundsCheck = createBoundsCheck(loadIndexed, tool);
             Kind elementKind = loadIndexed.elementKind();
-            LocationNode arrayLocation = createArrayLocation(graph, elementKind, loadIndexed.index());
+            LocationNode arrayLocation = createArrayLocation(graph, elementKind, loadIndexed.index(), false);
             ReadNode memoryRead = graph.add(new ReadNode(loadIndexed.array(), arrayLocation, loadIndexed.stamp(), BarrierType.NONE, elementKind == Kind.Object));
             memoryRead.setGuard(boundsCheck);
             graph.replaceFixedWithFixed(loadIndexed, memoryRead);
@@ -604,7 +604,7 @@
             StoreIndexedNode storeIndexed = (StoreIndexedNode) n;
             GuardingNode boundsCheck = createBoundsCheck(storeIndexed, tool);
             Kind elementKind = storeIndexed.elementKind();
-            LocationNode arrayLocation = createArrayLocation(graph, elementKind, storeIndexed.index());
+            LocationNode arrayLocation = createArrayLocation(graph, elementKind, storeIndexed.index(), false);
             ValueNode value = storeIndexed.value();
             ValueNode array = storeIndexed.array();
 
@@ -719,7 +719,7 @@
                                 value = allocations[commit.getVirtualObjects().indexOf(value)];
                             }
                             if (!(value.isConstant() && value.asConstant().isDefaultForKind())) {
-                                WriteNode write = new WriteNode(newObject, value, createFieldLocation(graph, (HotSpotResolvedJavaField) virtualInstance.field(i)),
+                                WriteNode write = new WriteNode(newObject, value, createFieldLocation(graph, (HotSpotResolvedJavaField) virtualInstance.field(i), true),
                                                 virtualInstance.field(i).getKind() == Kind.Object ? BarrierType.IMPRECISE : BarrierType.NONE, virtualInstance.field(i).getKind() == Kind.Object);
 
                                 graph.addBeforeFixed(commit, graph.add(write));
@@ -737,7 +737,7 @@
                                 value = allocations[indexOf];
                             }
                             if (!(value.isConstant() && value.asConstant().isDefaultForKind())) {
-                                WriteNode write = new WriteNode(newObject, value, createArrayLocation(graph, element.getKind(), ConstantNode.forInt(i, graph)),
+                                WriteNode write = new WriteNode(newObject, value, createArrayLocation(graph, element.getKind(), ConstantNode.forInt(i, graph), true),
                                                 value.kind() == Kind.Object ? BarrierType.PRECISE : BarrierType.NONE, value.kind() == Kind.Object);
                                 graph.addBeforeFixed(commit, graph.add(write));
                             }
@@ -943,8 +943,9 @@
         return barrierType;
     }
 
-    protected static ConstantLocationNode createFieldLocation(StructuredGraph graph, HotSpotResolvedJavaField field) {
-        return ConstantLocationNode.create(field, field.getKind(), field.offset(), graph);
+    protected static ConstantLocationNode createFieldLocation(StructuredGraph graph, HotSpotResolvedJavaField field, boolean initialization) {
+        LocationIdentity loc = initialization ? INIT_LOCATION : field;
+        return ConstantLocationNode.create(loc, field.getKind(), field.offset(), graph);
     }
 
     public int getScalingFactor(Kind kind) {
@@ -955,9 +956,10 @@
         }
     }
 
-    protected IndexedLocationNode createArrayLocation(Graph graph, Kind elementKind, ValueNode index) {
+    protected IndexedLocationNode createArrayLocation(Graph graph, Kind elementKind, ValueNode index, boolean initialization) {
+        LocationIdentity loc = initialization ? INIT_LOCATION : NamedLocationIdentity.getArrayLocation(elementKind);
         int scale = getScalingFactor(elementKind);
-        return IndexedLocationNode.create(NamedLocationIdentity.getArrayLocation(elementKind), elementKind, getArrayBaseOffset(elementKind), index, graph, scale);
+        return IndexedLocationNode.create(loc, elementKind, getArrayBaseOffset(elementKind), index, graph, scale);
     }
 
     @Override
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/InvokeWithExceptionNode.java	Wed Oct 02 14:46:30 2013 +0100
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/InvokeWithExceptionNode.java	Wed Oct 02 15:37:06 2013 +0100
@@ -121,7 +121,7 @@
     @Override
     public void setNext(FixedNode x) {
         if (x != null) {
-            this.setNext(AbstractBeginNode.begin(x));
+            this.setNext(KillingBeginNode.begin(x, getLocationIdentity()));
         } else {
             this.setNext(null);
         }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/KillingBeginNode.java	Wed Oct 02 15:37:06 2013 +0100
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.nodes;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.nodes.extended.*;
+
+public class KillingBeginNode extends AbstractBeginNode implements MemoryCheckpoint.Single {
+
+    private LocationIdentity locationIdentity;
+
+    public KillingBeginNode(LocationIdentity locationIdentity) {
+        this.locationIdentity = locationIdentity;
+    }
+
+    public static KillingBeginNode begin(FixedNode with, LocationIdentity locationIdentity) {
+        if (with instanceof KillingBeginNode) {
+            return (KillingBeginNode) with;
+        }
+        KillingBeginNode begin = with.graph().add(new KillingBeginNode(locationIdentity));
+        begin.setNext(with);
+        return begin;
+    }
+
+    @Override
+    public LocationIdentity getLocationIdentity() {
+        return locationIdentity;
+    }
+}
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/FloatingReadNode.java	Wed Oct 02 14:46:30 2013 +0100
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/FloatingReadNode.java	Wed Oct 02 15:37:06 2013 +0100
@@ -25,6 +25,7 @@
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.PhiNode.PhiType;
 import com.oracle.graal.nodes.spi.*;
 import com.oracle.graal.nodes.type.*;
 
@@ -73,4 +74,25 @@
     public Access asFixedNode() {
         return graph().add(new ReadNode(object(), nullCheckLocation(), stamp(), getGuard(), getBarrierType(), isCompressible()));
     }
+
+    private static boolean isMemoryCheckPoint(Node n) {
+        return n instanceof MemoryCheckpoint.Single || n instanceof MemoryCheckpoint.Multi;
+    }
+
+    private static boolean isMemoryPhi(Node n) {
+        return n instanceof PhiNode && ((PhiNode) n).type() == PhiType.Memory;
+    }
+
+    private static boolean isMemoryProxy(Node n) {
+        return n instanceof ProxyNode && ((ProxyNode) n).type() == PhiType.Memory;
+    }
+
+    @Override
+    public boolean verify() {
+        Node lla = lastLocationAccess();
+        if (lla != null && !(isMemoryCheckPoint(lla) || isMemoryPhi(lla) || isMemoryProxy(lla))) {
+            assert false : "lastLocationAccess of " + this + " should be a MemoryCheckpoint, but is " + lla;
+        }
+        return super.verify();
+    }
 }
--- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/schedule/SchedulePhase.java	Wed Oct 02 14:46:30 2013 +0100
+++ b/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/schedule/SchedulePhase.java	Wed Oct 02 15:37:06 2013 +0100
@@ -33,6 +33,7 @@
 import com.oracle.graal.graph.*;
 import com.oracle.graal.graph.Node.Verbosity;
 import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.PhiNode.PhiType;
 import com.oracle.graal.nodes.calc.*;
 import com.oracle.graal.nodes.cfg.*;
 import com.oracle.graal.nodes.extended.*;
@@ -99,7 +100,7 @@
 
         @Override
         protected HashSet<FloatingReadNode> processBlock(Block block, HashSet<FloatingReadNode> currentState) {
-            for (Node node : getBlockToNodesMap().get(block)) {
+            for (Node node : blockToNodesMap.get(block)) {
                 if (node instanceof FloatingReadNode) {
                     currentState.add((FloatingReadNode) node);
                 } else if (node instanceof MemoryCheckpoint.Single) {
@@ -183,37 +184,49 @@
 
         @Override
         protected Map<LocationIdentity, Node> processBlock(Block block, Map<LocationIdentity, Node> currentState) {
-            Map<LocationIdentity, Node> initKillMap = getBlockToKillMap().get(block);
-            initKillMap.putAll(currentState);
+
+            if (block.getBeginNode() instanceof MergeNode) {
+                MergeNode mergeNode = (MergeNode) block.getBeginNode();
+                for (PhiNode phi : mergeNode.usages().filter(PhiNode.class)) {
+                    if (phi.type() == PhiType.Memory) {
+                        LocationIdentity identity = (LocationIdentity) phi.getIdentity();
+                        locationKilledBy(identity, phi, currentState);
+                    }
+                }
+            }
+            currentState.putAll(blockToKillMapInit.get(block));
 
             for (Node node : block.getNodes()) {
                 if (node instanceof MemoryCheckpoint.Single) {
                     LocationIdentity identity = ((MemoryCheckpoint.Single) node).getLocationIdentity();
-                    initKillMap.put(identity, node);
+                    locationKilledBy(identity, node, currentState);
                 } else if (node instanceof MemoryCheckpoint.Multi) {
                     for (LocationIdentity identity : ((MemoryCheckpoint.Multi) node).getLocationIdentities()) {
-                        initKillMap.put(identity, node);
+                        locationKilledBy(identity, node, currentState);
                     }
                 }
                 assert MemoryCheckpoint.TypeAssertion.correctType(node);
             }
 
-            return cloneState(initKillMap);
+            blockToKillMap.put(block, currentState);
+            return cloneState(currentState);
+        }
+
+        private void locationKilledBy(LocationIdentity identity, Node checkpoint, Map<LocationIdentity, Node> state) {
+            state.put(identity, checkpoint);
+            if (identity == ANY_LOCATION) {
+                for (LocationIdentity locid : state.keySet()) {
+                    state.put(locid, checkpoint);
+                }
+            }
         }
 
         @Override
         protected Map<LocationIdentity, Node> merge(Block merge, List<Map<LocationIdentity, Node>> states) {
-            return merge(merge, states, false);
-        }
-
-        protected Map<LocationIdentity, Node> merge(Block merge, List<Map<LocationIdentity, Node>> states, boolean loopbegin) {
             assert merge.getBeginNode() instanceof MergeNode;
             MergeNode mergeNode = (MergeNode) merge.getBeginNode();
 
             Map<LocationIdentity, Node> initKillMap = new HashMap<>();
-            if (loopbegin) {
-                initKillMap.putAll(getBlockToKillMap().get(merge));
-            }
             for (Map<LocationIdentity, Node> state : states) {
                 for (LocationIdentity locid : state.keySet()) {
                     if (initKillMap.containsKey(locid)) {
@@ -226,10 +239,7 @@
                 }
             }
 
-            getMergeToKillMap().set(mergeNode, cloneState(initKillMap));
-            if (!loopbegin) {
-                initKillMap.putAll(getBlockToKillMap().get(merge));
-            }
+            mergeToKillMap.set(mergeNode, cloneState(initKillMap));
             return initKillMap;
         }
 
@@ -240,18 +250,27 @@
 
         @Override
         protected List<Map<LocationIdentity, Node>> processLoop(Loop loop, Map<LocationIdentity, Node> state) {
-            LoopInfo<Map<LocationIdentity, Node>> info = ReentrantBlockIterator.processLoop(this, loop, new HashMap<>(state));
+            LoopInfo<Map<LocationIdentity, Node>> info = ReentrantBlockIterator.processLoop(this, loop, cloneState(state));
 
             assert loop.header.getBeginNode() instanceof LoopBeginNode;
-            Map<LocationIdentity, Node> headerState = merge(loop.header, info.endStates, true);
-            getBlockToKillMap().put(loop.header, headerState);
+            Map<LocationIdentity, Node> headerState = merge(loop.header, info.endStates);
+            // second iteration, for computing information at loop exits
+            info = ReentrantBlockIterator.processLoop(this, loop, cloneState(headerState));
+
+            int i = 0;
+            for (Block exit : loop.exits) {
+                Map<LocationIdentity, Node> exitState = info.exitStates.get(i++);
 
-            for (Map<LocationIdentity, Node> exitState : info.exitStates) {
-                for (LocationIdentity key : headerState.keySet()) {
-                    exitState.put(key, headerState.get(key));
+                Node begin = exit.getBeginNode();
+                assert begin instanceof LoopExitNode;
+                for (Node usage : begin.usages()) {
+                    if (usage instanceof ProxyNode && ((ProxyNode) usage).type() == PhiType.Memory) {
+                        ProxyNode proxy = (ProxyNode) usage;
+                        LocationIdentity identity = (LocationIdentity) proxy.getIdentity();
+                        locationKilledBy(identity, proxy, exitState);
+                    }
                 }
             }
-
             return info.exitStates;
         }
     }
@@ -263,6 +282,7 @@
      * Map from blocks to the nodes in each block.
      */
     private BlockMap<List<ScheduledNode>> blockToNodesMap;
+    private BlockMap<Map<LocationIdentity, Node>> blockToKillMapInit;
     private BlockMap<Map<LocationIdentity, Node>> blockToKillMap;
     private NodeMap<Map<LocationIdentity, Node>> mergeToKillMap;
     private final Map<FloatingNode, List<FixedNode>> phantomUsages = new IdentityHashMap<>();
@@ -315,8 +335,10 @@
         } else if (memsched == MemoryScheduling.OPTIMAL && selectedStrategy != SchedulingStrategy.EARLIEST && graph.getNodes(FloatingReadNode.class).isNotEmpty()) {
             mergeToKillMap = graph.createNodeMap();
 
+            blockToKillMapInit = new BlockMap<>(cfg);
             blockToKillMap = new BlockMap<>(cfg);
             for (Block b : cfg.getBlocks()) {
+                blockToKillMapInit.put(b, new HashMap<LocationIdentity, Node>());
                 blockToKillMap.put(b, new HashMap<LocationIdentity, Node>());
             }
 
@@ -328,7 +350,7 @@
                 Node first = n.lastLocationAccess();
                 assert first != null;
 
-                Map<LocationIdentity, Node> killMap = blockToKillMap.get(forKillLocation(first));
+                Map<LocationIdentity, Node> killMap = blockToKillMapInit.get(forKillLocation(first));
                 killMap.put(n.location().getLocationIdentity(), first);
             }
 
@@ -357,20 +379,27 @@
     private void printSchedule(String desc) {
         Debug.printf("=== %s / %s / %s (%s) ===\n", getCFG().getStartBlock().getBeginNode().graph(), selectedStrategy, memsched, desc);
         for (Block b : getCFG().getBlocks()) {
-            Debug.printf("==== b: %s. ", b);
+            Debug.printf("==== b: %s (loopDepth: %s). ", b, b.getLoopDepth());
             Debug.printf("dom: %s. ", b.getDominator());
             Debug.printf("post-dom: %s. ", b.getPostdominator());
             Debug.printf("preds: %s. ", b.getPredecessors());
             Debug.printf("succs: %s ====\n", b.getSuccessors());
-            BlockMap<Map<LocationIdentity, Node>> killMaps = getBlockToKillMap();
+            BlockMap<Map<LocationIdentity, Node>> killMaps = blockToKillMap;
             if (killMaps != null) {
+                if (b.getBeginNode() instanceof MergeNode) {
+                    MergeNode merge = (MergeNode) b.getBeginNode();
+                    Debug.printf("M merge kills: \n");
+                    for (LocationIdentity locId : mergeToKillMap.get(merge).keySet()) {
+                        Debug.printf("M %s killed by %s\n", locId, mergeToKillMap.get(merge).get(locId));
+                    }
+                }
                 Debug.printf("X block kills: \n");
                 for (LocationIdentity locId : killMaps.get(b).keySet()) {
                     Debug.printf("X %s killed by %s\n", locId, killMaps.get(b).get(locId));
                 }
             }
 
-            if (getBlockToNodesMap().get(b) != null) {
+            if (blockToNodesMap.get(b) != null) {
                 for (Node n : nodesFor(b)) {
                     printNode(n);
                 }
@@ -414,14 +443,6 @@
         return blockToNodesMap;
     }
 
-    public BlockMap<Map<LocationIdentity, Node>> getBlockToKillMap() {
-        return blockToKillMap;
-    }
-
-    public NodeMap<Map<LocationIdentity, Node>> getMergeToKillMap() {
-        return mergeToKillMap;
-    }
-
     /**
      * Gets the nodes in a given block.
      */
@@ -465,10 +486,11 @@
             throw new SchedulingError("%s should already have been placed in a block", node);
         }
 
+        Block earliestBlock = earliestBlock(node);
         Block block;
         switch (strategy) {
             case EARLIEST:
-                block = earliestBlock(node);
+                block = earliestBlock;
                 break;
             case LATEST:
             case LATEST_OUT_OF_LOOPS:
@@ -477,23 +499,19 @@
                 } else {
                     block = latestBlock(node, strategy);
                     if (block == null) {
-                        block = earliestBlock(node);
+                        block = earliestBlock;
                     } else if (strategy == SchedulingStrategy.LATEST_OUT_OF_LOOPS && !(node instanceof VirtualObjectNode)) {
                         // schedule at the latest position possible in the outermost loop possible
-                        Block earliestBlock = earliestBlock(node);
-                        Block before = block;
                         block = scheduleOutOfLoops(node, block, earliestBlock);
-                        if (!earliestBlock.dominates(block)) {
-                            throw new SchedulingError("%s: Graph cannot be scheduled : inconsistent for %s, %d usages, (%s needs to dominate %s (before %s))", node.graph(), node,
-                                            node.usages().count(), earliestBlock, block, before);
-                        }
                     }
                 }
                 break;
             default:
                 throw new GraalInternalError("unknown scheduling strategy");
         }
-        assert earliestBlock(node).dominates(block) : "node " + node + " in block " + block + " is not dominated by earliest " + earliestBlock(node);
+        if (!earliestBlock.dominates(block)) {
+            throw new SchedulingError("%s: Graph cannot be scheduled : inconsistent for %s, %d usages, (%s needs to dominate %s)", node.graph(), node, node.usages().count(), earliestBlock, block);
+        }
         cfg.getNodeToBlock().set(node, block);
         blockToNodesMap.get(block).add(node);
     }
@@ -541,9 +559,8 @@
         // iterate the dominator tree
         while (true) {
             iterations++;
-            assert earliestBlock.dominates(previousBlock) : "iterations: " + iterations;
             Node lastKill = blockToKillMap.get(currentBlock).get(locid);
-            boolean isAtEarliest = earliestBlock == previousBlock && previousBlock != currentBlock;
+            assert lastKill != null : "should be never null, due to init of killMaps: " + currentBlock + ", location: " + locid;
 
             if (lastKill.equals(upperBound)) {
                 // assign node to the block which kills the location
@@ -553,7 +570,6 @@
                 // schedule read out of the loop if possible, in terms of killMaps and earliest
                 // schedule
                 if (currentBlock != earliestBlock && previousBlock != earliestBlock) {
-                    assert earliestBlock.dominates(currentBlock);
                     Block t = currentBlock;
                     while (t.getLoop() != null && t.getDominator() != null && earliestBlock.dominates(t)) {
                         Block dom = t.getDominator();
@@ -568,17 +584,12 @@
 
                 if (!outOfLoop && previousBlock.getBeginNode() instanceof MergeNode) {
                     // merges kill locations right at the beginning of a block. if a merge is the
-                    // killing node, we assign it to the dominating node.
+                    // killing node, we assign it to the dominating block.
 
                     MergeNode merge = (MergeNode) previousBlock.getBeginNode();
-                    Node killer = getMergeToKillMap().get(merge).get(locid);
+                    Node killer = mergeToKillMap.get(merge).get(locid);
 
                     if (killer != null && killer == merge) {
-                        // check if we violate earliest schedule condition
-                        if (isAtEarliest) {
-                            printIterations(iterations, "earliest bound in merge: " + earliestBlock);
-                            return earliestBlock;
-                        }
                         printIterations(iterations, "kill by merge: " + currentBlock);
                         return currentBlock;
                     }
@@ -590,11 +601,6 @@
                 return previousBlock;
             }
 
-            if (isAtEarliest) {
-                printIterations(iterations, "earliest bound: " + earliestBlock);
-                return earliestBlock;
-            }
-
             if (upperBoundBlock == currentBlock) {
                 printIterations(iterations, "upper bound: " + currentBlock + ", previous: " + previousBlock);
                 return currentBlock;