changeset 21339:1c56b7be2731

LinearScan: renamed sub phases.
author Josef Eisl <josef.eisl@jku.at>
date Tue, 12 May 2015 14:19:57 +0200
parents 5010ea46630a
children dd013bfdccc3
files	graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/AssignLocations.java
	graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/EliminateSpillMove.java
	graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LifetimeAnalysis.java
	graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScan.java
	graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScanAssignLocationsPhase.java
	graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScanEliminateSpillMovePhase.java
	graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScanLifetimeAnalysisPhase.java
	graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScanOptimizeSpillPositionPhase.java
	graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScanRegisterAllocationPhase.java
	graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScanResolveDataFlowPhase.java
	graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/OptimizeSpillPosition.java
	graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/RegisterAllocation.java
	graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/ResolveDataFlow.java
	graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/SSAEliminateSpillMove.java
	graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/SSALifetimeAnalysis.java
	graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/SSALinearScan.java
	graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/SSAResolveDataFlow.java
diffstat 17 files changed, 1752 insertions(+), 1752 deletions(-) [+]
line diff
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/AssignLocations.java	Tue May 12 14:04:40 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,212 +0,0 @@
-/*
- * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.lir.alloc.lsra;
-
-import static com.oracle.graal.api.code.ValueUtil.*;
-import static com.oracle.graal.compiler.common.GraalOptions.*;
-import static com.oracle.graal.lir.LIRValueUtil.*;
-
-import java.util.*;
-
-import com.oracle.graal.api.code.*;
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.compiler.common.cfg.*;
-import com.oracle.graal.debug.*;
-import com.oracle.graal.lir.*;
-import com.oracle.graal.lir.LIRInstruction.*;
-import com.oracle.graal.lir.StandardOp.*;
-import com.oracle.graal.lir.gen.*;
-import com.oracle.graal.lir.gen.LIRGeneratorTool.*;
-import com.oracle.graal.lir.phases.*;
-
-/** Phase 7: assign register numbers back to LIR */
-final class AssignLocations extends AllocationPhase {
-
-    private final LinearScan allocator;
-
-    AssignLocations(LinearScan allocator) {
-        this.allocator = allocator;
-    }
-
-    @Override
-    protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory) {
-        assignLocations();
-    }
-
-    /**
-     * Returns the location that was allocated to an LIR instruction operand.
-     *
-     * @param operand an LIR instruction operand
-     * @param opId the id of the LIR instruction using {@code operand}
-     * @param mode the usage mode for {@code operand} by the instruction
-     * @return the location assigned for the operand
-     */
-    private Value colorLirOperand(Variable operand, int opId, OperandMode mode) {
-        Interval interval = allocator.intervalFor(operand);
-        assert interval != null : "interval must exist";
-
-        if (opId != -1) {
-            if (DetailedAsserts.getValue()) {
-                AbstractBlockBase<?> block = allocator.blockForId(opId);
-                if (block.getSuccessorCount() <= 1 && opId == allocator.getLastLirInstructionId(block)) {
-                    /*
-                     * Spill moves may have been appended at the end of this block (but before the
-                     * branch instruction), so the split child information for this branch would be
-                     * incorrect.
-                     */
-                    LIRInstruction instr = allocator.ir.getLIRforBlock(block).get(allocator.ir.getLIRforBlock(block).size() - 1);
-                    if (instr instanceof StandardOp.JumpOp) {
-                        if (allocator.getBlockData(block).liveOut.get(allocator.operandNumber(operand))) {
-                            assert false : String.format(
-                                            "can't get split child for the last branch of a block because the information would be incorrect (moves are inserted before the branch in resolveDataFlow) block=%s, instruction=%s, operand=%s",
-                                            block, instr, operand);
-                        }
-                    }
-                }
-            }
-
-            /*
-             * Operands are not changed when an interval is split during allocation, so search the
-             * right interval here.
-             */
-            interval = allocator.splitChildAtOpId(interval, opId, mode);
-        }
-
-        if (isIllegal(interval.location()) && interval.canMaterialize()) {
-            assert mode != OperandMode.DEF;
-            return interval.getMaterializedValue();
-        }
-        return interval.location();
-    }
-
-    /**
-     * @param op
-     * @param operand
-     * @param valueMode
-     * @param flags
-     * @see InstructionValueProcedure#doValue(LIRInstruction, Value, OperandMode, EnumSet)
-     */
-    private Value debugInfoProcedure(LIRInstruction op, Value operand, OperandMode valueMode, EnumSet<OperandFlag> flags) {
-        if (isVirtualStackSlot(operand)) {
-            return operand;
-        }
-        int tempOpId = op.id();
-        OperandMode mode = OperandMode.USE;
-        AbstractBlockBase<?> block = allocator.blockForId(tempOpId);
-        if (block.getSuccessorCount() == 1 && tempOpId == allocator.getLastLirInstructionId(block)) {
-            /*
-             * Generating debug information for the last instruction of a block. If this instruction
-             * is a branch, spill moves are inserted before this branch and so the wrong operand
-             * would be returned (spill moves at block boundaries are not considered in the live
-             * ranges of intervals).
-             * 
-             * Solution: use the first opId of the branch target block instead.
-             */
-            final LIRInstruction instr = allocator.ir.getLIRforBlock(block).get(allocator.ir.getLIRforBlock(block).size() - 1);
-            if (instr instanceof StandardOp.JumpOp) {
-                if (allocator.getBlockData(block).liveOut.get(allocator.operandNumber(operand))) {
-                    tempOpId = allocator.getFirstLirInstructionId(block.getSuccessors().iterator().next());
-                    mode = OperandMode.DEF;
-                }
-            }
-        }
-
-        /*
-         * Get the current location of the operand. The operand must be live because debug
-         * information is considered when building the intervals; if the interval is not live,
-         * colorLirOperand will fail with an assertion.
-         */
-        Value result = colorLirOperand((Variable) operand, tempOpId, mode);
-        assert !allocator.hasCall(tempOpId) || isStackSlotValue(result) || isConstant(result) || !allocator.isCallerSave(result) : "cannot have caller-save register operands at calls";
-        return result;
-    }
-
-    private void computeDebugInfo(final LIRInstruction op, LIRFrameState info) {
-        info.forEachState(op, this::debugInfoProcedure);
-    }
-
-    private void assignLocations(List<LIRInstruction> instructions) {
-        int numInst = instructions.size();
-        boolean hasDead = false;
-
-        InstructionValueProcedure assignProc = (op, operand, mode, flags) -> isVariable(operand) ? colorLirOperand((Variable) operand, op.id(), mode) : operand;
-        for (int j = 0; j < numInst; j++) {
-            final LIRInstruction op = instructions.get(j);
-            if (op == null) {
-                /*
-                 * this can happen when spill-moves are removed in eliminateSpillMoves
-                 */
-                hasDead = true;
-                continue;
-            }
-
-            // remove useless moves
-            MoveOp move = null;
-            if (op instanceof MoveOp) {
-                move = (MoveOp) op;
-                AllocatableValue result = move.getResult();
-                if (isVariable(result) && allocator.isMaterialized(result, op.id(), OperandMode.DEF)) {
-                    /*
-                     * This happens if a materializable interval is originally not spilled but then
-                     * kicked out in LinearScanWalker.splitForSpilling(). When kicking out such an
-                     * interval this move operation was already generated.
-                     */
-                    instructions.set(j, null);
-                    hasDead = true;
-                    continue;
-                }
-            }
-
-            op.forEachInput(assignProc);
-            op.forEachAlive(assignProc);
-            op.forEachTemp(assignProc);
-            op.forEachOutput(assignProc);
-
-            // compute reference map and debug information
-            op.forEachState((inst, state) -> computeDebugInfo(inst, state));
-
-            // remove useless moves
-            if (move != null) {
-                if (move.getInput().equals(move.getResult())) {
-                    instructions.set(j, null);
-                    hasDead = true;
-                }
-            }
-        }
-
-        if (hasDead) {
-            // Remove null values from the list.
-            instructions.removeAll(Collections.singleton(null));
-        }
-    }
-
-    private void assignLocations() {
-        try (Indent indent = Debug.logAndIndent("assign locations")) {
-            for (AbstractBlockBase<?> block : allocator.sortedBlocks) {
-                try (Indent indent2 = Debug.logAndIndent("assign locations in block B%d", block.getId())) {
-                    assignLocations(allocator.ir.getLIRforBlock(block));
-                }
-            }
-        }
-    }
-}
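
The AssignLocations class deleted above (reintroduced as LinearScanAssignLocationsPhase according to the file list) boils down to two steps per instruction: rewrite every variable operand with the location its interval was assigned, and drop moves that thereby become self-moves. The following is a minimal, self-contained sketch of that idea only; the Instr record and the location map are hypothetical stand-ins, not the Graal LIR API.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class AssignLocationsSketch {

    // hypothetical stand-in for an LIR instruction with one result and one input operand
    record Instr(String op, String result, String input) {}

    // replace variable operands by their allocated locations and drop useless self-moves
    static List<Instr> assignLocations(List<Instr> instructions, Map<String, String> locationOf) {
        List<Instr> out = new ArrayList<>();
        for (Instr instr : instructions) {
            String result = locationOf.getOrDefault(instr.result(), instr.result());
            String input = locationOf.getOrDefault(instr.input(), instr.input());
            if (instr.op().equals("move") && result.equals(input)) {
                continue; // the move collapsed to a self-move after coloring
            }
            out.add(new Instr(instr.op(), result, input));
        }
        return out;
    }

    public static void main(String[] args) {
        List<Instr> lir = List.of(
                new Instr("move", "v1", "v0"),  // v0 and v1 share a register, so this move vanishes
                new Instr("add", "v2", "v1"));
        Map<String, String> allocation = Map.of("v0", "rax", "v1", "rax", "v2", "rcx");
        System.out.println(assignLocations(lir, allocation));
    }
}

The real phase additionally resolves split children by instruction id, substitutes materialized constants, and recomputes debug information, as the deleted code above shows.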
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/EliminateSpillMove.java	Tue May 12 14:04:40 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,211 +0,0 @@
-/*
- * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.lir.alloc.lsra;
-
-import static com.oracle.graal.api.code.ValueUtil.*;
-import static com.oracle.graal.compiler.common.GraalOptions.*;
-import static com.oracle.graal.lir.LIRValueUtil.*;
-
-import java.util.*;
-
-import com.oracle.graal.api.code.*;
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.compiler.common.cfg.*;
-import com.oracle.graal.debug.*;
-import com.oracle.graal.lir.*;
-import com.oracle.graal.lir.StandardOp.*;
-import com.oracle.graal.lir.alloc.lsra.Interval.*;
-import com.oracle.graal.lir.alloc.lsra.LinearScan.*;
-import com.oracle.graal.lir.gen.*;
-import com.oracle.graal.lir.gen.LIRGeneratorTool.*;
-import com.oracle.graal.lir.phases.*;
-
-class EliminateSpillMove extends AllocationPhase {
-
-    private static final IntervalPredicate mustStoreAtDefinition = new LinearScan.IntervalPredicate() {
-
-        @Override
-        public boolean apply(Interval i) {
-            return i.isSplitParent() && i.spillState() == SpillState.StoreAtDefinition;
-        }
-    };
-
-    protected final LinearScan allocator;
-
-    EliminateSpillMove(LinearScan allocator) {
-        this.allocator = allocator;
-    }
-
-    @Override
-    protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory) {
-        beforeSpillMoveElimination();
-        eliminateSpillMoves();
-    }
-
-    protected void beforeSpillMoveElimination() {
-    }
-
-    /**
-     * @return the index of the first instruction that is of interest for
-     *         {@link #eliminateSpillMoves()}
-     */
-    protected int firstInstructionOfInterest() {
-        // skip the first because it is always a label
-        return 1;
-    }
-
-    // called once before assignment of register numbers
-    void eliminateSpillMoves() {
-        try (Indent indent = Debug.logAndIndent("Eliminating unnecessary spill moves")) {
-
-            /*
-             * collect all intervals that must be stored after their definition. The list is sorted
-             * by Interval.spillDefinitionPos.
-             */
-            Interval interval;
-            interval = allocator.createUnhandledLists(mustStoreAtDefinition, null).first;
-            if (DetailedAsserts.getValue()) {
-                checkIntervals(interval);
-            }
-
-            LIRInsertionBuffer insertionBuffer = new LIRInsertionBuffer();
-            for (AbstractBlockBase<?> block : allocator.sortedBlocks) {
-                try (Indent indent1 = Debug.logAndIndent("Handle %s", block)) {
-                    List<LIRInstruction> instructions = allocator.ir.getLIRforBlock(block);
-                    int numInst = instructions.size();
-
-                    // iterate all instructions of the block.
-                    for (int j = firstInstructionOfInterest(); j < numInst; j++) {
-                        LIRInstruction op = instructions.get(j);
-                        int opId = op.id();
-
-                        if (opId == -1) {
-                            MoveOp move = (MoveOp) op;
-                            /*
-                             * Remove move from register to stack if the stack slot is guaranteed to
-                             * be correct. Only moves that have been inserted by LinearScan can be
-                             * removed.
-                             */
-                            if (canEliminateSpillMove(block, move)) {
-                                /*
-                                 * Move target is a stack slot that is always correct, so eliminate
-                                 * instruction.
-                                 */
-                                if (Debug.isLogEnabled()) {
-                                    Debug.log("eliminating move from interval %d (%s) to %d (%s) in block %s", allocator.operandNumber(move.getInput()), move.getInput(),
-                                                    allocator.operandNumber(move.getResult()), move.getResult(), block);
-                                }
-
-                                // null-instructions are deleted by assignRegNum
-                                instructions.set(j, null);
-                            }
-
-                        } else {
-                            /*
-                             * Insert move from register to stack just after the beginning of the
-                             * interval.
-                             */
-                            assert interval == Interval.EndMarker || interval.spillDefinitionPos() >= opId : "invalid order";
-                            assert interval == Interval.EndMarker || (interval.isSplitParent() && interval.spillState() == SpillState.StoreAtDefinition) : "invalid interval";
-
-                            while (interval != Interval.EndMarker && interval.spillDefinitionPos() == opId) {
-                                if (!interval.canMaterialize()) {
-                                    if (!insertionBuffer.initialized()) {
-                                        /*
-                                         * prepare insertion buffer (appended when all instructions
-                                         * in the block are processed)
-                                         */
-                                        insertionBuffer.init(instructions);
-                                    }
-
-                                    AllocatableValue fromLocation = interval.location();
-                                    AllocatableValue toLocation = LinearScan.canonicalSpillOpr(interval);
-                                    if (!fromLocation.equals(toLocation)) {
-
-                                        assert isRegister(fromLocation) : "from operand must be a register but is: " + fromLocation + " toLocation=" + toLocation + " spillState=" +
-                                                        interval.spillState();
-                                        assert isStackSlotValue(toLocation) : "to operand must be a stack slot";
-
-                                        LIRInstruction move = allocator.getSpillMoveFactory().createMove(toLocation, fromLocation);
-                                        insertionBuffer.append(j + 1, move);
-
-                                        if (Debug.isLogEnabled()) {
-                                            Debug.log("inserting move after definition of interval %d to stack slot %s at opId %d", interval.operandNumber, interval.spillSlot(), opId);
-                                        }
-                                    }
-                                }
-                                interval = interval.next;
-                            }
-                        }
-                    } // end of instruction iteration
-
-                    if (insertionBuffer.initialized()) {
-                        insertionBuffer.finish();
-                    }
-                }
-            } // end of block iteration
-
-            assert interval == Interval.EndMarker : "missed an interval";
-        }
-    }
-
-    /**
-     * @param block The block {@code move} is located in.
-     * @param move Spill move.
-     */
-    protected boolean canEliminateSpillMove(AbstractBlockBase<?> block, MoveOp move) {
-        assert isVariable(move.getResult()) : "LinearScan inserts only moves to variables: " + move;
-
-        Interval curInterval = allocator.intervalFor(move.getResult());
-
-        if (!isRegister(curInterval.location()) && curInterval.alwaysInMemory()) {
-            assert isStackSlotValue(curInterval.location()) : "Not a stack slot: " + curInterval.location();
-            return true;
-        }
-        return false;
-    }
-
-    private static void checkIntervals(Interval interval) {
-        Interval prev = null;
-        Interval temp = interval;
-        while (temp != Interval.EndMarker) {
-            assert temp.spillDefinitionPos() > 0 : "invalid spill definition pos";
-            if (prev != null) {
-                assert temp.from() >= prev.from() : "intervals not sorted";
-                assert temp.spillDefinitionPos() >= prev.spillDefinitionPos() : "when intervals are sorted by from, they must also be sorted by spillDefinitionPos";
-            }
-
-            assert temp.spillSlot() != null || temp.canMaterialize() : "interval has no spill slot assigned";
-            assert temp.spillDefinitionPos() >= temp.from() : "invalid order";
-            assert temp.spillDefinitionPos() <= temp.from() + 2 : "only intervals defined once at their start-pos can be optimized";
-
-            if (Debug.isLogEnabled()) {
-                Debug.log("interval %d (from %d to %d) must be stored at %d", temp.operandNumber, temp.from(), temp.to(), temp.spillDefinitionPos());
-            }
-
-            prev = temp;
-            temp = temp.next;
-        }
-    }
-
-}
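
The EliminateSpillMove class deleted above (now LinearScanEliminateSpillMovePhase per the file list) removes register-to-stack moves whose destination stack slot is guaranteed to hold the correct value already, i.e. when the destination interval is not in a register and is always in memory. A rough, self-contained sketch of that criterion, using hypothetical placeholder types rather than the real Interval and MoveOp API:

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

public class EliminateSpillMoveSketch {

    // hypothetical stand-in for a spill move; allocator-inserted moves carry no instruction id
    record Move(String from, String to, boolean insertedByAllocator) {}

    // keep only the moves whose destination slot is not already guaranteed to be up to date
    static List<Move> eliminate(List<Move> moves, Set<String> alwaysInMemory) {
        List<Move> kept = new ArrayList<>();
        for (Move m : moves) {
            boolean redundant = m.insertedByAllocator() && alwaysInMemory.contains(m.to());
            if (!redundant) {
                kept.add(m);
            }
        }
        return kept;
    }

    public static void main(String[] args) {
        List<Move> moves = List.of(
                new Move("rax", "stack:0", true),    // redundant: stack:0 is always correct
                new Move("rbx", "stack:1", false));  // not inserted by the allocator, must stay
        System.out.println(eliminate(moves, Set.of("stack:0")));
    }
}

In the deleted code the same decision is made by canEliminateSpillMove, while the other branch of eliminateSpillMoves inserts a store after the definition of every interval flagged StoreAtDefinition.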
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LifetimeAnalysis.java	Tue May 12 14:04:40 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,829 +0,0 @@
-/*
- * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.lir.alloc.lsra;
-
-import static com.oracle.graal.api.code.ValueUtil.*;
-import static com.oracle.graal.compiler.common.GraalOptions.*;
-import static com.oracle.graal.lir.LIRValueUtil.*;
-import static com.oracle.graal.lir.debug.LIRGenerationDebugContext.*;
-
-import java.util.*;
-
-import com.oracle.graal.api.code.*;
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.compiler.common.*;
-import com.oracle.graal.compiler.common.alloc.*;
-import com.oracle.graal.compiler.common.cfg.*;
-import com.oracle.graal.compiler.common.util.*;
-import com.oracle.graal.debug.*;
-import com.oracle.graal.debug.Debug.Scope;
-import com.oracle.graal.lir.*;
-import com.oracle.graal.lir.LIRInstruction.OperandFlag;
-import com.oracle.graal.lir.LIRInstruction.OperandMode;
-import com.oracle.graal.lir.StandardOp.LabelOp;
-import com.oracle.graal.lir.StandardOp.MoveOp;
-import com.oracle.graal.lir.alloc.lsra.Interval.RegisterPriority;
-import com.oracle.graal.lir.alloc.lsra.Interval.SpillState;
-import com.oracle.graal.lir.alloc.lsra.LinearScan.BlockData;
-import com.oracle.graal.lir.gen.*;
-import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
-import com.oracle.graal.lir.phases.*;
-
-class LifetimeAnalysis extends AllocationPhase {
-
-    protected final LinearScan allocator;
-
-    /**
-     * @param linearScan
-     */
-    LifetimeAnalysis(LinearScan linearScan) {
-        allocator = linearScan;
-    }
-
-    @Override
-    protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory) {
-        numberInstructions();
-        allocator.printLir("Before register allocation", true);
-        computeLocalLiveSets();
-        computeGlobalLiveSets();
-        buildIntervals();
-    }
-
-    /**
-     * Bit set for each variable that is contained in each loop.
-     */
-    BitMap2D intervalInLoop;
-
-    boolean isIntervalInLoop(int interval, int loop) {
-        return intervalInLoop.at(interval, loop);
-    }
-
-    /**
-     * Numbers all instructions in all blocks. The numbering follows the
-     * {@linkplain ComputeBlockOrder linear scan order}.
-     */
-    void numberInstructions() {
-
-        allocator.initIntervals();
-
-        ValueConsumer setVariableConsumer = (value, mode, flags) -> {
-            if (isVariable(value)) {
-                allocator.getOrCreateInterval(asVariable(value));
-            }
-        };
-
-        // Assign IDs to LIR nodes and build a mapping, lirOps, from ID to LIRInstruction node.
-        int numInstructions = 0;
-        for (AbstractBlockBase<?> block : allocator.sortedBlocks) {
-            numInstructions += allocator.ir.getLIRforBlock(block).size();
-        }
-
-        // initialize with correct length
-        allocator.initOpIdMaps(numInstructions);
-
-        int opId = 0;
-        int index = 0;
-        for (AbstractBlockBase<?> block : allocator.sortedBlocks) {
-            allocator.initBlockData(block);
-
-            List<LIRInstruction> instructions = allocator.ir.getLIRforBlock(block);
-
-            int numInst = instructions.size();
-            for (int j = 0; j < numInst; j++) {
-                LIRInstruction op = instructions.get(j);
-                op.setId(opId);
-
-                allocator.putOpIdMaps(index, op, block);
-                assert allocator.instructionForId(opId) == op : "must match";
-
-                op.visitEachTemp(setVariableConsumer);
-                op.visitEachOutput(setVariableConsumer);
-
-                index++;
-                opId += 2; // numbering of lirOps by two
-            }
-        }
-        assert index == numInstructions : "must match";
-        assert (index << 1) == opId : "must match: " + (index << 1);
-    }
-
-    /**
-     * Computes local live sets (i.e. {@link BlockData#liveGen} and {@link BlockData#liveKill})
-     * separately for each block.
-     */
-    void computeLocalLiveSets() {
-        int liveSize = allocator.liveSetSize();
-
-        intervalInLoop = new BitMap2D(allocator.operandSize(), allocator.numLoops());
-
-        // iterate all blocks
-        for (final AbstractBlockBase<?> block : allocator.sortedBlocks) {
-            try (Indent indent = Debug.logAndIndent("compute local live sets for block %s", block)) {
-
-                final BitSet liveGen = new BitSet(liveSize);
-                final BitSet liveKill = new BitSet(liveSize);
-
-                List<LIRInstruction> instructions = allocator.ir.getLIRforBlock(block);
-                int numInst = instructions.size();
-
-                ValueConsumer useConsumer = (operand, mode, flags) -> {
-                    if (isVariable(operand)) {
-                        int operandNum = allocator.operandNumber(operand);
-                        if (!liveKill.get(operandNum)) {
-                            liveGen.set(operandNum);
-                            if (Debug.isLogEnabled()) {
-                                Debug.log("liveGen for operand %d(%s)", operandNum, operand);
-                            }
-                        }
-                        if (block.getLoop() != null) {
-                            intervalInLoop.setBit(operandNum, block.getLoop().getIndex());
-                        }
-                    }
-
-                    if (DetailedAsserts.getValue()) {
-                        verifyInput(block, liveKill, operand);
-                    }
-                };
-                ValueConsumer stateConsumer = (operand, mode, flags) -> {
-                    if (LinearScan.isVariableOrRegister(operand)) {
-                        int operandNum = allocator.operandNumber(operand);
-                        if (!liveKill.get(operandNum)) {
-                            liveGen.set(operandNum);
-                            if (Debug.isLogEnabled()) {
-                                Debug.log("liveGen in state for operand %d(%s)", operandNum, operand);
-                            }
-                        }
-                    }
-                };
-                ValueConsumer defConsumer = (operand, mode, flags) -> {
-                    if (isVariable(operand)) {
-                        int varNum = allocator.operandNumber(operand);
-                        liveKill.set(varNum);
-                        if (Debug.isLogEnabled()) {
-                            Debug.log("liveKill for operand %d(%s)", varNum, operand);
-                        }
-                        if (block.getLoop() != null) {
-                            intervalInLoop.setBit(varNum, block.getLoop().getIndex());
-                        }
-                    }
-
-                    if (DetailedAsserts.getValue()) {
-                        /*
-                         * Fixed intervals are never live at block boundaries, so they need not be
-                         * processed in live sets. Process them only in debug mode so that this can
-                         * be checked
-                         */
-                        verifyTemp(liveKill, operand);
-                    }
-                };
-
-                // iterate all instructions of the block
-                for (int j = 0; j < numInst; j++) {
-                    final LIRInstruction op = instructions.get(j);
-
-                    try (Indent indent2 = Debug.logAndIndent("handle op %d", op.id())) {
-                        op.visitEachInput(useConsumer);
-                        op.visitEachAlive(useConsumer);
-                        /*
-                         * Add uses of live locals from interpreter's point of view for proper debug
-                         * information generation.
-                         */
-                        op.visitEachState(stateConsumer);
-                        op.visitEachTemp(defConsumer);
-                        op.visitEachOutput(defConsumer);
-                    }
-                } // end of instruction iteration
-
-                BlockData blockSets = allocator.getBlockData(block);
-                blockSets.liveGen = liveGen;
-                blockSets.liveKill = liveKill;
-                blockSets.liveIn = new BitSet(liveSize);
-                blockSets.liveOut = new BitSet(liveSize);
-
-                if (Debug.isLogEnabled()) {
-                    Debug.log("liveGen  B%d %s", block.getId(), blockSets.liveGen);
-                    Debug.log("liveKill B%d %s", block.getId(), blockSets.liveKill);
-                }
-
-            }
-        } // end of block iteration
-    }
-
-    private void verifyTemp(BitSet liveKill, Value operand) {
-        /*
-         * Fixed intervals are never live at block boundaries, so they need not be processed in live
-         * sets. Process them only in debug mode so that this can be checked
-         */
-        if (isRegister(operand)) {
-            if (allocator.isProcessed(operand)) {
-                liveKill.set(allocator.operandNumber(operand));
-            }
-        }
-    }
-
-    private void verifyInput(AbstractBlockBase<?> block, BitSet liveKill, Value operand) {
-        /*
-         * Fixed intervals are never live at block boundaries, so they need not be processed in live
-         * sets. This is checked by these assertions to be sure about it. The entry block may have
-         * incoming values in registers, which is ok.
-         */
-        if (isRegister(operand) && block != allocator.ir.getControlFlowGraph().getStartBlock()) {
-            if (allocator.isProcessed(operand)) {
-                assert liveKill.get(allocator.operandNumber(operand)) : "using fixed register that is not defined in this block";
-            }
-        }
-    }
-
-    /**
-     * Performs a backward dataflow analysis to compute global live sets (i.e.
-     * {@link BlockData#liveIn} and {@link BlockData#liveOut}) for each block.
-     */
-    void computeGlobalLiveSets() {
-        try (Indent indent = Debug.logAndIndent("compute global live sets")) {
-            int numBlocks = allocator.blockCount();
-            boolean changeOccurred;
-            boolean changeOccurredInBlock;
-            int iterationCount = 0;
-            BitSet liveOut = new BitSet(allocator.liveSetSize()); // scratch set for calculations
-
-            /*
-             * Perform a backward dataflow analysis to compute liveOut and liveIn for each block.
-             * The loop is executed until a fixpoint is reached (no changes in an iteration).
-             */
-            do {
-                changeOccurred = false;
-
-                try (Indent indent2 = Debug.logAndIndent("new iteration %d", iterationCount)) {
-
-                    // iterate all blocks in reverse order
-                    for (int i = numBlocks - 1; i >= 0; i--) {
-                        AbstractBlockBase<?> block = allocator.blockAt(i);
-                        BlockData blockSets = allocator.getBlockData(block);
-
-                        changeOccurredInBlock = false;
-
-                        /* liveOut(block) is the union of liveIn(sux), for successors sux of block. */
-                        int n = block.getSuccessorCount();
-                        if (n > 0) {
-                            // block has successors
-                            liveOut.clear();
-                            for (AbstractBlockBase<?> successor : block.getSuccessors()) {
-                                liveOut.or(allocator.getBlockData(successor).liveIn);
-                            }
-
-                            if (!blockSets.liveOut.equals(liveOut)) {
-                                /*
-                                 * A change occurred. Swap the old and new live out sets to avoid
-                                 * copying.
-                                 */
-                                BitSet temp = blockSets.liveOut;
-                                blockSets.liveOut = liveOut;
-                                liveOut = temp;
-
-                                changeOccurred = true;
-                                changeOccurredInBlock = true;
-                            }
-                        }
-
-                        if (iterationCount == 0 || changeOccurredInBlock) {
-                            /*
-                             * liveIn(block) is the union of liveGen(block) with (liveOut(block) &
-                             * !liveKill(block)).
-                             * 
-                             * Note: liveIn has to be computed only in first iteration or if liveOut
-                             * has changed!
-                             */
-                            BitSet liveIn = blockSets.liveIn;
-                            liveIn.clear();
-                            liveIn.or(blockSets.liveOut);
-                            liveIn.andNot(blockSets.liveKill);
-                            liveIn.or(blockSets.liveGen);
-
-                            if (Debug.isLogEnabled()) {
-                                Debug.log("block %d: livein = %s,  liveout = %s", block.getId(), liveIn, blockSets.liveOut);
-                            }
-                        }
-                    }
-                    iterationCount++;
-
-                    if (changeOccurred && iterationCount > 50) {
-                        throw new BailoutException("too many iterations in computeGlobalLiveSets");
-                    }
-                }
-            } while (changeOccurred);
-
-            if (DetailedAsserts.getValue()) {
-                verifyLiveness();
-            }
-
-            // check that the liveIn set of the first block is empty
-            AbstractBlockBase<?> startBlock = allocator.ir.getControlFlowGraph().getStartBlock();
-            if (allocator.getBlockData(startBlock).liveIn.cardinality() != 0) {
-                if (DetailedAsserts.getValue()) {
-                    reportFailure(numBlocks);
-                }
-                // bailout if this occurs in product mode.
-                throw new GraalInternalError("liveIn set of first block must be empty: " + allocator.getBlockData(startBlock).liveIn);
-            }
-        }
-    }
-
-    private void reportFailure(int numBlocks) {
-        try (Scope s = Debug.forceLog()) {
-            try (Indent indent = Debug.logAndIndent("report failure")) {
-
-                BitSet startBlockLiveIn = allocator.getBlockData(allocator.ir.getControlFlowGraph().getStartBlock()).liveIn;
-                try (Indent indent2 = Debug.logAndIndent("Error: liveIn set of first block must be empty (when this fails, variables are used before they are defined):")) {
-                    for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) {
-                        Interval interval = allocator.intervalFor(operandNum);
-                        if (interval != null) {
-                            Value operand = interval.operand;
-                            Debug.log("var %d; operand=%s; node=%s", operandNum, operand, getSourceForOperandFromDebugContext(operand));
-                        } else {
-                            Debug.log("var %d; missing operand", operandNum);
-                        }
-                    }
-                }
-
-                // print some additional information to simplify debugging
-                for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) {
-                    Interval interval = allocator.intervalFor(operandNum);
-                    Value operand = null;
-                    Object valueForOperandFromDebugContext = null;
-                    if (interval != null) {
-                        operand = interval.operand;
-                        valueForOperandFromDebugContext = getSourceForOperandFromDebugContext(operand);
-                    }
-                    try (Indent indent2 = Debug.logAndIndent("---- Detailed information for var %d; operand=%s; node=%s ----", operandNum, operand, valueForOperandFromDebugContext)) {
-
-                        Deque<AbstractBlockBase<?>> definedIn = new ArrayDeque<>();
-                        HashSet<AbstractBlockBase<?>> usedIn = new HashSet<>();
-                        for (AbstractBlockBase<?> block : allocator.sortedBlocks) {
-                            if (allocator.getBlockData(block).liveGen.get(operandNum)) {
-                                usedIn.add(block);
-                                try (Indent indent3 = Debug.logAndIndent("used in block B%d", block.getId())) {
-                                    for (LIRInstruction ins : allocator.ir.getLIRforBlock(block)) {
-                                        try (Indent indent4 = Debug.logAndIndent("%d: %s", ins.id(), ins)) {
-                                            ins.forEachState((liveStateOperand, mode, flags) -> {
-                                                Debug.log("operand=%s", liveStateOperand);
-                                                return liveStateOperand;
-                                            });
-                                        }
-                                    }
-                                }
-                            }
-                            if (allocator.getBlockData(block).liveKill.get(operandNum)) {
-                                definedIn.add(block);
-                                try (Indent indent3 = Debug.logAndIndent("defined in block B%d", block.getId())) {
-                                    for (LIRInstruction ins : allocator.ir.getLIRforBlock(block)) {
-                                        Debug.log("%d: %s", ins.id(), ins);
-                                    }
-                                }
-                            }
-                        }
-
-                        int[] hitCount = new int[numBlocks];
-
-                        while (!definedIn.isEmpty()) {
-                            AbstractBlockBase<?> block = definedIn.removeFirst();
-                            usedIn.remove(block);
-                            for (AbstractBlockBase<?> successor : block.getSuccessors()) {
-                                if (successor.isLoopHeader()) {
-                                    if (!block.isLoopEnd()) {
-                                        definedIn.add(successor);
-                                    }
-                                } else {
-                                    if (++hitCount[successor.getId()] == successor.getPredecessorCount()) {
-                                        definedIn.add(successor);
-                                    }
-                                }
-                            }
-                        }
-                        try (Indent indent3 = Debug.logAndIndent("**** offending usages are in: ")) {
-                            for (AbstractBlockBase<?> block : usedIn) {
-                                Debug.log("B%d", block.getId());
-                            }
-                        }
-                    }
-                }
-            }
-        } catch (Throwable e) {
-            throw Debug.handle(e);
-        }
-    }
-
-    private void verifyLiveness() {
-        /*
-         * Check that fixed intervals are not live at block boundaries, i.e. that the live sets
-         * contain no fixed-register operand numbers.
-         */
-        for (AbstractBlockBase<?> block : allocator.sortedBlocks) {
-            for (int j = 0; j <= allocator.maxRegisterNumber(); j++) {
-                assert !allocator.getBlockData(block).liveIn.get(j) : "liveIn  set of fixed register must be empty";
-                assert !allocator.getBlockData(block).liveOut.get(j) : "liveOut set of fixed register must be empty";
-                assert !allocator.getBlockData(block).liveGen.get(j) : "liveGen set of fixed register must be empty";
-            }
-        }
-    }
-
-    void addUse(AllocatableValue operand, int from, int to, RegisterPriority registerPriority, LIRKind kind) {
-        if (!allocator.isProcessed(operand)) {
-            return;
-        }
-
-        Interval interval = allocator.getOrCreateInterval(operand);
-        if (!kind.equals(LIRKind.Illegal)) {
-            interval.setKind(kind);
-        }
-
-        interval.addRange(from, to);
-
-        // Register use position at even instruction id.
-        interval.addUsePos(to & ~1, registerPriority);
-
-        if (Debug.isLogEnabled()) {
-            Debug.log("add use: %s, from %d to %d (%s)", interval, from, to, registerPriority.name());
-        }
-    }
-
-    void addTemp(AllocatableValue operand, int tempPos, RegisterPriority registerPriority, LIRKind kind) {
-        if (!allocator.isProcessed(operand)) {
-            return;
-        }
-
-        Interval interval = allocator.getOrCreateInterval(operand);
-        if (!kind.equals(LIRKind.Illegal)) {
-            interval.setKind(kind);
-        }
-
-        interval.addRange(tempPos, tempPos + 1);
-        interval.addUsePos(tempPos, registerPriority);
-        interval.addMaterializationValue(null);
-
-        if (Debug.isLogEnabled()) {
-            Debug.log("add temp: %s tempPos %d (%s)", interval, tempPos, RegisterPriority.MustHaveRegister.name());
-        }
-    }
-
-    void addDef(AllocatableValue operand, LIRInstruction op, RegisterPriority registerPriority, LIRKind kind) {
-        if (!allocator.isProcessed(operand)) {
-            return;
-        }
-        int defPos = op.id();
-
-        Interval interval = allocator.getOrCreateInterval(operand);
-        if (!kind.equals(LIRKind.Illegal)) {
-            interval.setKind(kind);
-        }
-
-        Range r = interval.first();
-        if (r.from <= defPos) {
-            /*
-             * Update the starting point (when a range is first created for a use, its start is the
-             * beginning of the current block until a def is encountered).
-             */
-            r.from = defPos;
-            interval.addUsePos(defPos, registerPriority);
-
-        } else {
-            /*
-             * Dead value - make a vacuous interval and also add the register priority for dead
-             * intervals.
-             */
-            interval.addRange(defPos, defPos + 1);
-            interval.addUsePos(defPos, registerPriority);
-            if (Debug.isLogEnabled()) {
-                Debug.log("Warning: def of operand %s at %d occurs without use", operand, defPos);
-            }
-        }
-
-        changeSpillDefinitionPos(interval, defPos);
-        if (registerPriority == RegisterPriority.None && interval.spillState().ordinal() <= SpillState.StartInMemory.ordinal() && isStackSlot(operand)) {
-            // detection of method-parameters and roundfp-results
-            interval.setSpillState(SpillState.StartInMemory);
-        }
-        interval.addMaterializationValue(getMaterializedValue(op, operand, interval));
-
-        if (Debug.isLogEnabled()) {
-            Debug.log("add def: %s defPos %d (%s)", interval, defPos, registerPriority.name());
-        }
-    }
-
-    /**
-     * Optimizes moves related to incoming stack based arguments. The interval for the destination
-     * of such moves is assigned the stack slot (which is in the caller's frame) as its spill slot.
-     */
-    void handleMethodArguments(LIRInstruction op) {
-        if (op instanceof MoveOp) {
-            MoveOp move = (MoveOp) op;
-            if (optimizeMethodArgument(move.getInput())) {
-                StackSlot slot = asStackSlot(move.getInput());
-                if (DetailedAsserts.getValue()) {
-                    assert op.id() > 0 : "invalid id";
-                    assert allocator.blockForId(op.id()).getPredecessorCount() == 0 : "move from stack must be in first block";
-                    assert isVariable(move.getResult()) : "result of move must be a variable";
-
-                    if (Debug.isLogEnabled()) {
-                        Debug.log("found move from stack slot %s to %s", slot, move.getResult());
-                    }
-                }
-
-                Interval interval = allocator.intervalFor(move.getResult());
-                interval.setSpillSlot(slot);
-                interval.assignLocation(slot);
-            }
-        }
-    }
-
-    void addRegisterHint(final LIRInstruction op, final Value targetValue, OperandMode mode, EnumSet<OperandFlag> flags, final boolean hintAtDef) {
-        if (flags.contains(OperandFlag.HINT) && LinearScan.isVariableOrRegister(targetValue)) {
-
-            op.forEachRegisterHint(targetValue, mode, (registerHint, valueMode, valueFlags) -> {
-                if (LinearScan.isVariableOrRegister(registerHint)) {
-                    Interval from = allocator.getOrCreateInterval((AllocatableValue) registerHint);
-                    Interval to = allocator.getOrCreateInterval((AllocatableValue) targetValue);
-
-                    /* hints always point from def to use */
-                    if (hintAtDef) {
-                        to.setLocationHint(from);
-                    } else {
-                        from.setLocationHint(to);
-                    }
-                    if (Debug.isLogEnabled()) {
-                        Debug.log("operation at opId %d: added hint from interval %d to %d", op.id(), from.operandNumber, to.operandNumber);
-                    }
-
-                    return registerHint;
-                }
-                return null;
-            });
-        }
-    }
-
-    /**
-     * Eliminates moves from register to stack if the stack slot is known to be correct.
-     */
-    void changeSpillDefinitionPos(Interval interval, int defPos) {
-        assert interval.isSplitParent() : "can only be called for split parents";
-
-        switch (interval.spillState()) {
-            case NoDefinitionFound:
-                assert interval.spillDefinitionPos() == -1 : "must not be set before";
-                interval.setSpillDefinitionPos(defPos);
-                interval.setSpillState(SpillState.NoSpillStore);
-                break;
-
-            case NoSpillStore:
-                assert defPos <= interval.spillDefinitionPos() : "positions are processed in reverse order when intervals are created";
-                if (defPos < interval.spillDefinitionPos() - 2) {
-                    // second definition found, so no spill optimization possible for this interval
-                    interval.setSpillState(SpillState.NoOptimization);
-                } else {
-                    // two consecutive definitions (because of two-operand LIR form)
-                    assert allocator.blockForId(defPos) == allocator.blockForId(interval.spillDefinitionPos()) : "block must be equal";
-                }
-                break;
-
-            case NoOptimization:
-                // nothing to do
-                break;
-
-            default:
-                throw new BailoutException("other states not allowed at this time");
-        }
-    }
-
-    private static boolean optimizeMethodArgument(Value value) {
-        /*
-         * Object method arguments that are passed on the stack are currently not optimized because
-         * this requires that the runtime visits method arguments during stack walking.
-         */
-        return isStackSlot(value) && asStackSlot(value).isInCallerFrame() && value.getKind() != Kind.Object;
-    }
-
-    /**
-     * Determines the register priority for an instruction's output/result operand.
-     */
-    private static RegisterPriority registerPriorityOfOutputOperand(LIRInstruction op) {
-        if (op instanceof MoveOp) {
-            MoveOp move = (MoveOp) op;
-            if (optimizeMethodArgument(move.getInput())) {
-                return RegisterPriority.None;
-            }
-        } else if (op instanceof LabelOp) {
-            LabelOp label = (LabelOp) op;
-            if (label.isPhiIn()) {
-                return RegisterPriority.None;
-            }
-        }
-
-        // all other operands require a register
-        return RegisterPriority.MustHaveRegister;
-    }
-
-    /**
-     * Determines the priority with which an instruction's input operand will be allocated a
-     * register.
-     */
-    private static RegisterPriority registerPriorityOfInputOperand(EnumSet<OperandFlag> flags) {
-        if (flags.contains(OperandFlag.STACK)) {
-            return RegisterPriority.ShouldHaveRegister;
-        }
-        // all other operands require a register
-        return RegisterPriority.MustHaveRegister;
-    }
-
-    void buildIntervals() {
-
-        try (Indent indent = Debug.logAndIndent("build intervals")) {
-            InstructionValueConsumer outputConsumer = (op, operand, mode, flags) -> {
-                if (LinearScan.isVariableOrRegister(operand)) {
-                    addDef((AllocatableValue) operand, op, registerPriorityOfOutputOperand(op), operand.getLIRKind());
-                    addRegisterHint(op, operand, mode, flags, true);
-                }
-            };
-
-            InstructionValueConsumer tempConsumer = (op, operand, mode, flags) -> {
-                if (LinearScan.isVariableOrRegister(operand)) {
-                    addTemp((AllocatableValue) operand, op.id(), RegisterPriority.MustHaveRegister, operand.getLIRKind());
-                    addRegisterHint(op, operand, mode, flags, false);
-                }
-            };
-
-            InstructionValueConsumer aliveConsumer = (op, operand, mode, flags) -> {
-                if (LinearScan.isVariableOrRegister(operand)) {
-                    RegisterPriority p = registerPriorityOfInputOperand(flags);
-                    int opId = op.id();
-                    int blockFrom = allocator.getFirstLirInstructionId((allocator.blockForId(opId)));
-                    addUse((AllocatableValue) operand, blockFrom, opId + 1, p, operand.getLIRKind());
-                    addRegisterHint(op, operand, mode, flags, false);
-                }
-            };
-
-            InstructionValueConsumer inputConsumer = (op, operand, mode, flags) -> {
-                if (LinearScan.isVariableOrRegister(operand)) {
-                    int opId = op.id();
-                    int blockFrom = allocator.getFirstLirInstructionId((allocator.blockForId(opId)));
-                    RegisterPriority p = registerPriorityOfInputOperand(flags);
-                    addUse((AllocatableValue) operand, blockFrom, opId, p, operand.getLIRKind());
-                    addRegisterHint(op, operand, mode, flags, false);
-                }
-            };
-
-            InstructionValueConsumer stateProc = (op, operand, mode, flags) -> {
-                if (LinearScan.isVariableOrRegister(operand)) {
-                    int opId = op.id();
-                    int blockFrom = allocator.getFirstLirInstructionId((allocator.blockForId(opId)));
-                    addUse((AllocatableValue) operand, blockFrom, opId + 1, RegisterPriority.None, operand.getLIRKind());
-                }
-            };
-
-            // create a list with all caller-save registers (cpu, fpu, xmm)
-            Register[] callerSaveRegs = allocator.regAllocConfig.getRegisterConfig().getCallerSaveRegisters();
-
-            // iterate all blocks in reverse order
-            for (int i = allocator.blockCount() - 1; i >= 0; i--) {
-
-                AbstractBlockBase<?> block = allocator.blockAt(i);
-                try (Indent indent2 = Debug.logAndIndent("handle block %d", block.getId())) {
-
-                    List<LIRInstruction> instructions = allocator.ir.getLIRforBlock(block);
-                    final int blockFrom = allocator.getFirstLirInstructionId(block);
-                    int blockTo = allocator.getLastLirInstructionId(block);
-
-                    assert blockFrom == instructions.get(0).id();
-                    assert blockTo == instructions.get(instructions.size() - 1).id();
-
-                    // Update intervals for operands live at the end of this block;
-                    BitSet live = allocator.getBlockData(block).liveOut;
-                    for (int operandNum = live.nextSetBit(0); operandNum >= 0; operandNum = live.nextSetBit(operandNum + 1)) {
-                        assert live.get(operandNum) : "should not stop here otherwise";
-                        AllocatableValue operand = allocator.intervalFor(operandNum).operand;
-                        if (Debug.isLogEnabled()) {
-                            Debug.log("live in %d: %s", operandNum, operand);
-                        }
-
-                        addUse(operand, blockFrom, blockTo + 2, RegisterPriority.None, LIRKind.Illegal);
-
-                        /*
-                         * Add special use positions for loop-end blocks when the interval is used
-                         * anywhere inside this loop. It's possible that the block was part of a
-                         * non-natural loop, so it might have an invalid loop index.
-                         */
-                        if (block.isLoopEnd() && block.getLoop() != null && isIntervalInLoop(operandNum, block.getLoop().getIndex())) {
-                            allocator.intervalFor(operandNum).addUsePos(blockTo + 1, RegisterPriority.LiveAtLoopEnd);
-                        }
-                    }
-
-                    /*
-                     * Iterate all instructions of the block in reverse order. Definitions of
-                     * intervals are processed before uses.
-                     */
-                    for (int j = instructions.size() - 1; j >= 0; j--) {
-                        final LIRInstruction op = instructions.get(j);
-                        final int opId = op.id();
-
-                        try (Indent indent3 = Debug.logAndIndent("handle inst %d: %s", opId, op)) {
-
-                            // add a temp range for each register if operation destroys
-                            // caller-save registers
-                            if (op.destroysCallerSavedRegisters()) {
-                                for (Register r : callerSaveRegs) {
-                                    if (allocator.attributes(r).isAllocatable()) {
-                                        addTemp(r.asValue(), opId, RegisterPriority.None, LIRKind.Illegal);
-                                    }
-                                }
-                                if (Debug.isLogEnabled()) {
-                                    Debug.log("operation destroys all caller-save registers");
-                                }
-                            }
-
-                            op.visitEachOutput(outputConsumer);
-                            op.visitEachTemp(tempConsumer);
-                            op.visitEachAlive(aliveConsumer);
-                            op.visitEachInput(inputConsumer);
-
-                            /*
-                             * Add uses of live locals from interpreter's point of view for proper
-                             * debug information generation. Treat these operands as temp values (if
-                             * the live range is extended to a call site, the value would be in a
-                             * register at the call otherwise).
-                             */
-                            op.visitEachState(stateProc);
-
-                            // special steps for some instructions (especially moves)
-                            handleMethodArguments(op);
-
-                        }
-
-                    } // end of instruction iteration
-                }
-            } // end of block iteration
-
-            /*
-             * Add the range [0, 1] to all fixed intervals. the register allocator need not handle
-             * unhandled fixed intervals.
-             */
-            for (Interval interval : allocator.intervals()) {
-                if (interval != null && isRegister(interval.operand)) {
-                    interval.addRange(0, 1);
-                }
-            }
-        }
-    }
-
-    /**
-     * Returns a value for a interval definition, which can be used for re-materialization.
-     *
-     * @param op An instruction which defines a value
-     * @param operand The destination operand of the instruction
-     * @param interval The interval for this defined value.
-     * @return Returns the value which is moved to the instruction and which can be reused at all
-     *         reload-locations in case the interval of this instruction is spilled. Currently this
-     *         can only be a {@link JavaConstant}.
-     */
-    static JavaConstant getMaterializedValue(LIRInstruction op, Value operand, Interval interval) {
-        if (op instanceof MoveOp) {
-            MoveOp move = (MoveOp) op;
-            if (move.getInput() instanceof JavaConstant) {
-                /*
-                 * Check if the interval has any uses which would accept an stack location (priority
-                 * == ShouldHaveRegister). Rematerialization of such intervals can result in a
-                 * degradation, because rematerialization always inserts a constant load, even if
-                 * the value is not needed in a register.
-                 */
-                Interval.UsePosList usePosList = interval.usePosList();
-                int numUsePos = usePosList.size();
-                for (int useIdx = 0; useIdx < numUsePos; useIdx++) {
-                    Interval.RegisterPriority priority = usePosList.registerPriority(useIdx);
-                    if (priority == Interval.RegisterPriority.ShouldHaveRegister) {
-                        return null;
-                    }
-                }
-                return (JavaConstant) move.getInput();
-            }
-        }
-        return null;
-    }
-}
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScan.java	Tue May 12 14:04:40 2015 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScan.java	Tue May 12 14:19:57 2015 +0200
@@ -654,28 +654,28 @@
         }
     }
 
-    protected LifetimeAnalysis createLifetimeAnalysisPhase() {
-        return new LifetimeAnalysis(this);
+    protected LinearScanLifetimeAnalysisPhase createLifetimeAnalysisPhase() {
+        return new LinearScanLifetimeAnalysisPhase(this);
     }
 
-    protected RegisterAllocation createRegisterAllocationPhase() {
-        return new RegisterAllocation(this);
+    protected LinearScanRegisterAllocationPhase createRegisterAllocationPhase() {
+        return new LinearScanRegisterAllocationPhase(this);
     }
 
-    protected OptimizeSpillPosition createOptimizeSpillPositionPhase() {
-        return new OptimizeSpillPosition(this);
+    protected LinearScanOptimizeSpillPositionPhase createOptimizeSpillPositionPhase() {
+        return new LinearScanOptimizeSpillPositionPhase(this);
     }
 
-    protected ResolveDataFlow createResolveDataFlowPhase() {
-        return new ResolveDataFlow(this);
+    protected LinearScanResolveDataFlowPhase createResolveDataFlowPhase() {
+        return new LinearScanResolveDataFlowPhase(this);
     }
 
-    protected EliminateSpillMove createSpillMoveEliminationPhase() {
-        return new EliminateSpillMove(this);
+    protected LinearScanEliminateSpillMovePhase createSpillMoveEliminationPhase() {
+        return new LinearScanEliminateSpillMovePhase(this);
     }
 
-    protected AssignLocations createAssignLocationsPhase() {
-        return new AssignLocations(this);
+    protected LinearScanAssignLocationsPhase createAssignLocationsPhase() {
+        return new LinearScanAssignLocationsPhase(this);
     }
 
     void printIntervals(String label) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScanAssignLocationsPhase.java	Tue May 12 14:19:57 2015 +0200
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.lsra;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.compiler.common.GraalOptions.*;
+import static com.oracle.graal.lir.LIRValueUtil.*;
+
+import java.util.*;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.compiler.common.cfg.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.LIRInstruction.*;
+import com.oracle.graal.lir.StandardOp.*;
+import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.lir.gen.LIRGeneratorTool.*;
+import com.oracle.graal.lir.phases.*;
+
+/** Phase 7: assign register numbers back to LIR */
+final class LinearScanAssignLocationsPhase extends AllocationPhase {
+
+    private final LinearScan allocator;
+
+    LinearScanAssignLocationsPhase(LinearScan allocator) {
+        this.allocator = allocator;
+    }
+
+    @Override
+    protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory) {
+        assignLocations();
+    }
+
+    /**
+     * Assigns the allocated location for an LIR instruction operand back into the instruction.
+     *
+     * @param operand an LIR instruction operand
+     * @param opId the id of the LIR instruction using {@code operand}
+     * @param mode the usage mode for {@code operand} by the instruction
+     * @return the location assigned for the operand
+     */
+    private Value colorLirOperand(Variable operand, int opId, OperandMode mode) {
+        Interval interval = allocator.intervalFor(operand);
+        assert interval != null : "interval must exist";
+
+        if (opId != -1) {
+            if (DetailedAsserts.getValue()) {
+                AbstractBlockBase<?> block = allocator.blockForId(opId);
+                if (block.getSuccessorCount() <= 1 && opId == allocator.getLastLirInstructionId(block)) {
+                    /*
+                     * Spill moves may have been appended at the end of this block, but before
+                     * the branch instruction, in which case the split child information for this
+                     * branch would be incorrect.
+                     */
+                    LIRInstruction instr = allocator.ir.getLIRforBlock(block).get(allocator.ir.getLIRforBlock(block).size() - 1);
+                    if (instr instanceof StandardOp.JumpOp) {
+                        if (allocator.getBlockData(block).liveOut.get(allocator.operandNumber(operand))) {
+                            assert false : String.format(
+                                            "can't get split child for the last branch of a block because the information would be incorrect (moves are inserted before the branch in resolveDataFlow) block=%s, instruction=%s, operand=%s",
+                                            block, instr, operand);
+                        }
+                    }
+                }
+            }
+
+            /*
+             * Operands are not changed when an interval is split during allocation, so search the
+             * right interval here.
+             */
+            interval = allocator.splitChildAtOpId(interval, opId, mode);
+        }
+
+        if (isIllegal(interval.location()) && interval.canMaterialize()) {
+            assert mode != OperandMode.DEF;
+            return interval.getMaterializedValue();
+        }
+        return interval.location();
+    }
+
+    /**
+     * @param op
+     * @param operand
+     * @param valueMode
+     * @param flags
+     * @see InstructionValueProcedure#doValue(LIRInstruction, Value, OperandMode, EnumSet)
+     */
+    private Value debugInfoProcedure(LIRInstruction op, Value operand, OperandMode valueMode, EnumSet<OperandFlag> flags) {
+        if (isVirtualStackSlot(operand)) {
+            return operand;
+        }
+        int tempOpId = op.id();
+        OperandMode mode = OperandMode.USE;
+        AbstractBlockBase<?> block = allocator.blockForId(tempOpId);
+        if (block.getSuccessorCount() == 1 && tempOpId == allocator.getLastLirInstructionId(block)) {
+            /*
+             * Generating debug information for the last instruction of a block. If this instruction
+             * is a branch, spill moves are inserted before this branch and so the wrong operand
+             * would be returned (spill moves at block boundaries are not considered in the live
+             * ranges of intervals).
+             * 
+             * Solution: use the first opId of the branch target block instead.
+             */
+            final LIRInstruction instr = allocator.ir.getLIRforBlock(block).get(allocator.ir.getLIRforBlock(block).size() - 1);
+            if (instr instanceof StandardOp.JumpOp) {
+                if (allocator.getBlockData(block).liveOut.get(allocator.operandNumber(operand))) {
+                    tempOpId = allocator.getFirstLirInstructionId(block.getSuccessors().iterator().next());
+                    mode = OperandMode.DEF;
+                }
+            }
+        }
+
+        /*
+         * Get the current location of the operand. The operand must be live because debug
+         * information is considered when building the intervals; if the interval is not live,
+         * colorLirOperand will fail with an assertion.
+         */
+        Value result = colorLirOperand((Variable) operand, tempOpId, mode);
+        assert !allocator.hasCall(tempOpId) || isStackSlotValue(result) || isConstant(result) || !allocator.isCallerSave(result) : "cannot have caller-save register operands at calls";
+        return result;
+    }
+
+    private void computeDebugInfo(final LIRInstruction op, LIRFrameState info) {
+        info.forEachState(op, this::debugInfoProcedure);
+    }
+
+    private void assignLocations(List<LIRInstruction> instructions) {
+        int numInst = instructions.size();
+        boolean hasDead = false;
+
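+        // Replace each virtual register (Variable) with the location chosen by the allocator; all other values pass through unchanged.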
+        InstructionValueProcedure assignProc = (op, operand, mode, flags) -> isVariable(operand) ? colorLirOperand((Variable) operand, op.id(), mode) : operand;
+        for (int j = 0; j < numInst; j++) {
+            final LIRInstruction op = instructions.get(j);
+            if (op == null) {
+                /*
+                 * this can happen when spill-moves are removed in eliminateSpillMoves
+                 */
+                hasDead = true;
+                continue;
+            }
+
+            // remove useless moves
+            MoveOp move = null;
+            if (op instanceof MoveOp) {
+                move = (MoveOp) op;
+                AllocatableValue result = move.getResult();
+                if (isVariable(result) && allocator.isMaterialized(result, op.id(), OperandMode.DEF)) {
+                    /*
+                     * This happens if a materializable interval is originally not spilled but then
+                     * kicked out in LinearScanWalker.splitForSpilling(). By the time such an
+                     * interval is kicked out, this move operation has already been generated.
+                     */
+                    instructions.set(j, null);
+                    hasDead = true;
+                    continue;
+                }
+            }
+
+            op.forEachInput(assignProc);
+            op.forEachAlive(assignProc);
+            op.forEachTemp(assignProc);
+            op.forEachOutput(assignProc);
+
+            // compute reference map and debug information
+            op.forEachState((inst, state) -> computeDebugInfo(inst, state));
+
+            // remove useless moves
+            if (move != null) {
+                if (move.getInput().equals(move.getResult())) {
+                    instructions.set(j, null);
+                    hasDead = true;
+                }
+            }
+        }
+
+        if (hasDead) {
+            // Remove null values from the list.
+            instructions.removeAll(Collections.singleton(null));
+        }
+    }
+
+    private void assignLocations() {
+        try (Indent indent = Debug.logAndIndent("assign locations")) {
+            for (AbstractBlockBase<?> block : allocator.sortedBlocks) {
+                try (Indent indent2 = Debug.logAndIndent("assign locations in block B%d", block.getId())) {
+                    assignLocations(allocator.ir.getLIRforBlock(block));
+                }
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScanEliminateSpillMovePhase.java	Tue May 12 14:19:57 2015 +0200
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.lsra;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.compiler.common.GraalOptions.*;
+import static com.oracle.graal.lir.LIRValueUtil.*;
+
+import java.util.*;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.compiler.common.cfg.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.StandardOp.*;
+import com.oracle.graal.lir.alloc.lsra.Interval.*;
+import com.oracle.graal.lir.alloc.lsra.LinearScan.*;
+import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.lir.gen.LIRGeneratorTool.*;
+import com.oracle.graal.lir.phases.*;
+
+class LinearScanEliminateSpillMovePhase extends AllocationPhase {
+
+    private static final IntervalPredicate mustStoreAtDefinition = new LinearScan.IntervalPredicate() {
+
+        @Override
+        public boolean apply(Interval i) {
+            return i.isSplitParent() && i.spillState() == SpillState.StoreAtDefinition;
+        }
+    };
+
+    protected final LinearScan allocator;
+
+    LinearScanEliminateSpillMovePhase(LinearScan allocator) {
+        this.allocator = allocator;
+    }
+
+    @Override
+    protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory) {
+        beforeSpillMoveElimination();
+        eliminateSpillMoves();
+    }
+
+    protected void beforeSpillMoveElimination() {
+    }
+
+    /**
+     * @return the index of the first instruction that is of interest for
+     *         {@link #eliminateSpillMoves()}
+     */
+    protected int firstInstructionOfInterest() {
+        // skip the first because it is always a label
+        return 1;
+    }
+
+    // called once before assignment of register numbers
+    void eliminateSpillMoves() {
+        try (Indent indent = Debug.logAndIndent("Eliminating unnecessary spill moves")) {
+
+            /*
+             * collect all intervals that must be stored after their definition. The list is sorted
+             * by Interval.spillDefinitionPos.
+             */
+            Interval interval = allocator.createUnhandledLists(mustStoreAtDefinition, null).first;
+            if (DetailedAsserts.getValue()) {
+                checkIntervals(interval);
+            }
+
+            LIRInsertionBuffer insertionBuffer = new LIRInsertionBuffer();
+            for (AbstractBlockBase<?> block : allocator.sortedBlocks) {
+                try (Indent indent1 = Debug.logAndIndent("Handle %s", block)) {
+                    List<LIRInstruction> instructions = allocator.ir.getLIRforBlock(block);
+                    int numInst = instructions.size();
+
+                    // iterate all instructions of the block.
+                    for (int j = firstInstructionOfInterest(); j < numInst; j++) {
+                        LIRInstruction op = instructions.get(j);
+                        int opId = op.id();
+
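+                        // opId == -1 marks an instruction inserted after numbering, i.e. a spill move created by the allocator itself (hence the MoveOp cast below).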
+                        if (opId == -1) {
+                            MoveOp move = (MoveOp) op;
+                            /*
+                             * Remove move from register to stack if the stack slot is guaranteed to
+                             * be correct. Only moves that have been inserted by LinearScan can be
+                             * removed.
+                             */
+                            if (canEliminateSpillMove(block, move)) {
+                                /*
+                                 * Move target is a stack slot that is always correct, so eliminate
+                                 * instruction.
+                                 */
+                                if (Debug.isLogEnabled()) {
+                                    Debug.log("eliminating move from interval %d (%s) to %d (%s) in block %s", allocator.operandNumber(move.getInput()), move.getInput(),
+                                                    allocator.operandNumber(move.getResult()), move.getResult(), block);
+                                }
+
+                                // null-instructions are deleted by assignRegNum
+                                instructions.set(j, null);
+                            }
+
+                        } else {
+                            /*
+                             * Insert move from register to stack just after the beginning of the
+                             * interval.
+                             */
+                            assert interval == Interval.EndMarker || interval.spillDefinitionPos() >= opId : "invalid order";
+                            assert interval == Interval.EndMarker || (interval.isSplitParent() && interval.spillState() == SpillState.StoreAtDefinition) : "invalid interval";
+
+                            while (interval != Interval.EndMarker && interval.spillDefinitionPos() == opId) {
+                                if (!interval.canMaterialize()) {
+                                    if (!insertionBuffer.initialized()) {
+                                        /*
+                                         * prepare insertion buffer (appended when all instructions
+                                         * in the block are processed)
+                                         */
+                                        insertionBuffer.init(instructions);
+                                    }
+
+                                    AllocatableValue fromLocation = interval.location();
+                                    AllocatableValue toLocation = LinearScan.canonicalSpillOpr(interval);
+                                    if (!fromLocation.equals(toLocation)) {
+
+                                        assert isRegister(fromLocation) : "from operand must be a register but is: " + fromLocation + " toLocation=" + toLocation + " spillState=" +
+                                                        interval.spillState();
+                                        assert isStackSlotValue(toLocation) : "to operand must be a stack slot";
+
+                                        LIRInstruction move = allocator.getSpillMoveFactory().createMove(toLocation, fromLocation);
+                                        insertionBuffer.append(j + 1, move);
+
+                                        if (Debug.isLogEnabled()) {
+                                            Debug.log("inserting move after definition of interval %d to stack slot %s at opId %d", interval.operandNumber, interval.spillSlot(), opId);
+                                        }
+                                    }
+                                }
+                                interval = interval.next;
+                            }
+                        }
+                    } // end of instruction iteration
+
+                    if (insertionBuffer.initialized()) {
+                        insertionBuffer.finish();
+                    }
+                }
+            } // end of block iteration
+
+            assert interval == Interval.EndMarker : "missed an interval";
+        }
+    }
+
+    /**
+     * @param block The block {@code move} is located in.
+     * @param move Spill move.
+     */
+    protected boolean canEliminateSpillMove(AbstractBlockBase<?> block, MoveOp move) {
+        assert isVariable(move.getResult()) : "LinearScan inserts only moves to variables: " + move;
+
+        Interval curInterval = allocator.intervalFor(move.getResult());
+
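+        // The move is redundant if the target interval is not in a register and stays in memory for its whole lifetime: the stack slot is then always up to date.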
+        if (!isRegister(curInterval.location()) && curInterval.alwaysInMemory()) {
+            assert isStackSlotValue(curInterval.location()) : "Not a stack slot: " + curInterval.location();
+            return true;
+        }
+        return false;
+    }
+
+    private static void checkIntervals(Interval interval) {
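+        // Verify the StoreAtDefinition list: sorted by from/spillDefinitionPos, and every interval has a spill slot or a materializable value.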
+        Interval prev = null;
+        Interval temp = interval;
+        while (temp != Interval.EndMarker) {
+            assert temp.spillDefinitionPos() > 0 : "invalid spill definition pos";
+            if (prev != null) {
+                assert temp.from() >= prev.from() : "intervals not sorted";
+                assert temp.spillDefinitionPos() >= prev.spillDefinitionPos() : "when intervals are sorted by from, they must also be sorted by spillDefinitionPos";
+            }
+
+            assert temp.spillSlot() != null || temp.canMaterialize() : "interval has no spill slot assigned";
+            assert temp.spillDefinitionPos() >= temp.from() : "invalid order";
+            assert temp.spillDefinitionPos() <= temp.from() + 2 : "only intervals defined once at their start-pos can be optimized";
+
+            if (Debug.isLogEnabled()) {
+                Debug.log("interval %d (from %d to %d) must be stored at %d", temp.operandNumber, temp.from(), temp.to(), temp.spillDefinitionPos());
+            }
+
+            prev = temp;
+            temp = temp.next;
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScanLifetimeAnalysisPhase.java	Tue May 12 14:19:57 2015 +0200
@@ -0,0 +1,829 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.lsra;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.compiler.common.GraalOptions.*;
+import static com.oracle.graal.lir.LIRValueUtil.*;
+import static com.oracle.graal.lir.debug.LIRGenerationDebugContext.*;
+
+import java.util.*;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.compiler.common.*;
+import com.oracle.graal.compiler.common.alloc.*;
+import com.oracle.graal.compiler.common.cfg.*;
+import com.oracle.graal.compiler.common.util.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.debug.Debug.Scope;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.LIRInstruction.OperandFlag;
+import com.oracle.graal.lir.LIRInstruction.OperandMode;
+import com.oracle.graal.lir.StandardOp.LabelOp;
+import com.oracle.graal.lir.StandardOp.MoveOp;
+import com.oracle.graal.lir.alloc.lsra.Interval.RegisterPriority;
+import com.oracle.graal.lir.alloc.lsra.Interval.SpillState;
+import com.oracle.graal.lir.alloc.lsra.LinearScan.BlockData;
+import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
+import com.oracle.graal.lir.phases.*;
+
+class LinearScanLifetimeAnalysisPhase extends AllocationPhase {
+
+    protected final LinearScan allocator;
+
+    /**
+     * @param linearScan
+     */
+    LinearScanLifetimeAnalysisPhase(LinearScan linearScan) {
+        allocator = linearScan;
+    }
+
+    @Override
+    protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory) {
+        numberInstructions();
+        allocator.printLir("Before register allocation", true);
+        computeLocalLiveSets();
+        computeGlobalLiveSets();
+        buildIntervals();
+    }
+
+    /**
+     * 2D bit map with one bit per (variable, loop) pair, set if the variable is accessed inside that loop.
+     */
+    BitMap2D intervalInLoop;
+
+    boolean isIntervalInLoop(int interval, int loop) {
+        return intervalInLoop.at(interval, loop);
+    }
+
+    /**
+     * Numbers all instructions in all blocks. The numbering follows the
+     * {@linkplain ComputeBlockOrder linear scan order}.
+     */
+    void numberInstructions() {
+
+        allocator.initIntervals();
+
+        ValueConsumer setVariableConsumer = (value, mode, flags) -> {
+            if (isVariable(value)) {
+                allocator.getOrCreateInterval(asVariable(value));
+            }
+        };
+
+        // Assign IDs to LIR nodes and build a mapping, lirOps, from ID to LIRInstruction node.
+        int numInstructions = 0;
+        for (AbstractBlockBase<?> block : allocator.sortedBlocks) {
+            numInstructions += allocator.ir.getLIRforBlock(block).size();
+        }
+
+        // initialize with correct length
+        allocator.initOpIdMaps(numInstructions);
+
+        int opId = 0;
+        int index = 0;
+        for (AbstractBlockBase<?> block : allocator.sortedBlocks) {
+            allocator.initBlockData(block);
+
+            List<LIRInstruction> instructions = allocator.ir.getLIRforBlock(block);
+
+            int numInst = instructions.size();
+            for (int j = 0; j < numInst; j++) {
+                LIRInstruction op = instructions.get(j);
+                op.setId(opId);
+
+                allocator.putOpIdMaps(index, op, block);
+                assert allocator.instructionForId(opId) == op : "must match";
+
+                op.visitEachTemp(setVariableConsumer);
+                op.visitEachOutput(setVariableConsumer);
+
+                index++;
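+                // Ids advance by two; the odd positions in between are used for boundary use positions (e.g. opId + 1 for alive and state operands, blockTo + 1 at loop ends).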
+                opId += 2; // numbering of lirOps by two
+            }
+        }
+        assert index == numInstructions : "must match";
+        assert (index << 1) == opId : "must match: " + (index << 1);
+    }
+
+    /**
+     * Computes local live sets (i.e. {@link BlockData#liveGen} and {@link BlockData#liveKill})
+     * separately for each block.
+     */
+    void computeLocalLiveSets() {
+        int liveSize = allocator.liveSetSize();
+
+        intervalInLoop = new BitMap2D(allocator.operandSize(), allocator.numLoops());
+
+        // iterate all blocks
+        for (final AbstractBlockBase<?> block : allocator.sortedBlocks) {
+            try (Indent indent = Debug.logAndIndent("compute local live sets for block %s", block)) {
+
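+                // liveGen: operands read in this block before any definition; liveKill: operands defined in this block.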
+                final BitSet liveGen = new BitSet(liveSize);
+                final BitSet liveKill = new BitSet(liveSize);
+
+                List<LIRInstruction> instructions = allocator.ir.getLIRforBlock(block);
+                int numInst = instructions.size();
+
+                ValueConsumer useConsumer = (operand, mode, flags) -> {
+                    if (isVariable(operand)) {
+                        int operandNum = allocator.operandNumber(operand);
+                        if (!liveKill.get(operandNum)) {
+                            liveGen.set(operandNum);
+                            if (Debug.isLogEnabled()) {
+                                Debug.log("liveGen for operand %d(%s)", operandNum, operand);
+                            }
+                        }
+                        if (block.getLoop() != null) {
+                            intervalInLoop.setBit(operandNum, block.getLoop().getIndex());
+                        }
+                    }
+
+                    if (DetailedAsserts.getValue()) {
+                        verifyInput(block, liveKill, operand);
+                    }
+                };
+                ValueConsumer stateConsumer = (operand, mode, flags) -> {
+                    if (LinearScan.isVariableOrRegister(operand)) {
+                        int operandNum = allocator.operandNumber(operand);
+                        if (!liveKill.get(operandNum)) {
+                            liveGen.set(operandNum);
+                            if (Debug.isLogEnabled()) {
+                                Debug.log("liveGen in state for operand %d(%s)", operandNum, operand);
+                            }
+                        }
+                    }
+                };
+                ValueConsumer defConsumer = (operand, mode, flags) -> {
+                    if (isVariable(operand)) {
+                        int varNum = allocator.operandNumber(operand);
+                        liveKill.set(varNum);
+                        if (Debug.isLogEnabled()) {
+                            Debug.log("liveKill for operand %d(%s)", varNum, operand);
+                        }
+                        if (block.getLoop() != null) {
+                            intervalInLoop.setBit(varNum, block.getLoop().getIndex());
+                        }
+                    }
+
+                    if (DetailedAsserts.getValue()) {
+                        /*
+                         * Fixed intervals are never live at block boundaries, so they need not be
+                         * processed in live sets. Process them only in debug mode so that this can
+                         * be checked.
+                         */
+                        verifyTemp(liveKill, operand);
+                    }
+                };
+
+                // iterate all instructions of the block
+                for (int j = 0; j < numInst; j++) {
+                    final LIRInstruction op = instructions.get(j);
+
+                    try (Indent indent2 = Debug.logAndIndent("handle op %d", op.id())) {
+                        op.visitEachInput(useConsumer);
+                        op.visitEachAlive(useConsumer);
+                        /*
+                         * Add uses of live locals from interpreter's point of view for proper debug
+                         * information generation.
+                         */
+                        op.visitEachState(stateConsumer);
+                        op.visitEachTemp(defConsumer);
+                        op.visitEachOutput(defConsumer);
+                    }
+                } // end of instruction iteration
+
+                BlockData blockSets = allocator.getBlockData(block);
+                blockSets.liveGen = liveGen;
+                blockSets.liveKill = liveKill;
+                blockSets.liveIn = new BitSet(liveSize);
+                blockSets.liveOut = new BitSet(liveSize);
+
+                if (Debug.isLogEnabled()) {
+                    Debug.log("liveGen  B%d %s", block.getId(), blockSets.liveGen);
+                    Debug.log("liveKill B%d %s", block.getId(), blockSets.liveKill);
+                }
+
+            }
+        } // end of block iteration
+    }
+
+    private void verifyTemp(BitSet liveKill, Value operand) {
+        /*
+         * Fixed intervals are never live at block boundaries, so they need not be processed in live
+         * sets. Process them only in debug mode so that this can be checked.
+         */
+        if (isRegister(operand)) {
+            if (allocator.isProcessed(operand)) {
+                liveKill.set(allocator.operandNumber(operand));
+            }
+        }
+    }
+
+    private void verifyInput(AbstractBlockBase<?> block, BitSet liveKill, Value operand) {
+        /*
+         * Fixed intervals are never live at block boundaries, so they need not be processed in live
+         * sets. This is checked by these assertions to be sure about it. The entry block may have
+         * incoming values in registers, which is ok.
+         */
+        if (isRegister(operand) && block != allocator.ir.getControlFlowGraph().getStartBlock()) {
+            if (allocator.isProcessed(operand)) {
+                assert liveKill.get(allocator.operandNumber(operand)) : "using fixed register that is not defined in this block";
+            }
+        }
+    }
+
+    /**
+     * Performs a backward dataflow analysis to compute global live sets (i.e.
+     * {@link BlockData#liveIn} and {@link BlockData#liveOut}) for each block.
+     */
+    void computeGlobalLiveSets() {
+        try (Indent indent = Debug.logAndIndent("compute global live sets")) {
+            int numBlocks = allocator.blockCount();
+            boolean changeOccurred;
+            boolean changeOccurredInBlock;
+            int iterationCount = 0;
+            BitSet liveOut = new BitSet(allocator.liveSetSize()); // scratch set for calculations
+
+            /*
+             * Perform a backward dataflow analysis to compute liveOut and liveIn for each block.
+             * The loop is executed until a fixpoint is reached (no changes in an iteration).
+             */
+            do {
+                changeOccurred = false;
+
+                try (Indent indent2 = Debug.logAndIndent("new iteration %d", iterationCount)) {
+
+                    // iterate all blocks in reverse order
+                    for (int i = numBlocks - 1; i >= 0; i--) {
+                        AbstractBlockBase<?> block = allocator.blockAt(i);
+                        BlockData blockSets = allocator.getBlockData(block);
+
+                        changeOccurredInBlock = false;
+
+                        /* liveOut(block) is the union of liveIn(sux), for successors sux of block. */
+                        int n = block.getSuccessorCount();
+                        if (n > 0) {
+                            liveOut.clear();
+                            // block has successors
+                            for (AbstractBlockBase<?> successor : block.getSuccessors()) {
+                                liveOut.or(allocator.getBlockData(successor).liveIn);
+                            }
+
+                            if (!blockSets.liveOut.equals(liveOut)) {
+                                /*
+                                 * A change occurred. Swap the old and new live out sets to avoid
+                                 * copying.
+                                 */
+                                BitSet temp = blockSets.liveOut;
+                                blockSets.liveOut = liveOut;
+                                liveOut = temp;
+
+                                changeOccurred = true;
+                                changeOccurredInBlock = true;
+                            }
+                        }
+
+                        if (iterationCount == 0 || changeOccurredInBlock) {
+                            /*
+                             * liveIn(block) is the union of liveGen(block) with (liveOut(block) &
+                             * !liveKill(block)).
+                             * 
+                             * Note: liveIn has to be computed only in first iteration or if liveOut
+                             * has changed!
+                             */
+                            BitSet liveIn = blockSets.liveIn;
+                            liveIn.clear();
+                            liveIn.or(blockSets.liveOut);
+                            liveIn.andNot(blockSets.liveKill);
+                            liveIn.or(blockSets.liveGen);
+
+                            if (Debug.isLogEnabled()) {
+                                Debug.log("block %d: livein = %s,  liveout = %s", block.getId(), liveIn, blockSets.liveOut);
+                            }
+                        }
+                    }
+                    iterationCount++;
+
+                    if (changeOccurred && iterationCount > 50) {
+                        throw new BailoutException("too many iterations in computeGlobalLiveSets");
+                    }
+                }
+            } while (changeOccurred);
+
+            if (DetailedAsserts.getValue()) {
+                verifyLiveness();
+            }
+
+            // check that the liveIn set of the first block is empty
+            AbstractBlockBase<?> startBlock = allocator.ir.getControlFlowGraph().getStartBlock();
+            if (allocator.getBlockData(startBlock).liveIn.cardinality() != 0) {
+                if (DetailedAsserts.getValue()) {
+                    reportFailure(numBlocks);
+                }
+                // bailout if this occurs in product mode.
+                throw new GraalInternalError("liveIn set of first block must be empty: " + allocator.getBlockData(startBlock).liveIn);
+            }
+        }
+    }
+
+    private void reportFailure(int numBlocks) {
+        try (Scope s = Debug.forceLog()) {
+            try (Indent indent = Debug.logAndIndent("report failure")) {
+
+                BitSet startBlockLiveIn = allocator.getBlockData(allocator.ir.getControlFlowGraph().getStartBlock()).liveIn;
+                try (Indent indent2 = Debug.logAndIndent("Error: liveIn set of first block must be empty (when this fails, variables are used before they are defined):")) {
+                    for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) {
+                        Interval interval = allocator.intervalFor(operandNum);
+                        if (interval != null) {
+                            Value operand = interval.operand;
+                            Debug.log("var %d; operand=%s; node=%s", operandNum, operand, getSourceForOperandFromDebugContext(operand));
+                        } else {
+                            Debug.log("var %d; missing operand", operandNum);
+                        }
+                    }
+                }
+
+                // print some additional information to simplify debugging
+                for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) {
+                    Interval interval = allocator.intervalFor(operandNum);
+                    Value operand = null;
+                    Object valueForOperandFromDebugContext = null;
+                    if (interval != null) {
+                        operand = interval.operand;
+                        valueForOperandFromDebugContext = getSourceForOperandFromDebugContext(operand);
+                    }
+                    try (Indent indent2 = Debug.logAndIndent("---- Detailed information for var %d; operand=%s; node=%s ----", operandNum, operand, valueForOperandFromDebugContext)) {
+
+                        Deque<AbstractBlockBase<?>> definedIn = new ArrayDeque<>();
+                        HashSet<AbstractBlockBase<?>> usedIn = new HashSet<>();
+                        for (AbstractBlockBase<?> block : allocator.sortedBlocks) {
+                            if (allocator.getBlockData(block).liveGen.get(operandNum)) {
+                                usedIn.add(block);
+                                try (Indent indent3 = Debug.logAndIndent("used in block B%d", block.getId())) {
+                                    for (LIRInstruction ins : allocator.ir.getLIRforBlock(block)) {
+                                        try (Indent indent4 = Debug.logAndIndent("%d: %s", ins.id(), ins)) {
+                                            ins.forEachState((liveStateOperand, mode, flags) -> {
+                                                Debug.log("operand=%s", liveStateOperand);
+                                                return liveStateOperand;
+                                            });
+                                        }
+                                    }
+                                }
+                            }
+                            if (allocator.getBlockData(block).liveKill.get(operandNum)) {
+                                definedIn.add(block);
+                                try (Indent indent3 = Debug.logAndIndent("defined in block B%d", block.getId())) {
+                                    for (LIRInstruction ins : allocator.ir.getLIRforBlock(block)) {
+                                        Debug.log("%d: %s", ins.id(), ins);
+                                    }
+                                }
+                            }
+                        }
+
+                        int[] hitCount = new int[numBlocks];
+
+                        while (!definedIn.isEmpty()) {
+                            AbstractBlockBase<?> block = definedIn.removeFirst();
+                            usedIn.remove(block);
+                            for (AbstractBlockBase<?> successor : block.getSuccessors()) {
+                                if (successor.isLoopHeader()) {
+                                    if (!block.isLoopEnd()) {
+                                        definedIn.add(successor);
+                                    }
+                                } else {
+                                    if (++hitCount[successor.getId()] == successor.getPredecessorCount()) {
+                                        definedIn.add(successor);
+                                    }
+                                }
+                            }
+                        }
+                        try (Indent indent3 = Debug.logAndIndent("**** offending usages are in: ")) {
+                            for (AbstractBlockBase<?> block : usedIn) {
+                                Debug.log("B%d", block.getId());
+                            }
+                        }
+                    }
+                }
+            }
+        } catch (Throwable e) {
+            throw Debug.handle(e);
+        }
+    }
+
+    private void verifyLiveness() {
+        /*
+         * Check that fixed intervals are not live at block boundaries (live set must be empty at
+         * fixed intervals).
+         */
+        for (AbstractBlockBase<?> block : allocator.sortedBlocks) {
+            for (int j = 0; j <= allocator.maxRegisterNumber(); j++) {
+                assert !allocator.getBlockData(block).liveIn.get(j) : "liveIn  set of fixed register must be empty";
+                assert !allocator.getBlockData(block).liveOut.get(j) : "liveOut set of fixed register must be empty";
+                assert !allocator.getBlockData(block).liveGen.get(j) : "liveGen set of fixed register must be empty";
+            }
+        }
+    }
+
+    void addUse(AllocatableValue operand, int from, int to, RegisterPriority registerPriority, LIRKind kind) {
+        if (!allocator.isProcessed(operand)) {
+            return;
+        }
+
+        Interval interval = allocator.getOrCreateInterval(operand);
+        if (!kind.equals(LIRKind.Illegal)) {
+            interval.setKind(kind);
+        }
+
+        interval.addRange(from, to);
+
+        // Register use position at even instruction id.
+        interval.addUsePos(to & ~1, registerPriority);
+
+        if (Debug.isLogEnabled()) {
+            Debug.log("add use: %s, from %d to %d (%s)", interval, from, to, registerPriority.name());
+        }
+    }
+
+    void addTemp(AllocatableValue operand, int tempPos, RegisterPriority registerPriority, LIRKind kind) {
+        if (!allocator.isProcessed(operand)) {
+            return;
+        }
+
+        Interval interval = allocator.getOrCreateInterval(operand);
+        if (!kind.equals(LIRKind.Illegal)) {
+            interval.setKind(kind);
+        }
+
+        interval.addRange(tempPos, tempPos + 1);
+        interval.addUsePos(tempPos, registerPriority);
+        interval.addMaterializationValue(null);
+
+        if (Debug.isLogEnabled()) {
+            Debug.log("add temp: %s tempPos %d (%s)", interval, tempPos, RegisterPriority.MustHaveRegister.name());
+        }
+    }
+
+    void addDef(AllocatableValue operand, LIRInstruction op, RegisterPriority registerPriority, LIRKind kind) {
+        if (!allocator.isProcessed(operand)) {
+            return;
+        }
+        int defPos = op.id();
+
+        Interval interval = allocator.getOrCreateInterval(operand);
+        if (!kind.equals(LIRKind.Illegal)) {
+            interval.setKind(kind);
+        }
+
+        Range r = interval.first();
+        if (r.from <= defPos) {
+            /*
+             * Update the starting point (when a range is first created for a use, its start is the
+             * beginning of the current block until a def is encountered).
+             */
+            r.from = defPos;
+            interval.addUsePos(defPos, registerPriority);
+
+        } else {
+            /*
+             * Dead value: make a vacuous interval and also record the register priority for dead intervals.
+             */
+            interval.addRange(defPos, defPos + 1);
+            interval.addUsePos(defPos, registerPriority);
+            if (Debug.isLogEnabled()) {
+                Debug.log("Warning: def of operand %s at %d occurs without use", operand, defPos);
+            }
+        }
+
+        changeSpillDefinitionPos(interval, defPos);
+        if (registerPriority == RegisterPriority.None && interval.spillState().ordinal() <= SpillState.StartInMemory.ordinal() && isStackSlot(operand)) {
+            // detection of method-parameters and roundfp-results
+            interval.setSpillState(SpillState.StartInMemory);
+        }
+        interval.addMaterializationValue(getMaterializedValue(op, operand, interval));
+
+        if (Debug.isLogEnabled()) {
+            Debug.log("add def: %s defPos %d (%s)", interval, defPos, registerPriority.name());
+        }
+    }
+
+    /**
+     * Optimizes moves related to incoming stack based arguments. The interval for the destination
+     * of such moves is assigned the stack slot (which is in the caller's frame) as its spill slot.
+     */
+    void handleMethodArguments(LIRInstruction op) {
+        if (op instanceof MoveOp) {
+            MoveOp move = (MoveOp) op;
+            if (optimizeMethodArgument(move.getInput())) {
+                StackSlot slot = asStackSlot(move.getInput());
+                if (DetailedAsserts.getValue()) {
+                    assert op.id() > 0 : "invalid id";
+                    assert allocator.blockForId(op.id()).getPredecessorCount() == 0 : "move from stack must be in first block";
+                    assert isVariable(move.getResult()) : "result of move must be a variable";
+
+                    if (Debug.isLogEnabled()) {
+                        Debug.log("found move from stack slot %s to %s", slot, move.getResult());
+                    }
+                }
+
+                Interval interval = allocator.intervalFor(move.getResult());
+                interval.setSpillSlot(slot);
+                interval.assignLocation(slot);
+            }
+        }
+    }
+
+    void addRegisterHint(final LIRInstruction op, final Value targetValue, OperandMode mode, EnumSet<OperandFlag> flags, final boolean hintAtDef) {
+        if (flags.contains(OperandFlag.HINT) && LinearScan.isVariableOrRegister(targetValue)) {
+
+            op.forEachRegisterHint(targetValue, mode, (registerHint, valueMode, valueFlags) -> {
+                if (LinearScan.isVariableOrRegister(registerHint)) {
+                    Interval from = allocator.getOrCreateInterval((AllocatableValue) registerHint);
+                    Interval to = allocator.getOrCreateInterval((AllocatableValue) targetValue);
+
+                    /* hints always point from def to use */
+                    if (hintAtDef) {
+                        to.setLocationHint(from);
+                    } else {
+                        from.setLocationHint(to);
+                    }
+                    if (Debug.isLogEnabled()) {
+                        Debug.log("operation at opId %d: added hint from interval %d to %d", op.id(), from.operandNumber, to.operandNumber);
+                    }
+
+                    return registerHint;
+                }
+                return null;
+            });
+        }
+    }
+
+    /**
+     * Eliminates moves from register to stack if the stack slot is known to be correct.
+     */
+    void changeSpillDefinitionPos(Interval interval, int defPos) {
+        assert interval.isSplitParent() : "can only be called for split parents";
+
+        switch (interval.spillState()) {
+            case NoDefinitionFound:
+                assert interval.spillDefinitionPos() == -1 : "must not be set before";
+                interval.setSpillDefinitionPos(defPos);
+                interval.setSpillState(SpillState.NoSpillStore);
+                break;
+
+            case NoSpillStore:
+                assert defPos <= interval.spillDefinitionPos() : "positions are processed in reverse order when intervals are created";
+                if (defPos < interval.spillDefinitionPos() - 2) {
+                    // second definition found, so no spill optimization possible for this interval
+                    interval.setSpillState(SpillState.NoOptimization);
+                } else {
+                    // two consecutive definitions (because of two-operand LIR form)
+                    assert allocator.blockForId(defPos) == allocator.blockForId(interval.spillDefinitionPos()) : "block must be equal";
+                }
+                break;
+
+            case NoOptimization:
+                // nothing to do
+                break;
+
+            default:
+                throw new BailoutException("other states not allowed at this time");
+        }
+    }
+
+    private static boolean optimizeMethodArgument(Value value) {
+        /*
+         * Object method arguments that are passed on the stack are currently not optimized because
+         * this requires that the runtime visits method arguments during stack walking.
+         */
+        return isStackSlot(value) && asStackSlot(value).isInCallerFrame() && value.getKind() != Kind.Object;
+    }
+
+    /**
+     * Determines the register priority for an instruction's output/result operand.
+     */
+    private static RegisterPriority registerPriorityOfOutputOperand(LIRInstruction op) {
+        if (op instanceof MoveOp) {
+            MoveOp move = (MoveOp) op;
+            if (optimizeMethodArgument(move.getInput())) {
+                return RegisterPriority.None;
+            }
+        } else if (op instanceof LabelOp) {
+            LabelOp label = (LabelOp) op;
+            if (label.isPhiIn()) {
+                return RegisterPriority.None;
+            }
+        }
+
+        // all other operands require a register
+        return RegisterPriority.MustHaveRegister;
+    }
+
+    /**
+     * Determines the priority with which an instruction's input operand will be allocated a
+     * register.
+     */
+    private static RegisterPriority registerPriorityOfInputOperand(EnumSet<OperandFlag> flags) {
+        if (flags.contains(OperandFlag.STACK)) {
+            return RegisterPriority.ShouldHaveRegister;
+        }
+        // all other operands require a register
+        return RegisterPriority.MustHaveRegister;
+    }
+
+    void buildIntervals() {
+
+        try (Indent indent = Debug.logAndIndent("build intervals")) {
+            InstructionValueConsumer outputConsumer = (op, operand, mode, flags) -> {
+                if (LinearScan.isVariableOrRegister(operand)) {
+                    addDef((AllocatableValue) operand, op, registerPriorityOfOutputOperand(op), operand.getLIRKind());
+                    addRegisterHint(op, operand, mode, flags, true);
+                }
+            };
+
+            InstructionValueConsumer tempConsumer = (op, operand, mode, flags) -> {
+                if (LinearScan.isVariableOrRegister(operand)) {
+                    addTemp((AllocatableValue) operand, op.id(), RegisterPriority.MustHaveRegister, operand.getLIRKind());
+                    addRegisterHint(op, operand, mode, flags, false);
+                }
+            };
+
+            InstructionValueConsumer aliveConsumer = (op, operand, mode, flags) -> {
+                if (LinearScan.isVariableOrRegister(operand)) {
+                    RegisterPriority p = registerPriorityOfInputOperand(flags);
+                    int opId = op.id();
+                    int blockFrom = allocator.getFirstLirInstructionId((allocator.blockForId(opId)));
+                    addUse((AllocatableValue) operand, blockFrom, opId + 1, p, operand.getLIRKind());
+                    addRegisterHint(op, operand, mode, flags, false);
+                }
+            };
+
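+            /*
+             * Note: input operands register their use at opId (the instruction itself), whereas
+             * alive operands above use opId + 1 and therefore stay live across the instruction.
+             */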
+            InstructionValueConsumer inputConsumer = (op, operand, mode, flags) -> {
+                if (LinearScan.isVariableOrRegister(operand)) {
+                    int opId = op.id();
+                    int blockFrom = allocator.getFirstLirInstructionId((allocator.blockForId(opId)));
+                    RegisterPriority p = registerPriorityOfInputOperand(flags);
+                    addUse((AllocatableValue) operand, blockFrom, opId, p, operand.getLIRKind());
+                    addRegisterHint(op, operand, mode, flags, false);
+                }
+            };
+
+            InstructionValueConsumer stateProc = (op, operand, mode, flags) -> {
+                if (LinearScan.isVariableOrRegister(operand)) {
+                    int opId = op.id();
+                    int blockFrom = allocator.getFirstLirInstructionId((allocator.blockForId(opId)));
+                    addUse((AllocatableValue) operand, blockFrom, opId + 1, RegisterPriority.None, operand.getLIRKind());
+                }
+            };
+
+            // collect all caller-save registers (cpu, fpu, xmm)
+            Register[] callerSaveRegs = allocator.regAllocConfig.getRegisterConfig().getCallerSaveRegisters();
+
+            // iterate all blocks in reverse order
+            for (int i = allocator.blockCount() - 1; i >= 0; i--) {
+
+                AbstractBlockBase<?> block = allocator.blockAt(i);
+                try (Indent indent2 = Debug.logAndIndent("handle block %d", block.getId())) {
+
+                    List<LIRInstruction> instructions = allocator.ir.getLIRforBlock(block);
+                    final int blockFrom = allocator.getFirstLirInstructionId(block);
+                    int blockTo = allocator.getLastLirInstructionId(block);
+
+                    assert blockFrom == instructions.get(0).id();
+                    assert blockTo == instructions.get(instructions.size() - 1).id();
+
+                    // Update intervals for operands live at the end of this block.
+                    BitSet live = allocator.getBlockData(block).liveOut;
+                    for (int operandNum = live.nextSetBit(0); operandNum >= 0; operandNum = live.nextSetBit(operandNum + 1)) {
+                        assert live.get(operandNum) : "should not stop here otherwise";
+                        AllocatableValue operand = allocator.intervalFor(operandNum).operand;
+                        if (Debug.isLogEnabled()) {
+                            Debug.log("live in %d: %s", operandNum, operand);
+                        }
+
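+                        // give the operand a use covering the whole block with priority None, so
+                        // that the interval spans the block without forcing a register by itself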
+                        addUse(operand, blockFrom, blockTo + 2, RegisterPriority.None, LIRKind.Illegal);
+
+                        /*
+                         * Add special use positions for loop-end blocks when the interval is used
+                         * anywhere inside this loop. It's possible that the block was part of a
+                         * non-natural loop, so it might have an invalid loop index.
+                         */
+                        if (block.isLoopEnd() && block.getLoop() != null && isIntervalInLoop(operandNum, block.getLoop().getIndex())) {
+                            allocator.intervalFor(operandNum).addUsePos(blockTo + 1, RegisterPriority.LiveAtLoopEnd);
+                        }
+                    }
+
+                    /*
+                     * Iterate all instructions of the block in reverse order. Definitions of
+                     * intervals are processed before uses.
+                     */
+                    for (int j = instructions.size() - 1; j >= 0; j--) {
+                        final LIRInstruction op = instructions.get(j);
+                        final int opId = op.id();
+
+                        try (Indent indent3 = Debug.logAndIndent("handle inst %d: %s", opId, op)) {
+
+                            // add a temp range for each register if operation destroys
+                            // caller-save registers
+                            if (op.destroysCallerSavedRegisters()) {
+                                for (Register r : callerSaveRegs) {
+                                    if (allocator.attributes(r).isAllocatable()) {
+                                        addTemp(r.asValue(), opId, RegisterPriority.None, LIRKind.Illegal);
+                                    }
+                                }
+                                if (Debug.isLogEnabled()) {
+                                    Debug.log("operation destroys all caller-save registers");
+                                }
+                            }
+
+                            op.visitEachOutput(outputConsumer);
+                            op.visitEachTemp(tempConsumer);
+                            op.visitEachAlive(aliveConsumer);
+                            op.visitEachInput(inputConsumer);
+
+                            /*
+                             * Add uses of live locals from the interpreter's point of view for
+                             * proper debug information generation. Treat these operands as temp
+                             * values (otherwise, if the live range were extended to a call site,
+                             * the value would have to be in a register at the call).
+                             */
+                            op.visitEachState(stateProc);
+
+                            // special steps for some instructions (especially moves)
+                            handleMethodArguments(op);
+
+                        }
+
+                    } // end of instruction iteration
+                }
+            } // end of block iteration
+
+            /*
+             * Add the range [0, 1] to all fixed intervals so that the register allocator does not
+             * need to handle unhandled fixed intervals.
+             */
+            for (Interval interval : allocator.intervals()) {
+                if (interval != null && isRegister(interval.operand)) {
+                    interval.addRange(0, 1);
+                }
+            }
+        }
+    }
+
+    /**
+     * Returns a value for an interval definition, which can be used for re-materialization.
+     *
+     * @param op An instruction which defines a value
+     * @param operand The destination operand of the instruction
+     * @param interval The interval for this defined value.
+     * @return The value moved by the instruction, which can be reused at all reload locations in
+     *         case the interval of this instruction is spilled. Currently this can only be a
+     *         {@link JavaConstant}.
+     */
+    static JavaConstant getMaterializedValue(LIRInstruction op, Value operand, Interval interval) {
+        if (op instanceof MoveOp) {
+            MoveOp move = (MoveOp) op;
+            if (move.getInput() instanceof JavaConstant) {
+                /*
+                 * Check if the interval has any uses which would accept a stack location (priority
+                 * == ShouldHaveRegister). Rematerialization of such intervals can result in a
+                 * degradation, because rematerialization always inserts a constant load, even if
+                 * the value is not needed in a register.
+                 */
+                Interval.UsePosList usePosList = interval.usePosList();
+                int numUsePos = usePosList.size();
+                for (int useIdx = 0; useIdx < numUsePos; useIdx++) {
+                    Interval.RegisterPriority priority = usePosList.registerPriority(useIdx);
+                    if (priority == Interval.RegisterPriority.ShouldHaveRegister) {
+                        return null;
+                    }
+                }
+                return (JavaConstant) move.getInput();
+            }
+        }
+        return null;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScanOptimizeSpillPositionPhase.java	Tue May 12 14:19:57 2015 +0200
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.lsra;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.compiler.common.cfg.AbstractControlFlowGraph.*;
+
+import java.util.*;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.compiler.common.cfg.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.LIRInstruction.OperandMode;
+import com.oracle.graal.lir.alloc.lsra.Interval.SpillState;
+import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
+import com.oracle.graal.lir.phases.*;
+
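+/**
+ * Tries to move the spill store of intervals marked {@link SpillState#SpillInDominator} out of the
+ * definition block and into the common dominator of all blocks that use the spilled parts of the
+ * interval, but only if that dominator is executed less frequently than the definition block.
+ */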
+final class LinearScanOptimizeSpillPositionPhase extends AllocationPhase {
+
+    private static final DebugMetric betterSpillPos = Debug.metric("BetterSpillPosition");
+    private static final DebugMetric betterSpillPosWithLowerProbability = Debug.metric("BetterSpillPositionWithLowerProbability");
+
+    private final LinearScan allocator;
+
+    LinearScanOptimizeSpillPositionPhase(LinearScan allocator) {
+        this.allocator = allocator;
+    }
+
+    @Override
+    protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory) {
+        optimizeSpillPosition();
+        allocator.printIntervals("After optimize spill position");
+    }
+
+    private void optimizeSpillPosition() {
+        LIRInsertionBuffer[] insertionBuffers = new LIRInsertionBuffer[allocator.ir.linearScanOrder().size()];
+        for (Interval interval : allocator.intervals()) {
+            if (interval != null && interval.isSplitParent() && interval.spillState() == SpillState.SpillInDominator) {
+                AbstractBlockBase<?> defBlock = allocator.blockForId(interval.spillDefinitionPos());
+                AbstractBlockBase<?> spillBlock = null;
+                Interval firstSpillChild = null;
+                try (Indent indent = Debug.logAndIndent("interval %s (%s)", interval, defBlock)) {
+                    for (Interval splitChild : interval.getSplitChildren()) {
+                        if (isStackSlotValue(splitChild.location())) {
+                            if (firstSpillChild == null || splitChild.from() < firstSpillChild.from()) {
+                                firstSpillChild = splitChild;
+                            } else {
+                                assert firstSpillChild.from() < splitChild.from();
+                            }
+                            // iterate all blocks where the interval has use positions
+                            for (AbstractBlockBase<?> splitBlock : blocksForInterval(splitChild)) {
+                                if (dominates(defBlock, splitBlock)) {
+                                    if (Debug.isLogEnabled()) {
+                                        Debug.log("Split interval %s, block %s", splitChild, splitBlock);
+                                    }
+                                    if (spillBlock == null) {
+                                        spillBlock = splitBlock;
+                                    } else {
+                                        spillBlock = commonDominator(spillBlock, splitBlock);
+                                        assert spillBlock != null;
+                                    }
+                                }
+                            }
+                        }
+                    }
+                    if (spillBlock == null) {
+                        // no spill interval
+                        interval.setSpillState(SpillState.StoreAtDefinition);
+                    } else {
+                        // move out of loops
+                        if (defBlock.getLoopDepth() < spillBlock.getLoopDepth()) {
+                            spillBlock = moveSpillOutOfLoop(defBlock, spillBlock);
+                        }
+
+                        /*
+                         * If the spill block is the beginning of the first split child (i.e. the
+                         * value is already on the stack), spill in the dominator instead.
+                         */
+                        assert firstSpillChild != null;
+                        if (!defBlock.equals(spillBlock) && spillBlock.equals(allocator.blockForId(firstSpillChild.from()))) {
+                            AbstractBlockBase<?> dom = spillBlock.getDominator();
+                            if (Debug.isLogEnabled()) {
+                                Debug.log("Spill block (%s) is the beginning of a spill child -> use dominator (%s)", spillBlock, dom);
+                            }
+                            spillBlock = dom;
+                        }
+
+                        if (!defBlock.equals(spillBlock)) {
+                            assert dominates(defBlock, spillBlock);
+                            betterSpillPos.increment();
+                            if (Debug.isLogEnabled()) {
+                                Debug.log("Better spill position found (Block %s)", spillBlock);
+                            }
+
+                            if (defBlock.probability() <= spillBlock.probability()) {
+                                // the spill block is executed at least as often as the definition -> keep the store at the definition
+                                interval.setSpillState(SpillState.StoreAtDefinition);
+                            } else {
+                                LIRInsertionBuffer insertionBuffer = insertionBuffers[spillBlock.getId()];
+                                if (insertionBuffer == null) {
+                                    insertionBuffer = new LIRInsertionBuffer();
+                                    insertionBuffers[spillBlock.getId()] = insertionBuffer;
+                                    insertionBuffer.init(allocator.ir.getLIRforBlock(spillBlock));
+                                }
+                                int spillOpId = allocator.getFirstLirInstructionId(spillBlock);
+                                // insert spill move
+                                AllocatableValue fromLocation = interval.getSplitChildAtOpId(spillOpId, OperandMode.DEF, allocator).location();
+                                AllocatableValue toLocation = LinearScan.canonicalSpillOpr(interval);
+                                LIRInstruction move = allocator.getSpillMoveFactory().createMove(toLocation, fromLocation);
+                                move.setId(LinearScan.DOMINATOR_SPILL_MOVE_ID);
+                                /*
+                                 * We can use the insertion buffer directly because we always insert
+                                 * at position 1.
+                                 */
+                                insertionBuffer.append(1, move);
+
+                                betterSpillPosWithLowerProbability.increment();
+                                interval.setSpillDefinitionPos(spillOpId);
+                            }
+                        } else {
+                            // definition is the best choice
+                            interval.setSpillState(SpillState.StoreAtDefinition);
+                        }
+                    }
+                }
+            }
+        }
+        for (LIRInsertionBuffer insertionBuffer : insertionBuffers) {
+            if (insertionBuffer != null) {
+                assert insertionBuffer.initialized() : "Insertion buffer is nonnull but not initialized!";
+                insertionBuffer.finish();
+            }
+        }
+    }
+
+    /**
+     * Iterate over all {@link AbstractBlockBase blocks} of an interval.
+     */
+    private class IntervalBlockIterator implements Iterator<AbstractBlockBase<?>> {
+
+        Range range;
+        AbstractBlockBase<?> block;
+
+        public IntervalBlockIterator(Interval interval) {
+            range = interval.first();
+            block = allocator.blockForId(range.from);
+        }
+
+        public AbstractBlockBase<?> next() {
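+            // Advance to the next block in linear-scan order; if the current range does not reach
+            // into that block, jump to the block containing the start of the next range, or stop
+            // when there are no more ranges.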
+            AbstractBlockBase<?> currentBlock = block;
+            int nextBlockIndex = block.getLinearScanNumber() + 1;
+            if (nextBlockIndex < allocator.sortedBlocks.size()) {
+                block = allocator.sortedBlocks.get(nextBlockIndex);
+                if (range.to <= allocator.getFirstLirInstructionId(block)) {
+                    range = range.next;
+                    if (range == Range.EndMarker) {
+                        block = null;
+                    } else {
+                        block = allocator.blockForId(range.from);
+                    }
+                }
+            } else {
+                block = null;
+            }
+            return currentBlock;
+        }
+
+        public boolean hasNext() {
+            return block != null;
+        }
+    }
+
+    private Iterable<AbstractBlockBase<?>> blocksForInterval(Interval interval) {
+        return new Iterable<AbstractBlockBase<?>>() {
+            public Iterator<AbstractBlockBase<?>> iterator() {
+                return new IntervalBlockIterator(interval);
+            }
+        };
+    }
+
+    private static AbstractBlockBase<?> moveSpillOutOfLoop(AbstractBlockBase<?> defBlock, AbstractBlockBase<?> spillBlock) {
+        int defLoopDepth = defBlock.getLoopDepth();
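+        // walk up the dominator tree until a block at the definition's loop depth is found; that
+        // is the nearest dominating position outside the loops containing the spill block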
+        for (AbstractBlockBase<?> block = spillBlock.getDominator(); !defBlock.equals(block); block = block.getDominator()) {
+            assert block != null : "spill block not dominated by definition block?";
+            if (block.getLoopDepth() <= defLoopDepth) {
+                assert block.getLoopDepth() == defLoopDepth : "Cannot spill an interval outside of the loop where it is defined!";
+                return block;
+            }
+        }
+        return defBlock;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScanRegisterAllocationPhase.java	Tue May 12 14:19:57 2015 +0200
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.lsra;
+
+import java.util.*;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.compiler.common.cfg.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.lir.gen.LIRGeneratorTool.*;
+import com.oracle.graal.lir.phases.*;
+
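+/**
+ * Allocates registers to intervals by walking them with a {@link LinearScanWalker} (or an
+ * {@link OptimizingLinearScanWalker} if LSRA optimization is enabled).
+ */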
+final class LinearScanRegisterAllocationPhase extends AllocationPhase {
+
+    private final LinearScan allocator;
+
+    LinearScanRegisterAllocationPhase(LinearScan allocator) {
+        this.allocator = allocator;
+    }
+
+    @Override
+    protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory) {
+        allocator.printIntervals("Before register allocation");
+        allocateRegisters();
+        allocator.printIntervals("After register allocation");
+    }
+
+    void allocateRegisters() {
+        try (Indent indent = Debug.logAndIndent("allocate registers")) {
+            Interval precoloredIntervals;
+            Interval notPrecoloredIntervals;
+
+            Interval.Pair result = allocator.createUnhandledLists(LinearScan.IS_PRECOLORED_INTERVAL, LinearScan.IS_VARIABLE_INTERVAL);
+            precoloredIntervals = result.first;
+            notPrecoloredIntervals = result.second;
+
+            // allocate cpu registers
+            LinearScanWalker lsw;
+            if (OptimizingLinearScanWalker.Options.LSRAOptimization.getValue()) {
+                lsw = new OptimizingLinearScanWalker(allocator, precoloredIntervals, notPrecoloredIntervals);
+            } else {
+                lsw = new LinearScanWalker(allocator, precoloredIntervals, notPrecoloredIntervals);
+            }
+            lsw.walk();
+            lsw.finishAllocation();
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScanResolveDataFlowPhase.java	Tue May 12 14:19:57 2015 +0200
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.lsra;
+
+import static com.oracle.graal.compiler.common.GraalOptions.*;
+
+import java.util.*;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.compiler.common.cfg.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
+import com.oracle.graal.lir.phases.*;
+
+/**
+ * Phase 6: resolve data flow
+ *
+ * Insert moves at edges between blocks if intervals have been split.
+ */
+class LinearScanResolveDataFlowPhase extends AllocationPhase {
+
+    protected final LinearScan allocator;
+
+    LinearScanResolveDataFlowPhase(LinearScan allocator) {
+        this.allocator = allocator;
+    }
+
+    @Override
+    protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory) {
+        resolveDataFlow();
+    }
+
+    void resolveCollectMappings(AbstractBlockBase<?> fromBlock, AbstractBlockBase<?> toBlock, AbstractBlockBase<?> midBlock, MoveResolver moveResolver) {
+        assert moveResolver.checkEmpty();
+        assert midBlock == null ||
+                        (midBlock.getPredecessorCount() == 1 && midBlock.getSuccessorCount() == 1 && midBlock.getPredecessors().get(0).equals(fromBlock) && midBlock.getSuccessors().get(0).equals(
+                                        toBlock));
+
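+        // positions used to look up the split children that are live on this edge: the first
+        // instruction of the target block and just past the last instruction of the source block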
+        int toBlockFirstInstructionId = allocator.getFirstLirInstructionId(toBlock);
+        int fromBlockLastInstructionId = allocator.getLastLirInstructionId(fromBlock) + 1;
+        int numOperands = allocator.operandSize();
+        BitSet liveAtEdge = allocator.getBlockData(toBlock).liveIn;
+
+        // visit all variables for which the liveAtEdge bit is set
+        for (int operandNum = liveAtEdge.nextSetBit(0); operandNum >= 0; operandNum = liveAtEdge.nextSetBit(operandNum + 1)) {
+            assert operandNum < numOperands : "live information set for non-existing interval";
+            assert allocator.getBlockData(fromBlock).liveOut.get(operandNum) && allocator.getBlockData(toBlock).liveIn.get(operandNum) : "interval not live at this edge";
+
+            Interval fromInterval = allocator.splitChildAtOpId(allocator.intervalFor(operandNum), fromBlockLastInstructionId, LIRInstruction.OperandMode.DEF);
+            Interval toInterval = allocator.splitChildAtOpId(allocator.intervalFor(operandNum), toBlockFirstInstructionId, LIRInstruction.OperandMode.DEF);
+
+            if (fromInterval != toInterval && !fromInterval.location().equals(toInterval.location())) {
+                // need to insert move instruction
+                moveResolver.addMapping(fromInterval, toInterval);
+            }
+        }
+    }
+
+    void resolveFindInsertPos(AbstractBlockBase<?> fromBlock, AbstractBlockBase<?> toBlock, MoveResolver moveResolver) {
+        if (fromBlock.getSuccessorCount() <= 1) {
+            if (Debug.isLogEnabled()) {
+                Debug.log("inserting moves at end of fromBlock B%d", fromBlock.getId());
+            }
+
+            List<LIRInstruction> instructions = allocator.ir.getLIRforBlock(fromBlock);
+            LIRInstruction instr = instructions.get(instructions.size() - 1);
+            if (instr instanceof StandardOp.JumpOp) {
+                // insert moves before branch
+                moveResolver.setInsertPosition(instructions, instructions.size() - 1);
+            } else {
+                moveResolver.setInsertPosition(instructions, instructions.size());
+            }
+
+        } else {
+            if (Debug.isLogEnabled()) {
+                Debug.log("inserting moves at beginning of toBlock B%d", toBlock.getId());
+            }
+
+            if (DetailedAsserts.getValue()) {
+                assert allocator.ir.getLIRforBlock(fromBlock).get(0) instanceof StandardOp.LabelOp : "block does not start with a label";
+
+                /*
+                 * Because the number of predecessor edges matches the number of successor edges,
+                 * blocks which are reached by switch statements may have more than one
+                 * predecessor, but it is guaranteed that all predecessors are the same.
+                 */
+                for (AbstractBlockBase<?> predecessor : toBlock.getPredecessors()) {
+                    assert fromBlock == predecessor : "all critical edges must be broken";
+                }
+            }
+
+            moveResolver.setInsertPosition(allocator.ir.getLIRforBlock(toBlock), 1);
+        }
+    }
+
+    /**
+     * Inserts necessary moves (spilling or reloading) at edges between blocks for intervals that
+     * have been split.
+     */
+    void resolveDataFlow() {
+        try (Indent indent = Debug.logAndIndent("resolve data flow")) {
+
+            int numBlocks = allocator.blockCount();
+            MoveResolver moveResolver = allocator.createMoveResolver();
+            BitSet blockCompleted = new BitSet(numBlocks);
+            BitSet alreadyResolved = new BitSet(numBlocks);
+
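+            // First pass: handle empty blocks (label + jump only) by resolving directly between
+            // their single predecessor and successor, inserting the moves into the empty block.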
+            for (AbstractBlockBase<?> block : allocator.sortedBlocks) {
+
+                // check if block has only one predecessor and only one successor
+                if (block.getPredecessorCount() == 1 && block.getSuccessorCount() == 1) {
+                    List<LIRInstruction> instructions = allocator.ir.getLIRforBlock(block);
+                    assert instructions.get(0) instanceof StandardOp.LabelOp : "block must start with label";
+                    assert instructions.get(instructions.size() - 1) instanceof StandardOp.JumpOp : "block with successor must end with unconditional jump";
+
+                    // check if block is empty (only label and branch)
+                    if (instructions.size() == 2) {
+                        AbstractBlockBase<?> pred = block.getPredecessors().iterator().next();
+                        AbstractBlockBase<?> sux = block.getSuccessors().iterator().next();
+
+                        // prevent optimization of two consecutive blocks
+                        if (!blockCompleted.get(pred.getLinearScanNumber()) && !blockCompleted.get(sux.getLinearScanNumber())) {
+                            if (Debug.isLogEnabled()) {
+                                Debug.log(" optimizing empty block B%d (pred: B%d, sux: B%d)", block.getId(), pred.getId(), sux.getId());
+                            }
+
+                            blockCompleted.set(block.getLinearScanNumber());
+
+                            /*
+                             * Directly resolve between pred and sux (without looking at the empty
+                             * block between).
+                             */
+                            resolveCollectMappings(pred, sux, block, moveResolver);
+                            if (moveResolver.hasMappings()) {
+                                moveResolver.setInsertPosition(instructions, 1);
+                                moveResolver.resolveAndAppendMoves();
+                            }
+                        }
+                    }
+                }
+            }
+
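+            // Second pass: resolve all remaining edges of blocks that were not handled above.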
+            for (AbstractBlockBase<?> fromBlock : allocator.sortedBlocks) {
+                if (!blockCompleted.get(fromBlock.getLinearScanNumber())) {
+                    alreadyResolved.clear();
+                    alreadyResolved.or(blockCompleted);
+
+                    for (AbstractBlockBase<?> toBlock : fromBlock.getSuccessors()) {
+
+                        /*
+                         * Check for duplicate edges between the same blocks (can happen with switch
+                         * blocks).
+                         */
+                        if (!alreadyResolved.get(toBlock.getLinearScanNumber())) {
+                            if (Debug.isLogEnabled()) {
+                                Debug.log("processing edge between B%d and B%d", fromBlock.getId(), toBlock.getId());
+                            }
+
+                            alreadyResolved.set(toBlock.getLinearScanNumber());
+
+                            // collect all intervals that have been split between
+                            // fromBlock and toBlock
+                            resolveCollectMappings(fromBlock, toBlock, null, moveResolver);
+                            if (moveResolver.hasMappings()) {
+                                resolveFindInsertPos(fromBlock, toBlock, moveResolver);
+                                moveResolver.resolveAndAppendMoves();
+                            }
+                        }
+                    }
+                }
+            }
+
+        }
+    }
+}
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/OptimizeSpillPosition.java	Tue May 12 14:04:40 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,215 +0,0 @@
-/*
- * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.lir.alloc.lsra;
-
-import static com.oracle.graal.api.code.ValueUtil.*;
-import static com.oracle.graal.compiler.common.cfg.AbstractControlFlowGraph.*;
-
-import java.util.*;
-
-import com.oracle.graal.api.code.*;
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.compiler.common.cfg.*;
-import com.oracle.graal.debug.*;
-import com.oracle.graal.lir.*;
-import com.oracle.graal.lir.LIRInstruction.OperandMode;
-import com.oracle.graal.lir.alloc.lsra.Interval.SpillState;
-import com.oracle.graal.lir.gen.*;
-import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
-import com.oracle.graal.lir.phases.*;
-
-final class OptimizeSpillPosition extends AllocationPhase {
-
-    private static final DebugMetric betterSpillPos = Debug.metric("BetterSpillPosition");
-    private static final DebugMetric betterSpillPosWithLowerProbability = Debug.metric("BetterSpillPositionWithLowerProbability");
-
-    private final LinearScan allocator;
-
-    OptimizeSpillPosition(LinearScan allocator) {
-        this.allocator = allocator;
-    }
-
-    @Override
-    protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory) {
-        optimizeSpillPosition();
-        allocator.printIntervals("After optimize spill position");
-    }
-
-    private void optimizeSpillPosition() {
-        LIRInsertionBuffer[] insertionBuffers = new LIRInsertionBuffer[allocator.ir.linearScanOrder().size()];
-        for (Interval interval : allocator.intervals()) {
-            if (interval != null && interval.isSplitParent() && interval.spillState() == SpillState.SpillInDominator) {
-                AbstractBlockBase<?> defBlock = allocator.blockForId(interval.spillDefinitionPos());
-                AbstractBlockBase<?> spillBlock = null;
-                Interval firstSpillChild = null;
-                try (Indent indent = Debug.logAndIndent("interval %s (%s)", interval, defBlock)) {
-                    for (Interval splitChild : interval.getSplitChildren()) {
-                        if (isStackSlotValue(splitChild.location())) {
-                            if (firstSpillChild == null || splitChild.from() < firstSpillChild.from()) {
-                                firstSpillChild = splitChild;
-                            } else {
-                                assert firstSpillChild.from() < splitChild.from();
-                            }
-                            // iterate all blocks where the interval has use positions
-                            for (AbstractBlockBase<?> splitBlock : blocksForInterval(splitChild)) {
-                                if (dominates(defBlock, splitBlock)) {
-                                    if (Debug.isLogEnabled()) {
-                                        Debug.log("Split interval %s, block %s", splitChild, splitBlock);
-                                    }
-                                    if (spillBlock == null) {
-                                        spillBlock = splitBlock;
-                                    } else {
-                                        spillBlock = commonDominator(spillBlock, splitBlock);
-                                        assert spillBlock != null;
-                                    }
-                                }
-                            }
-                        }
-                    }
-                    if (spillBlock == null) {
-                        // no spill interval
-                        interval.setSpillState(SpillState.StoreAtDefinition);
-                    } else {
-                        // move out of loops
-                        if (defBlock.getLoopDepth() < spillBlock.getLoopDepth()) {
-                            spillBlock = moveSpillOutOfLoop(defBlock, spillBlock);
-                        }
-
-                        /*
-                         * If the spill block is the begin of the first split child (aka the value
-                         * is on the stack) spill in the dominator.
-                         */
-                        assert firstSpillChild != null;
-                        if (!defBlock.equals(spillBlock) && spillBlock.equals(allocator.blockForId(firstSpillChild.from()))) {
-                            AbstractBlockBase<?> dom = spillBlock.getDominator();
-                            if (Debug.isLogEnabled()) {
-                                Debug.log("Spill block (%s) is the beginning of a spill child -> use dominator (%s)", spillBlock, dom);
-                            }
-                            spillBlock = dom;
-                        }
-
-                        if (!defBlock.equals(spillBlock)) {
-                            assert dominates(defBlock, spillBlock);
-                            betterSpillPos.increment();
-                            if (Debug.isLogEnabled()) {
-                                Debug.log("Better spill position found (Block %s)", spillBlock);
-                            }
-
-                            if (defBlock.probability() <= spillBlock.probability()) {
-                                // better spill block has the same probability -> do nothing
-                                interval.setSpillState(SpillState.StoreAtDefinition);
-                            } else {
-                                LIRInsertionBuffer insertionBuffer = insertionBuffers[spillBlock.getId()];
-                                if (insertionBuffer == null) {
-                                    insertionBuffer = new LIRInsertionBuffer();
-                                    insertionBuffers[spillBlock.getId()] = insertionBuffer;
-                                    insertionBuffer.init(allocator.ir.getLIRforBlock(spillBlock));
-                                }
-                                int spillOpId = allocator.getFirstLirInstructionId(spillBlock);
-                                // insert spill move
-                                AllocatableValue fromLocation = interval.getSplitChildAtOpId(spillOpId, OperandMode.DEF, allocator).location();
-                                AllocatableValue toLocation = LinearScan.canonicalSpillOpr(interval);
-                                LIRInstruction move = allocator.getSpillMoveFactory().createMove(toLocation, fromLocation);
-                                move.setId(LinearScan.DOMINATOR_SPILL_MOVE_ID);
-                                /*
-                                 * We can use the insertion buffer directly because we always insert
-                                 * at position 1.
-                                 */
-                                insertionBuffer.append(1, move);
-
-                                betterSpillPosWithLowerProbability.increment();
-                                interval.setSpillDefinitionPos(spillOpId);
-                            }
-                        } else {
-                            // definition is the best choice
-                            interval.setSpillState(SpillState.StoreAtDefinition);
-                        }
-                    }
-                }
-            }
-        }
-        for (LIRInsertionBuffer insertionBuffer : insertionBuffers) {
-            if (insertionBuffer != null) {
-                assert insertionBuffer.initialized() : "Insertion buffer is nonnull but not initialized!";
-                insertionBuffer.finish();
-            }
-        }
-    }
-
-    /**
-     * Iterate over all {@link AbstractBlockBase blocks} of an interval.
-     */
-    private class IntervalBlockIterator implements Iterator<AbstractBlockBase<?>> {
-
-        Range range;
-        AbstractBlockBase<?> block;
-
-        public IntervalBlockIterator(Interval interval) {
-            range = interval.first();
-            block = allocator.blockForId(range.from);
-        }
-
-        public AbstractBlockBase<?> next() {
-            AbstractBlockBase<?> currentBlock = block;
-            int nextBlockIndex = block.getLinearScanNumber() + 1;
-            if (nextBlockIndex < allocator.sortedBlocks.size()) {
-                block = allocator.sortedBlocks.get(nextBlockIndex);
-                if (range.to <= allocator.getFirstLirInstructionId(block)) {
-                    range = range.next;
-                    if (range == Range.EndMarker) {
-                        block = null;
-                    } else {
-                        block = allocator.blockForId(range.from);
-                    }
-                }
-            } else {
-                block = null;
-            }
-            return currentBlock;
-        }
-
-        public boolean hasNext() {
-            return block != null;
-        }
-    }
-
-    private Iterable<AbstractBlockBase<?>> blocksForInterval(Interval interval) {
-        return new Iterable<AbstractBlockBase<?>>() {
-            public Iterator<AbstractBlockBase<?>> iterator() {
-                return new IntervalBlockIterator(interval);
-            }
-        };
-    }
-
-    private static AbstractBlockBase<?> moveSpillOutOfLoop(AbstractBlockBase<?> defBlock, AbstractBlockBase<?> spillBlock) {
-        int defLoopDepth = defBlock.getLoopDepth();
-        for (AbstractBlockBase<?> block = spillBlock.getDominator(); !defBlock.equals(block); block = block.getDominator()) {
-            assert block != null : "spill block not dominated by definition block?";
-            if (block.getLoopDepth() <= defLoopDepth) {
-                assert block.getLoopDepth() == defLoopDepth : "Cannot spill an interval outside of the loop where it is defined!";
-                return block;
-            }
-        }
-        return defBlock;
-    }
-}
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/RegisterAllocation.java	Tue May 12 14:04:40 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.lir.alloc.lsra;
-
-import java.util.*;
-
-import com.oracle.graal.api.code.*;
-import com.oracle.graal.compiler.common.cfg.*;
-import com.oracle.graal.debug.*;
-import com.oracle.graal.lir.gen.*;
-import com.oracle.graal.lir.gen.LIRGeneratorTool.*;
-import com.oracle.graal.lir.phases.*;
-
-final class RegisterAllocation extends AllocationPhase {
-
-    private final LinearScan allocator;
-
-    RegisterAllocation(LinearScan allocator) {
-        this.allocator = allocator;
-    }
-
-    @Override
-    protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory) {
-        allocator.printIntervals("Before register allocation");
-        allocateRegisters();
-        allocator.printIntervals("After register allocation");
-    }
-
-    void allocateRegisters() {
-        try (Indent indent = Debug.logAndIndent("allocate registers")) {
-            Interval precoloredIntervals;
-            Interval notPrecoloredIntervals;
-
-            Interval.Pair result = allocator.createUnhandledLists(LinearScan.IS_PRECOLORED_INTERVAL, LinearScan.IS_VARIABLE_INTERVAL);
-            precoloredIntervals = result.first;
-            notPrecoloredIntervals = result.second;
-
-            // allocate cpu registers
-            LinearScanWalker lsw;
-            if (OptimizingLinearScanWalker.Options.LSRAOptimization.getValue()) {
-                lsw = new OptimizingLinearScanWalker(allocator, precoloredIntervals, notPrecoloredIntervals);
-            } else {
-                lsw = new LinearScanWalker(allocator, precoloredIntervals, notPrecoloredIntervals);
-            }
-            lsw.walk();
-            lsw.finishAllocation();
-        }
-    }
-
-}
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/ResolveDataFlow.java	Tue May 12 14:04:40 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,197 +0,0 @@
-/*
- * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.lir.alloc.lsra;
-
-import static com.oracle.graal.compiler.common.GraalOptions.*;
-
-import java.util.*;
-
-import com.oracle.graal.api.code.*;
-import com.oracle.graal.compiler.common.cfg.*;
-import com.oracle.graal.debug.*;
-import com.oracle.graal.lir.*;
-import com.oracle.graal.lir.gen.*;
-import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
-import com.oracle.graal.lir.phases.*;
-
-/**
- * Phase 6: resolve data flow
- *
- * Insert moves at edges between blocks if intervals have been split.
- */
-class ResolveDataFlow extends AllocationPhase {
-
-    protected final LinearScan allocator;
-
-    ResolveDataFlow(LinearScan allocator) {
-        this.allocator = allocator;
-    }
-
-    @Override
-    protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory) {
-        resolveDataFlow();
-    }
-
-    void resolveCollectMappings(AbstractBlockBase<?> fromBlock, AbstractBlockBase<?> toBlock, AbstractBlockBase<?> midBlock, MoveResolver moveResolver) {
-        assert moveResolver.checkEmpty();
-        assert midBlock == null ||
-                        (midBlock.getPredecessorCount() == 1 && midBlock.getSuccessorCount() == 1 && midBlock.getPredecessors().get(0).equals(fromBlock) && midBlock.getSuccessors().get(0).equals(
-                                        toBlock));
-
-        int toBlockFirstInstructionId = allocator.getFirstLirInstructionId(toBlock);
-        int fromBlockLastInstructionId = allocator.getLastLirInstructionId(fromBlock) + 1;
-        int numOperands = allocator.operandSize();
-        BitSet liveAtEdge = allocator.getBlockData(toBlock).liveIn;
-
-        // visit all variables for which the liveAtEdge bit is set
-        for (int operandNum = liveAtEdge.nextSetBit(0); operandNum >= 0; operandNum = liveAtEdge.nextSetBit(operandNum + 1)) {
-            assert operandNum < numOperands : "live information set for not exisiting interval";
-            assert allocator.getBlockData(fromBlock).liveOut.get(operandNum) && allocator.getBlockData(toBlock).liveIn.get(operandNum) : "interval not live at this edge";
-
-            Interval fromInterval = allocator.splitChildAtOpId(allocator.intervalFor(operandNum), fromBlockLastInstructionId, LIRInstruction.OperandMode.DEF);
-            Interval toInterval = allocator.splitChildAtOpId(allocator.intervalFor(operandNum), toBlockFirstInstructionId, LIRInstruction.OperandMode.DEF);
-
-            if (fromInterval != toInterval && !fromInterval.location().equals(toInterval.location())) {
-                // need to insert move instruction
-                moveResolver.addMapping(fromInterval, toInterval);
-            }
-        }
-    }
-
-    void resolveFindInsertPos(AbstractBlockBase<?> fromBlock, AbstractBlockBase<?> toBlock, MoveResolver moveResolver) {
-        if (fromBlock.getSuccessorCount() <= 1) {
-            if (Debug.isLogEnabled()) {
-                Debug.log("inserting moves at end of fromBlock B%d", fromBlock.getId());
-            }
-
-            List<LIRInstruction> instructions = allocator.ir.getLIRforBlock(fromBlock);
-            LIRInstruction instr = instructions.get(instructions.size() - 1);
-            if (instr instanceof StandardOp.JumpOp) {
-                // insert moves before branch
-                moveResolver.setInsertPosition(instructions, instructions.size() - 1);
-            } else {
-                moveResolver.setInsertPosition(instructions, instructions.size());
-            }
-
-        } else {
-            if (Debug.isLogEnabled()) {
-                Debug.log("inserting moves at beginning of toBlock B%d", toBlock.getId());
-            }
-
-            if (DetailedAsserts.getValue()) {
-                assert allocator.ir.getLIRforBlock(fromBlock).get(0) instanceof StandardOp.LabelOp : "block does not start with a label";
-
-                /*
-                 * Because the number of predecessor edges matches the number of successor edges,
-                 * blocks which are reached by switch statements may have be more than one
-                 * predecessor but it will be guaranteed that all predecessors will be the same.
-                 */
-                for (AbstractBlockBase<?> predecessor : toBlock.getPredecessors()) {
-                    assert fromBlock == predecessor : "all critical edges must be broken";
-                }
-            }
-
-            moveResolver.setInsertPosition(allocator.ir.getLIRforBlock(toBlock), 1);
-        }
-    }
-
-    /**
-     * Inserts necessary moves (spilling or reloading) at edges between blocks for intervals that
-     * have been split.
-     */
-    void resolveDataFlow() {
-        try (Indent indent = Debug.logAndIndent("resolve data flow")) {
-
-            int numBlocks = allocator.blockCount();
-            MoveResolver moveResolver = allocator.createMoveResolver();
-            BitSet blockCompleted = new BitSet(numBlocks);
-            BitSet alreadyResolved = new BitSet(numBlocks);
-
-            for (AbstractBlockBase<?> block : allocator.sortedBlocks) {
-
-                // check if block has only one predecessor and only one successor
-                if (block.getPredecessorCount() == 1 && block.getSuccessorCount() == 1) {
-                    List<LIRInstruction> instructions = allocator.ir.getLIRforBlock(block);
-                    assert instructions.get(0) instanceof StandardOp.LabelOp : "block must start with label";
-                    assert instructions.get(instructions.size() - 1) instanceof StandardOp.JumpOp : "block with successor must end with unconditional jump";
-
-                    // check if block is empty (only label and branch)
-                    if (instructions.size() == 2) {
-                        AbstractBlockBase<?> pred = block.getPredecessors().iterator().next();
-                        AbstractBlockBase<?> sux = block.getSuccessors().iterator().next();
-
-                        // prevent optimization of two consecutive blocks
-                        if (!blockCompleted.get(pred.getLinearScanNumber()) && !blockCompleted.get(sux.getLinearScanNumber())) {
-                            if (Debug.isLogEnabled()) {
-                                Debug.log(" optimizing empty block B%d (pred: B%d, sux: B%d)", block.getId(), pred.getId(), sux.getId());
-                            }
-
-                            blockCompleted.set(block.getLinearScanNumber());
-
-                            /*
-                             * Directly resolve between pred and sux (without looking at the empty
-                             * block between).
-                             */
-                            resolveCollectMappings(pred, sux, block, moveResolver);
-                            if (moveResolver.hasMappings()) {
-                                moveResolver.setInsertPosition(instructions, 1);
-                                moveResolver.resolveAndAppendMoves();
-                            }
-                        }
-                    }
-                }
-            }
-
-            for (AbstractBlockBase<?> fromBlock : allocator.sortedBlocks) {
-                if (!blockCompleted.get(fromBlock.getLinearScanNumber())) {
-                    alreadyResolved.clear();
-                    alreadyResolved.or(blockCompleted);
-
-                    for (AbstractBlockBase<?> toBlock : fromBlock.getSuccessors()) {
-
-                        /*
-                         * Check for duplicate edges between the same blocks (can happen with switch
-                         * blocks).
-                         */
-                        if (!alreadyResolved.get(toBlock.getLinearScanNumber())) {
-                            if (Debug.isLogEnabled()) {
-                                Debug.log("processing edge between B%d and B%d", fromBlock.getId(), toBlock.getId());
-                            }
-
-                            alreadyResolved.set(toBlock.getLinearScanNumber());
-
-                            // collect all intervals that have been split between
-                            // fromBlock and toBlock
-                            resolveCollectMappings(fromBlock, toBlock, null, moveResolver);
-                            if (moveResolver.hasMappings()) {
-                                resolveFindInsertPos(fromBlock, toBlock, moveResolver);
-                                moveResolver.resolveAndAppendMoves();
-                            }
-                        }
-                    }
-                }
-            }
-
-        }
-    }
-}
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/SSAEliminateSpillMove.java	Tue May 12 14:04:40 2015 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/SSAEliminateSpillMove.java	Tue May 12 14:19:57 2015 +0200
@@ -32,7 +32,7 @@
 import com.oracle.graal.lir.StandardOp.*;
 import com.oracle.graal.lir.ssa.*;
 
-public class SSAEliminateSpillMove extends EliminateSpillMove {
+public class SSAEliminateSpillMove extends LinearScanEliminateSpillMovePhase {
 
     SSAEliminateSpillMove(LinearScan allocator) {
         super(allocator);
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/SSALifetimeAnalysis.java	Tue May 12 14:04:40 2015 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/SSALifetimeAnalysis.java	Tue May 12 14:19:57 2015 +0200
@@ -31,7 +31,7 @@
 import com.oracle.graal.lir.StandardOp.*;
 import com.oracle.graal.lir.ssa.*;
 
-public class SSALifetimeAnalysis extends LifetimeAnalysis {
+public class SSALifetimeAnalysis extends LinearScanLifetimeAnalysisPhase {
 
     SSALifetimeAnalysis(LinearScan linearScan) {
         super(linearScan);
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/SSALinearScan.java	Tue May 12 14:04:40 2015 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/SSALinearScan.java	Tue May 12 14:19:57 2015 +0200
@@ -41,17 +41,17 @@
     }
 
     @Override
-    protected LifetimeAnalysis createLifetimeAnalysisPhase() {
+    protected LinearScanLifetimeAnalysisPhase createLifetimeAnalysisPhase() {
         return new SSALifetimeAnalysis(this);
     }
 
     @Override
-    protected ResolveDataFlow createResolveDataFlowPhase() {
+    protected LinearScanResolveDataFlowPhase createResolveDataFlowPhase() {
         return new SSAResolveDataFlow(this);
     }
 
     @Override
-    protected EliminateSpillMove createSpillMoveEliminationPhase() {
+    protected LinearScanEliminateSpillMovePhase createSpillMoveEliminationPhase() {
         return new SSAEliminateSpillMove(this);
     }
 
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/SSAResolveDataFlow.java	Tue May 12 14:04:40 2015 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/SSAResolveDataFlow.java	Tue May 12 14:19:57 2015 +0200
@@ -33,7 +33,7 @@
 import com.oracle.graal.lir.ssa.*;
 import com.oracle.graal.lir.ssa.SSAUtils.PhiValueVisitor;
 
-class SSAResolveDataFlow extends ResolveDataFlow {
+class SSAResolveDataFlow extends LinearScanResolveDataFlowPhase {
 
     private static final DebugMetric numPhiResolutionMoves = Debug.metric("SSA LSRA[numPhiResolutionMoves]");
     private static final DebugMetric numStackToStackMoves = Debug.metric("SSA LSRA[numStackToStackMoves]");