changeset 22599:b83f27f75684

TraceRA: introduce TraceAllocationPhase and TraceLinearScanAllocationPhase.
author Josef Eisl <josef.eisl@jku.at>
date Tue, 08 Sep 2015 18:53:43 +0200
parents 5396006b321a
children 530daaf84457
files graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceAllocationPhase.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceGlobalMoveResolutionPhase.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScan.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanAllocationPhase.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanAssignLocationsPhase.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanEliminateSpillMovePhase.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanLifetimeAnalysisPhase.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanRegisterAllocationPhase.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanResolveDataFlowPhase.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceRegisterAllocationPhase.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceTrivialAllocator.java
diffstat 11 files changed, 1504 insertions(+), 1308 deletions(-) [+]
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceAllocationPhase.java	Tue Sep 08 18:53:43 2015 +0200
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.trace;
+
+import java.util.List;
+
+import jdk.internal.jvmci.code.TargetDescription;
+
+import com.oracle.graal.compiler.common.alloc.RegisterAllocationConfig;
+import com.oracle.graal.compiler.common.cfg.AbstractBlockBase;
+import com.oracle.graal.lir.gen.LIRGenerationResult;
+import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
+import com.oracle.graal.lir.phases.LIRPhase;
+
+public abstract class TraceAllocationPhase extends LIRPhase<TraceAllocationPhase.TraceAllocationContext> {
+
+    public static final class TraceAllocationContext {
+        private final SpillMoveFactory spillMoveFactory;
+        private final RegisterAllocationConfig registerAllocationConfig;
+
+        public TraceAllocationContext(SpillMoveFactory spillMoveFactory, RegisterAllocationConfig registerAllocationConfig) {
+            this.spillMoveFactory = spillMoveFactory;
+            this.registerAllocationConfig = registerAllocationConfig;
+        }
+    }
+
+    @Override
+    protected final <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder,
+                    TraceAllocationContext context) {
+        run(target, lirGenRes, codeEmittingOrder, linearScanOrder, context.spillMoveFactory, context.registerAllocationConfig);
+    }
+
+    protected abstract <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder,
+                    SpillMoveFactory spillMoveFactory, RegisterAllocationConfig registerAllocationConfig);
+
+}
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceGlobalMoveResolutionPhase.java	Tue Sep 08 10:50:21 2015 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceGlobalMoveResolutionPhase.java	Tue Sep 08 18:53:43 2015 +0200
@@ -22,26 +22,30 @@
  */
 package com.oracle.graal.lir.alloc.trace;
 
-import static com.oracle.graal.lir.alloc.trace.TraceUtil.*;
-import static jdk.internal.jvmci.code.ValueUtil.*;
+import static com.oracle.graal.lir.alloc.trace.TraceUtil.asShadowedRegisterValue;
+import static com.oracle.graal.lir.alloc.trace.TraceUtil.isShadowedRegisterValue;
+import static jdk.internal.jvmci.code.ValueUtil.isIllegal;
 
-import java.util.*;
+import java.util.List;
 
-import jdk.internal.jvmci.code.*;
-import jdk.internal.jvmci.meta.*;
+import jdk.internal.jvmci.code.Architecture;
+import jdk.internal.jvmci.code.TargetDescription;
+import jdk.internal.jvmci.meta.AllocatableValue;
+import jdk.internal.jvmci.meta.Value;
 
-import com.oracle.graal.compiler.common.alloc.*;
+import com.oracle.graal.compiler.common.alloc.RegisterAllocationConfig;
 import com.oracle.graal.compiler.common.alloc.TraceBuilder.TraceBuilderResult;
-import com.oracle.graal.compiler.common.cfg.*;
-import com.oracle.graal.debug.*;
-import com.oracle.graal.lir.*;
-import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.compiler.common.cfg.AbstractBlockBase;
+import com.oracle.graal.debug.Debug;
+import com.oracle.graal.debug.Indent;
+import com.oracle.graal.lir.LIR;
+import com.oracle.graal.lir.LIRInstruction;
+import com.oracle.graal.lir.gen.LIRGenerationResult;
 import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
-import com.oracle.graal.lir.phases.*;
 import com.oracle.graal.lir.ssa.SSAUtil.PhiValueVisitor;
-import com.oracle.graal.lir.ssi.*;
+import com.oracle.graal.lir.ssi.SSIUtil;
 
-final class TraceGlobalMoveResolutionPhase extends AllocationPhase {
+final class TraceGlobalMoveResolutionPhase extends TraceAllocationPhase {
 
     private final TraceBuilderResult<?> resultTraces;
 
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScan.java	Tue Sep 08 10:50:21 2015 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScan.java	Tue Sep 08 18:53:43 2015 +0200
@@ -22,29 +22,49 @@
  */
 package com.oracle.graal.lir.alloc.trace;
 
-import static com.oracle.graal.compiler.common.GraalOptions.*;
-import static com.oracle.graal.lir.LIRValueUtil.*;
-import static jdk.internal.jvmci.code.CodeUtil.*;
-import static jdk.internal.jvmci.code.ValueUtil.*;
+import static com.oracle.graal.compiler.common.GraalOptions.DetailedAsserts;
+import static com.oracle.graal.lir.LIRValueUtil.isVariable;
+import static jdk.internal.jvmci.code.CodeUtil.isEven;
+import static jdk.internal.jvmci.code.ValueUtil.asRegister;
+import static jdk.internal.jvmci.code.ValueUtil.asRegisterValue;
+import static jdk.internal.jvmci.code.ValueUtil.isIllegal;
+import static jdk.internal.jvmci.code.ValueUtil.isLegal;
+import static jdk.internal.jvmci.code.ValueUtil.isRegister;
 
-import java.util.*;
+import java.util.Arrays;
+import java.util.BitSet;
+import java.util.EnumSet;
+import java.util.List;
 
-import jdk.internal.jvmci.code.*;
-import jdk.internal.jvmci.common.*;
-import jdk.internal.jvmci.meta.*;
+import jdk.internal.jvmci.code.BailoutException;
+import jdk.internal.jvmci.code.Register;
+import jdk.internal.jvmci.code.RegisterAttributes;
+import jdk.internal.jvmci.code.RegisterValue;
+import jdk.internal.jvmci.code.StackSlotValue;
+import jdk.internal.jvmci.code.TargetDescription;
+import jdk.internal.jvmci.code.VirtualStackSlot;
+import jdk.internal.jvmci.common.JVMCIError;
+import jdk.internal.jvmci.meta.AllocatableValue;
+import jdk.internal.jvmci.meta.LIRKind;
+import jdk.internal.jvmci.meta.Value;
 
-import com.oracle.graal.compiler.common.alloc.*;
+import com.oracle.graal.compiler.common.alloc.RegisterAllocationConfig;
 import com.oracle.graal.compiler.common.alloc.TraceBuilder.TraceBuilderResult;
-import com.oracle.graal.compiler.common.cfg.*;
-import com.oracle.graal.debug.*;
+import com.oracle.graal.compiler.common.cfg.AbstractBlockBase;
+import com.oracle.graal.compiler.common.cfg.BlockMap;
+import com.oracle.graal.debug.Debug;
 import com.oracle.graal.debug.Debug.Scope;
-import com.oracle.graal.lir.*;
+import com.oracle.graal.debug.Indent;
+import com.oracle.graal.lir.LIR;
+import com.oracle.graal.lir.LIRInstruction;
 import com.oracle.graal.lir.LIRInstruction.OperandFlag;
 import com.oracle.graal.lir.LIRInstruction.OperandMode;
-import com.oracle.graal.lir.framemap.*;
-import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.lir.ValueConsumer;
+import com.oracle.graal.lir.Variable;
+import com.oracle.graal.lir.alloc.trace.TraceLinearScanAllocationPhase.TraceLinearScanAllocationContext;
+import com.oracle.graal.lir.framemap.FrameMapBuilder;
+import com.oracle.graal.lir.gen.LIRGenerationResult;
 import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
-import com.oracle.graal.lir.phases.AllocationPhase.AllocationContext;
 
 /**
  * An implementation of the linear scan register allocator algorithm described in <a
@@ -694,7 +714,7 @@
          * This is the point to enable debug logging for the whole register allocation.
          */
         try (Indent indent = Debug.logAndIndent("LinearScan allocate")) {
-            AllocationContext context = new AllocationContext(spillMoveFactory, registerAllocationConfig);
+            TraceLinearScanAllocationContext context = new TraceLinearScanAllocationContext(spillMoveFactory, registerAllocationConfig, traceBuilderResult, this);
 
             createLifetimeAnalysisPhase().apply(target, lirGenRes, codeEmittingOrder, linearScanOrder, context, false);
 
@@ -722,26 +742,26 @@
     }
 
     protected TraceLinearScanLifetimeAnalysisPhase createLifetimeAnalysisPhase() {
-        return new TraceLinearScanLifetimeAnalysisPhase(this, traceBuilderResult);
+        return new TraceLinearScanLifetimeAnalysisPhase();
     }
 
     protected TraceLinearScanResolveDataFlowPhase createResolveDataFlowPhase() {
-        return new TraceLinearScanResolveDataFlowPhase(this, traceBuilderResult);
+        return new TraceLinearScanResolveDataFlowPhase();
     }
 
     protected TraceLinearScanEliminateSpillMovePhase createSpillMoveEliminationPhase() {
-        return new TraceLinearScanEliminateSpillMovePhase(this);
+        return new TraceLinearScanEliminateSpillMovePhase();
     }
 
     protected TraceLinearScanAssignLocationsPhase createAssignLocationsPhase() {
-        return new TraceLinearScanAssignLocationsPhase(this, traceBuilderResult);
+        return new TraceLinearScanAssignLocationsPhase();
     }
 
     protected void beforeSpillMoveElimination() {
     }
 
     protected TraceLinearScanRegisterAllocationPhase createRegisterAllocationPhase() {
-        return new TraceLinearScanRegisterAllocationPhase(this);
+        return new TraceLinearScanRegisterAllocationPhase();
     }
 
     @SuppressWarnings("try")
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanAllocationPhase.java	Tue Sep 08 18:53:43 2015 +0200
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.trace;
+
+import java.util.List;
+
+import jdk.internal.jvmci.code.TargetDescription;
+
+import com.oracle.graal.compiler.common.alloc.RegisterAllocationConfig;
+import com.oracle.graal.compiler.common.alloc.TraceBuilder.TraceBuilderResult;
+import com.oracle.graal.compiler.common.cfg.AbstractBlockBase;
+import com.oracle.graal.lir.gen.LIRGenerationResult;
+import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
+import com.oracle.graal.lir.phases.LIRPhase;
+
+public abstract class TraceLinearScanAllocationPhase extends LIRPhase<TraceLinearScanAllocationPhase.TraceLinearScanAllocationContext> {
+
+    public static final class TraceLinearScanAllocationContext {
+        private final SpillMoveFactory spillMoveFactory;
+        private final RegisterAllocationConfig registerAllocationConfig;
+        private final TraceBuilderResult<?> traceBuilderResult;
+        private final TraceLinearScan allocator;
+
+        public TraceLinearScanAllocationContext(SpillMoveFactory spillMoveFactory, RegisterAllocationConfig registerAllocationConfig, TraceBuilderResult<?> traceBuilderResult,
+                        TraceLinearScan allocator) {
+            this.spillMoveFactory = spillMoveFactory;
+            this.registerAllocationConfig = registerAllocationConfig;
+            this.traceBuilderResult = traceBuilderResult;
+            this.allocator = allocator;
+        }
+    }
+
+    @Override
+    protected final <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder,
+                    TraceLinearScanAllocationContext context) {
+        run(target, lirGenRes, codeEmittingOrder, linearScanOrder, context.spillMoveFactory, context.registerAllocationConfig, context.traceBuilderResult, context.allocator);
+    }
+
+    protected abstract <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder,
+                    SpillMoveFactory spillMoveFactory, RegisterAllocationConfig registerAllocationConfig, TraceBuilderResult<?> traceBuilderResult, TraceLinearScan allocator);
+
+}
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanAssignLocationsPhase.java	Tue Sep 08 10:50:21 2015 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanAssignLocationsPhase.java	Tue Sep 08 18:53:43 2015 +0200
@@ -22,252 +22,268 @@
  */
 package com.oracle.graal.lir.alloc.trace;
 
-import static com.oracle.graal.compiler.common.GraalOptions.*;
-import static com.oracle.graal.lir.LIRValueUtil.*;
-import static com.oracle.graal.lir.alloc.trace.TraceRegisterAllocationPhase.Options.*;
-import static jdk.internal.jvmci.code.ValueUtil.*;
+import static com.oracle.graal.compiler.common.GraalOptions.DetailedAsserts;
+import static com.oracle.graal.lir.LIRValueUtil.isConstantValue;
+import static com.oracle.graal.lir.LIRValueUtil.isVariable;
+import static com.oracle.graal.lir.alloc.trace.TraceRegisterAllocationPhase.Options.TraceRAshareSpillInformation;
+import static jdk.internal.jvmci.code.ValueUtil.isIllegal;
+import static jdk.internal.jvmci.code.ValueUtil.isRegister;
+import static jdk.internal.jvmci.code.ValueUtil.isStackSlotValue;
+import static jdk.internal.jvmci.code.ValueUtil.isVirtualStackSlot;
 
-import java.util.*;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.List;
 
-import jdk.internal.jvmci.code.*;
-import jdk.internal.jvmci.meta.*;
+import jdk.internal.jvmci.code.RegisterValue;
+import jdk.internal.jvmci.code.StackSlotValue;
+import jdk.internal.jvmci.code.TargetDescription;
+import jdk.internal.jvmci.meta.AllocatableValue;
+import jdk.internal.jvmci.meta.Value;
 
-import com.oracle.graal.compiler.common.alloc.*;
+import com.oracle.graal.compiler.common.alloc.RegisterAllocationConfig;
 import com.oracle.graal.compiler.common.alloc.TraceBuilder.TraceBuilderResult;
-import com.oracle.graal.compiler.common.cfg.*;
-import com.oracle.graal.debug.*;
-import com.oracle.graal.lir.*;
+import com.oracle.graal.compiler.common.cfg.AbstractBlockBase;
+import com.oracle.graal.debug.Debug;
+import com.oracle.graal.debug.Indent;
+import com.oracle.graal.lir.ConstantValue;
+import com.oracle.graal.lir.InstructionValueProcedure;
+import com.oracle.graal.lir.LIRFrameState;
+import com.oracle.graal.lir.LIRInstruction;
 import com.oracle.graal.lir.LIRInstruction.OperandFlag;
 import com.oracle.graal.lir.LIRInstruction.OperandMode;
+import com.oracle.graal.lir.StandardOp;
 import com.oracle.graal.lir.StandardOp.BlockEndOp;
 import com.oracle.graal.lir.StandardOp.MoveOp;
 import com.oracle.graal.lir.StandardOp.ValueMoveOp;
-import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.lir.Variable;
+import com.oracle.graal.lir.gen.LIRGenerationResult;
 import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
-import com.oracle.graal.lir.phases.*;
 
 /**
  * Specialization of {@link com.oracle.graal.lir.alloc.lsra.LinearScanAssignLocationsPhase} that
  * inserts {@link ShadowedRegisterValue}s to describe {@link RegisterValue}s that are also available
  * on the {@link StackSlotValue stack}.
  */
-final class TraceLinearScanAssignLocationsPhase extends AllocationPhase {
-
-    protected final TraceLinearScan allocator;
-    private final TraceBuilderResult<?> traceBuilderResult;
-
-    public TraceLinearScanAssignLocationsPhase(TraceLinearScan allocator, TraceBuilderResult<?> traceBuilderResult) {
-        this.allocator = allocator;
-        this.traceBuilderResult = traceBuilderResult;
-    }
+final class TraceLinearScanAssignLocationsPhase extends TraceLinearScanAllocationPhase {
 
     @Override
     protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory,
-                    RegisterAllocationConfig registerAllocationConfig) {
-        assignLocations();
+                    RegisterAllocationConfig registerAllocationConfig, TraceBuilderResult<?> traceBuilderResult, TraceLinearScan allocator) {
+        new Assigner(allocator, traceBuilderResult).assignLocations();
     }
 
-    /**
-     * Assigns the allocated location for an LIR instruction operand back into the instruction.
-     *
-     * @param op current {@link LIRInstruction}
-     * @param operand an LIR instruction operand
-     * @param mode the usage mode for {@code operand} by the instruction
-     * @return the location assigned for the operand
-     */
-    protected Value colorLirOperand(LIRInstruction op, Variable operand, OperandMode mode) {
-        int opId = op.id();
-        TraceInterval interval = allocator.intervalFor(operand);
-        assert interval != null : "interval must exist";
+    private static final class Assigner {
+        private final TraceLinearScan allocator;
+        private final TraceBuilderResult<?> traceBuilderResult;
+
+        private Assigner(TraceLinearScan allocator, TraceBuilderResult<?> traceBuilderResult) {
+            this.allocator = allocator;
+            this.traceBuilderResult = traceBuilderResult;
+        }
+
+        /**
+         * Assigns the allocated location for an LIR instruction operand back into the instruction.
+         *
+         * @param op current {@link LIRInstruction}
+         * @param operand an LIR instruction operand
+         * @param mode the usage mode for {@code operand} by the instruction
+         * @return the location assigned for the operand
+         */
+        private Value colorLirOperand(LIRInstruction op, Variable operand, OperandMode mode) {
+            int opId = op.id();
+            TraceInterval interval = allocator.intervalFor(operand);
+            assert interval != null : "interval must exist";
+
+            if (opId != -1) {
+                if (DetailedAsserts.getValue()) {
+                    AbstractBlockBase<?> block = allocator.blockForId(opId);
+                    if (block.getSuccessorCount() <= 1 && opId == allocator.getLastLirInstructionId(block)) {
+                        /*
+                         * Check if spill moves could have been appended at the end of this block,
+                         * but before the branch instruction. So the split child information for
+                         * this branch would be incorrect.
+                         */
+                        LIRInstruction instr = allocator.getLIR().getLIRforBlock(block).get(allocator.getLIR().getLIRforBlock(block).size() - 1);
+                        if (instr instanceof StandardOp.JumpOp) {
+                            if (allocator.getBlockData(block).liveOut.get(allocator.operandNumber(operand))) {
+                                assert false : String.format(
+                                                "can't get split child for the last branch of a block because the information would be incorrect (moves are inserted before the branch in resolveDataFlow) block=%s, instruction=%s, operand=%s",
+                                                block, instr, operand);
+                            }
+                        }
+                    }
+                }
 
-        if (opId != -1) {
-            if (DetailedAsserts.getValue()) {
-                AbstractBlockBase<?> block = allocator.blockForId(opId);
-                if (block.getSuccessorCount() <= 1 && opId == allocator.getLastLirInstructionId(block)) {
-                    /*
-                     * Check if spill moves could have been appended at the end of this block, but
-                     * before the branch instruction. So the split child information for this branch
-                     * would be incorrect.
-                     */
-                    LIRInstruction instr = allocator.getLIR().getLIRforBlock(block).get(allocator.getLIR().getLIRforBlock(block).size() - 1);
-                    if (instr instanceof StandardOp.JumpOp) {
-                        if (allocator.getBlockData(block).liveOut.get(allocator.operandNumber(operand))) {
-                            assert false : String.format(
-                                            "can't get split child for the last branch of a block because the information would be incorrect (moves are inserted before the branch in resolveDataFlow) block=%s, instruction=%s, operand=%s",
-                                            block, instr, operand);
-                        }
+                /*
+                 * Operands are not changed when an interval is split during allocation, so search
+                 * the right interval here.
+                 */
+                interval = allocator.splitChildAtOpId(interval, opId, mode);
+            }
+
+            if (isIllegal(interval.location()) && interval.canMaterialize()) {
+                assert mode != OperandMode.DEF;
+                return new ConstantValue(interval.kind(), interval.getMaterializedValue());
+            }
+            return interval.location();
+        }
+
+        /**
+         * @param op
+         * @param operand
+         * @param valueMode
+         * @param flags
+         * @see InstructionValueProcedure#doValue(LIRInstruction, Value, OperandMode, EnumSet)
+         */
+        private Value debugInfoProcedure(LIRInstruction op, Value operand, OperandMode valueMode, EnumSet<OperandFlag> flags) {
+            if (isVirtualStackSlot(operand)) {
+                return operand;
+            }
+            int tempOpId = op.id();
+            OperandMode mode = OperandMode.USE;
+            AbstractBlockBase<?> block = allocator.blockForId(tempOpId);
+            if (block.getSuccessorCount() == 1 && tempOpId == allocator.getLastLirInstructionId(block)) {
+                /*
+                 * Generating debug information for the last instruction of a block. If this
+                 * instruction is a branch, spill moves are inserted before this branch and so the
+                 * wrong operand would be returned (spill moves at block boundaries are not
+                 * considered in the live ranges of intervals).
+                 * 
+                 * Solution: use the first opId of the branch target block instead.
+                 */
+                final LIRInstruction instr = allocator.getLIR().getLIRforBlock(block).get(allocator.getLIR().getLIRforBlock(block).size() - 1);
+                if (instr instanceof StandardOp.JumpOp) {
+                    if (allocator.getBlockData(block).liveOut.get(allocator.operandNumber(operand))) {
+                        tempOpId = allocator.getFirstLirInstructionId(block.getSuccessors().iterator().next());
+                        mode = OperandMode.DEF;
                     }
                 }
             }
 
             /*
-             * Operands are not changed when an interval is split during allocation, so search the
-             * right interval here.
+             * Get current location of operand. The operand must be live because debug information
+             * is considered when building the intervals if the interval is not live,
+             * colorLirOperand will cause an assert on failure.
              */
-            interval = allocator.splitChildAtOpId(interval, opId, mode);
+            Value result = colorLirOperand(op, (Variable) operand, mode);
+            assert !allocator.hasCall(tempOpId) || isStackSlotValue(result) || isConstantValue(result) || !allocator.isCallerSave(result) : "cannot have caller-save register operands at calls";
+            return result;
+        }
+
+        private void computeDebugInfo(final LIRInstruction op, LIRFrameState info) {
+            info.forEachState(op, this::debugInfoProcedure);
+        }
+
+        private void assignLocations(List<LIRInstruction> instructions) {
+            int numInst = instructions.size();
+            boolean hasDead = false;
+
+            for (int j = 0; j < numInst; j++) {
+                final LIRInstruction op = instructions.get(j);
+                if (op == null) {
+                    /*
+                     * this can happen when spill-moves are removed in eliminateSpillMoves
+                     */
+                    hasDead = true;
+                } else if (assignLocations(op)) {
+                    instructions.set(j, null);
+                    hasDead = true;
+                }
+            }
+
+            if (hasDead) {
+                // Remove null values from the list.
+                instructions.removeAll(Collections.singleton(null));
+            }
         }
 
-        if (isIllegal(interval.location()) && interval.canMaterialize()) {
-            assert mode != OperandMode.DEF;
-            return new ConstantValue(interval.kind(), interval.getMaterializedValue());
-        }
-        return interval.location();
-    }
+        /**
+         * Assigns the operand of an {@link LIRInstruction}.
+         *
+         * @param op The {@link LIRInstruction} that should be colored.
+         * @return {@code true} if the instruction should be deleted.
+         */
+        private boolean assignLocations(LIRInstruction op) {
+            assert op != null;
+            if (TraceRAshareSpillInformation.getValue() && isBlockEndWithEdgeToUnallocatedTrace(op)) {
+                ((BlockEndOp) op).forEachOutgoingValue(colorOutgoingValues);
+            }
 
-    /**
-     * @param op
-     * @param operand
-     * @param valueMode
-     * @param flags
-     * @see InstructionValueProcedure#doValue(LIRInstruction, Value, OperandMode, EnumSet)
-     */
-    private Value debugInfoProcedure(LIRInstruction op, Value operand, OperandMode valueMode, EnumSet<OperandFlag> flags) {
-        if (isVirtualStackSlot(operand)) {
-            return operand;
+            InstructionValueProcedure assignProc = (inst, operand, mode, flags) -> isVariable(operand) ? colorLirOperand(inst, (Variable) operand, mode) : operand;
+            // remove useless moves
+            if (op instanceof MoveOp) {
+                AllocatableValue result = ((MoveOp) op).getResult();
+                if (isVariable(result) && allocator.isMaterialized(result, op.id(), OperandMode.DEF)) {
+                    /*
+                     * This happens if a materializable interval is originally not spilled but then
+                     * kicked out in LinearScanWalker.splitForSpilling(). When kicking out such an
+                     * interval this move operation was already generated.
+                     */
+                    return true;
+                }
+            }
+
+            op.forEachInput(assignProc);
+            op.forEachAlive(assignProc);
+            op.forEachTemp(assignProc);
+            op.forEachOutput(assignProc);
+
+            // compute reference map and debug information
+            op.forEachState((inst, state) -> computeDebugInfo(inst, state));
+
+            // remove useless moves
+            if (op instanceof ValueMoveOp) {
+                ValueMoveOp move = (ValueMoveOp) op;
+                if (move.getInput().equals(move.getResult())) {
+                    return true;
+                }
+            }
+            return false;
         }
-        int tempOpId = op.id();
-        OperandMode mode = OperandMode.USE;
-        AbstractBlockBase<?> block = allocator.blockForId(tempOpId);
-        if (block.getSuccessorCount() == 1 && tempOpId == allocator.getLastLirInstructionId(block)) {
-            /*
-             * Generating debug information for the last instruction of a block. If this instruction
-             * is a branch, spill moves are inserted before this branch and so the wrong operand
-             * would be returned (spill moves at block boundaries are not considered in the live
-             * ranges of intervals).
-             *
-             * Solution: use the first opId of the branch target block instead.
-             */
-            final LIRInstruction instr = allocator.getLIR().getLIRforBlock(block).get(allocator.getLIR().getLIRforBlock(block).size() - 1);
-            if (instr instanceof StandardOp.JumpOp) {
-                if (allocator.getBlockData(block).liveOut.get(allocator.operandNumber(operand))) {
-                    tempOpId = allocator.getFirstLirInstructionId(block.getSuccessors().iterator().next());
-                    mode = OperandMode.DEF;
+
+        @SuppressWarnings("try")
+        private void assignLocations() {
+            try (Indent indent = Debug.logAndIndent("assign locations")) {
+                for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
+                    try (Indent indent2 = Debug.logAndIndent("assign locations in block B%d", block.getId())) {
+                        assignLocations(allocator.getLIR().getLIRforBlock(block));
+                    }
                 }
             }
         }
 
-        /*
-         * Get current location of operand. The operand must be live because debug information is
-         * considered when building the intervals if the interval is not live, colorLirOperand will
-         * cause an assert on failure.
-         */
-        Value result = colorLirOperand(op, (Variable) operand, mode);
-        assert !allocator.hasCall(tempOpId) || isStackSlotValue(result) || isConstantValue(result) || !allocator.isCallerSave(result) : "cannot have caller-save register operands at calls";
-        return result;
-    }
+        private InstructionValueProcedure colorOutgoingValues = new InstructionValueProcedure() {
 
-    private void computeDebugInfo(final LIRInstruction op, LIRFrameState info) {
-        info.forEachState(op, this::debugInfoProcedure);
-    }
+            public Value doValue(LIRInstruction instruction, Value value, OperandMode mode, EnumSet<OperandFlag> flags) {
+                if (isRegister(value) || isVariable(value)) {
+                    TraceInterval interval = allocator.intervalFor(value);
+                    assert interval != null : "interval must exist";
+                    interval = allocator.splitChildAtOpId(interval, instruction.id(), mode);
 
-    private void assignLocations(List<LIRInstruction> instructions) {
-        int numInst = instructions.size();
-        boolean hasDead = false;
+                    if (interval.alwaysInMemory() && isRegister(interval.location())) {
+                        return new ShadowedRegisterValue((RegisterValue) interval.location(), interval.spillSlot());
+                    }
+                }
+                return value;
+            }
+        };
 
-        for (int j = 0; j < numInst; j++) {
-            final LIRInstruction op = instructions.get(j);
-            if (op == null) {
-                /*
-                 * this can happen when spill-moves are removed in eliminateSpillMoves
-                 */
-                hasDead = true;
-            } else if (assignLocations(op)) {
-                instructions.set(j, null);
-                hasDead = true;
+        private boolean isBlockEndWithEdgeToUnallocatedTrace(LIRInstruction op) {
+            if (!(op instanceof BlockEndOp)) {
+                return false;
             }
-        }
+            AbstractBlockBase<?> block = allocator.blockForId(op.id());
+            int currentTrace = traceBuilderResult.getTraceForBlock(block);
 
-        if (hasDead) {
-            // Remove null values from the list.
-            instructions.removeAll(Collections.singleton(null));
+            for (AbstractBlockBase<?> succ : block.getSuccessors()) {
+                if (currentTrace < traceBuilderResult.getTraceForBlock(succ)) {
+                    // succ is not yet allocated
+                    return true;
+                }
+            }
+            return false;
         }
     }
 
-    /**
-     * Assigns the operand of an {@link LIRInstruction}.
-     *
-     * @param op The {@link LIRInstruction} that should be colored.
-     * @return {@code true} if the instruction should be deleted.
-     */
-    protected boolean assignLocations(LIRInstruction op) {
-        assert op != null;
-        if (TraceRAshareSpillInformation.getValue() && isBlockEndWithEdgeToUnallocatedTrace(op)) {
-            ((BlockEndOp) op).forEachOutgoingValue(colorOutgoingValues);
-        }
-
-        InstructionValueProcedure assignProc = (inst, operand, mode, flags) -> isVariable(operand) ? colorLirOperand(inst, (Variable) operand, mode) : operand;
-        // remove useless moves
-        if (op instanceof MoveOp) {
-            AllocatableValue result = ((MoveOp) op).getResult();
-            if (isVariable(result) && allocator.isMaterialized(result, op.id(), OperandMode.DEF)) {
-                /*
-                 * This happens if a materializable interval is originally not spilled but then
-                 * kicked out in LinearScanWalker.splitForSpilling(). When kicking out such an
-                 * interval this move operation was already generated.
-                 */
-                return true;
-            }
-        }
-
-        op.forEachInput(assignProc);
-        op.forEachAlive(assignProc);
-        op.forEachTemp(assignProc);
-        op.forEachOutput(assignProc);
-
-        // compute reference map and debug information
-        op.forEachState((inst, state) -> computeDebugInfo(inst, state));
-
-        // remove useless moves
-        if (op instanceof ValueMoveOp) {
-            ValueMoveOp move = (ValueMoveOp) op;
-            if (move.getInput().equals(move.getResult())) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-    @SuppressWarnings("try")
-    private void assignLocations() {
-        try (Indent indent = Debug.logAndIndent("assign locations")) {
-            for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
-                try (Indent indent2 = Debug.logAndIndent("assign locations in block B%d", block.getId())) {
-                    assignLocations(allocator.getLIR().getLIRforBlock(block));
-                }
-            }
-        }
-    }
-
-    private InstructionValueProcedure colorOutgoingValues = new InstructionValueProcedure() {
-
-        public Value doValue(LIRInstruction instruction, Value value, OperandMode mode, EnumSet<OperandFlag> flags) {
-            if (isRegister(value) || isVariable(value)) {
-                TraceInterval interval = allocator.intervalFor(value);
-                assert interval != null : "interval must exist";
-                interval = allocator.splitChildAtOpId(interval, instruction.id(), mode);
-
-                if (interval.alwaysInMemory() && isRegister(interval.location())) {
-                    return new ShadowedRegisterValue((RegisterValue) interval.location(), interval.spillSlot());
-                }
-            }
-            return value;
-        }
-    };
-
-    private boolean isBlockEndWithEdgeToUnallocatedTrace(LIRInstruction op) {
-        if (!(op instanceof BlockEndOp)) {
-            return false;
-        }
-        AbstractBlockBase<?> block = allocator.blockForId(op.id());
-        int currentTrace = traceBuilderResult.getTraceForBlock(block);
-
-        for (AbstractBlockBase<?> succ : block.getSuccessors()) {
-            if (currentTrace < traceBuilderResult.getTraceForBlock(succ)) {
-                // succ is not yet allocated
-                return true;
-            }
-        }
-        return false;
-    }
-
 }
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanEliminateSpillMovePhase.java	Tue Sep 08 10:50:21 2015 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanEliminateSpillMovePhase.java	Tue Sep 08 18:53:43 2015 +0200
@@ -22,28 +22,31 @@
  */
 package com.oracle.graal.lir.alloc.trace;
 
-import static com.oracle.graal.compiler.common.GraalOptions.*;
-import static jdk.internal.jvmci.code.ValueUtil.*;
+import static com.oracle.graal.compiler.common.GraalOptions.DetailedAsserts;
+import static jdk.internal.jvmci.code.ValueUtil.isRegister;
+import static jdk.internal.jvmci.code.ValueUtil.isStackSlotValue;
 
-import java.util.*;
+import java.util.List;
 
-import jdk.internal.jvmci.code.*;
-import jdk.internal.jvmci.meta.*;
+import jdk.internal.jvmci.code.TargetDescription;
+import jdk.internal.jvmci.meta.AllocatableValue;
 
-import com.oracle.graal.compiler.common.alloc.*;
-import com.oracle.graal.compiler.common.cfg.*;
-import com.oracle.graal.debug.*;
-import com.oracle.graal.lir.*;
+import com.oracle.graal.compiler.common.alloc.RegisterAllocationConfig;
+import com.oracle.graal.compiler.common.alloc.TraceBuilder.TraceBuilderResult;
+import com.oracle.graal.compiler.common.cfg.AbstractBlockBase;
+import com.oracle.graal.debug.Debug;
+import com.oracle.graal.debug.Indent;
+import com.oracle.graal.lir.LIRInsertionBuffer;
+import com.oracle.graal.lir.LIRInstruction;
 import com.oracle.graal.lir.StandardOp.LoadConstantOp;
 import com.oracle.graal.lir.StandardOp.MoveOp;
 import com.oracle.graal.lir.StandardOp.ValueMoveOp;
 import com.oracle.graal.lir.alloc.trace.TraceInterval.SpillState;
 import com.oracle.graal.lir.alloc.trace.TraceLinearScan.IntervalPredicate;
-import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.lir.gen.LIRGenerationResult;
 import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
-import com.oracle.graal.lir.phases.*;
 
-final class TraceLinearScanEliminateSpillMovePhase extends AllocationPhase {
+final class TraceLinearScanEliminateSpillMovePhase extends TraceLinearScanAllocationPhase {
 
     private static final IntervalPredicate mustStoreAtDefinition = new TraceLinearScan.IntervalPredicate() {
 
@@ -53,30 +56,15 @@
         }
     };
 
-    protected final TraceLinearScan allocator;
-
-    protected TraceLinearScanEliminateSpillMovePhase(TraceLinearScan allocator) {
-        this.allocator = allocator;
-    }
-
     @Override
     protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory,
-                    RegisterAllocationConfig registerAllocationConfig) {
-        eliminateSpillMoves();
-    }
-
-    /**
-     * @return the index of the first instruction that is of interest for
-     *         {@link #eliminateSpillMoves()}
-     */
-    protected static int firstInstructionOfInterest() {
-        // also look at Labels as they define PHI values
-        return 0;
+                    RegisterAllocationConfig registerAllocationConfig, TraceBuilderResult<?> traceBuilderResult, TraceLinearScan allocator) {
+        eliminateSpillMoves(allocator);
     }
 
     // called once before assignment of register numbers
     @SuppressWarnings("try")
-    void eliminateSpillMoves() {
+    private static void eliminateSpillMoves(TraceLinearScan allocator) {
         try (Indent indent = Debug.logAndIndent("Eliminating unnecessary spill moves")) {
 
             /*
@@ -95,7 +83,7 @@
                     int numInst = instructions.size();
 
                     // iterate all instructions of the block.
-                    for (int j = firstInstructionOfInterest(); j < numInst; j++) {
+                    for (int j = 0; j < numInst; j++) {
                         LIRInstruction op = instructions.get(j);
                         int opId = op.id();
 
@@ -179,7 +167,7 @@
      * @param block The block {@code move} is located in.
      * @param move Spill move.
      */
-    protected static boolean canEliminateSpillMove(AbstractBlockBase<?> block, MoveOp move) {
+    private static boolean canEliminateSpillMove(AbstractBlockBase<?> block, MoveOp move) {
         // TODO (je) do not eliminate spill moves yet!
         return false;
     }
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanLifetimeAnalysisPhase.java	Tue Sep 08 10:50:21 2015 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanLifetimeAnalysisPhase.java	Tue Sep 08 18:53:43 2015 +0200
@@ -22,26 +22,49 @@
  */
 package com.oracle.graal.lir.alloc.trace;
 
-import static com.oracle.graal.compiler.common.GraalOptions.*;
-import static com.oracle.graal.lir.LIRValueUtil.*;
-import static com.oracle.graal.lir.alloc.trace.TraceLinearScan.*;
-import static com.oracle.graal.lir.alloc.trace.TraceRegisterAllocationPhase.Options.*;
-import static com.oracle.graal.lir.debug.LIRGenerationDebugContext.*;
-import static jdk.internal.jvmci.code.ValueUtil.*;
+import static com.oracle.graal.compiler.common.GraalOptions.DetailedAsserts;
+import static com.oracle.graal.lir.LIRValueUtil.asVariable;
+import static com.oracle.graal.lir.LIRValueUtil.isVariable;
+import static com.oracle.graal.lir.alloc.trace.TraceLinearScan.isVariableOrRegister;
+import static com.oracle.graal.lir.alloc.trace.TraceRegisterAllocationPhase.Options.TraceRAshareSpillInformation;
+import static com.oracle.graal.lir.alloc.trace.TraceRegisterAllocationPhase.Options.TraceRAuseInterTraceHints;
+import static com.oracle.graal.lir.debug.LIRGenerationDebugContext.getSourceForOperandFromDebugContext;
+import static jdk.internal.jvmci.code.ValueUtil.asRegisterValue;
+import static jdk.internal.jvmci.code.ValueUtil.asStackSlot;
+import static jdk.internal.jvmci.code.ValueUtil.isIllegal;
+import static jdk.internal.jvmci.code.ValueUtil.isRegister;
+import static jdk.internal.jvmci.code.ValueUtil.isStackSlot;
 
-import java.util.*;
+import java.util.ArrayDeque;
+import java.util.BitSet;
+import java.util.Deque;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
 
-import jdk.internal.jvmci.code.*;
-import jdk.internal.jvmci.common.*;
-import jdk.internal.jvmci.meta.*;
+import jdk.internal.jvmci.code.BailoutException;
+import jdk.internal.jvmci.code.Register;
+import jdk.internal.jvmci.code.RegisterValue;
+import jdk.internal.jvmci.code.StackSlot;
+import jdk.internal.jvmci.code.TargetDescription;
+import jdk.internal.jvmci.common.JVMCIError;
+import jdk.internal.jvmci.meta.AllocatableValue;
+import jdk.internal.jvmci.meta.JavaConstant;
+import jdk.internal.jvmci.meta.Kind;
+import jdk.internal.jvmci.meta.LIRKind;
+import jdk.internal.jvmci.meta.Value;
 
-import com.oracle.graal.compiler.common.alloc.*;
+import com.oracle.graal.compiler.common.alloc.ComputeBlockOrder;
+import com.oracle.graal.compiler.common.alloc.RegisterAllocationConfig;
 import com.oracle.graal.compiler.common.alloc.TraceBuilder.TraceBuilderResult;
-import com.oracle.graal.compiler.common.cfg.*;
-import com.oracle.graal.compiler.common.util.*;
-import com.oracle.graal.debug.*;
+import com.oracle.graal.compiler.common.cfg.AbstractBlockBase;
+import com.oracle.graal.compiler.common.util.BitMap2D;
+import com.oracle.graal.debug.Debug;
 import com.oracle.graal.debug.Debug.Scope;
-import com.oracle.graal.lir.*;
+import com.oracle.graal.debug.Indent;
+import com.oracle.graal.lir.InstructionValueConsumer;
+import com.oracle.graal.lir.LIR;
+import com.oracle.graal.lir.LIRInstruction;
 import com.oracle.graal.lir.LIRInstruction.OperandFlag;
 import com.oracle.graal.lir.LIRInstruction.OperandMode;
 import com.oracle.graal.lir.StandardOp.BlockEndOp;
@@ -49,927 +72,941 @@
 import com.oracle.graal.lir.StandardOp.LoadConstantOp;
 import com.oracle.graal.lir.StandardOp.StackStoreOp;
 import com.oracle.graal.lir.StandardOp.ValueMoveOp;
+import com.oracle.graal.lir.ValueConsumer;
+import com.oracle.graal.lir.Variable;
 import com.oracle.graal.lir.alloc.trace.TraceInterval.RegisterPriority;
 import com.oracle.graal.lir.alloc.trace.TraceInterval.SpillState;
 import com.oracle.graal.lir.alloc.trace.TraceLinearScan.BlockData;
-import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.lir.gen.LIRGenerationResult;
 import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
-import com.oracle.graal.lir.phases.*;
-import com.oracle.graal.lir.ssi.*;
-
-final class TraceLinearScanLifetimeAnalysisPhase extends AllocationPhase {
+import com.oracle.graal.lir.ssi.SSIUtil;
 
-    private static final int DUMP_DURING_ANALYSIS_LEVEL = 4;
-    protected final TraceLinearScan allocator;
-    private final TraceBuilderResult<?> traceBuilderResult;
-
-    /**
-     * @param linearScan
-     * @param traceBuilderResult
-     */
-    protected TraceLinearScanLifetimeAnalysisPhase(TraceLinearScan linearScan, TraceBuilderResult<?> traceBuilderResult) {
-        allocator = linearScan;
-        this.traceBuilderResult = traceBuilderResult;
-    }
+final class TraceLinearScanLifetimeAnalysisPhase extends TraceLinearScanAllocationPhase {
 
     @Override
     protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory,
-                    RegisterAllocationConfig registerAllocationConfig) {
-        numberInstructions();
-        allocator.printLir("Before register allocation", true);
-        buildIntervals();
-    }
-
-    private boolean sameTrace(AbstractBlockBase<?> a, AbstractBlockBase<?> b) {
-        return traceBuilderResult.getTraceForBlock(b) == traceBuilderResult.getTraceForBlock(a);
-    }
-
-    private boolean isAllocatedOrCurrent(AbstractBlockBase<?> currentBlock, AbstractBlockBase<?> other) {
-        return traceBuilderResult.getTraceForBlock(other) <= traceBuilderResult.getTraceForBlock(currentBlock);
-    }
-
-    static void setHint(final LIRInstruction op, TraceInterval to, IntervalHint from) {
-        IntervalHint currentHint = to.locationHint(false);
-        if (currentHint == null) {
-            /*
-             * Update hint if there was none or if the hint interval starts after the hinted
-             * interval.
-             */
-            to.setLocationHint(from);
-            if (Debug.isLogEnabled()) {
-                Debug.log("operation at opId %d: added hint from interval %s to %s", op.id(), from, to);
-            }
-        }
+                    RegisterAllocationConfig registerAllocationConfig, TraceBuilderResult<?> traceBuilderResult, TraceLinearScan allocator) {
+        new Analyser(allocator, traceBuilderResult).analyze();
     }
 
-    /**
-     * Bit set for each variable that is contained in each loop.
-     */
-    private BitMap2D intervalInLoop;
+    private static class Analyser {
+        private static final int DUMP_DURING_ANALYSIS_LEVEL = 4;
+        protected final TraceLinearScan allocator;
+        private final TraceBuilderResult<?> traceBuilderResult;
+
+        /**
+         * @param linearScan
+         * @param traceBuilderResult
+         */
+        private Analyser(TraceLinearScan linearScan, TraceBuilderResult<?> traceBuilderResult) {
+            allocator = linearScan;
+            this.traceBuilderResult = traceBuilderResult;
+        }
+
+        private void analyze() {
+            numberInstructions();
+            allocator.printLir("Before register allocation", true);
+            buildIntervals();
+        }
 
-    boolean isIntervalInLoop(int interval, int loop) {
-        return intervalInLoop.at(interval, loop);
-    }
+        private boolean sameTrace(AbstractBlockBase<?> a, AbstractBlockBase<?> b) {
+            return traceBuilderResult.getTraceForBlock(b) == traceBuilderResult.getTraceForBlock(a);
+        }
+
+        private boolean isAllocatedOrCurrent(AbstractBlockBase<?> currentBlock, AbstractBlockBase<?> other) {
+            return traceBuilderResult.getTraceForBlock(other) <= traceBuilderResult.getTraceForBlock(currentBlock);
+        }
+
+        static void setHint(final LIRInstruction op, TraceInterval to, IntervalHint from) {
+            IntervalHint currentHint = to.locationHint(false);
+            if (currentHint == null) {
+                /*
+                 * Update hint if there was none or if the hint interval starts after the hinted
+                 * interval.
+                 */
+                to.setLocationHint(from);
+                if (Debug.isLogEnabled()) {
+                    Debug.log("operation at opId %d: added hint from interval %s to %s", op.id(), from, to);
+                }
+            }
+        }
+
+        /**
+         * Bit set for each variable that is contained in each loop.
+         */
+        private BitMap2D intervalInLoop;
 
-    /**
-     * Numbers all instructions in all blocks. The numbering follows the
-     * {@linkplain ComputeBlockOrder linear scan order}.
-     */
-    protected void numberInstructions() {
+        boolean isIntervalInLoop(int interval, int loop) {
+            return intervalInLoop.at(interval, loop);
+        }
+
+        /**
+         * Numbers all instructions in all blocks. The numbering follows the
+         * {@linkplain ComputeBlockOrder linear scan order}.
+         */
+        protected void numberInstructions() {
+
+            allocator.initIntervals();
 
-        allocator.initIntervals();
+            ValueConsumer setVariableConsumer = (value, mode, flags) -> {
+                if (isVariable(value)) {
+                    allocator.getOrCreateInterval(asVariable(value));
+                }
+            };
+
+            // Assign IDs to LIR nodes and build a mapping, lirOps, from ID to LIRInstruction node.
+            int numInstructions = 0;
+            for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
+                numInstructions += allocator.getLIR().getLIRforBlock(block).size();
+            }
 
-        ValueConsumer setVariableConsumer = (value, mode, flags) -> {
-            if (isVariable(value)) {
-                allocator.getOrCreateInterval(asVariable(value));
+            // initialize with correct length
+            allocator.initOpIdMaps(numInstructions);
+
+            int opId = 0;
+            int index = 0;
+            for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
+                allocator.initBlockData(block);
+
+                List<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(block);
+
+                int numInst = instructions.size();
+                for (int j = 0; j < numInst; j++) {
+                    LIRInstruction op = instructions.get(j);
+                    op.setId(opId);
+
+                    allocator.putOpIdMaps(index, op, block);
+                    assert allocator.instructionForId(opId) == op : "must match";
+
+                    op.visitEachTemp(setVariableConsumer);
+                    op.visitEachOutput(setVariableConsumer);
+
+                    index++;
+                    opId += 2; // numbering of lirOps by two
+                }
             }
-        };
-
-        // Assign IDs to LIR nodes and build a mapping, lirOps, from ID to LIRInstruction node.
-        int numInstructions = 0;
-        for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
-            numInstructions += allocator.getLIR().getLIRforBlock(block).size();
+            assert index == numInstructions : "must match";
+            assert (index << 1) == opId : "must match: " + (index << 1);
         }
 
-        // initialize with correct length
-        allocator.initOpIdMaps(numInstructions);
+        /**
+         * Computes local live sets (i.e. {@link BlockData#liveGen} and {@link BlockData#liveKill})
+         * separately for each block.
+         */
+        @SuppressWarnings("try")
+        void computeLocalLiveSets() {
+            int liveSize = allocator.liveSetSize();
+
+            intervalInLoop = new BitMap2D(allocator.operandSize(), allocator.numLoops());
+
+            // iterate all blocks
+            for (final AbstractBlockBase<?> block : allocator.sortedBlocks()) {
+                try (Indent indent = Debug.logAndIndent("compute local live sets for block %s", block)) {
+
+                    final BitSet liveGen = new BitSet(liveSize);
+                    final BitSet liveKill = new BitSet(liveSize);
 
-        int opId = 0;
-        int index = 0;
-        for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
-            allocator.initBlockData(block);
+                    List<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(block);
+                    int numInst = instructions.size();
 
-            List<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(block);
+                    ValueConsumer useConsumer = (operand, mode, flags) -> {
+                        if (isVariable(operand)) {
+                            int operandNum = allocator.operandNumber(operand);
+                            if (!liveKill.get(operandNum)) {
+                                liveGen.set(operandNum);
+                                if (Debug.isLogEnabled()) {
+                                    Debug.log("liveGen for operand %d(%s)", operandNum, operand);
+                                }
+                            }
+                            if (block.getLoop() != null) {
+                                intervalInLoop.setBit(operandNum, block.getLoop().getIndex());
+                            }
+                        }
 
-            int numInst = instructions.size();
-            for (int j = 0; j < numInst; j++) {
-                LIRInstruction op = instructions.get(j);
-                op.setId(opId);
+                        if (DetailedAsserts.getValue()) {
+                            verifyInput(block, liveKill, operand);
+                        }
+                    };
+                    ValueConsumer stateConsumer = (operand, mode, flags) -> {
+                        if (TraceLinearScan.isVariableOrRegister(operand)) {
+                            int operandNum = allocator.operandNumber(operand);
+                            if (!liveKill.get(operandNum)) {
+                                liveGen.set(operandNum);
+                                if (Debug.isLogEnabled()) {
+                                    Debug.log("liveGen in state for operand %d(%s)", operandNum, operand);
+                                }
+                            }
+                        }
+                    };
+                    ValueConsumer defConsumer = (operand, mode, flags) -> {
+                        if (isVariable(operand)) {
+                            int varNum = allocator.operandNumber(operand);
+                            liveKill.set(varNum);
+                            if (Debug.isLogEnabled()) {
+                                Debug.log("liveKill for operand %d(%s)", varNum, operand);
+                            }
+                            if (block.getLoop() != null) {
+                                intervalInLoop.setBit(varNum, block.getLoop().getIndex());
+                            }
+                        }
+
+                        if (DetailedAsserts.getValue()) {
+                            /*
+                             * Fixed intervals are never live at block boundaries, so they need not
+                             * be processed in live sets. Process them only in debug mode so that
+                             * this can be checked
+                             */
+                            verifyTemp(liveKill, operand);
+                        }
+                    };
 
-                allocator.putOpIdMaps(index, op, block);
-                assert allocator.instructionForId(opId) == op : "must match";
+                    // iterate all instructions of the block
+                    for (int j = 0; j < numInst; j++) {
+                        final LIRInstruction op = instructions.get(j);
+
+                        try (Indent indent2 = Debug.logAndIndent("handle op %d: %s", op.id(), op)) {
+                            op.visitEachInput(useConsumer);
+                            op.visitEachAlive(useConsumer);
+                            /*
+                             * Add uses of live locals from interpreter's point of view for proper
+                             * debug information generation.
+                             */
+                            op.visitEachState(stateConsumer);
+                            op.visitEachTemp(defConsumer);
+                            op.visitEachOutput(defConsumer);
+                        }
+                    } // end of instruction iteration
 
-                op.visitEachTemp(setVariableConsumer);
-                op.visitEachOutput(setVariableConsumer);
+                    BlockData blockSets = allocator.getBlockData(block);
+                    blockSets.liveGen = liveGen;
+                    blockSets.liveKill = liveKill;
+                    blockSets.liveIn = new BitSet(liveSize);
+                    blockSets.liveOut = new BitSet(liveSize);
+
+                    if (Debug.isLogEnabled()) {
+                        Debug.log("liveGen  B%d %s", block.getId(), blockSets.liveGen);
+                        Debug.log("liveKill B%d %s", block.getId(), blockSets.liveKill);
+                    }
 
-                index++;
-                opId += 2; // numbering of lirOps by two
+                }
+            } // end of block iteration
+        }
+
+        private void verifyTemp(BitSet liveKill, Value operand) {
+            /*
+             * Fixed intervals are never live at block boundaries, so they need not be processed in
+             * live sets. Process them only in debug mode so that this can be checked
+             */
+            if (isRegister(operand)) {
+                if (allocator.isProcessed(operand)) {
+                    liveKill.set(allocator.operandNumber(operand));
+                }
             }
         }
-        assert index == numInstructions : "must match";
-        assert (index << 1) == opId : "must match: " + (index << 1);
-    }
+
+        private void verifyInput(AbstractBlockBase<?> block, BitSet liveKill, Value operand) {
+            /*
+             * Fixed intervals are never live at block boundaries, so they need not be processed in
+             * live sets. This is checked by these assertions to be sure about it. The entry block
+             * may have incoming values in registers, which is ok.
+             */
+            if (isRegister(operand) && block != allocator.getLIR().getControlFlowGraph().getStartBlock()) {
+                if (allocator.isProcessed(operand)) {
+                    assert liveKill.get(allocator.operandNumber(operand)) : "using fixed register that is not defined in this block";
+                }
+            }
+        }
 
-    /**
-     * Computes local live sets (i.e. {@link BlockData#liveGen} and {@link BlockData#liveKill})
-     * separately for each block.
-     */
-    @SuppressWarnings("try")
-    void computeLocalLiveSets() {
-        int liveSize = allocator.liveSetSize();
+        /**
+         * Performs a backward dataflow analysis to compute global live sets (i.e.
+         * {@link BlockData#liveIn} and {@link BlockData#liveOut}) for each block.
+         */
+        @SuppressWarnings("try")
+        protected void computeGlobalLiveSets() {
+            try (Indent indent = Debug.logAndIndent("compute global live sets")) {
+                int numBlocks = allocator.blockCount();
+                boolean changeOccurred;
+                boolean changeOccurredInBlock;
+                int iterationCount = 0;
+                BitSet liveOut = new BitSet(allocator.liveSetSize()); // scratch set for
+// calculations
 
-        intervalInLoop = new BitMap2D(allocator.operandSize(), allocator.numLoops());
+                /*
+                 * Perform a backward dataflow analysis to compute liveOut and liveIn for each
+                 * block. The loop is executed until a fixpoint is reached (no changes in an
+                 * iteration).
+                 */
+                do {
+                    changeOccurred = false;
+
+                    try (Indent indent2 = Debug.logAndIndent("new iteration %d", iterationCount)) {
+
+                        // iterate all blocks in reverse order
+                        for (int i = numBlocks - 1; i >= 0; i--) {
+                            AbstractBlockBase<?> block = allocator.blockAt(i);
+                            BlockData blockSets = allocator.getBlockData(block);
+
+                            changeOccurredInBlock = false;
 
-        // iterate all blocks
-        for (final AbstractBlockBase<?> block : allocator.sortedBlocks()) {
-            try (Indent indent = Debug.logAndIndent("compute local live sets for block %s", block)) {
-
-                final BitSet liveGen = new BitSet(liveSize);
-                final BitSet liveKill = new BitSet(liveSize);
+                            /*
+                             * liveOut(block) is the union of liveIn(sux), for successors sux of
+                             * block.
+                             */
+                            int n = block.getSuccessorCount();
+                            if (n > 0) {
+                                liveOut.clear();
+                                // block has successors
+                                if (n > 0) {
+                                    for (AbstractBlockBase<?> successor : block.getSuccessors()) {
+                                        if (allocator.sortedBlocks().contains(successor)) {
+                                            liveOut.or(allocator.getBlockData(successor).liveIn);
+                                        }
+                                    }
+                                }
 
-                List<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(block);
-                int numInst = instructions.size();
+                                if (!blockSets.liveOut.equals(liveOut)) {
+                                    /*
+                                     * A change occurred. Swap the old and new live out sets to
+                                     * avoid copying.
+                                     */
+                                    BitSet temp = blockSets.liveOut;
+                                    blockSets.liveOut = liveOut;
+                                    liveOut = temp;
+
+                                    changeOccurred = true;
+                                    changeOccurredInBlock = true;
+                                }
+                            }
 
-                ValueConsumer useConsumer = (operand, mode, flags) -> {
-                    if (isVariable(operand)) {
-                        int operandNum = allocator.operandNumber(operand);
-                        if (!liveKill.get(operandNum)) {
-                            liveGen.set(operandNum);
-                            if (Debug.isLogEnabled()) {
-                                Debug.log("liveGen for operand %d(%s)", operandNum, operand);
+                            if (iterationCount == 0 || changeOccurredInBlock) {
+                                /*
+                                 * liveIn(block) is the union of liveGen(block) with (liveOut(block)
+                                 * & !liveKill(block)).
+                                 * 
+                                 * Note: liveIn has to be computed only in first iteration or if
+                                 * liveOut has changed!
+                                 */
+                                BitSet liveIn = blockSets.liveIn;
+                                liveIn.clear();
+                                liveIn.or(blockSets.liveOut);
+                                liveIn.andNot(blockSets.liveKill);
+                                liveIn.or(blockSets.liveGen);
+
+                                if (Debug.isLogEnabled()) {
+                                    Debug.log("block %d: livein = %s,  liveout = %s", block.getId(), liveIn, blockSets.liveOut);
+                                }
                             }
                         }
-                        if (block.getLoop() != null) {
-                            intervalInLoop.setBit(operandNum, block.getLoop().getIndex());
+                        iterationCount++;
+
+                        if (changeOccurred && iterationCount > 50) {
+                            throw new BailoutException("too many iterations in computeGlobalLiveSets");
+                        }
+                    }
+                } while (changeOccurred);
+
+                if (DetailedAsserts.getValue()) {
+                    verifyLiveness();
+                }
+
+                // check that the liveIn set of the first block is empty
+                AbstractBlockBase<?> startBlock = allocator.blockAt(0);
+                if (allocator.getBlockData(startBlock).liveIn.cardinality() != 0) {
+                    if (DetailedAsserts.getValue()) {
+                        reportFailure(numBlocks);
+                    }
+                    // bailout if this occurs in product mode.
+                    throw new JVMCIError("liveIn set of first block must be empty: " + allocator.getBlockData(startBlock).liveIn);
+                }
+            }
+        }
+
+        @SuppressWarnings("try")
+        protected void reportFailure(int numBlocks) {
+            try (Scope s = Debug.forceLog()) {
+                try (Indent indent = Debug.logAndIndent("report failure")) {
+
+                    BitSet startBlockLiveIn = allocator.getBlockData(allocator.getLIR().getControlFlowGraph().getStartBlock()).liveIn;
+                    try (Indent indent2 = Debug.logAndIndent("Error: liveIn set of first block must be empty (when this fails, variables are used before they are defined):")) {
+                        for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) {
+                            TraceInterval interval = allocator.intervalFor(operandNum);
+                            if (interval != null) {
+                                Value operand = interval.operand;
+                                Debug.log("var %d; operand=%s; node=%s", operandNum, operand, getSourceForOperandFromDebugContext(operand));
+                            } else {
+                                Debug.log("var %d; missing operand", operandNum);
+                            }
                         }
                     }
 
-                    if (DetailedAsserts.getValue()) {
-                        verifyInput(block, liveKill, operand);
-                    }
-                };
-                ValueConsumer stateConsumer = (operand, mode, flags) -> {
-                    if (TraceLinearScan.isVariableOrRegister(operand)) {
-                        int operandNum = allocator.operandNumber(operand);
-                        if (!liveKill.get(operandNum)) {
-                            liveGen.set(operandNum);
-                            if (Debug.isLogEnabled()) {
-                                Debug.log("liveGen in state for operand %d(%s)", operandNum, operand);
-                            }
-                        }
-                    }
-                };
-                ValueConsumer defConsumer = (operand, mode, flags) -> {
-                    if (isVariable(operand)) {
-                        int varNum = allocator.operandNumber(operand);
-                        liveKill.set(varNum);
-                        if (Debug.isLogEnabled()) {
-                            Debug.log("liveKill for operand %d(%s)", varNum, operand);
-                        }
-                        if (block.getLoop() != null) {
-                            intervalInLoop.setBit(varNum, block.getLoop().getIndex());
+                    // print some additional information to simplify debugging
+                    for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) {
+                        TraceInterval interval = allocator.intervalFor(operandNum);
+                        Value operand = null;
+                        Object valueForOperandFromDebugContext = null;
+                        if (interval != null) {
+                            operand = interval.operand;
+                            valueForOperandFromDebugContext = getSourceForOperandFromDebugContext(operand);
                         }
-                    }
-
-                    if (DetailedAsserts.getValue()) {
-                        /*
-                         * Fixed intervals are never live at block boundaries, so they need not be
-                         * processed in live sets. Process them only in debug mode so that this can
-                         * be checked
-                         */
-                        verifyTemp(liveKill, operand);
-                    }
-                };
-
-                // iterate all instructions of the block
-                for (int j = 0; j < numInst; j++) {
-                    final LIRInstruction op = instructions.get(j);
-
-                    try (Indent indent2 = Debug.logAndIndent("handle op %d: %s", op.id(), op)) {
-                        op.visitEachInput(useConsumer);
-                        op.visitEachAlive(useConsumer);
-                        /*
-                         * Add uses of live locals from interpreter's point of view for proper debug
-                         * information generation.
-                         */
-                        op.visitEachState(stateConsumer);
-                        op.visitEachTemp(defConsumer);
-                        op.visitEachOutput(defConsumer);
-                    }
-                } // end of instruction iteration
-
-                BlockData blockSets = allocator.getBlockData(block);
-                blockSets.liveGen = liveGen;
-                blockSets.liveKill = liveKill;
-                blockSets.liveIn = new BitSet(liveSize);
-                blockSets.liveOut = new BitSet(liveSize);
-
-                if (Debug.isLogEnabled()) {
-                    Debug.log("liveGen  B%d %s", block.getId(), blockSets.liveGen);
-                    Debug.log("liveKill B%d %s", block.getId(), blockSets.liveKill);
-                }
+                        try (Indent indent2 = Debug.logAndIndent("---- Detailed information for var %d; operand=%s; node=%s ----", operandNum, operand, valueForOperandFromDebugContext)) {
 
-            }
-        } // end of block iteration
-    }
-
-    private void verifyTemp(BitSet liveKill, Value operand) {
-        /*
-         * Fixed intervals are never live at block boundaries, so they need not be processed in live
-         * sets. Process them only in debug mode so that this can be checked
-         */
-        if (isRegister(operand)) {
-            if (allocator.isProcessed(operand)) {
-                liveKill.set(allocator.operandNumber(operand));
-            }
-        }
-    }
-
-    private void verifyInput(AbstractBlockBase<?> block, BitSet liveKill, Value operand) {
-        /*
-         * Fixed intervals are never live at block boundaries, so they need not be processed in live
-         * sets. This is checked by these assertions to be sure about it. The entry block may have
-         * incoming values in registers, which is ok.
-         */
-        if (isRegister(operand) && block != allocator.getLIR().getControlFlowGraph().getStartBlock()) {
-            if (allocator.isProcessed(operand)) {
-                assert liveKill.get(allocator.operandNumber(operand)) : "using fixed register that is not defined in this block";
-            }
-        }
-    }
-
-    /**
-     * Performs a backward dataflow analysis to compute global live sets (i.e.
-     * {@link BlockData#liveIn} and {@link BlockData#liveOut}) for each block.
-     */
-    @SuppressWarnings("try")
-    protected void computeGlobalLiveSets() {
-        try (Indent indent = Debug.logAndIndent("compute global live sets")) {
-            int numBlocks = allocator.blockCount();
-            boolean changeOccurred;
-            boolean changeOccurredInBlock;
-            int iterationCount = 0;
-            BitSet liveOut = new BitSet(allocator.liveSetSize()); // scratch set for calculations
-
-            /*
-             * Perform a backward dataflow analysis to compute liveOut and liveIn for each block.
-             * The loop is executed until a fixpoint is reached (no changes in an iteration).
-             */
-            do {
-                changeOccurred = false;
-
-                try (Indent indent2 = Debug.logAndIndent("new iteration %d", iterationCount)) {
-
-                    // iterate all blocks in reverse order
-                    for (int i = numBlocks - 1; i >= 0; i--) {
-                        AbstractBlockBase<?> block = allocator.blockAt(i);
-                        BlockData blockSets = allocator.getBlockData(block);
-
-                        changeOccurredInBlock = false;
-
-                        /* liveOut(block) is the union of liveIn(sux), for successors sux of block. */
-                        int n = block.getSuccessorCount();
-                        if (n > 0) {
-                            liveOut.clear();
-                            // block has successors
-                            if (n > 0) {
-                                for (AbstractBlockBase<?> successor : block.getSuccessors()) {
-                                    if (allocator.sortedBlocks().contains(successor)) {
-                                        liveOut.or(allocator.getBlockData(successor).liveIn);
+                            Deque<AbstractBlockBase<?>> definedIn = new ArrayDeque<>();
+                            HashSet<AbstractBlockBase<?>> usedIn = new HashSet<>();
+                            for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
+                                if (allocator.getBlockData(block).liveGen.get(operandNum)) {
+                                    usedIn.add(block);
+                                    try (Indent indent3 = Debug.logAndIndent("used in block B%d", block.getId())) {
+                                        for (LIRInstruction ins : allocator.getLIR().getLIRforBlock(block)) {
+                                            try (Indent indent4 = Debug.logAndIndent("%d: %s", ins.id(), ins)) {
+                                                ins.forEachState((liveStateOperand, mode, flags) -> {
+                                                    Debug.log("operand=%s", liveStateOperand);
+                                                    return liveStateOperand;
+                                                });
+                                            }
+                                        }
+                                    }
+                                }
+                                if (allocator.getBlockData(block).liveKill.get(operandNum)) {
+                                    definedIn.add(block);
+                                    try (Indent indent3 = Debug.logAndIndent("defined in block B%d", block.getId())) {
+                                        for (LIRInstruction ins : allocator.getLIR().getLIRforBlock(block)) {
+                                            Debug.log("%d: %s", ins.id(), ins);
+                                        }
                                     }
                                 }
                             }
 
-                            if (!blockSets.liveOut.equals(liveOut)) {
-                                /*
-                                 * A change occurred. Swap the old and new live out sets to avoid
-                                 * copying.
-                                 */
-                                BitSet temp = blockSets.liveOut;
-                                blockSets.liveOut = liveOut;
-                                liveOut = temp;
-
-                                changeOccurred = true;
-                                changeOccurredInBlock = true;
-                            }
-                        }
-
-                        if (iterationCount == 0 || changeOccurredInBlock) {
-                            /*
-                             * liveIn(block) is the union of liveGen(block) with (liveOut(block) &
-                             * !liveKill(block)).
-                             *
-                             * Note: liveIn has to be computed only in first iteration or if liveOut
-                             * has changed!
-                             */
-                            BitSet liveIn = blockSets.liveIn;
-                            liveIn.clear();
-                            liveIn.or(blockSets.liveOut);
-                            liveIn.andNot(blockSets.liveKill);
-                            liveIn.or(blockSets.liveGen);
-
-                            if (Debug.isLogEnabled()) {
-                                Debug.log("block %d: livein = %s,  liveout = %s", block.getId(), liveIn, blockSets.liveOut);
-                            }
-                        }
-                    }
-                    iterationCount++;
-
-                    if (changeOccurred && iterationCount > 50) {
-                        throw new BailoutException("too many iterations in computeGlobalLiveSets");
-                    }
-                }
-            } while (changeOccurred);
-
-            if (DetailedAsserts.getValue()) {
-                verifyLiveness();
-            }
+                            int[] hitCount = new int[numBlocks];
 
-            // check that the liveIn set of the first block is empty
-            AbstractBlockBase<?> startBlock = allocator.blockAt(0);
-            if (allocator.getBlockData(startBlock).liveIn.cardinality() != 0) {
-                if (DetailedAsserts.getValue()) {
-                    reportFailure(numBlocks);
-                }
-                // bailout if this occurs in product mode.
-                throw new JVMCIError("liveIn set of first block must be empty: " + allocator.getBlockData(startBlock).liveIn);
-            }
-        }
-    }
-
-    @SuppressWarnings("try")
-    protected void reportFailure(int numBlocks) {
-        try (Scope s = Debug.forceLog()) {
-            try (Indent indent = Debug.logAndIndent("report failure")) {
-
-                BitSet startBlockLiveIn = allocator.getBlockData(allocator.getLIR().getControlFlowGraph().getStartBlock()).liveIn;
-                try (Indent indent2 = Debug.logAndIndent("Error: liveIn set of first block must be empty (when this fails, variables are used before they are defined):")) {
-                    for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) {
-                        TraceInterval interval = allocator.intervalFor(operandNum);
-                        if (interval != null) {
-                            Value operand = interval.operand;
-                            Debug.log("var %d; operand=%s; node=%s", operandNum, operand, getSourceForOperandFromDebugContext(operand));
-                        } else {
-                            Debug.log("var %d; missing operand", operandNum);
-                        }
-                    }
-                }
-
-                // print some additional information to simplify debugging
-                for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) {
-                    TraceInterval interval = allocator.intervalFor(operandNum);
-                    Value operand = null;
-                    Object valueForOperandFromDebugContext = null;
-                    if (interval != null) {
-                        operand = interval.operand;
-                        valueForOperandFromDebugContext = getSourceForOperandFromDebugContext(operand);
-                    }
-                    try (Indent indent2 = Debug.logAndIndent("---- Detailed information for var %d; operand=%s; node=%s ----", operandNum, operand, valueForOperandFromDebugContext)) {
-
-                        Deque<AbstractBlockBase<?>> definedIn = new ArrayDeque<>();
-                        HashSet<AbstractBlockBase<?>> usedIn = new HashSet<>();
-                        for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
-                            if (allocator.getBlockData(block).liveGen.get(operandNum)) {
-                                usedIn.add(block);
-                                try (Indent indent3 = Debug.logAndIndent("used in block B%d", block.getId())) {
-                                    for (LIRInstruction ins : allocator.getLIR().getLIRforBlock(block)) {
-                                        try (Indent indent4 = Debug.logAndIndent("%d: %s", ins.id(), ins)) {
-                                            ins.forEachState((liveStateOperand, mode, flags) -> {
-                                                Debug.log("operand=%s", liveStateOperand);
-                                                return liveStateOperand;
-                                            });
+                            while (!definedIn.isEmpty()) {
+                                AbstractBlockBase<?> block = definedIn.removeFirst();
+                                usedIn.remove(block);
+                                for (AbstractBlockBase<?> successor : block.getSuccessors()) {
+                                    if (successor.isLoopHeader()) {
+                                        if (!block.isLoopEnd()) {
+                                            definedIn.add(successor);
+                                        }
+                                    } else {
+                                        if (++hitCount[successor.getId()] == successor.getPredecessorCount()) {
+                                            definedIn.add(successor);
                                         }
                                     }
                                 }
                             }
-                            if (allocator.getBlockData(block).liveKill.get(operandNum)) {
-                                definedIn.add(block);
-                                try (Indent indent3 = Debug.logAndIndent("defined in block B%d", block.getId())) {
-                                    for (LIRInstruction ins : allocator.getLIR().getLIRforBlock(block)) {
-                                        Debug.log("%d: %s", ins.id(), ins);
-                                    }
+                            try (Indent indent3 = Debug.logAndIndent("**** offending usages are in: ")) {
+                                for (AbstractBlockBase<?> block : usedIn) {
+                                    Debug.log("B%d", block.getId());
                                 }
                             }
                         }
-
-                        int[] hitCount = new int[numBlocks];
-
-                        while (!definedIn.isEmpty()) {
-                            AbstractBlockBase<?> block = definedIn.removeFirst();
-                            usedIn.remove(block);
-                            for (AbstractBlockBase<?> successor : block.getSuccessors()) {
-                                if (successor.isLoopHeader()) {
-                                    if (!block.isLoopEnd()) {
-                                        definedIn.add(successor);
-                                    }
-                                } else {
-                                    if (++hitCount[successor.getId()] == successor.getPredecessorCount()) {
-                                        definedIn.add(successor);
-                                    }
-                                }
-                            }
-                        }
-                        try (Indent indent3 = Debug.logAndIndent("**** offending usages are in: ")) {
-                            for (AbstractBlockBase<?> block : usedIn) {
-                                Debug.log("B%d", block.getId());
-                            }
-                        }
                     }
                 }
-            }
-        } catch (Throwable e) {
-            throw Debug.handle(e);
-        }
-    }
-
-    protected void verifyLiveness() {
-        /*
-         * Check that fixed intervals are not live at block boundaries (live set must be empty at
-         * fixed intervals).
-         */
-        for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
-            for (int j = 0; j < allocator.numRegisters(); j++) {
-                assert !allocator.getBlockData(block).liveIn.get(j) : "liveIn  set of fixed register must be empty";
-                assert !allocator.getBlockData(block).liveOut.get(j) : "liveOut set of fixed register must be empty";
-                assert !allocator.getBlockData(block).liveGen.get(j) : "liveGen set of fixed register must be empty";
+            } catch (Throwable e) {
+                throw Debug.handle(e);
             }
         }
-    }
-
-    protected void addUse(AllocatableValue operand, int from, int to, RegisterPriority registerPriority, LIRKind kind) {
-        if (!allocator.isProcessed(operand)) {
-            return;
-        }
-        if (isRegister(operand)) {
-            addFixedUse(asRegisterValue(operand), from, to);
-        } else {
-            assert isVariable(operand) : operand;
-            addVariableUse(asVariable(operand), from, to, registerPriority, kind);
-        }
-    }
-
-    private void addFixedUse(RegisterValue reg, int from, int to) {
-        FixedInterval interval = allocator.getOrCreateFixedInterval(reg);
-        interval.addRange(from, to);
-        if (Debug.isLogEnabled()) {
-            Debug.log("add fixed use: %s, at %d", interval, to);
-        }
-    }
-
-    private void addVariableUse(Variable operand, int from, int to, RegisterPriority registerPriority, LIRKind kind) {
-        TraceInterval interval = allocator.getOrCreateInterval(operand);
-
-        if (!kind.equals(LIRKind.Illegal)) {
-            interval.setKind(kind);
-        }
-
-        interval.addRange(from, to);
-
-        // Register use position at even instruction id.
-        interval.addUsePos(to & ~1, registerPriority);
-
-        if (Debug.isLogEnabled()) {
-            Debug.log("add use: %s, at %d (%s)", interval, to, registerPriority.name());
-        }
-    }
-
-    protected void addTemp(AllocatableValue operand, int tempPos, RegisterPriority registerPriority, LIRKind kind) {
-        if (!allocator.isProcessed(operand)) {
-            return;
-        }
-        if (isRegister(operand)) {
-            addFixedTemp(asRegisterValue(operand), tempPos);
-        } else {
-            assert isVariable(operand) : operand;
-            addVariableTemp(asVariable(operand), tempPos, registerPriority, kind);
-        }
-    }
-
-    private void addFixedTemp(RegisterValue reg, int tempPos) {
-        FixedInterval interval = allocator.getOrCreateFixedInterval(reg);
-        interval.addRange(tempPos, tempPos + 1);
-        if (Debug.isLogEnabled()) {
-            Debug.log("add fixed temp: %s, at %d", interval, tempPos);
-        }
-    }
-
-    private void addVariableTemp(Variable operand, int tempPos, RegisterPriority registerPriority, LIRKind kind) {
-        TraceInterval interval = allocator.getOrCreateInterval(operand);
-
-        if (!kind.equals(LIRKind.Illegal)) {
-            interval.setKind(kind);
-        }
-
-        if (interval.isEmpty()) {
-            interval.addRange(tempPos, tempPos + 1);
-        } else if (interval.from() > tempPos) {
-            interval.setFrom(tempPos);
-        }
-
-        interval.addUsePos(tempPos, registerPriority);
-        interval.addMaterializationValue(null);
-
-        if (Debug.isLogEnabled()) {
-            Debug.log("add temp: %s tempPos %d (%s)", interval, tempPos, RegisterPriority.MustHaveRegister.name());
-        }
-    }
-
-    protected void addDef(AllocatableValue operand, LIRInstruction op, RegisterPriority registerPriority, LIRKind kind) {
-        if (!allocator.isProcessed(operand)) {
-            return;
-        }
-        if (isRegister(operand)) {
-            addFixedDef(asRegisterValue(operand), op);
-        } else {
-            assert isVariable(operand) : operand;
-            addVariableDef(asVariable(operand), op, registerPriority, kind);
-        }
-    }
-
-    private void addFixedDef(RegisterValue reg, LIRInstruction op) {
-        FixedInterval interval = allocator.getOrCreateFixedInterval(reg);
-        int defPos = op.id();
-        if (interval.from() <= defPos) {
-            /*
-             * Update the starting point (when a range is first created for a use, its start is the
-             * beginning of the current block until a def is encountered).
-             */
-            interval.setFrom(defPos);
-
-        } else {
-            /*
-             * Dead value - make vacuous interval also add register priority for dead intervals
-             */
-            interval.addRange(defPos, defPos + 1);
-            if (Debug.isLogEnabled()) {
-                Debug.log("Warning: def of operand %s at %d occurs without use", reg, defPos);
-            }
-        }
-        if (Debug.isLogEnabled()) {
-            Debug.log("add fixed def: %s, at %d", interval, defPos);
-        }
-    }
-
-    private void addVariableDef(Variable operand, LIRInstruction op, RegisterPriority registerPriority, LIRKind kind) {
-        int defPos = op.id();
-
-        TraceInterval interval = allocator.getOrCreateInterval(operand);
-
-        if (!kind.equals(LIRKind.Illegal)) {
-            interval.setKind(kind);
-        }
-
-        if (interval.isEmpty()) {
-            /*
-             * Dead value - make vacuous interval also add register priority for dead intervals
-             */
-            interval.addRange(defPos, defPos + 1);
-            interval.addUsePos(defPos, registerPriority);
-            if (Debug.isLogEnabled()) {
-                Debug.log("Warning: def of operand %s at %d occurs without use", operand, defPos);
-            }
-        } else {
-            /*
-             * Update the starting point (when a range is first created for a use, its start is the
-             * beginning of the current block until a def is encountered).
-             */
-            interval.setFrom(defPos);
-            interval.addUsePos(defPos, registerPriority);
-        }
 
-        changeSpillDefinitionPos(op, operand, interval, defPos);
-        if (registerPriority == RegisterPriority.None && interval.spillState().ordinal() <= SpillState.StartInMemory.ordinal() && isStackSlot(operand)) {
-            // detection of method-parameters and roundfp-results
-            interval.setSpillState(SpillState.StartInMemory);
-        }
-        interval.addMaterializationValue(getMaterializedValue(op, operand, interval));
-
-        if (Debug.isLogEnabled()) {
-            Debug.log("add def: %s defPos %d (%s)", interval, defPos, registerPriority.name());
-        }
-    }
-
-    /**
-     * Optimizes moves related to incoming stack based arguments. The interval for the destination
-     * of such moves is assigned the stack slot (which is in the caller's frame) as its spill slot.
-     */
-    protected void handleMethodArguments(LIRInstruction op) {
-        if (op instanceof StackStoreOp) {
-            StackStoreOp store = (StackStoreOp) op;
-            StackSlot slot = asStackSlot(store.getStackSlot());
-            TraceInterval interval = allocator.intervalFor(store.getResult());
-            interval.setSpillSlot(slot);
-            interval.setSpillState(SpillState.StartInMemory);
-        }
-    }
-
-    protected void addRegisterHint(final LIRInstruction op, final Value targetValue, OperandMode mode, EnumSet<OperandFlag> flags, final boolean hintAtDef) {
-        if (flags.contains(OperandFlag.HINT) && TraceLinearScan.isVariableOrRegister(targetValue)) {
-
-            op.forEachRegisterHint(targetValue, mode, (registerHint, valueMode, valueFlags) -> {
-                if (TraceLinearScan.isVariableOrRegister(registerHint)) {
-                    AllocatableValue fromValue;
-                    AllocatableValue toValue;
-                    /* hints always point from def to use */
-                    if (hintAtDef) {
-                        fromValue = (AllocatableValue) registerHint;
-                        toValue = (AllocatableValue) targetValue;
-                    } else {
-                        fromValue = (AllocatableValue) targetValue;
-                        toValue = (AllocatableValue) registerHint;
-                    }
-                    if (isRegister(toValue)) {
-                        /* fixed register: no need for a hint */
-                        return null;
-                    }
-
-                    TraceInterval to = allocator.getOrCreateInterval(toValue);
-                    IntervalHint from = getIntervalHint(fromValue);
-
-                    to.setLocationHint(from);
-                    if (Debug.isLogEnabled()) {
-                        Debug.log("operation at opId %d: added hint from interval %s to %s", op.id(), from, to);
-                    }
-
-                    return registerHint;
-                }
-                return null;
-            });
-        }
-    }
-
-    private IntervalHint getIntervalHint(AllocatableValue from) {
-        if (isRegister(from)) {
-            return allocator.getOrCreateFixedInterval(asRegisterValue(from));
-        }
-        return allocator.getOrCreateInterval(from);
-    }
-
-    /**
-     * Eliminates moves from register to stack if the stack slot is known to be correct.
-     *
-     * @param op
-     * @param operand
-     */
-    protected void changeSpillDefinitionPos(LIRInstruction op, AllocatableValue operand, TraceInterval interval, int defPos) {
-        assert interval.isSplitParent() : "can only be called for split parents";
-
-        switch (interval.spillState()) {
-            case NoDefinitionFound:
-                // assert interval.spillDefinitionPos() == -1 : "must no be set before";
-                interval.setSpillDefinitionPos(defPos);
-                if (!(op instanceof LabelOp)) {
-                    // Do not update state for labels. This will be done afterwards.
-                    interval.setSpillState(SpillState.NoSpillStore);
-                }
-                break;
-
-            case NoSpillStore:
-                assert defPos <= interval.spillDefinitionPos() : "positions are processed in reverse order when intervals are created";
-                if (defPos < interval.spillDefinitionPos() - 2) {
-                    // second definition found, so no spill optimization possible for this interval
-                    interval.setSpillState(SpillState.NoOptimization);
-                } else {
-                    // two consecutive definitions (because of two-operand LIR form)
-                    assert allocator.blockForId(defPos) == allocator.blockForId(interval.spillDefinitionPos()) : "block must be equal";
-                }
-                break;
-
-            case NoOptimization:
-                // nothing to do
-                break;
-
-            default:
-                throw new BailoutException("other states not allowed at this time");
-        }
-    }
-
-    private static boolean optimizeMethodArgument(Value value) {
-        /*
-         * Object method arguments that are passed on the stack are currently not optimized because
-         * this requires that the runtime visits method arguments during stack walking.
-         */
-        return isStackSlot(value) && asStackSlot(value).isInCallerFrame() && value.getLIRKind().isValue();
-    }
-
-    /**
-     * Determines the register priority for an instruction's output/result operand.
-     */
-    protected static RegisterPriority registerPriorityOfOutputOperand(LIRInstruction op) {
-        if (op instanceof LabelOp) {
-            // skip method header
-            return RegisterPriority.None;
-        }
-        if (op instanceof ValueMoveOp) {
-            ValueMoveOp move = (ValueMoveOp) op;
-            if (optimizeMethodArgument(move.getInput())) {
-                return RegisterPriority.None;
-            }
-        } else if (op instanceof StackStoreOp) {
-            return RegisterPriority.ShouldHaveRegister;
-        }
-
-        // all other operands require a register
-        return RegisterPriority.MustHaveRegister;
-    }
-
-    /**
-     * Determines the priority which with an instruction's input operand will be allocated a
-     * register.
-     */
-    protected static RegisterPriority registerPriorityOfInputOperand(EnumSet<OperandFlag> flags) {
-        if (flags.contains(OperandFlag.STACK)) {
-            return RegisterPriority.ShouldHaveRegister;
-        }
-        // all other operands require a register
-        return RegisterPriority.MustHaveRegister;
-    }
-
-    @SuppressWarnings("try")
-    protected void buildIntervals() {
-
-        try (Indent indent = Debug.logAndIndent("build intervals")) {
-            InstructionValueConsumer outputConsumer = (op, operand, mode, flags) -> {
-                if (TraceLinearScan.isVariableOrRegister(operand)) {
-                    addDef((AllocatableValue) operand, op, registerPriorityOfOutputOperand(op), operand.getLIRKind());
-                    addRegisterHint(op, operand, mode, flags, true);
-                }
-            };
-
-            InstructionValueConsumer tempConsumer = (op, operand, mode, flags) -> {
-                if (TraceLinearScan.isVariableOrRegister(operand)) {
-                    addTemp((AllocatableValue) operand, op.id(), RegisterPriority.MustHaveRegister, operand.getLIRKind());
-                    addRegisterHint(op, operand, mode, flags, false);
-                }
-            };
-
-            InstructionValueConsumer aliveConsumer = (op, operand, mode, flags) -> {
-                if (TraceLinearScan.isVariableOrRegister(operand)) {
-                    RegisterPriority p = registerPriorityOfInputOperand(flags);
-                    int opId = op.id();
-                    int blockFrom = allocator.getFirstLirInstructionId((allocator.blockForId(opId)));
-                    addUse((AllocatableValue) operand, blockFrom, opId + 1, p, operand.getLIRKind());
-                    addRegisterHint(op, operand, mode, flags, false);
-                }
-            };
-
-            InstructionValueConsumer inputConsumer = (op, operand, mode, flags) -> {
-                if (TraceLinearScan.isVariableOrRegister(operand)) {
-                    int opId = op.id();
-                    RegisterPriority p = registerPriorityOfInputOperand(flags);
-                    int blockFrom = allocator.getFirstLirInstructionId((allocator.blockForId(opId)));
-                    addUse((AllocatableValue) operand, blockFrom, opId, p, operand.getLIRKind());
-                    addRegisterHint(op, operand, mode, flags, false);
-                }
-            };
-
-            InstructionValueConsumer stateProc = (op, operand, mode, flags) -> {
-                if (TraceLinearScan.isVariableOrRegister(operand)) {
-                    int opId = op.id();
-                    int blockFrom = allocator.getFirstLirInstructionId((allocator.blockForId(opId)));
-                    addUse((AllocatableValue) operand, blockFrom, opId + 1, RegisterPriority.None, operand.getLIRKind());
-                }
-            };
-
-            // create a list with all caller-save registers (cpu, fpu, xmm)
-            Register[] callerSaveRegs = allocator.getRegisterAllocationConfig().getRegisterConfig().getCallerSaveRegisters();
-
-            // iterate all blocks in reverse order
-            for (int i = allocator.blockCount() - 1; i >= 0; i--) {
-
-                AbstractBlockBase<?> block = allocator.blockAt(i);
-                // TODO (je) make empty bitset - remove
-                allocator.getBlockData(block).liveIn = new BitSet();
-                allocator.getBlockData(block).liveOut = new BitSet();
-                try (Indent indent2 = Debug.logAndIndent("handle block %d", block.getId())) {
-
-                    List<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(block);
-
-                    /*
-                     * Iterate all instructions of the block in reverse order. definitions of
-                     * intervals are processed before uses.
-                     */
-                    for (int j = instructions.size() - 1; j >= 0; j--) {
-                        final LIRInstruction op = instructions.get(j);
-                        final int opId = op.id();
-
-                        try (Indent indent3 = Debug.logAndIndent("handle inst %d: %s", opId, op)) {
-
-                            // add a temp range for each register if operation destroys
-                            // caller-save registers
-                            if (op.destroysCallerSavedRegisters()) {
-                                for (Register r : callerSaveRegs) {
-                                    if (allocator.attributes(r).isAllocatable()) {
-                                        addTemp(r.asValue(), opId, RegisterPriority.None, LIRKind.Illegal);
-                                    }
-                                }
-                                if (Debug.isLogEnabled()) {
-                                    Debug.log("operation destroys all caller-save registers");
-                                }
-                            }
-
-                            op.visitEachOutput(outputConsumer);
-                            op.visitEachTemp(tempConsumer);
-                            op.visitEachAlive(aliveConsumer);
-                            op.visitEachInput(inputConsumer);
-
-                            /*
-                             * Add uses of live locals from interpreter's point of view for proper
-                             * debug information generation. Treat these operands as temp values (if
-                             * the live range is extended to a call site, the value would be in a
-                             * register at the call otherwise).
-                             */
-                            op.visitEachState(stateProc);
-
-                            // special steps for some instructions (especially moves)
-                            handleMethodArguments(op);
-
-                        }
-
-                    } // end of instruction iteration
-                }
-                if (Debug.isDumpEnabled(DUMP_DURING_ANALYSIS_LEVEL)) {
-                    allocator.printIntervals("After Block " + block);
-                }
-            } // end of block iteration
-
-            // fix spill state for phi/sigma intervals
-            for (TraceInterval interval : allocator.intervals()) {
-                if (interval != null && interval.spillState().equals(SpillState.NoDefinitionFound) && interval.spillDefinitionPos() != -1) {
-                    // there was a definition in a phi/sigma
-                    interval.setSpillState(SpillState.NoSpillStore);
-                }
-            }
-            if (TraceRAuseInterTraceHints.getValue()) {
-                addInterTraceHints();
-            }
+        protected void verifyLiveness() {
             /*
-             * Add the range [-1, 0] to all fixed intervals. the register allocator need not handle
-             * unhandled fixed intervals.
+             * Check that fixed intervals are not live at block boundaries (live set must be empty
+             * at fixed intervals).
              */
-            for (FixedInterval interval : allocator.fixedIntervals()) {
-                if (interval != null) {
-                    /* We use [-1, 0] to avoid intersection with incoming values. */
-                    interval.addRange(-1, 0);
+            for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
+                for (int j = 0; j < allocator.numRegisters(); j++) {
+                    assert !allocator.getBlockData(block).liveIn.get(j) : "liveIn  set of fixed register must be empty";
+                    assert !allocator.getBlockData(block).liveOut.get(j) : "liveOut set of fixed register must be empty";
+                    assert !allocator.getBlockData(block).liveGen.get(j) : "liveGen set of fixed register must be empty";
                 }
             }
         }
-    }
+
+        protected void addUse(AllocatableValue operand, int from, int to, RegisterPriority registerPriority, LIRKind kind) {
+            if (!allocator.isProcessed(operand)) {
+                return;
+            }
+            if (isRegister(operand)) {
+                addFixedUse(asRegisterValue(operand), from, to);
+            } else {
+                assert isVariable(operand) : operand;
+                addVariableUse(asVariable(operand), from, to, registerPriority, kind);
+            }
+        }
+
+        private void addFixedUse(RegisterValue reg, int from, int to) {
+            FixedInterval interval = allocator.getOrCreateFixedInterval(reg);
+            interval.addRange(from, to);
+            if (Debug.isLogEnabled()) {
+                Debug.log("add fixed use: %s, at %d", interval, to);
+            }
+        }
+
+        private void addVariableUse(Variable operand, int from, int to, RegisterPriority registerPriority, LIRKind kind) {
+            TraceInterval interval = allocator.getOrCreateInterval(operand);
+
+            if (!kind.equals(LIRKind.Illegal)) {
+                interval.setKind(kind);
+            }
+
+            interval.addRange(from, to);
+
+            // Register use position at even instruction id.
+            interval.addUsePos(to & ~1, registerPriority);
+
+            if (Debug.isLogEnabled()) {
+                Debug.log("add use: %s, at %d (%s)", interval, to, registerPriority.name());
+            }
+        }
+
+        protected void addTemp(AllocatableValue operand, int tempPos, RegisterPriority registerPriority, LIRKind kind) {
+            if (!allocator.isProcessed(operand)) {
+                return;
+            }
+            if (isRegister(operand)) {
+                addFixedTemp(asRegisterValue(operand), tempPos);
+            } else {
+                assert isVariable(operand) : operand;
+                addVariableTemp(asVariable(operand), tempPos, registerPriority, kind);
+            }
+        }
+
+        private void addFixedTemp(RegisterValue reg, int tempPos) {
+            FixedInterval interval = allocator.getOrCreateFixedInterval(reg);
+            interval.addRange(tempPos, tempPos + 1);
+            if (Debug.isLogEnabled()) {
+                Debug.log("add fixed temp: %s, at %d", interval, tempPos);
+            }
+        }
+
+        private void addVariableTemp(Variable operand, int tempPos, RegisterPriority registerPriority, LIRKind kind) {
+            TraceInterval interval = allocator.getOrCreateInterval(operand);
+
+            if (!kind.equals(LIRKind.Illegal)) {
+                interval.setKind(kind);
+            }
+
+            if (interval.isEmpty()) {
+                interval.addRange(tempPos, tempPos + 1);
+            } else if (interval.from() > tempPos) {
+                interval.setFrom(tempPos);
+            }
+
+            interval.addUsePos(tempPos, registerPriority);
+            interval.addMaterializationValue(null);
+
+            if (Debug.isLogEnabled()) {
+                Debug.log("add temp: %s tempPos %d (%s)", interval, tempPos, RegisterPriority.MustHaveRegister.name());
+            }
+        }
+
+        protected void addDef(AllocatableValue operand, LIRInstruction op, RegisterPriority registerPriority, LIRKind kind) {
+            if (!allocator.isProcessed(operand)) {
+                return;
+            }
+            if (isRegister(operand)) {
+                addFixedDef(asRegisterValue(operand), op);
+            } else {
+                assert isVariable(operand) : operand;
+                addVariableDef(asVariable(operand), op, registerPriority, kind);
+            }
+        }
+
+        private void addFixedDef(RegisterValue reg, LIRInstruction op) {
+            FixedInterval interval = allocator.getOrCreateFixedInterval(reg);
+            int defPos = op.id();
+            if (interval.from() <= defPos) {
+                /*
+                 * Update the starting point (when a range is first created for a use, its start is
+                 * the beginning of the current block until a def is encountered).
+                 */
+                interval.setFrom(defPos);
+
+            } else {
+                /*
+                 * Dead value - make vacuous interval also add register priority for dead intervals
+                 */
+                interval.addRange(defPos, defPos + 1);
+                if (Debug.isLogEnabled()) {
+                    Debug.log("Warning: def of operand %s at %d occurs without use", reg, defPos);
+                }
+            }
+            if (Debug.isLogEnabled()) {
+                Debug.log("add fixed def: %s, at %d", interval, defPos);
+            }
+        }
+
+        private void addVariableDef(Variable operand, LIRInstruction op, RegisterPriority registerPriority, LIRKind kind) {
+            int defPos = op.id();
+
+            TraceInterval interval = allocator.getOrCreateInterval(operand);
+
+            if (!kind.equals(LIRKind.Illegal)) {
+                interval.setKind(kind);
+            }
+
+            if (interval.isEmpty()) {
+                /*
+                 * Dead value - make vacuous interval also add register priority for dead intervals
+                 */
+                interval.addRange(defPos, defPos + 1);
+                interval.addUsePos(defPos, registerPriority);
+                if (Debug.isLogEnabled()) {
+                    Debug.log("Warning: def of operand %s at %d occurs without use", operand, defPos);
+                }
+            } else {
+                /*
+                 * Update the starting point (when a range is first created for a use, its start is
+                 * the beginning of the current block until a def is encountered).
+                 */
+                interval.setFrom(defPos);
+                interval.addUsePos(defPos, registerPriority);
+            }
+
+            changeSpillDefinitionPos(op, operand, interval, defPos);
+            if (registerPriority == RegisterPriority.None && interval.spillState().ordinal() <= SpillState.StartInMemory.ordinal() && isStackSlot(operand)) {
+                // detection of method-parameters and roundfp-results
+                interval.setSpillState(SpillState.StartInMemory);
+            }
+            interval.addMaterializationValue(getMaterializedValue(op, operand, interval));
+
+            if (Debug.isLogEnabled()) {
+                Debug.log("add def: %s defPos %d (%s)", interval, defPos, registerPriority.name());
+            }
+        }
+
+        /**
+         * Optimizes moves related to incoming stack based arguments. The interval for the
+         * destination of such moves is assigned the stack slot (which is in the caller's frame) as
+         * its spill slot.
+         */
+        protected void handleMethodArguments(LIRInstruction op) {
+            if (op instanceof StackStoreOp) {
+                StackStoreOp store = (StackStoreOp) op;
+                StackSlot slot = asStackSlot(store.getStackSlot());
+                TraceInterval interval = allocator.intervalFor(store.getResult());
+                interval.setSpillSlot(slot);
+                interval.setSpillState(SpillState.StartInMemory);
+            }
+        }
+
+        protected void addRegisterHint(final LIRInstruction op, final Value targetValue, OperandMode mode, EnumSet<OperandFlag> flags, final boolean hintAtDef) {
+            if (flags.contains(OperandFlag.HINT) && TraceLinearScan.isVariableOrRegister(targetValue)) {
+
+                op.forEachRegisterHint(targetValue, mode, (registerHint, valueMode, valueFlags) -> {
+                    if (TraceLinearScan.isVariableOrRegister(registerHint)) {
+                        AllocatableValue fromValue;
+                        AllocatableValue toValue;
+                        /* hints always point from def to use */
+                        if (hintAtDef) {
+                            fromValue = (AllocatableValue) registerHint;
+                            toValue = (AllocatableValue) targetValue;
+                        } else {
+                            fromValue = (AllocatableValue) targetValue;
+                            toValue = (AllocatableValue) registerHint;
+                        }
+                        if (isRegister(toValue)) {
+                            /* fixed register: no need for a hint */
+                            return null;
+                        }
+
+                        TraceInterval to = allocator.getOrCreateInterval(toValue);
+                        IntervalHint from = getIntervalHint(fromValue);
+
+                        to.setLocationHint(from);
+                        if (Debug.isLogEnabled()) {
+                            Debug.log("operation at opId %d: added hint from interval %s to %s", op.id(), from, to);
+                        }
+
+                        return registerHint;
+                    }
+                    return null;
+                });
+            }
+        }
+
+        private IntervalHint getIntervalHint(AllocatableValue from) {
+            if (isRegister(from)) {
+                return allocator.getOrCreateFixedInterval(asRegisterValue(from));
+            }
+            return allocator.getOrCreateInterval(from);
+        }
+
+        /**
+         * Eliminates moves from register to stack if the stack slot is known to be correct.
+         *
+         * @param op
+         * @param operand
+         */
+        protected void changeSpillDefinitionPos(LIRInstruction op, AllocatableValue operand, TraceInterval interval, int defPos) {
+            assert interval.isSplitParent() : "can only be called for split parents";
 
-    private void addInterTraceHints() {
-        // set hints for phi/sigma intervals
-        LIR lir = allocator.getLIR();
-        for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
-            LabelOp label = SSIUtil.incoming(lir, block);
-            for (AbstractBlockBase<?> pred : block.getPredecessors()) {
-                if (isAllocatedOrCurrent(block, pred)) {
-                    BlockEndOp outgoing = SSIUtil.outgoing(lir, pred);
-                    for (int i = 0; i < outgoing.getOutgoingSize(); i++) {
-                        Value toValue = label.getIncomingValue(i);
-                        if (!isIllegal(toValue) && !isRegister(toValue)) {
-                            Value fromValue = outgoing.getOutgoingValue(i);
-                            assert sameTrace(block, pred) || !isVariable(fromValue) : "Unallocated variable: " + fromValue;
+            switch (interval.spillState()) {
+                case NoDefinitionFound:
+                    // assert interval.spillDefinitionPos() == -1 : "must no be set before";
+                    interval.setSpillDefinitionPos(defPos);
+                    if (!(op instanceof LabelOp)) {
+                        // Do not update state for labels. This will be done afterwards.
+                        interval.setSpillState(SpillState.NoSpillStore);
+                    }
+                    break;
+
+                case NoSpillStore:
+                    assert defPos <= interval.spillDefinitionPos() : "positions are processed in reverse order when intervals are created";
+                    if (defPos < interval.spillDefinitionPos() - 2) {
+                        // second definition found, so no spill optimization possible for this
+// interval
+                        interval.setSpillState(SpillState.NoOptimization);
+                    } else {
+                        // two consecutive definitions (because of two-operand LIR form)
+                        assert allocator.blockForId(defPos) == allocator.blockForId(interval.spillDefinitionPos()) : "block must be equal";
+                    }
+                    break;
+
+                case NoOptimization:
+                    // nothing to do
+                    break;
+
+                default:
+                    throw new BailoutException("other states not allowed at this time");
+            }
+        }
+
+        private static boolean optimizeMethodArgument(Value value) {
+            /*
+             * Object method arguments that are passed on the stack are currently not optimized
+             * because this requires that the runtime visits method arguments during stack walking.
+             */
+            return isStackSlot(value) && asStackSlot(value).isInCallerFrame() && value.getLIRKind().isValue();
+        }
+
+        /**
+         * Determines the register priority for an instruction's output/result operand.
+         */
+        protected static RegisterPriority registerPriorityOfOutputOperand(LIRInstruction op) {
+            if (op instanceof LabelOp) {
+                // skip method header
+                return RegisterPriority.None;
+            }
+            if (op instanceof ValueMoveOp) {
+                ValueMoveOp move = (ValueMoveOp) op;
+                if (optimizeMethodArgument(move.getInput())) {
+                    return RegisterPriority.None;
+                }
+            } else if (op instanceof StackStoreOp) {
+                return RegisterPriority.ShouldHaveRegister;
+            }
+
+            // all other operands require a register
+            return RegisterPriority.MustHaveRegister;
+        }
+
+        /**
+         * Determines the priority which with an instruction's input operand will be allocated a
+         * register.
+         */
+        protected static RegisterPriority registerPriorityOfInputOperand(EnumSet<OperandFlag> flags) {
+            if (flags.contains(OperandFlag.STACK)) {
+                return RegisterPriority.ShouldHaveRegister;
+            }
+            // all other operands require a register
+            return RegisterPriority.MustHaveRegister;
+        }
+
+        @SuppressWarnings("try")
+        protected void buildIntervals() {
+
+            try (Indent indent = Debug.logAndIndent("build intervals")) {
+                InstructionValueConsumer outputConsumer = (op, operand, mode, flags) -> {
+                    if (TraceLinearScan.isVariableOrRegister(operand)) {
+                        addDef((AllocatableValue) operand, op, registerPriorityOfOutputOperand(op), operand.getLIRKind());
+                        addRegisterHint(op, operand, mode, flags, true);
+                    }
+                };
+
+                InstructionValueConsumer tempConsumer = (op, operand, mode, flags) -> {
+                    if (TraceLinearScan.isVariableOrRegister(operand)) {
+                        addTemp((AllocatableValue) operand, op.id(), RegisterPriority.MustHaveRegister, operand.getLIRKind());
+                        addRegisterHint(op, operand, mode, flags, false);
+                    }
+                };
+
+                InstructionValueConsumer aliveConsumer = (op, operand, mode, flags) -> {
+                    if (TraceLinearScan.isVariableOrRegister(operand)) {
+                        RegisterPriority p = registerPriorityOfInputOperand(flags);
+                        int opId = op.id();
+                        int blockFrom = allocator.getFirstLirInstructionId((allocator.blockForId(opId)));
+                        addUse((AllocatableValue) operand, blockFrom, opId + 1, p, operand.getLIRKind());
+                        addRegisterHint(op, operand, mode, flags, false);
+                    }
+                };
+
+                InstructionValueConsumer inputConsumer = (op, operand, mode, flags) -> {
+                    if (TraceLinearScan.isVariableOrRegister(operand)) {
+                        int opId = op.id();
+                        RegisterPriority p = registerPriorityOfInputOperand(flags);
+                        int blockFrom = allocator.getFirstLirInstructionId((allocator.blockForId(opId)));
+                        addUse((AllocatableValue) operand, blockFrom, opId, p, operand.getLIRKind());
+                        addRegisterHint(op, operand, mode, flags, false);
+                    }
+                };
 
-                            if (isVariableOrRegister(fromValue)) {
-                                IntervalHint from = getIntervalHint((AllocatableValue) fromValue);
-                                TraceInterval to = allocator.getOrCreateInterval((AllocatableValue) toValue);
-                                setHint(label, to, from);
-                            } else if (TraceRAshareSpillInformation.getValue() && TraceUtil.isShadowedRegisterValue(fromValue)) {
-                                ShadowedRegisterValue shadowedRegisterValue = TraceUtil.asShadowedRegisterValue(fromValue);
-                                IntervalHint from = getIntervalHint(shadowedRegisterValue.getRegister());
-                                TraceInterval to = allocator.getOrCreateInterval((AllocatableValue) toValue);
-                                setHint(label, to, from);
-                                to.setSpillSlot(shadowedRegisterValue.getStackSlot());
-                                to.setSpillState(SpillState.StartInMemory);
+                InstructionValueConsumer stateProc = (op, operand, mode, flags) -> {
+                    if (TraceLinearScan.isVariableOrRegister(operand)) {
+                        int opId = op.id();
+                        int blockFrom = allocator.getFirstLirInstructionId((allocator.blockForId(opId)));
+                        addUse((AllocatableValue) operand, blockFrom, opId + 1, RegisterPriority.None, operand.getLIRKind());
+                    }
+                };
+
+                // create a list with all caller-save registers (cpu, fpu, xmm)
+                Register[] callerSaveRegs = allocator.getRegisterAllocationConfig().getRegisterConfig().getCallerSaveRegisters();
+
+                // iterate all blocks in reverse order
+                for (int i = allocator.blockCount() - 1; i >= 0; i--) {
+
+                    AbstractBlockBase<?> block = allocator.blockAt(i);
+                    // TODO (je) make empty bitset - remove
+                    allocator.getBlockData(block).liveIn = new BitSet();
+                    allocator.getBlockData(block).liveOut = new BitSet();
+                    try (Indent indent2 = Debug.logAndIndent("handle block %d", block.getId())) {
+
+                        List<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(block);
+
+                        /*
+                         * Iterate all instructions of the block in reverse order. definitions of
+                         * intervals are processed before uses.
+                         */
+                        for (int j = instructions.size() - 1; j >= 0; j--) {
+                            final LIRInstruction op = instructions.get(j);
+                            final int opId = op.id();
+
+                            try (Indent indent3 = Debug.logAndIndent("handle inst %d: %s", opId, op)) {
+
+                                // add a temp range for each register if operation destroys
+                                // caller-save registers
+                                if (op.destroysCallerSavedRegisters()) {
+                                    for (Register r : callerSaveRegs) {
+                                        if (allocator.attributes(r).isAllocatable()) {
+                                            addTemp(r.asValue(), opId, RegisterPriority.None, LIRKind.Illegal);
+                                        }
+                                    }
+                                    if (Debug.isLogEnabled()) {
+                                        Debug.log("operation destroys all caller-save registers");
+                                    }
+                                }
+
+                                op.visitEachOutput(outputConsumer);
+                                op.visitEachTemp(tempConsumer);
+                                op.visitEachAlive(aliveConsumer);
+                                op.visitEachInput(inputConsumer);
+
+                                /*
+                                 * Add uses of live locals from interpreter's point of view for
+                                 * proper debug information generation. Treat these operands as temp
+                                 * values (if the live range is extended to a call site, the value
+                                 * would be in a register at the call otherwise).
+                                 */
+                                op.visitEachState(stateProc);
+
+                                // special steps for some instructions (especially moves)
+                                handleMethodArguments(op);
+
+                            }
+
+                        } // end of instruction iteration
+                    }
+                    if (Debug.isDumpEnabled(DUMP_DURING_ANALYSIS_LEVEL)) {
+                        allocator.printIntervals("After Block " + block);
+                    }
+                } // end of block iteration
+
+                // fix spill state for phi/sigma intervals
+                for (TraceInterval interval : allocator.intervals()) {
+                    if (interval != null && interval.spillState().equals(SpillState.NoDefinitionFound) && interval.spillDefinitionPos() != -1) {
+                        // there was a definition in a phi/sigma
+                        interval.setSpillState(SpillState.NoSpillStore);
+                    }
+                }
+                if (TraceRAuseInterTraceHints.getValue()) {
+                    addInterTraceHints();
+                }
+                /*
+                 * Add the range [-1, 0] to all fixed intervals. The register allocator need not
+                 * handle unhandled fixed intervals.
+                 */
+                for (FixedInterval interval : allocator.fixedIntervals()) {
+                    if (interval != null) {
+                        /* We use [-1, 0] to avoid intersection with incoming values. */
+                        interval.addRange(-1, 0);
+                    }
+                }
+            }
+        }
+
+        private void addInterTraceHints() {
+            // set hints for phi/sigma intervals
+            LIR lir = allocator.getLIR();
+            for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
+                LabelOp label = SSIUtil.incoming(lir, block);
+                for (AbstractBlockBase<?> pred : block.getPredecessors()) {
+                    if (isAllocatedOrCurrent(block, pred)) {
+                        BlockEndOp outgoing = SSIUtil.outgoing(lir, pred);
+                        for (int i = 0; i < outgoing.getOutgoingSize(); i++) {
+                            Value toValue = label.getIncomingValue(i);
+                            if (!isIllegal(toValue) && !isRegister(toValue)) {
+                                Value fromValue = outgoing.getOutgoingValue(i);
+                                assert sameTrace(block, pred) || !isVariable(fromValue) : "Unallocated variable: " + fromValue;
+
+                                if (isVariableOrRegister(fromValue)) {
+                                    IntervalHint from = getIntervalHint((AllocatableValue) fromValue);
+                                    TraceInterval to = allocator.getOrCreateInterval((AllocatableValue) toValue);
+                                    setHint(label, to, from);
+                                } else if (TraceRAshareSpillInformation.getValue() && TraceUtil.isShadowedRegisterValue(fromValue)) {
+                                    ShadowedRegisterValue shadowedRegisterValue = TraceUtil.asShadowedRegisterValue(fromValue);
+                                    IntervalHint from = getIntervalHint(shadowedRegisterValue.getRegister());
+                                    TraceInterval to = allocator.getOrCreateInterval((AllocatableValue) toValue);
+                                    setHint(label, to, from);
+                                    to.setSpillSlot(shadowedRegisterValue.getStackSlot());
+                                    to.setSpillState(SpillState.StartInMemory);
+                                }
                             }
                         }
                     }
                 }
             }
         }
-    }
 
-    /**
-     * Returns a value for a interval definition, which can be used for re-materialization.
-     *
-     * @param op An instruction which defines a value
-     * @param operand The destination operand of the instruction
-     * @param interval The interval for this defined value.
-     * @return Returns the value which is moved to the instruction and which can be reused at all
-     *         reload-locations in case the interval of this instruction is spilled. Currently this
-     *         can only be a {@link JavaConstant}.
-     */
-    protected static JavaConstant getMaterializedValue(LIRInstruction op, Value operand, TraceInterval interval) {
-        if (op instanceof LoadConstantOp) {
-            LoadConstantOp move = (LoadConstantOp) op;
-            if (move.getConstant() instanceof JavaConstant) {
-                /*
-                 * Check if the interval has any uses which would accept an stack location (priority
-                 * == ShouldHaveRegister). Rematerialization of such intervals can result in a
-                 * degradation, because rematerialization always inserts a constant load, even if
-                 * the value is not needed in a register.
-                 */
-                UsePosList usePosList = interval.usePosList();
-                int numUsePos = usePosList.size();
-                for (int useIdx = 0; useIdx < numUsePos; useIdx++) {
-                    TraceInterval.RegisterPriority priority = usePosList.registerPriority(useIdx);
-                    if (priority == TraceInterval.RegisterPriority.ShouldHaveRegister) {
-                        return null;
+        /**
+         * Returns a value for an interval definition, which can be used for re-materialization.
+         *
+         * @param op An instruction which defines a value
+         * @param operand The destination operand of the instruction
+         * @param interval The interval for this defined value.
+         * @return Returns the value which is moved to the instruction and which can be reused at
+         *         all reload-locations in case the interval of this instruction is spilled.
+         *         Currently this can only be a {@link JavaConstant}.
+         */
+        protected static JavaConstant getMaterializedValue(LIRInstruction op, Value operand, TraceInterval interval) {
+            if (op instanceof LoadConstantOp) {
+                LoadConstantOp move = (LoadConstantOp) op;
+                if (move.getConstant() instanceof JavaConstant) {
+                    /*
+                     * Check if the interval has any uses which would accept a stack location
+                     * (priority == ShouldHaveRegister). Rematerialization of such intervals can
+                     * result in a degradation, because rematerialization always inserts a constant
+                     * load, even if the value is not needed in a register.
+                     */
+                    UsePosList usePosList = interval.usePosList();
+                    int numUsePos = usePosList.size();
+                    for (int useIdx = 0; useIdx < numUsePos; useIdx++) {
+                        TraceInterval.RegisterPriority priority = usePosList.registerPriority(useIdx);
+                        if (priority == TraceInterval.RegisterPriority.ShouldHaveRegister) {
+                            return null;
+                        }
                     }
+                    return (JavaConstant) move.getConstant();
                 }
-                return (JavaConstant) move.getConstant();
             }
+            return null;
         }
-        return null;
     }
 }
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanRegisterAllocationPhase.java	Tue Sep 08 10:50:21 2015 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanRegisterAllocationPhase.java	Tue Sep 08 18:53:43 2015 +0200
@@ -22,35 +22,30 @@
  */
 package com.oracle.graal.lir.alloc.trace;
 
-import java.util.*;
+import java.util.List;
 
-import jdk.internal.jvmci.code.*;
+import jdk.internal.jvmci.code.TargetDescription;
 
-import com.oracle.graal.debug.*;
-import com.oracle.graal.compiler.common.alloc.*;
-import com.oracle.graal.compiler.common.cfg.*;
-import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.compiler.common.alloc.RegisterAllocationConfig;
+import com.oracle.graal.compiler.common.alloc.TraceBuilder.TraceBuilderResult;
+import com.oracle.graal.compiler.common.cfg.AbstractBlockBase;
+import com.oracle.graal.debug.Debug;
+import com.oracle.graal.debug.Indent;
+import com.oracle.graal.lir.gen.LIRGenerationResult;
 import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
-import com.oracle.graal.lir.phases.*;
 
-final class TraceLinearScanRegisterAllocationPhase extends AllocationPhase {
-
-    private final TraceLinearScan allocator;
-
-    TraceLinearScanRegisterAllocationPhase(TraceLinearScan allocator) {
-        this.allocator = allocator;
-    }
+final class TraceLinearScanRegisterAllocationPhase extends TraceLinearScanAllocationPhase {
 
     @Override
     protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory,
-                    RegisterAllocationConfig registerAllocationConfig) {
+                    RegisterAllocationConfig registerAllocationConfig, TraceBuilderResult<?> traceBuilderResult, TraceLinearScan allocator) {
         allocator.printIntervals("Before register allocation");
-        allocateRegisters();
+        allocateRegisters(allocator);
         allocator.printIntervals("After register allocation");
     }
 
     @SuppressWarnings("try")
-    void allocateRegisters() {
+    private static void allocateRegisters(TraceLinearScan allocator) {
         try (Indent indent = Debug.logAndIndent("allocate registers")) {
             FixedInterval precoloredIntervals = allocator.createFixedUnhandledList();
             TraceInterval notPrecoloredIntervals = allocator.createUnhandledList(TraceLinearScan.IS_VARIABLE_INTERVAL);
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanResolveDataFlowPhase.java	Tue Sep 08 10:50:21 2015 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanResolveDataFlowPhase.java	Tue Sep 08 18:53:43 2015 +0200
@@ -22,176 +22,186 @@
  */
 package com.oracle.graal.lir.alloc.trace;
 
-import static com.oracle.graal.compiler.common.GraalOptions.*;
-import static com.oracle.graal.lir.LIRValueUtil.*;
-import static jdk.internal.jvmci.code.ValueUtil.*;
+import static com.oracle.graal.compiler.common.GraalOptions.DetailedAsserts;
+import static com.oracle.graal.lir.LIRValueUtil.asConstant;
+import static com.oracle.graal.lir.LIRValueUtil.isConstantValue;
+import static jdk.internal.jvmci.code.ValueUtil.isRegister;
+import static jdk.internal.jvmci.code.ValueUtil.isStackSlotValue;
+import static jdk.internal.jvmci.code.ValueUtil.isVirtualStackSlot;
 
-import java.util.*;
+import java.util.List;
+import java.util.ListIterator;
 
-import jdk.internal.jvmci.code.*;
-import jdk.internal.jvmci.meta.*;
+import jdk.internal.jvmci.code.TargetDescription;
+import jdk.internal.jvmci.meta.Value;
 
-import com.oracle.graal.compiler.common.alloc.*;
+import com.oracle.graal.compiler.common.alloc.RegisterAllocationConfig;
 import com.oracle.graal.compiler.common.alloc.TraceBuilder.TraceBuilderResult;
-import com.oracle.graal.compiler.common.cfg.*;
-import com.oracle.graal.debug.*;
-import com.oracle.graal.lir.*;
-import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.compiler.common.cfg.AbstractBlockBase;
+import com.oracle.graal.debug.Debug;
+import com.oracle.graal.debug.DebugMetric;
+import com.oracle.graal.debug.Indent;
+import com.oracle.graal.lir.LIRInstruction;
+import com.oracle.graal.lir.StandardOp;
+import com.oracle.graal.lir.gen.LIRGenerationResult;
 import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
-import com.oracle.graal.lir.phases.*;
 import com.oracle.graal.lir.ssa.SSAUtil.PhiValueVisitor;
-import com.oracle.graal.lir.ssi.*;
+import com.oracle.graal.lir.ssi.SSIUtil;
 
 /**
  * Phase 6: resolve data flow
  *
  * Insert moves at edges between blocks if intervals have been split.
  */
-final class TraceLinearScanResolveDataFlowPhase extends AllocationPhase {
-
-    private final TraceLinearScan allocator;
-    private final TraceBuilderResult<?> traceBuilderResult;
-
-    protected TraceLinearScanResolveDataFlowPhase(TraceLinearScan allocator, TraceBuilderResult<?> traceBuilderResult) {
-        this.allocator = allocator;
-        this.traceBuilderResult = traceBuilderResult;
-    }
+final class TraceLinearScanResolveDataFlowPhase extends TraceLinearScanAllocationPhase {
 
     @Override
     protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory,
-                    RegisterAllocationConfig registerAllocationConfig) {
-        resolveDataFlow(allocator.sortedBlocks());
-    }
-
-    private void resolveFindInsertPos(AbstractBlockBase<?> fromBlock, AbstractBlockBase<?> toBlock, TraceLocalMoveResolver moveResolver) {
-        if (fromBlock.getSuccessorCount() <= 1) {
-            if (Debug.isLogEnabled()) {
-                Debug.log("inserting moves at end of fromBlock B%d", fromBlock.getId());
-            }
-
-            List<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(fromBlock);
-            LIRInstruction instr = instructions.get(instructions.size() - 1);
-            if (instr instanceof StandardOp.JumpOp) {
-                // insert moves before branch
-                moveResolver.setInsertPosition(instructions, instructions.size() - 1);
-            } else {
-                moveResolver.setInsertPosition(instructions, instructions.size());
-            }
-
-        } else {
-            if (Debug.isLogEnabled()) {
-                Debug.log("inserting moves at beginning of toBlock B%d", toBlock.getId());
-            }
-
-            if (DetailedAsserts.getValue()) {
-                assert allocator.getLIR().getLIRforBlock(fromBlock).get(0) instanceof StandardOp.LabelOp : "block does not start with a label";
-
-                /*
-                 * Because the number of predecessor edges matches the number of successor edges,
-                 * blocks which are reached by switch statements may have be more than one
-                 * predecessor but it will be guaranteed that all predecessors will be the same.
-                 */
-                for (AbstractBlockBase<?> predecessor : toBlock.getPredecessors()) {
-                    assert fromBlock == predecessor : "all critical edges must be broken";
-                }
-            }
-
-            moveResolver.setInsertPosition(allocator.getLIR().getLIRforBlock(toBlock), 1);
-        }
+                    RegisterAllocationConfig registerAllocationConfig, TraceBuilderResult<?> traceBuilderResult, TraceLinearScan allocator) {
+        new Resolver(allocator, traceBuilderResult).resolveDataFlow(allocator.sortedBlocks());
     }
 
-    /**
-     * Inserts necessary moves (spilling or reloading) at edges between blocks for intervals that
-     * have been split.
-     */
-    @SuppressWarnings("try")
-    private void resolveDataFlow(List<? extends AbstractBlockBase<?>> blocks) {
-        if (blocks.size() < 2) {
-            // no resolution necessary
-            return;
+    private static final class Resolver {
+        private final TraceLinearScan allocator;
+        private final TraceBuilderResult<?> traceBuilderResult;
+
+        private Resolver(TraceLinearScan allocator, TraceBuilderResult<?> traceBuilderResult) {
+            this.allocator = allocator;
+            this.traceBuilderResult = traceBuilderResult;
         }
-        try (Indent indent = Debug.logAndIndent("resolve data flow")) {
+
+        private void resolveFindInsertPos(AbstractBlockBase<?> fromBlock, AbstractBlockBase<?> toBlock, TraceLocalMoveResolver moveResolver) {
+            if (fromBlock.getSuccessorCount() <= 1) {
+                if (Debug.isLogEnabled()) {
+                    Debug.log("inserting moves at end of fromBlock B%d", fromBlock.getId());
+                }
 
-            TraceLocalMoveResolver moveResolver = allocator.createMoveResolver();
-            ListIterator<? extends AbstractBlockBase<?>> it = blocks.listIterator();
-            AbstractBlockBase<?> toBlock = null;
-            for (AbstractBlockBase<?> fromBlock = it.next(); it.hasNext(); fromBlock = toBlock) {
-                toBlock = it.next();
-                assert containedInTrace(fromBlock) : "Not in Trace: " + fromBlock;
-                assert containedInTrace(toBlock) : "Not in Trace: " + toBlock;
-                resolveCollectMappings(fromBlock, toBlock, moveResolver);
-            }
-            assert blocks.get(blocks.size() - 1).equals(toBlock);
-            if (toBlock.isLoopEnd()) {
-                assert toBlock.getSuccessorCount() == 1;
-                AbstractBlockBase<?> loopHeader = toBlock.getSuccessors().get(0);
-                if (containedInTrace(loopHeader)) {
-                    resolveCollectMappings(toBlock, loopHeader, moveResolver);
+                List<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(fromBlock);
+                LIRInstruction instr = instructions.get(instructions.size() - 1);
+                if (instr instanceof StandardOp.JumpOp) {
+                    // insert moves before branch
+                    moveResolver.setInsertPosition(instructions, instructions.size() - 1);
+                } else {
+                    moveResolver.setInsertPosition(instructions, instructions.size());
+                }
+
+            } else {
+                if (Debug.isLogEnabled()) {
+                    Debug.log("inserting moves at beginning of toBlock B%d", toBlock.getId());
                 }
-            }
 
-        }
-    }
+                if (DetailedAsserts.getValue()) {
+                    assert allocator.getLIR().getLIRforBlock(fromBlock).get(0) instanceof StandardOp.LabelOp : "block does not start with a label";
 
-    private void resolveCollectMappings(AbstractBlockBase<?> fromBlock, AbstractBlockBase<?> toBlock, TraceLocalMoveResolver moveResolver) {
-        try (Indent indent0 = Debug.logAndIndent("Edge %s -> %s", fromBlock, toBlock)) {
-            // collect all intervals that have been split between
-            // fromBlock and toBlock
-            SSIUtil.forEachValuePair(allocator.getLIR(), toBlock, fromBlock, new MyPhiValueVisitor(moveResolver, toBlock, fromBlock));
-            if (moveResolver.hasMappings()) {
-                resolveFindInsertPos(fromBlock, toBlock, moveResolver);
-                moveResolver.resolveAndAppendMoves();
+                    /*
+                     * Because the number of predecessor edges matches the number of successor
+                     * edges, blocks which are reached by switch statements may have more than
+                     * one predecessor but it will be guaranteed that all predecessors will be the
+                     * same.
+                     */
+                    for (AbstractBlockBase<?> predecessor : toBlock.getPredecessors()) {
+                        assert fromBlock == predecessor : "all critical edges must be broken";
+                    }
+                }
+
+                moveResolver.setInsertPosition(allocator.getLIR().getLIRforBlock(toBlock), 1);
             }
         }
-    }
 
-    private boolean containedInTrace(AbstractBlockBase<?> block) {
-        return currentTrace() == traceBuilderResult.getTraceForBlock(block);
-    }
-
-    private int currentTrace() {
-        return traceBuilderResult.getTraceForBlock(allocator.sortedBlocks().get(0));
-    }
+        /**
+         * Inserts necessary moves (spilling or reloading) at edges between blocks for intervals
+         * that have been split.
+         */
+        @SuppressWarnings("try")
+        private void resolveDataFlow(List<? extends AbstractBlockBase<?>> blocks) {
+            if (blocks.size() < 2) {
+                // no resolution necessary
+                return;
+            }
+            try (Indent indent = Debug.logAndIndent("resolve data flow")) {
 
-    private static final DebugMetric numSSIResolutionMoves = Debug.metric("SSI LSRA[numSSIResolutionMoves]");
-    private static final DebugMetric numStackToStackMoves = Debug.metric("SSI LSRA[numStackToStackMoves]");
+                TraceLocalMoveResolver moveResolver = allocator.createMoveResolver();
+                ListIterator<? extends AbstractBlockBase<?>> it = blocks.listIterator();
+                AbstractBlockBase<?> toBlock = null;
+                for (AbstractBlockBase<?> fromBlock = it.next(); it.hasNext(); fromBlock = toBlock) {
+                    toBlock = it.next();
+                    assert containedInTrace(fromBlock) : "Not in Trace: " + fromBlock;
+                    assert containedInTrace(toBlock) : "Not in Trace: " + toBlock;
+                    resolveCollectMappings(fromBlock, toBlock, moveResolver);
+                }
+                assert blocks.get(blocks.size() - 1).equals(toBlock);
+                if (toBlock.isLoopEnd()) {
+                    assert toBlock.getSuccessorCount() == 1;
+                    AbstractBlockBase<?> loopHeader = toBlock.getSuccessors().get(0);
+                    if (containedInTrace(loopHeader)) {
+                        resolveCollectMappings(toBlock, loopHeader, moveResolver);
+                    }
+                }
 
-    private class MyPhiValueVisitor implements PhiValueVisitor {
-        final TraceLocalMoveResolver moveResolver;
-        final int toId;
-        final int fromId;
+            }
+        }
 
-        public MyPhiValueVisitor(TraceLocalMoveResolver moveResolver, AbstractBlockBase<?> toBlock, AbstractBlockBase<?> fromBlock) {
-            this.moveResolver = moveResolver;
-            toId = allocator.getFirstLirInstructionId(toBlock);
-            fromId = allocator.getLastLirInstructionId(fromBlock);
-            assert fromId >= 0;
+        @SuppressWarnings("try")
+        private void resolveCollectMappings(AbstractBlockBase<?> fromBlock, AbstractBlockBase<?> toBlock, TraceLocalMoveResolver moveResolver) {
+            try (Indent indent0 = Debug.logAndIndent("Edge %s -> %s", fromBlock, toBlock)) {
+                // collect all intervals that have been split between
+                // fromBlock and toBlock
+                SSIUtil.forEachValuePair(allocator.getLIR(), toBlock, fromBlock, new MyPhiValueVisitor(moveResolver, toBlock, fromBlock));
+                if (moveResolver.hasMappings()) {
+                    resolveFindInsertPos(fromBlock, toBlock, moveResolver);
+                    moveResolver.resolveAndAppendMoves();
+                }
+            }
         }
 
-        public void visit(Value phiIn, Value phiOut) {
-            assert !isRegister(phiOut) : "Out is a register: " + phiOut;
-            assert !isRegister(phiIn) : "In is a register: " + phiIn;
-            if (Value.ILLEGAL.equals(phiIn)) {
-                // The value not needed in this branch.
-                return;
-            }
-            if (isVirtualStackSlot(phiIn) && isVirtualStackSlot(phiOut) && phiIn.equals(phiOut)) {
-                // no need to handle virtual stack slots
-                return;
+        private boolean containedInTrace(AbstractBlockBase<?> block) {
+            return currentTrace() == traceBuilderResult.getTraceForBlock(block);
+        }
+
+        private int currentTrace() {
+            return traceBuilderResult.getTraceForBlock(allocator.sortedBlocks().get(0));
+        }
+
+        private static final DebugMetric numSSIResolutionMoves = Debug.metric("SSI LSRA[numSSIResolutionMoves]");
+        private static final DebugMetric numStackToStackMoves = Debug.metric("SSI LSRA[numStackToStackMoves]");
+
+        private class MyPhiValueVisitor implements PhiValueVisitor {
+            final TraceLocalMoveResolver moveResolver;
+            final int toId;
+            final int fromId;
+
+            public MyPhiValueVisitor(TraceLocalMoveResolver moveResolver, AbstractBlockBase<?> toBlock, AbstractBlockBase<?> fromBlock) {
+                this.moveResolver = moveResolver;
+                toId = allocator.getFirstLirInstructionId(toBlock);
+                fromId = allocator.getLastLirInstructionId(fromBlock);
+                assert fromId >= 0;
             }
-            TraceInterval toInterval = allocator.splitChildAtOpId(allocator.intervalFor(phiIn), toId, LIRInstruction.OperandMode.DEF);
-            if (isConstantValue(phiOut)) {
-                numSSIResolutionMoves.increment();
-                moveResolver.addMapping(asConstant(phiOut), toInterval);
-            } else {
-                TraceInterval fromInterval = allocator.splitChildAtOpId(allocator.intervalFor(phiOut), fromId, LIRInstruction.OperandMode.DEF);
-                if (fromInterval != toInterval) {
+
+            public void visit(Value phiIn, Value phiOut) {
+                assert !isRegister(phiOut) : "Out is a register: " + phiOut;
+                assert !isRegister(phiIn) : "In is a register: " + phiIn;
+                if (Value.ILLEGAL.equals(phiIn)) {
+                    // The value is not needed in this branch.
+                    return;
+                }
+                if (isVirtualStackSlot(phiIn) && isVirtualStackSlot(phiOut) && phiIn.equals(phiOut)) {
+                    // no need to handle virtual stack slots
+                    return;
+                }
+                TraceInterval toInterval = allocator.splitChildAtOpId(allocator.intervalFor(phiIn), toId, LIRInstruction.OperandMode.DEF);
+                if (isConstantValue(phiOut)) {
                     numSSIResolutionMoves.increment();
-                    if (!(isStackSlotValue(toInterval.location()) && isStackSlotValue(fromInterval.location()))) {
-                        moveResolver.addMapping(fromInterval, toInterval);
-                    } else {
-                        numStackToStackMoves.increment();
-                        moveResolver.addMapping(fromInterval, toInterval);
+                    moveResolver.addMapping(asConstant(phiOut), toInterval);
+                } else {
+                    TraceInterval fromInterval = allocator.splitChildAtOpId(allocator.intervalFor(phiOut), fromId, LIRInstruction.OperandMode.DEF);
+                    if (fromInterval != toInterval) {
+                        numSSIResolutionMoves.increment();
+                        if (!(isStackSlotValue(toInterval.location()) && isStackSlotValue(fromInterval.location()))) {
+                            moveResolver.addMapping(fromInterval, toInterval);
+                        } else {
+                            numStackToStackMoves.increment();
+                            moveResolver.addMapping(fromInterval, toInterval);
+                        }
                     }
                 }
             }
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceRegisterAllocationPhase.java	Tue Sep 08 10:50:21 2015 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceRegisterAllocationPhase.java	Tue Sep 08 18:53:43 2015 +0200
@@ -22,25 +22,34 @@
  */
 package com.oracle.graal.lir.alloc.trace;
 
-import static com.oracle.graal.lir.alloc.trace.TraceRegisterAllocationPhase.Options.*;
-import static jdk.internal.jvmci.code.ValueUtil.*;
+import static jdk.internal.jvmci.code.ValueUtil.isStackSlotValue;
 
-import java.util.*;
+import java.util.List;
 
-import jdk.internal.jvmci.code.*;
-import jdk.internal.jvmci.options.*;
+import jdk.internal.jvmci.code.TargetDescription;
+import jdk.internal.jvmci.options.Option;
+import jdk.internal.jvmci.options.OptionType;
+import jdk.internal.jvmci.options.OptionValue;
 
-import com.oracle.graal.compiler.common.alloc.*;
+import com.oracle.graal.compiler.common.alloc.RegisterAllocationConfig;
+import com.oracle.graal.compiler.common.alloc.TraceBuilder;
 import com.oracle.graal.compiler.common.alloc.TraceBuilder.TraceBuilderResult;
-import com.oracle.graal.compiler.common.cfg.*;
-import com.oracle.graal.debug.*;
+import com.oracle.graal.compiler.common.cfg.AbstractBlockBase;
+import com.oracle.graal.debug.Debug;
 import com.oracle.graal.debug.Debug.Scope;
-import com.oracle.graal.lir.*;
-import com.oracle.graal.lir.StandardOp.*;
-import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.debug.DebugMetric;
+import com.oracle.graal.debug.Indent;
+import com.oracle.graal.lir.LIR;
+import com.oracle.graal.lir.LIRInstruction;
+import com.oracle.graal.lir.StandardOp.JumpOp;
+import com.oracle.graal.lir.StandardOp.LabelOp;
+import com.oracle.graal.lir.StandardOp.ValueMoveOp;
+import com.oracle.graal.lir.alloc.trace.TraceAllocationPhase.TraceAllocationContext;
+import com.oracle.graal.lir.gen.LIRGenerationResult;
 import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
-import com.oracle.graal.lir.phases.*;
-import com.oracle.graal.lir.ssi.*;
+import com.oracle.graal.lir.phases.AllocationPhase;
+import com.oracle.graal.lir.ssi.SSIUtil;
+import com.oracle.graal.lir.ssi.SSIVerifier;
 
 public final class TraceRegisterAllocationPhase extends AllocationPhase {
     public static class Options {
@@ -77,8 +86,8 @@
                     trivialTracesMetric.increment();
                 }
                 Debug.dump(TRACE_DUMP_LEVEL, trace, "Trace" + traceNumber + ": " + trace);
-                if (TraceRAtrivialBlockAllocator.getValue() && isTrivialTrace(lir, trace)) {
-                    new TraceTrivialAllocator(resultTraces).apply(target, lirGenRes, codeEmittingOrder, trace, new AllocationContext(spillMoveFactory, registerAllocationConfig), false);
+                if (Options.TraceRAtrivialBlockAllocator.getValue() && isTrivialTrace(lir, trace)) {
+                    new TraceTrivialAllocator(resultTraces).apply(target, lirGenRes, codeEmittingOrder, trace, new TraceAllocationContext(spillMoveFactory, registerAllocationConfig), false);
                 } else {
                     TraceLinearScan allocator = new TraceLinearScan(target, lirGenRes, spillMoveFactory, registerAllocationConfig, trace, resultTraces);
                     allocator.allocate(target, lirGenRes, codeEmittingOrder, linearScanOrder, spillMoveFactory, registerAllocationConfig);
@@ -92,7 +101,7 @@
         }
         Debug.dump(lir, "After trace allocation");
 
-        new TraceGlobalMoveResolutionPhase(resultTraces).apply(target, lirGenRes, codeEmittingOrder, linearScanOrder, new AllocationContext(spillMoveFactory, registerAllocationConfig));
+        new TraceGlobalMoveResolutionPhase(resultTraces).apply(target, lirGenRes, codeEmittingOrder, linearScanOrder, new TraceAllocationContext(spillMoveFactory, registerAllocationConfig));
 
         try (Scope s = Debug.scope("TraceRegisterAllocationFixup")) {
             if (replaceStackToStackMoves(lir, spillMoveFactory)) {
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceTrivialAllocator.java	Tue Sep 08 10:50:21 2015 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceTrivialAllocator.java	Tue Sep 08 18:53:43 2015 +0200
@@ -43,7 +43,6 @@
 import com.oracle.graal.lir.Variable;
 import com.oracle.graal.lir.gen.LIRGenerationResult;
 import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
-import com.oracle.graal.lir.phases.AllocationPhase;
 import com.oracle.graal.lir.ssi.SSIUtil;
 import com.oracle.graal.lir.util.VariableVirtualStackValueMap;
 
@@ -51,7 +50,7 @@
  * Allocates a trivial trace i.e. a trace consisting of a single block with no instructions other
  * than the {@link LabelOp} and the {@link JumpOp}.
  */
-final class TraceTrivialAllocator extends AllocationPhase {
+final class TraceTrivialAllocator extends TraceAllocationPhase {
 
     private final TraceBuilderResult<?> resultTraces;