changeset 22545:14a2a5d935d7

TraceRA: copy LSRA code over to the trace package.
author Josef Eisl <josef.eisl@jku.at>
date Mon, 31 Aug 2015 13:06:17 +0200
parents 4acd3f56553c
children 9cd80c19d8b7
files graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/Interval.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/IntervalWalker.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/LinearScan.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/LinearScanAssignLocationsPhase.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/LinearScanEliminateSpillMovePhase.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/LinearScanLifetimeAnalysisPhase.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/LinearScanOptimizeSpillPositionPhase.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/LinearScanRegisterAllocationPhase.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/LinearScanResolveDataFlowPhase.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/LinearScanWalker.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/MoveResolver.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/OptimizingLinearScanWalker.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/OutOfRegistersException.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/Range.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/RegisterVerifier.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/SSAMoveResolver.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/SSILinearScanEliminateSpillMovePhase.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceGlobalMoveResolver.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScan.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanAssignLocationsPhase.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanLifetimeAnalysisPhase.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanResolveDataFlowPhase.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceSimpleLifetimeAnalysisPhase.java
diffstat 23 files changed, 6722 insertions(+), 13 deletions(-) [+]
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/Interval.java	Mon Aug 31 13:06:17 2015 +0200
@@ -0,0 +1,1304 @@
+/*
+ * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.trace;
+
+import static com.oracle.graal.compiler.common.GraalOptions.*;
+import static com.oracle.graal.lir.LIRValueUtil.*;
+import static jdk.internal.jvmci.code.ValueUtil.*;
+
+import java.util.*;
+
+import jdk.internal.jvmci.code.*;
+import jdk.internal.jvmci.common.*;
+import jdk.internal.jvmci.meta.*;
+
+import com.oracle.graal.compiler.common.util.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.lir.*;
+
+/**
+ * Represents an interval in the {@linkplain LinearScan linear scan register allocator}.
+ */
+public final class Interval {
+
+    /**
+     * A pair of intervals.
+     */
+    static final class Pair {
+
+        public final Interval first;
+        public final Interval second;
+
+        public Pair(Interval first, Interval second) {
+            this.first = first;
+            this.second = second;
+        }
+    }
+
+    /**
+     * A set of interval lists, one per {@linkplain RegisterBinding binding} type.
+     */
+    static final class RegisterBindingLists {
+
+        /**
+         * List of intervals whose binding is currently {@link RegisterBinding#Fixed}.
+         */
+        public Interval fixed;
+
+        /**
+         * List of intervals whose binding is currently {@link RegisterBinding#Any}.
+         */
+        public Interval any;
+
+        /**
+         * List of intervals whose binding is currently {@link RegisterBinding#Stack}.
+         */
+        public Interval stack;
+
+        public RegisterBindingLists(Interval fixed, Interval any, Interval stack) {
+            this.fixed = fixed;
+            this.any = any;
+            this.stack = stack;
+        }
+
+        /**
+         * Gets the list for a specified binding.
+         *
+         * @param binding specifies the list to be returned
+         * @return the list of intervals whose binding is {@code binding}
+         */
+        public Interval get(RegisterBinding binding) {
+            switch (binding) {
+                case Any:
+                    return any;
+                case Fixed:
+                    return fixed;
+                case Stack:
+                    return stack;
+            }
+            throw JVMCIError.shouldNotReachHere();
+        }
+
+        /**
+         * Sets the list for a specified binding.
+         *
+         * @param binding specifies the list to be replaced
+         * @param list a list of intervals whose binding is {@code binding}
+         */
+        public void set(RegisterBinding binding, Interval list) {
+            assert list != null;
+            switch (binding) {
+                case Any:
+                    any = list;
+                    break;
+                case Fixed:
+                    fixed = list;
+                    break;
+                case Stack:
+                    stack = list;
+                    break;
+            }
+        }
+
+        /**
+         * Adds an interval to a list sorted by {@linkplain Interval#currentFrom() current from}
+         * positions.
+         *
+         * @param binding specifies the list to be updated
+         * @param interval the interval to add
+         */
+        public void addToListSortedByCurrentFromPositions(RegisterBinding binding, Interval interval) {
+            Interval list = get(binding);
+            Interval prev = null;
+            Interval cur = list;
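+            // walk to the first interval whose current-from position is not smaller;
+            // the EndMarker sentinel has a maximal from position, so the walk terminates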
+            while (cur.currentFrom() < interval.currentFrom()) {
+                prev = cur;
+                cur = cur.next;
+            }
+            Interval result = list;
+            if (prev == null) {
+                // add to head of list
+                result = interval;
+            } else {
+                // add before 'cur'
+                prev.next = interval;
+            }
+            interval.next = cur;
+            set(binding, result);
+        }
+
+        /**
+         * Adds an interval to a list sorted by {@linkplain Interval#from() start} positions and
+         * {@linkplain Interval#firstUsage(RegisterPriority) first usage} positions.
+         *
+         * @param binding specifies the list to be updated
+         * @param interval the interval to add
+         */
+        public void addToListSortedByStartAndUsePositions(RegisterBinding binding, Interval interval) {
+            Interval list = get(binding);
+            Interval prev = null;
+            Interval cur = list;
+            while (cur.from() < interval.from() || (cur.from() == interval.from() && cur.firstUsage(RegisterPriority.None) < interval.firstUsage(RegisterPriority.None))) {
+                prev = cur;
+                cur = cur.next;
+            }
+            if (prev == null) {
+                list = interval;
+            } else {
+                prev.next = interval;
+            }
+            interval.next = cur;
+            set(binding, list);
+        }
+
+        /**
+         * Removes an interval from a list.
+         *
+         * @param binding specifies the list to be updated
+         * @param i the interval to remove
+         */
+        public void remove(RegisterBinding binding, Interval i) {
+            Interval list = get(binding);
+            Interval prev = null;
+            Interval cur = list;
+            while (cur != i) {
+                assert cur != null && cur != Interval.EndMarker : "interval has not been found in list: " + i;
+                prev = cur;
+                cur = cur.next;
+            }
+            if (prev == null) {
+                set(binding, cur.next);
+            } else {
+                prev.next = cur.next;
+            }
+        }
+    }
+
+    /**
+     * Constants denoting the register usage priority for an interval. The constants are declared
+     * in increasing order of priority and are used to optimize spilling when multiple overlapping
+     * intervals compete for limited registers.
+     */
+    public enum RegisterPriority {
+        /**
+         * No special reason for an interval to be allocated a register.
+         */
+        None,
+
+        /**
+         * Priority level for intervals live at the end of a loop.
+         */
+        LiveAtLoopEnd,
+
+        /**
+         * Priority level for intervals that should be allocated to a register.
+         */
+        ShouldHaveRegister,
+
+        /**
+         * Priority level for intervals that must be allocated to a register.
+         */
+        MustHaveRegister;
+
+        public static final RegisterPriority[] VALUES = values();
+
+        /**
+         * Determines if this priority is higher than or equal to a given priority.
+         */
+        public boolean greaterEqual(RegisterPriority other) {
+            return ordinal() >= other.ordinal();
+        }
+
+        /**
+         * Determines if this priority is lower than a given priority.
+         */
+        public boolean lessThan(RegisterPriority other) {
+            return ordinal() < other.ordinal();
+        }
+    }
+
+    /**
+     * Constants denoting whether an interval is bound to a specific register. This models platform
+     * dependencies on register usage for certain instructions.
+     */
+    enum RegisterBinding {
+        /**
+         * Interval is bound to a specific register as required by the platform.
+         */
+        Fixed,
+
+        /**
+         * Interval has no specific register requirements.
+         */
+        Any,
+
+        /**
+         * Interval is bound to a stack slot.
+         */
+        Stack;
+
+        public static final RegisterBinding[] VALUES = values();
+    }
+
+    /**
+     * Constants denoting the linear-scan states an interval may be in with respect to the
+     * {@linkplain Interval#from() start} {@code position} of the interval being processed.
+     */
+    enum State {
+        /**
+         * An interval that starts after {@code position}.
+         */
+        Unhandled,
+
+        /**
+         * An interval that {@linkplain Interval#covers covers} {@code position} and has an assigned
+         * register.
+         */
+        Active,
+
+        /**
+         * An interval that starts before and ends after {@code position} but does not
+         * {@linkplain Interval#covers cover} it due to a lifetime hole.
+         */
+        Inactive,
+
+        /**
+         * An interval that ends before {@code position} or is spilled to memory.
+         */
+        Handled;
+    }
+
+    /**
+     * Constants used in optimization of spilling of an interval.
+     */
+    public enum SpillState {
+        /**
+         * Starting state of calculation: no definition found yet.
+         */
+        NoDefinitionFound,
+
+        /**
+         * One definition has already been found. Two consecutive definitions are treated as one
+         * (e.g. a consecutive move and add because of two-operand LIR form). The position of this
+         * definition is given by {@link Interval#spillDefinitionPos()}.
+         */
+        NoSpillStore,
+
+        /**
+         * One spill move has already been inserted.
+         */
+        OneSpillStore,
+
+        /**
+         * The interval is spilled multiple times or is spilled in a loop. Place the store somewhere
+         * on the dominator path between the definition and the usages.
+         */
+        SpillInDominator,
+
+        /**
+         * The interval should be stored immediately after its definition to prevent multiple
+         * redundant stores.
+         */
+        StoreAtDefinition,
+
+        /**
+         * The interval starts in memory (e.g. method parameter), so a store is never necessary.
+         */
+        StartInMemory,
+
+        /**
+         * The interval has more than one definition (e.g. resulting from phi moves), so stores to
+         * memory are not optimized.
+         */
+        NoOptimization
+    }
+
+    /**
+     * List of use positions. Each entry in the list records the use position and register priority
+     * associated with the use position. The entries in the list are in descending order of use
+     * position.
+     */
+    public static final class UsePosList {
+
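+        // Entries are stored as flat (usePos, registerPriority.ordinal()) pairs in a
+        // single IntList: entry i occupies indices 2 * i and 2 * i + 1, hence the
+        // "<< 1" and ">> 1" index arithmetic below.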
+        private IntList list;
+
+        /**
+         * Creates a use list.
+         *
+         * @param initialCapacity the initial capacity of the list in terms of entries
+         */
+        public UsePosList(int initialCapacity) {
+            list = new IntList(initialCapacity * 2);
+        }
+
+        private UsePosList(IntList list) {
+            this.list = list;
+        }
+
+        /**
+         * Splits this list around a given position. All entries in this list with a use position
+         * greater or equal than {@code splitPos} are removed from this list and added to the
+         * returned list.
+         *
+         * @param splitPos the position for the split
+         * @return a use position list containing all entries removed from this list that have a use
+         *         position greater or equal than {@code splitPos}
+         */
+        public UsePosList splitAt(int splitPos) {
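+            // Entries are sorted by descending use position, so scanning from the end
+            // skips the entries below splitPos; e.g. positions [10, 6, 2] split at 5
+            // keep [2] in this list and move [10, 6] to the returned child.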
+            int i = size() - 1;
+            int len = 0;
+            while (i >= 0 && usePos(i) < splitPos) {
+                --i;
+                len += 2;
+            }
+            int listSplitIndex = (i + 1) * 2;
+            IntList childList = list;
+            list = IntList.copy(this.list, listSplitIndex, len);
+            childList.setSize(listSplitIndex);
+            UsePosList child = new UsePosList(childList);
+            return child;
+        }
+
+        /**
+         * Gets the use position at a specified index in this list.
+         *
+         * @param index the index of the entry for which the use position is returned
+         * @return the use position of entry {@code index} in this list
+         */
+        public int usePos(int index) {
+            return list.get(index << 1);
+        }
+
+        /**
+         * Gets the register priority for the use position at a specified index in this list.
+         *
+         * @param index the index of the entry for which the register priority is returned
+         * @return the register priority of entry {@code index} in this list
+         */
+        public RegisterPriority registerPriority(int index) {
+            return RegisterPriority.VALUES[list.get((index << 1) + 1)];
+        }
+
+        public void add(int usePos, RegisterPriority registerPriority) {
+            assert list.size() == 0 || usePos(size() - 1) > usePos;
+            list.add(usePos);
+            list.add(registerPriority.ordinal());
+        }
+
+        public int size() {
+            return list.size() >> 1;
+        }
+
+        public void removeLowestUsePos() {
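+            // the list is sorted by descending use position, so the lowest position is
+            // the last (usePos, priority) pair; dropping two ints removes it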
+            list.setSize(list.size() - 2);
+        }
+
+        public void setRegisterPriority(int index, RegisterPriority registerPriority) {
+            list.set((index << 1) + 1, registerPriority.ordinal());
+        }
+
+        @Override
+        public String toString() {
+            StringBuilder buf = new StringBuilder("[");
+            for (int i = size() - 1; i >= 0; --i) {
+                if (buf.length() != 1) {
+                    buf.append(", ");
+                }
+                RegisterPriority prio = registerPriority(i);
+                buf.append(usePos(i)).append(" -> ").append(prio.ordinal()).append(':').append(prio);
+            }
+            return buf.append("]").toString();
+        }
+    }
+
+    /**
+     * The {@linkplain RegisterValue register} or {@linkplain Variable variable} for this interval
+     * prior to register allocation.
+     */
+    public final AllocatableValue operand;
+
+    /**
+     * The operand number for this interval's {@linkplain #operand operand}.
+     */
+    public final int operandNumber;
+
+    /**
+     * The {@linkplain RegisterValue register} or {@linkplain StackSlot spill slot} assigned to this
+     * interval. In case of a spilled interval which is re-materialized this is
+     * {@link Value#ILLEGAL}.
+     */
+    private AllocatableValue location;
+
+    /**
+     * The stack slot to which all splits of this interval are spilled if necessary.
+     */
+    private StackSlotValue spillSlot;
+
+    /**
+     * The kind of this interval.
+     */
+    private LIRKind kind;
+
+    /**
+     * The head of the list of ranges describing this interval. This list is sorted by
+     * {@linkplain LIRInstruction#id instruction ids}.
+     */
+    private Range first;
+
+    /**
+     * List of (use-positions, register-priorities) pairs, sorted by use-positions.
+     */
+    private UsePosList usePosList;
+
+    /**
+     * Iterator used to traverse the ranges of an interval.
+     */
+    private Range current;
+
+    /**
+     * Link to next interval in a sorted list of intervals that ends with {@link #EndMarker}.
+     */
+    Interval next;
+
+    /**
+     * The linear-scan state of this interval.
+     */
+    State state;
+
+    private int cachedTo; // cached value: to of last range (-1: not cached)
+
+    /**
+     * The interval from which this one is derived. If this is a {@linkplain #isSplitParent() split
+     * parent}, it points to itself.
+     */
+    private Interval splitParent;
+
+    /**
+     * List of all intervals that are split off from this interval. This is only used if this is a
+     * {@linkplain #isSplitParent() split parent}.
+     */
+    private List<Interval> splitChildren = Collections.emptyList();
+
+    /**
+     * The split child that was most recently active or inactive (always stored in the split parent).
+     */
+    private Interval currentSplitChild;
+
+    /**
+     * Specifies whether a move is inserted between {@link #currentSplitChild} and this interval
+     * when this interval becomes active for the first time.
+     */
+    private boolean insertMoveWhenActivated;
+
+    /**
+     * For spill move optimization.
+     */
+    private SpillState spillState;
+
+    /**
+     * Position where this interval is defined (if defined only once).
+     */
+    private int spillDefinitionPos;
+
+    /**
+     * This interval should be assigned the same location as the hint interval.
+     */
+    private Interval locationHint;
+
+    /**
+     * The value with which a spilled child interval can be re-materialized. Currently this must be
+     * a Constant.
+     */
+    private JavaConstant materializedValue;
+
+    /**
+     * The number of times {@link #addMaterializationValue(JavaConstant)} is called.
+     */
+    private int numMaterializationValuesAdded;
+
+    void assignLocation(AllocatableValue newLocation) {
+        if (isRegister(newLocation)) {
+            assert this.location == null : "cannot re-assign location for " + this;
+            if (newLocation.getLIRKind().equals(LIRKind.Illegal) && !kind.equals(LIRKind.Illegal)) {
+                this.location = asRegister(newLocation).asValue(kind);
+                return;
+            }
+        } else if (isIllegal(newLocation)) {
+            assert canMaterialize();
+        } else {
+            assert this.location == null || isRegister(this.location) || (isVirtualStackSlot(this.location) && isStackSlot(newLocation)) : "cannot re-assign location for " + this;
+            assert isStackSlotValue(newLocation);
+            assert !newLocation.getLIRKind().equals(LIRKind.Illegal);
+            assert newLocation.getLIRKind().equals(this.kind);
+        }
+        this.location = newLocation;
+    }
+
+    /**
+     * Gets the {@linkplain RegisterValue register} or {@linkplain StackSlot spill slot} assigned to
+     * this interval.
+     */
+    public AllocatableValue location() {
+        return location;
+    }
+
+    public LIRKind kind() {
+        assert !isRegister(operand) : "cannot access type for fixed interval";
+        return kind;
+    }
+
+    public void setKind(LIRKind kind) {
+        assert isRegister(operand) || this.kind().equals(LIRKind.Illegal) || this.kind().equals(kind) : "overwriting existing type";
+        this.kind = kind;
+    }
+
+    public Range first() {
+        return first;
+    }
+
+    public int from() {
+        return first.from;
+    }
+
+    int to() {
+        if (cachedTo == -1) {
+            cachedTo = calcTo();
+        }
+        assert cachedTo == calcTo() : "invalid cached value";
+        return cachedTo;
+    }
+
+    int numUsePositions() {
+        return usePosList.size();
+    }
+
+    public void setLocationHint(Interval interval) {
+        locationHint = interval;
+    }
+
+    public boolean isSplitParent() {
+        return splitParent == this;
+    }
+
+    boolean isSplitChild() {
+        return splitParent != this;
+    }
+
+    /**
+     * Gets the split parent for this interval.
+     */
+    public Interval splitParent() {
+        assert splitParent.isSplitParent() : "not a split parent: " + this;
+        return splitParent;
+    }
+
+    /**
+     * Gets the canonical spill slot for this interval.
+     */
+    public StackSlotValue spillSlot() {
+        return splitParent().spillSlot;
+    }
+
+    public void setSpillSlot(StackSlotValue slot) {
+        assert splitParent().spillSlot == null || (isVirtualStackSlot(splitParent().spillSlot) && isStackSlot(slot)) : "cannot overwrite existing spill slot";
+        splitParent().spillSlot = slot;
+    }
+
+    Interval currentSplitChild() {
+        return splitParent().currentSplitChild;
+    }
+
+    void makeCurrentSplitChild() {
+        splitParent().currentSplitChild = this;
+    }
+
+    boolean insertMoveWhenActivated() {
+        return insertMoveWhenActivated;
+    }
+
+    void setInsertMoveWhenActivated(boolean b) {
+        insertMoveWhenActivated = b;
+    }
+
+    // for spill optimization
+    public SpillState spillState() {
+        return splitParent().spillState;
+    }
+
+    public int spillDefinitionPos() {
+        return splitParent().spillDefinitionPos;
+    }
+
+    public void setSpillState(SpillState state) {
+        assert state.ordinal() >= spillState().ordinal() : "state cannot decrease";
+        splitParent().spillState = state;
+    }
+
+    public void setSpillDefinitionPos(int pos) {
+        assert spillState() == SpillState.SpillInDominator || spillState() == SpillState.NoDefinitionFound || spillDefinitionPos() == -1 : "cannot set the position twice";
+        splitParent().spillDefinitionPos = pos;
+    }
+
+    // returns true if this interval has a shadow copy on the stack that is always correct
+    public boolean alwaysInMemory() {
+        return (splitParent().spillState == SpillState.SpillInDominator || splitParent().spillState == SpillState.StoreAtDefinition || splitParent().spillState == SpillState.StartInMemory) &&
+                        !canMaterialize();
+    }
+
+    void removeFirstUsePos() {
+        usePosList.removeLowestUsePos();
+    }
+
+    // test intersection
+    boolean intersects(Interval i) {
+        return first.intersects(i.first);
+    }
+
+    int intersectsAt(Interval i) {
+        return first.intersectsAt(i.first);
+    }
+
+    // range iteration
+    void rewindRange() {
+        current = first;
+    }
+
+    void nextRange() {
+        assert this != EndMarker : "not allowed on sentinel";
+        current = current.next;
+    }
+
+    int currentFrom() {
+        return current.from;
+    }
+
+    int currentTo() {
+        return current.to;
+    }
+
+    boolean currentAtEnd() {
+        return current == Range.EndMarker;
+    }
+
+    boolean currentIntersects(Interval it) {
+        return current.intersects(it.current);
+    }
+
+    int currentIntersectsAt(Interval it) {
+        return current.intersectsAt(it.current);
+    }
+
+    /**
+     * Sentinel interval to denote the end of an interval list.
+     */
+    static final Interval EndMarker = new Interval(Value.ILLEGAL, -1);
+
+    Interval(AllocatableValue operand, int operandNumber) {
+        assert operand != null;
+        this.operand = operand;
+        this.operandNumber = operandNumber;
+        if (isRegister(operand)) {
+            location = operand;
+        } else {
+            assert isIllegal(operand) || isVariable(operand);
+        }
+        this.kind = LIRKind.Illegal;
+        this.first = Range.EndMarker;
+        this.usePosList = new UsePosList(4);
+        this.current = Range.EndMarker;
+        this.next = EndMarker;
+        this.cachedTo = -1;
+        this.spillState = SpillState.NoDefinitionFound;
+        this.spillDefinitionPos = -1;
+        splitParent = this;
+        currentSplitChild = this;
+    }
+
+    /**
+     * Sets the value which is used for re-materialization.
+     */
+    public void addMaterializationValue(JavaConstant value) {
+        if (numMaterializationValuesAdded == 0) {
+            materializedValue = value;
+        } else {
+            // Interval is defined in multiple places -> no materialization is possible.
+            materializedValue = null;
+        }
+        numMaterializationValuesAdded++;
+    }
+
+    /**
+     * Returns true if this interval can be re-materialized when spilled. This means that no
+     * spill-moves are needed. Instead of restore-moves the {@link #materializedValue} is restored.
+     */
+    public boolean canMaterialize() {
+        return getMaterializedValue() != null;
+    }
+
+    /**
+     * Returns a value which can be moved to a register instead of a restore-move from stack.
+     */
+    public JavaConstant getMaterializedValue() {
+        return splitParent().materializedValue;
+    }
+
+    int calcTo() {
+        assert first != Range.EndMarker : "interval has no range";
+
+        Range r = first;
+        while (r.next != Range.EndMarker) {
+            r = r.next;
+        }
+        return r.to;
+    }
+
+    // consistency check of split-children
+    boolean checkSplitChildren() {
+        if (!splitChildren.isEmpty()) {
+            assert isSplitParent() : "only split parents can have children";
+
+            for (int i = 0; i < splitChildren.size(); i++) {
+                Interval i1 = splitChildren.get(i);
+
+                assert i1.splitParent() == this : "not a split child of this interval";
+                assert i1.kind().equals(kind()) : "must be equal for all split children";
+                assert (i1.spillSlot() == null && spillSlot == null) || i1.spillSlot().equals(spillSlot()) : "must be equal for all split children";
+
+                for (int j = i + 1; j < splitChildren.size(); j++) {
+                    Interval i2 = splitChildren.get(j);
+
+                    assert !i1.operand.equals(i2.operand) : "same register number";
+
+                    if (i1.from() < i2.from()) {
+                        assert i1.to() <= i2.from() && i1.to() < i2.to() : "intervals overlapping";
+                    } else {
+                        assert i2.from() < i1.from() : "intervals start at same opId";
+                        assert i2.to() <= i1.from() && i2.to() < i1.to() : "intervals overlapping";
+                    }
+                }
+            }
+        }
+
+        return true;
+    }
+
+    public Interval locationHint(boolean searchSplitChild) {
+        if (!searchSplitChild) {
+            return locationHint;
+        }
+
+        if (locationHint != null) {
+            assert locationHint.isSplitParent() : "only split parents are valid hint registers";
+
+            if (locationHint.location != null && isRegister(locationHint.location)) {
+                return locationHint;
+            } else if (!locationHint.splitChildren.isEmpty()) {
+                // search the first split child that has a register assigned
+                int len = locationHint.splitChildren.size();
+                for (int i = 0; i < len; i++) {
+                    Interval interval = locationHint.splitChildren.get(i);
+                    if (interval.location != null && isRegister(interval.location)) {
+                        return interval;
+                    }
+                }
+            }
+        }
+
+        // no hint interval found that has a register assigned
+        return null;
+    }
+
+    Interval getSplitChildAtOpId(int opId, LIRInstruction.OperandMode mode, LinearScan allocator) {
+        assert isSplitParent() : "can only be called for split parents";
+        assert opId >= 0 : "invalid opId (method cannot be called for spill moves)";
+
+        if (splitChildren.isEmpty()) {
+            assert this.covers(opId, mode) : this + " does not cover " + opId;
+            return this;
+        } else {
+            Interval result = null;
+            int len = splitChildren.size();
+
+            // in DEF (output) mode, the end of the interval (opId == cur.to()) is not valid
+            int toOffset = (mode == LIRInstruction.OperandMode.DEF ? 0 : 1);
+
+            int i;
+            for (i = 0; i < len; i++) {
+                Interval cur = splitChildren.get(i);
+                if (cur.from() <= opId && opId < cur.to() + toOffset) {
+                    if (i > 0) {
+                        // exchange current split child to start of list (faster access for next
+                        // call)
+                        Util.atPutGrow(splitChildren, i, splitChildren.get(0), null);
+                        Util.atPutGrow(splitChildren, 0, cur, null);
+                    }
+
+                    // interval found
+                    result = cur;
+                    break;
+                }
+            }
+
+            assert checkSplitChild(result, opId, allocator, toOffset, mode);
+            return result;
+        }
+    }
+
+    private boolean checkSplitChild(Interval result, int opId, LinearScan allocator, int toOffset, LIRInstruction.OperandMode mode) {
+        if (result == null) {
+            // this is an error
+            StringBuilder msg = new StringBuilder(this.toString()).append(" has no child at ").append(opId);
+            if (!splitChildren.isEmpty()) {
+                Interval firstChild = splitChildren.get(0);
+                Interval lastChild = splitChildren.get(splitChildren.size() - 1);
+                msg.append(" (first = ").append(firstChild).append(", last = ").append(lastChild).append(")");
+            }
+            throw new JVMCIError("Linear Scan Error: %s", msg);
+        }
+
+        if (!splitChildren.isEmpty()) {
+            for (Interval interval : splitChildren) {
+                if (interval != result && interval.from() <= opId && opId < interval.to() + toOffset) {
+                    TTY.println(String.format("two valid result intervals found for opId %d: %d and %d", opId, result.operandNumber, interval.operandNumber));
+                    TTY.println(result.logString(allocator));
+                    TTY.println(interval.logString(allocator));
+                    throw new BailoutException("two valid result intervals found");
+                }
+            }
+        }
+        assert result.covers(opId, mode) : "opId not covered by interval";
+        return true;
+    }
+
+    // returns the interval that covers the given opId or null if there is none
+    Interval getIntervalCoveringOpId(int opId) {
+        assert opId >= 0 : "invalid opId";
+        assert opId < to() : "can only look into the past";
+
+        if (opId >= from()) {
+            return this;
+        }
+
+        Interval parent = splitParent();
+        Interval result = null;
+
+        assert !parent.splitChildren.isEmpty() : "no split children available";
+        int len = parent.splitChildren.size();
+
+        for (int i = len - 1; i >= 0; i--) {
+            Interval cur = parent.splitChildren.get(i);
+            if (cur.from() <= opId && opId < cur.to()) {
+                assert result == null : "covered by multiple split children " + result + " and " + cur;
+                result = cur;
+            }
+        }
+
+        return result;
+    }
+
+    // returns the last split child that ends before the given opId
+    Interval getSplitChildBeforeOpId(int opId) {
+        assert opId >= 0 : "invalid opId";
+
+        Interval parent = splitParent();
+        Interval result = null;
+
+        assert !parent.splitChildren.isEmpty() : "no split children available";
+        int len = parent.splitChildren.size();
+
+        for (int i = len - 1; i >= 0; i--) {
+            Interval cur = parent.splitChildren.get(i);
+            if (cur.to() <= opId && (result == null || result.to() < cur.to())) {
+                result = cur;
+            }
+        }
+
+        assert result != null : "no split child found";
+        return result;
+    }
+
+    // checks if opId is covered by any split child
+    boolean splitChildCovers(int opId, LIRInstruction.OperandMode mode) {
+        assert isSplitParent() : "can only be called for split parents";
+        assert opId >= 0 : "invalid opId (method can not be called for spill moves)";
+
+        if (splitChildren.isEmpty()) {
+            // simple case if interval was not split
+            return covers(opId, mode);
+
+        } else {
+            // extended case: check all split children
+            int len = splitChildren.size();
+            for (int i = 0; i < len; i++) {
+                Interval cur = splitChildren.get(i);
+                if (cur.covers(opId, mode)) {
+                    return true;
+                }
+            }
+            return false;
+        }
+    }
+
+    private RegisterPriority adaptPriority(RegisterPriority priority) {
+        /*
+         * In case of re-materialized values we require that use-operands are registers, because we
+         * don't have the value in a stack location. (Note that ShouldHaveRegister means that the
+         * operand can also be a StackSlot).
+         */
+        if (priority == RegisterPriority.ShouldHaveRegister && canMaterialize()) {
+            return RegisterPriority.MustHaveRegister;
+        }
+        return priority;
+    }
+
+    // Note: use positions are sorted descending -> first use has highest index
+    int firstUsage(RegisterPriority minRegisterPriority) {
+        assert isVariable(operand) : "cannot access use positions for fixed intervals";
+
+        for (int i = usePosList.size() - 1; i >= 0; --i) {
+            RegisterPriority registerPriority = adaptPriority(usePosList.registerPriority(i));
+            if (registerPriority.greaterEqual(minRegisterPriority)) {
+                return usePosList.usePos(i);
+            }
+        }
+        return Integer.MAX_VALUE;
+    }
+
+    int nextUsage(RegisterPriority minRegisterPriority, int from) {
+        assert isVariable(operand) : "cannot access use positions for fixed intervals";
+
+        for (int i = usePosList.size() - 1; i >= 0; --i) {
+            int usePos = usePosList.usePos(i);
+            if (usePos >= from && adaptPriority(usePosList.registerPriority(i)).greaterEqual(minRegisterPriority)) {
+                return usePos;
+            }
+        }
+        return Integer.MAX_VALUE;
+    }
+
+    int nextUsageExact(RegisterPriority exactRegisterPriority, int from) {
+        assert isVariable(operand) : "cannot access use positions for fixed intervals";
+
+        for (int i = usePosList.size() - 1; i >= 0; --i) {
+            int usePos = usePosList.usePos(i);
+            if (usePos >= from && adaptPriority(usePosList.registerPriority(i)) == exactRegisterPriority) {
+                return usePos;
+            }
+        }
+        return Integer.MAX_VALUE;
+    }
+
+    int previousUsage(RegisterPriority minRegisterPriority, int from) {
+        assert isVariable(operand) : "cannot access use positions for fixed intervals";
+
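+        // the list is sorted by descending position, so iterating from the end visits
+        // use positions in increasing order; remember the last qualifying one at or
+        // before 'from'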
+        int prev = -1;
+        for (int i = usePosList.size() - 1; i >= 0; --i) {
+            int usePos = usePosList.usePos(i);
+            if (usePos > from) {
+                return prev;
+            }
+            if (adaptPriority(usePosList.registerPriority(i)).greaterEqual(minRegisterPriority)) {
+                prev = usePos;
+            }
+        }
+        return prev;
+    }
+
+    public void addUsePos(int pos, RegisterPriority registerPriority) {
+        assert covers(pos, LIRInstruction.OperandMode.USE) : String.format("use position %d not covered by live range of interval %s", pos, this);
+
+        // do not add use positions for precolored intervals because they are never used
+        if (registerPriority != RegisterPriority.None && isVariable(operand)) {
+            if (DetailedAsserts.getValue()) {
+                for (int i = 0; i < usePosList.size(); i++) {
+                    assert pos <= usePosList.usePos(i) : "already added a use-position with lower position";
+                    if (i > 0) {
+                        assert usePosList.usePos(i) < usePosList.usePos(i - 1) : "not sorted descending";
+                    }
+                }
+            }
+
+            // Note: addUsePos is called in descending order of position, so the list
+            // stays sorted automatically by simply appending new use positions
+            int len = usePosList.size();
+            if (len == 0 || usePosList.usePos(len - 1) > pos) {
+                usePosList.add(pos, registerPriority);
+            } else if (usePosList.registerPriority(len - 1).lessThan(registerPriority)) {
+                assert usePosList.usePos(len - 1) == pos : "list not sorted correctly";
+                usePosList.setRegisterPriority(len - 1, registerPriority);
+            }
+        }
+    }
+
+    public void addRange(int from, int to) {
+        assert from < to : "invalid range";
+        assert first() == Range.EndMarker || to < first().next.from : "not inserting at begin of interval";
+        assert from <= first().to : "not inserting at begin of interval";
+
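+        // ranges arrive in decreasing start order (lifetime analysis walks the code
+        // backwards), so the new range either overlaps the current first range and is
+        // joined with it, or strictly precedes it and is prepended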
+        if (first.from <= to) {
+            assert first != Range.EndMarker;
+            // join intersecting ranges
+            first.from = Math.min(from, first().from);
+            first.to = Math.max(to, first().to);
+        } else {
+            // insert new range
+            first = new Range(from, to, first());
+        }
+    }
+
+    Interval newSplitChild(LinearScan allocator) {
+        // allocate new interval
+        Interval parent = splitParent();
+        Interval result = allocator.createDerivedInterval(parent);
+        result.setKind(kind());
+
+        result.splitParent = parent;
+        result.setLocationHint(parent);
+
+        // insert new interval in children-list of parent
+        if (parent.splitChildren.isEmpty()) {
+            assert isSplitParent() : "list must be initialized at first split";
+
+            // Create new non-shared list
+            parent.splitChildren = new ArrayList<>(4);
+            parent.splitChildren.add(this);
+        }
+        parent.splitChildren.add(result);
+
+        return result;
+    }
+
+    /**
+     * Splits this interval at a specified position and returns the remainder as a new <i>child</i>
+     * interval of this interval's {@linkplain #splitParent() parent} interval.
+     * <p>
+     * When an interval is split, a bi-directional link is established between the original
+     * <i>parent</i> interval and the <i>children</i> intervals that are split off this interval.
+     * When a split child is split again, the newly created interval is a direct child of the original
+     * parent. That is, there is no tree of split children stored, just a flat list. All split
+     * children are spilled to the same {@linkplain #spillSlot spill slot}.
+     *
+     * @param splitPos the position at which to split this interval
+     * @param allocator the register allocator context
+     * @return the child interval split off from this interval
+     */
+    Interval split(int splitPos, LinearScan allocator) {
+        assert isVariable(operand) : "cannot split fixed intervals";
+
+        // allocate new interval
+        Interval result = newSplitChild(allocator);
+
+        // split the ranges
+        Range prev = null;
+        Range cur = first;
+        while (cur != Range.EndMarker && cur.to <= splitPos) {
+            prev = cur;
+            cur = cur.next;
+        }
+        assert cur != Range.EndMarker : "split interval after end of last range";
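+        // two cases: splitPos lies inside 'cur' (the range itself is split), or it
+        // lies in a hole before 'cur' (whole ranges are moved to the new child)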
+
+        if (cur.from < splitPos) {
+            result.first = new Range(splitPos, cur.to, cur.next);
+            cur.to = splitPos;
+            cur.next = Range.EndMarker;
+
+        } else {
+            assert prev != null : "split before start of first range";
+            result.first = cur;
+            prev.next = Range.EndMarker;
+        }
+        result.current = result.first;
+        cachedTo = -1; // clear cached value
+
+        // split list of use positions
+        result.usePosList = usePosList.splitAt(splitPos);
+
+        if (DetailedAsserts.getValue()) {
+            for (int i = 0; i < usePosList.size(); i++) {
+                assert usePosList.usePos(i) < splitPos;
+            }
+            for (int i = 0; i < result.usePosList.size(); i++) {
+                assert result.usePosList.usePos(i) >= splitPos;
+            }
+        }
+        return result;
+    }
+
+    /**
+     * Splits this interval at a specified position and returns the head as a new interval (this
+     * interval is the tail).
+     *
+     * Currently, only the first range can be split, and the new interval must not contain any use
+     * positions (enforced by the assertions below).
+     */
+    Interval splitFromStart(int splitPos, LinearScan allocator) {
+        assert isVariable(operand) : "cannot split fixed intervals";
+        assert splitPos > from() && splitPos < to() : "can only split inside interval";
+        assert splitPos > first.from && splitPos <= first.to : "can only split inside first range";
+        assert firstUsage(RegisterPriority.None) > splitPos : "can not split when use positions are present";
+
+        // allocate new interval
+        Interval result = newSplitChild(allocator);
+
+        // the new interval has only one range (checked by the assertions above),
+        // so the splitting of the ranges is very simple
+        result.addRange(first.from, splitPos);
+
+        if (splitPos == first.to) {
+            assert first.next != Range.EndMarker : "must not be at end";
+            first = first.next;
+        } else {
+            first.from = splitPos;
+        }
+
+        return result;
+    }
+
+    // returns true if the opId is inside the interval
+    boolean covers(int opId, LIRInstruction.OperandMode mode) {
+        Range cur = first;
+
+        while (cur != Range.EndMarker && cur.to < opId) {
+            cur = cur.next;
+        }
+        if (cur != Range.EndMarker) {
+            assert cur.to != cur.next.from : "ranges not separated";
+
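+            // the range end position is exclusive in DEF mode and inclusive otherwise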
+            if (mode == LIRInstruction.OperandMode.DEF) {
+                return cur.from <= opId && opId < cur.to;
+            } else {
+                return cur.from <= opId && opId <= cur.to;
+            }
+        }
+        return false;
+    }
+
+    // returns true if the interval has any hole between holeFrom and holeTo
+    // (even if the hole is only of length 1)
+    boolean hasHoleBetween(int holeFrom, int holeTo) {
+        assert holeFrom < holeTo : "check";
+        assert from() <= holeFrom && holeTo <= to() : "index out of interval";
+
+        Range cur = first;
+        while (cur != Range.EndMarker) {
+            assert cur.to < cur.next.from : "no space between ranges";
+
+            if (holeFrom < cur.from) {
+                // hole-range starts before this range -> hole
+                return true;
+            } else if (holeTo <= cur.to) {
+                // hole-range completely inside this range -> no hole
+                return false;
+            } else if (holeFrom <= cur.to) {
+                // overlapping of hole-range with this range -> hole
+                return true;
+            }
+
+            cur = cur.next;
+        }
+
+        return false;
+    }
+
+    @Override
+    public String toString() {
+        String from = "?";
+        String to = "?";
+        if (first != null && first != Range.EndMarker) {
+            from = String.valueOf(from());
+            // to() may cache a computed value, modifying the current object, which is a bad idea
+            // for a printing function. Compute it directly instead.
+            to = String.valueOf(calcTo());
+        }
+        String locationString = this.location == null ? "" : "@" + this.location;
+        return operandNumber + ":" + operand + (isRegister(operand) ? "" : locationString) + "[" + from + "," + to + "]";
+    }
+
+    /**
+     * Gets the use position information for this interval.
+     */
+    public UsePosList usePosList() {
+        return usePosList;
+    }
+
+    /**
+     * Gets a single line string for logging the details of this interval to a log stream.
+     *
+     * @param allocator the register allocator context
+     */
+    public String logString(LinearScan allocator) {
+        StringBuilder buf = new StringBuilder(100);
+        buf.append(operandNumber).append(':').append(operand).append(' ');
+        if (!isRegister(operand)) {
+            if (location != null) {
+                buf.append("location{").append(location).append("} ");
+            }
+        }
+
+        buf.append("hints{").append(splitParent.operandNumber);
+        Interval hint = locationHint(false);
+        if (hint != null && hint.operandNumber != splitParent.operandNumber) {
+            buf.append(", ").append(hint.operandNumber);
+        }
+        buf.append("} ranges{");
+
+        // print ranges
+        Range cur = first;
+        while (cur != Range.EndMarker) {
+            if (cur != first) {
+                buf.append(", ");
+            }
+            buf.append(cur);
+            cur = cur.next;
+            assert cur != null : "range list not closed with range sentinel";
+        }
+        buf.append("} uses{");
+
+        // print use positions
+        int prev = -1;
+        for (int i = usePosList.size() - 1; i >= 0; --i) {
+            assert prev < usePosList.usePos(i) : "use positions not sorted";
+            if (i != usePosList.size() - 1) {
+                buf.append(", ");
+            }
+            buf.append(usePosList.usePos(i)).append(':').append(usePosList.registerPriority(i));
+            prev = usePosList.usePos(i);
+        }
+        buf.append("} spill-state{").append(spillState()).append("}");
+        if (canMaterialize()) {
+            buf.append(" (remat:").append(getMaterializedValue().toString()).append(")");
+        }
+        return buf.toString();
+    }
+
+    List<Interval> getSplitChildren() {
+        return Collections.unmodifiableList(splitChildren);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/IntervalWalker.java	Mon Aug 31 13:06:17 2015 +0200
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.trace;
+
+import com.oracle.graal.debug.*;
+
+import com.oracle.graal.lir.alloc.trace.Interval.RegisterBinding;
+import com.oracle.graal.lir.alloc.trace.Interval.RegisterBindingLists;
+import com.oracle.graal.lir.alloc.trace.Interval.State;
+
+/**
+ * Walks the intervals of the {@linkplain LinearScan linear scan register allocator}, moving them
+ * between the {@linkplain #unhandledLists unhandled}, {@linkplain #activeLists active} and
+ * {@linkplain #inactiveLists inactive} lists as the current position advances.
+ */
+public class IntervalWalker {
+
+    protected final LinearScan allocator;
+
+    /**
+     * Sorted list of intervals that are not yet live at the current position.
+     */
+    protected RegisterBindingLists unhandledLists;
+
+    /**
+     * Sorted list of intervals that are live at the current position.
+     */
+    protected RegisterBindingLists activeLists;
+
+    /**
+     * Sorted list of intervals in a lifetime hole at the current position.
+     */
+    protected RegisterBindingLists inactiveLists;
+
+    /**
+     * The current position (intercept point through the intervals).
+     */
+    protected int currentPosition;
+
+    /**
+     * The binding of the current interval being processed.
+     */
+    protected RegisterBinding currentBinding;
+
+    /**
+     * Processes the {@code currentInterval} interval in an attempt to allocate a physical register
+     * to it and thus allow it to be moved to a list of {@linkplain #activeLists active} intervals.
+     *
+     * @return {@code true} if a register was allocated to the {@code currentInterval} interval
+     */
+    protected boolean activateCurrent(@SuppressWarnings({"unused"}) Interval currentInterval) {
+        return true;
+    }
+
+    void walkBefore(int lirOpId) {
+        walkTo(lirOpId - 1);
+    }
+
+    void walk() {
+        walkTo(Integer.MAX_VALUE);
+    }
+
+    /**
+     * Creates a new interval walker.
+     *
+     * @param allocator the register allocator context
+     * @param unhandledFixed the list of unhandled {@linkplain RegisterBinding#Fixed fixed}
+     *            intervals
+     * @param unhandledAny the list of unhandled {@linkplain RegisterBinding#Any non-fixed}
+     *            intervals
+     */
+    IntervalWalker(LinearScan allocator, Interval unhandledFixed, Interval unhandledAny) {
+        this.allocator = allocator;
+
+        unhandledLists = new RegisterBindingLists(unhandledFixed, unhandledAny, Interval.EndMarker);
+        activeLists = new RegisterBindingLists(Interval.EndMarker, Interval.EndMarker, Interval.EndMarker);
+        inactiveLists = new RegisterBindingLists(Interval.EndMarker, Interval.EndMarker, Interval.EndMarker);
+        currentPosition = -1;
+    }
+
+    protected void removeFromList(Interval interval) {
+        if (interval.state == State.Active) {
+            activeLists.remove(RegisterBinding.Any, interval);
+        } else {
+            assert interval.state == State.Inactive : "invalid state";
+            inactiveLists.remove(RegisterBinding.Any, interval);
+        }
+    }
+
+    private void walkTo(State state, int from) {
+        assert state == State.Active || state == State.Inactive : "wrong state";
+        for (RegisterBinding binding : RegisterBinding.VALUES) {
+            walkTo(state, from, binding);
+        }
+    }
+
+    private void walkTo(State state, int from, RegisterBinding binding) {
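+        // advance each interval on the active/inactive list for this binding whose
+        // current range has been passed, re-sorting it into the matching list (or
+        // marking it handled); prevprev/prev track the unlink point in the list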
+        Interval prevprev = null;
+        Interval prev = (state == State.Active) ? activeLists.get(binding) : inactiveLists.get(binding);
+        Interval next = prev;
+        while (next.currentFrom() <= from) {
+            Interval cur = next;
+            next = cur.next;
+
+            boolean rangeHasChanged = false;
+            while (cur.currentTo() <= from) {
+                cur.nextRange();
+                rangeHasChanged = true;
+            }
+
+            // also handle move from inactive list to active list
+            rangeHasChanged = rangeHasChanged || (state == State.Inactive && cur.currentFrom() <= from);
+
+            if (rangeHasChanged) {
+                // remove cur from list
+                if (prevprev == null) {
+                    if (state == State.Active) {
+                        activeLists.set(binding, next);
+                    } else {
+                        inactiveLists.set(binding, next);
+                    }
+                } else {
+                    prevprev.next = next;
+                }
+                prev = next;
+                Interval.State newState;
+                if (cur.currentAtEnd()) {
+                    // move to handled state (not maintained as a list)
+                    newState = State.Handled;
+                    cur.state = newState;
+                } else {
+                    if (cur.currentFrom() <= from) {
+                        // sort into active list
+                        activeLists.addToListSortedByCurrentFromPositions(binding, cur);
+                        newState = State.Active;
+                    } else {
+                        // sort into inactive list
+                        inactiveLists.addToListSortedByCurrentFromPositions(binding, cur);
+                        newState = State.Inactive;
+                    }
+                    cur.state = newState;
+                    if (prev == cur) {
+                        assert state == newState;
+                        prevprev = prev;
+                        prev = cur.next;
+                    }
+                }
+                intervalMoved(cur, state, newState);
+            } else {
+                prevprev = prev;
+                prev = cur.next;
+            }
+        }
+    }
+
+    /**
+     * Get the next interval from {@linkplain #unhandledLists} which starts before or at
+     * {@code toOpId}. The returned interval is removed and {@link #currentBinding} is set.
+     *
+     * @postcondition all intervals in {@linkplain #unhandledLists} start after {@code toOpId}.
+     *
+     * @return The next interval or null if there is no {@linkplain #unhandledLists unhandled}
+     *         interval at position {@code toOpId}.
+     */
+    private Interval nextInterval(int toOpId) {
+        RegisterBinding binding;
+        Interval any = unhandledLists.any;
+        Interval fixed = unhandledLists.fixed;
+
+        if (any != Interval.EndMarker) {
+            // intervals may start at same position -> prefer fixed interval
+            binding = fixed != Interval.EndMarker && fixed.from() <= any.from() ? RegisterBinding.Fixed : RegisterBinding.Any;
+
+            assert binding == RegisterBinding.Fixed && fixed.from() <= any.from() || binding == RegisterBinding.Any && any.from() <= fixed.from() : "wrong interval!!!";
+            assert any == Interval.EndMarker || fixed == Interval.EndMarker || any.from() != fixed.from() || binding == RegisterBinding.Fixed : "if fixed and any-Interval start at same position, fixed must be processed first";
+
+        } else if (fixed != Interval.EndMarker) {
+            binding = RegisterBinding.Fixed;
+        } else {
+            return null;
+        }
+        Interval currentInterval = unhandledLists.get(binding);
+
+        if (toOpId < currentInterval.from()) {
+            return null;
+        }
+
+        currentBinding = binding;
+        unhandledLists.set(binding, currentInterval.next);
+        currentInterval.next = Interval.EndMarker;
+        currentInterval.rewindRange();
+        return currentInterval;
+    }
+
+    /**
+     * Walk up to {@code toOpId}.
+     *
+     * @postcondition {@link #currentPosition} is set to {@code toOpId}, {@link #activeLists} and
+     *                {@link #inactiveLists} are populated and {@link Interval#state}s are up to
+     *                date.
+     */
+    protected void walkTo(int toOpId) {
+        assert currentPosition <= toOpId : "cannot walk backwards";
+        for (Interval currentInterval = nextInterval(toOpId); currentInterval != null; currentInterval = nextInterval(toOpId)) {
+            int opId = currentInterval.from();
+
+            // set currentPosition prior to call of walkTo
+            currentPosition = opId;
+
+            // update unhandled stack intervals
+            updateUnhandledStackIntervals(opId);
+
+            // call walkTo even if currentPosition == id
+            walkTo(State.Active, opId);
+            walkTo(State.Inactive, opId);
+
+            try (Indent indent = Debug.logAndIndent("walk to op %d", opId)) {
+                currentInterval.state = State.Active;
+                if (activateCurrent(currentInterval)) {
+                    activeLists.addToListSortedByCurrentFromPositions(currentBinding, currentInterval);
+                    intervalMoved(currentInterval, State.Unhandled, State.Active);
+                }
+            }
+        }
+        // set currentPosition prior to call of walkTo
+        currentPosition = toOpId;
+
+        if (currentPosition <= allocator.maxOpId()) {
+            // update unhandled stack intervals
+            updateUnhandledStackIntervals(toOpId);
+
+            // call walkTo if still in range
+            walkTo(State.Active, toOpId);
+            walkTo(State.Inactive, toOpId);
+        }
+    }
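+
+    /*
+     * Illustrative summary of the transitions performed by the walk (following the classic
+     * linear scan scheme): an interval taken from the unhandled lists becomes Active; an
+     * Active interval whose current range has ended moves to Inactive (it is in a lifetime
+     * hole) or to Handled (its last range has ended); an Inactive interval becomes Active
+     * again once its next range is reached.
+     */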
+
+    private void intervalMoved(Interval interval, State from, State to) {
+        // intervalMoved() is called whenever an interval moves from one interval list to another.
+        // In the implementation of this method it is prohibited to move the interval to any list.
+        if (Debug.isLogEnabled()) {
+            Debug.log("interval moved from %s to %s: %s", from, to, interval.logString(allocator));
+        }
+    }
+
+    /**
+     * Move {@linkplain #unhandledLists unhandled} stack intervals to
+     * {@linkplain IntervalWalker#activeLists active}.
+     *
+     * Note that for {@linkplain RegisterBinding#Fixed fixed} and {@linkplain RegisterBinding#Any
+     * any} intervals this is done in {@link #nextInterval(int)}.
+     */
+    private void updateUnhandledStackIntervals(int opId) {
+        Interval currentInterval = unhandledLists.get(RegisterBinding.Stack);
+        while (currentInterval != Interval.EndMarker && currentInterval.from() <= opId) {
+            Interval next = currentInterval.next;
+            if (currentInterval.to() > opId) {
+                currentInterval.state = State.Active;
+                activeLists.addToListSortedByCurrentFromPositions(RegisterBinding.Stack, currentInterval);
+                intervalMoved(currentInterval, State.Unhandled, State.Active);
+            } else {
+                currentInterval.state = State.Handled;
+                intervalMoved(currentInterval, State.Unhandled, State.Handled);
+            }
+            currentInterval = next;
+        }
+        unhandledLists.set(RegisterBinding.Stack, currentInterval);
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/LinearScan.java	Mon Aug 31 13:06:17 2015 +0200
@@ -0,0 +1,891 @@
+/*
+ * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.trace;
+
+import static com.oracle.graal.compiler.common.GraalOptions.*;
+import static com.oracle.graal.lir.LIRValueUtil.*;
+import static com.oracle.graal.lir.phases.LIRPhase.Options.*;
+import static jdk.internal.jvmci.code.CodeUtil.*;
+import static jdk.internal.jvmci.code.ValueUtil.*;
+
+import java.util.*;
+
+import jdk.internal.jvmci.code.*;
+import jdk.internal.jvmci.common.*;
+import jdk.internal.jvmci.meta.*;
+import jdk.internal.jvmci.options.*;
+
+import com.oracle.graal.compiler.common.alloc.*;
+import com.oracle.graal.compiler.common.cfg.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.debug.Debug.Scope;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.LIRInstruction.OperandFlag;
+import com.oracle.graal.lir.LIRInstruction.OperandMode;
+import com.oracle.graal.lir.alloc.trace.Interval.RegisterBinding;
+import com.oracle.graal.lir.framemap.*;
+import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
+import com.oracle.graal.lir.phases.AllocationPhase.AllocationContext;
+
+/**
+ * An implementation of the linear scan register allocator algorithm described in <a
+ * href="http://doi.acm.org/10.1145/1064979.1064998"
+ * >"Optimized Interval Splitting in a Linear Scan Register Allocator"</a> by Christian Wimmer and
+ * Hanspeter Moessenboeck.
+ */
+public class LinearScan {
+
+    public static class Options {
+        // @formatter:off
+        @Option(help = "Enable spill position optimization", type = OptionType.Debug)
+        public static final OptionValue<Boolean> LIROptLSRAOptimizeSpillPosition = new NestedBooleanOptionValue(LIROptimization, true);
+        // @formatter:on
+    }
+
+    public static class BlockData {
+
+        /**
+         * Bit map specifying which operands are live upon entry to this block. These are values
+         * used in this block or any of its successors that are not defined in this
+         * block. The bit index of an operand is its {@linkplain LinearScan#operandNumber(Value)
+         * operand number}.
+         */
+        public BitSet liveIn;
+
+        /**
+         * Bit map specifying which operands are live upon exit from this block. These are values
+         * used in a successor block that are either defined in this block or were live upon entry
+         * to this block. The bit index of an operand is its
+         * {@linkplain LinearScan#operandNumber(Value) operand number}.
+         */
+        public BitSet liveOut;
+
+        /**
+         * Bit map specifying which operands are used (before being defined) in this block. That is,
+         * these are the values that are live upon entry to the block. The bit index of an operand
+         * is its {@linkplain LinearScan#operandNumber(Value) operand number}.
+         */
+        public BitSet liveGen;
+
+        /**
+         * Bit map specifying which operands are defined/overwritten in this block. The bit index of
+         * an operand is its {@linkplain LinearScan#operandNumber(Value) operand number}.
+         */
+        public BitSet liveKill;
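+
+        /*
+         * The four sets are related by the standard backward dataflow equations (solved
+         * iteratively during lifetime analysis):
+         *
+         *   liveOut(B) = union of liveIn(S) over all successors S of B
+         *   liveIn(B)  = liveGen(B) | (liveOut(B) & ~liveKill(B))
+         */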
+    }
+
+    public static final int DOMINATOR_SPILL_MOVE_ID = -2;
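+    /**
+     * Shift amount used to size the extra capacity reserved in {@link #intervals} for
+     * {@linkplain #createDerivedInterval(Interval) derived intervals}: the array is allocated
+     * and grown to {@code length + (length >> SPLIT_INTERVALS_CAPACITY_RIGHT_SHIFT)}, i.e. by 50%.
+     */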
+    private static final int SPLIT_INTERVALS_CAPACITY_RIGHT_SHIFT = 1;
+
+    private final LIR ir;
+    private final FrameMapBuilder frameMapBuilder;
+    private final RegisterAttributes[] registerAttributes;
+    private final Register[] registers;
+    private final RegisterAllocationConfig regAllocConfig;
+    private final SpillMoveFactory moveFactory;
+
+    private final BlockMap<BlockData> blockData;
+
+    /**
+     * List of blocks in linear-scan order. This is only correct as long as the CFG does not change.
+     */
+    private final List<? extends AbstractBlockBase<?>> sortedBlocks;
+
+    /** @see #intervals() */
+    private Interval[] intervals;
+
+    /**
+     * The number of valid entries in {@link #intervals}.
+     */
+    private int intervalsSize;
+
+    /**
+     * The index of the first entry in {@link #intervals} for a
+     * {@linkplain #createDerivedInterval(Interval) derived interval}.
+     */
+    private int firstDerivedIntervalIndex = -1;
+
+    /**
+     * Intervals sorted by {@link Interval#from()}.
+     */
+    private Interval[] sortedIntervals;
+
+    /**
+     * Map from an instruction {@linkplain LIRInstruction#id id} to the instruction. Entries should
+     * be retrieved with {@link #instructionForId(int)} as the id is not simply an index into this
+     * array.
+     */
+    private LIRInstruction[] opIdToInstructionMap;
+
+    /**
+     * Map from an instruction {@linkplain LIRInstruction#id id} to the
+     * {@linkplain AbstractBlockBase block} containing the instruction. Entries should be retrieved
+     * with {@link #blockForId(int)} as the id is not simply an index into this array.
+     */
+    private AbstractBlockBase<?>[] opIdToBlockMap;
+
+    /**
+     * The {@linkplain #operandNumber(Value) number} of the first variable operand allocated.
+     */
+    private final int firstVariableNumber;
+
+    protected LinearScan(TargetDescription target, LIRGenerationResult res, SpillMoveFactory spillMoveFactory, RegisterAllocationConfig regAllocConfig,
+                    List<? extends AbstractBlockBase<?>> sortedBlocks) {
+        this.ir = res.getLIR();
+        this.moveFactory = spillMoveFactory;
+        this.frameMapBuilder = res.getFrameMapBuilder();
+        this.sortedBlocks = sortedBlocks;
+        this.registerAttributes = regAllocConfig.getRegisterConfig().getAttributesMap();
+        this.regAllocConfig = regAllocConfig;
+
+        this.registers = target.arch.getRegisters();
+        this.firstVariableNumber = getRegisters().length;
+        this.blockData = new BlockMap<>(ir.getControlFlowGraph());
+    }
+
+    public int getFirstLirInstructionId(AbstractBlockBase<?> block) {
+        int result = ir.getLIRforBlock(block).get(0).id();
+        assert result >= 0;
+        return result;
+    }
+
+    public int getLastLirInstructionId(AbstractBlockBase<?> block) {
+        List<LIRInstruction> instructions = ir.getLIRforBlock(block);
+        int result = instructions.get(instructions.size() - 1).id();
+        assert result >= 0;
+        return result;
+    }
+
+    public SpillMoveFactory getSpillMoveFactory() {
+        return moveFactory;
+    }
+
+    protected MoveResolver createMoveResolver() {
+        MoveResolver moveResolver = new MoveResolver(this);
+        assert moveResolver.checkEmpty();
+        return moveResolver;
+    }
+
+    public static boolean isVariableOrRegister(Value value) {
+        return isVariable(value) || isRegister(value);
+    }
+
+    /**
+     * Converts an operand (variable or register) to an index in a flat address space covering all
+     * the {@linkplain Variable variables} and {@linkplain RegisterValue registers} being processed
+     * by this allocator.
+     */
+    int operandNumber(Value operand) {
+        if (isRegister(operand)) {
+            int number = asRegister(operand).number;
+            assert number < firstVariableNumber;
+            return number;
+        }
+        assert isVariable(operand) : operand;
+        return firstVariableNumber + ((Variable) operand).index;
+    }
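+
+    /*
+     * For example (illustrative, assuming an architecture with 32 registers and thus
+     * firstVariableNumber == 32): register number 5 maps to operand number 5, and the
+     * variable with index 3 maps to operand number 32 + 3 == 35.
+     */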
+
+    /**
+     * Gets the number of operands. This value increases by 1 for each new variable.
+     */
+    int operandSize() {
+        return firstVariableNumber + ir.numVariables();
+    }
+
+    /**
+     * Gets the highest operand number for a register operand. This value will never change.
+     */
+    int maxRegisterNumber() {
+        return firstVariableNumber - 1;
+    }
+
+    public BlockData getBlockData(AbstractBlockBase<?> block) {
+        return blockData.get(block);
+    }
+
+    void initBlockData(AbstractBlockBase<?> block) {
+        blockData.put(block, new BlockData());
+    }
+
+    static final IntervalPredicate IS_PRECOLORED_INTERVAL = new IntervalPredicate() {
+
+        @Override
+        public boolean apply(Interval i) {
+            return isRegister(i.operand);
+        }
+    };
+
+    static final IntervalPredicate IS_VARIABLE_INTERVAL = new IntervalPredicate() {
+
+        @Override
+        public boolean apply(Interval i) {
+            return isVariable(i.operand);
+        }
+    };
+
+    static final IntervalPredicate IS_STACK_INTERVAL = new IntervalPredicate() {
+
+        @Override
+        public boolean apply(Interval i) {
+            return !isRegister(i.operand);
+        }
+    };
+
+    /**
+     * Gets an object describing the attributes of a given register according to this register
+     * configuration.
+     */
+    public RegisterAttributes attributes(Register reg) {
+        return registerAttributes[reg.number];
+    }
+
+    void assignSpillSlot(Interval interval) {
+        /*
+         * Assign the canonical spill slot of the parent (if a part of the interval is already
+         * spilled) or allocate a new spill slot.
+         */
+        if (interval.canMaterialize()) {
+            interval.assignLocation(Value.ILLEGAL);
+        } else if (interval.spillSlot() != null) {
+            interval.assignLocation(interval.spillSlot());
+        } else {
+            VirtualStackSlot slot = frameMapBuilder.allocateSpillSlot(interval.kind());
+            interval.setSpillSlot(slot);
+            interval.assignLocation(slot);
+        }
+    }
+
+    /**
+     * Map from {@linkplain #operandNumber(Value) operand numbers} to intervals.
+     */
+    public Interval[] intervals() {
+        return intervals;
+    }
+
+    void initIntervals() {
+        intervalsSize = operandSize();
+        intervals = new Interval[intervalsSize + (intervalsSize >> SPLIT_INTERVALS_CAPACITY_RIGHT_SHIFT)];
+    }
+
+    /**
+     * Creates a new interval.
+     *
+     * @param operand the operand for the interval
+     * @return the created interval
+     */
+    Interval createInterval(AllocatableValue operand) {
+        assert isLegal(operand);
+        int operandNumber = operandNumber(operand);
+        Interval interval = new Interval(operand, operandNumber);
+        assert operandNumber < intervalsSize;
+        assert intervals[operandNumber] == null;
+        intervals[operandNumber] = interval;
+        return interval;
+    }
+
+    /**
+     * Creates an interval as a result of splitting or spilling another interval.
+     *
+     * @param source an interval being split or spilled
+     * @return a new interval derived from {@code source}
+     */
+    Interval createDerivedInterval(Interval source) {
+        if (firstDerivedIntervalIndex == -1) {
+            firstDerivedIntervalIndex = intervalsSize;
+        }
+        if (intervalsSize == intervals.length) {
+            intervals = Arrays.copyOf(intervals, intervals.length + (intervals.length >> SPLIT_INTERVALS_CAPACITY_RIGHT_SHIFT));
+        }
+        intervalsSize++;
+        Variable variable = new Variable(source.kind(), ir.nextVariable());
+
+        Interval interval = createInterval(variable);
+        assert intervals[intervalsSize - 1] == interval;
+        return interval;
+    }
+
+    // access to block list (sorted in linear scan order)
+    public int blockCount() {
+        return sortedBlocks.size();
+    }
+
+    public AbstractBlockBase<?> blockAt(int index) {
+        return sortedBlocks.get(index);
+    }
+
+    /**
+     * Gets the size of the {@link BlockData#liveIn} and {@link BlockData#liveOut} sets for a basic
+     * block. These sets do not include any operands allocated as a result of creating
+     * {@linkplain #createDerivedInterval(Interval) derived intervals}.
+     */
+    public int liveSetSize() {
+        return firstDerivedIntervalIndex == -1 ? operandSize() : firstDerivedIntervalIndex;
+    }
+
+    int numLoops() {
+        return ir.getControlFlowGraph().getLoops().size();
+    }
+
+    Interval intervalFor(int operandNumber) {
+        return intervals[operandNumber];
+    }
+
+    public Interval intervalFor(Value operand) {
+        int operandNumber = operandNumber(operand);
+        assert operandNumber < intervalsSize;
+        return intervals[operandNumber];
+    }
+
+    public Interval getOrCreateInterval(AllocatableValue operand) {
+        Interval ret = intervalFor(operand);
+        if (ret == null) {
+            return createInterval(operand);
+        } else {
+            return ret;
+        }
+    }
+
+    void initOpIdMaps(int numInstructions) {
+        opIdToInstructionMap = new LIRInstruction[numInstructions];
+        opIdToBlockMap = new AbstractBlockBase<?>[numInstructions];
+    }
+
+    void putOpIdMaps(int index, LIRInstruction op, AbstractBlockBase<?> block) {
+        opIdToInstructionMap[index] = op;
+        opIdToBlockMap[index] = block;
+    }
+
+    /**
+     * Gets the highest instruction id allocated by this object.
+     */
+    int maxOpId() {
+        assert opIdToInstructionMap.length > 0 : "no operations";
+        return (opIdToInstructionMap.length - 1) << 1;
+    }
+
+    /**
+     * Converts an {@linkplain LIRInstruction#id instruction id} to an instruction index. All LIR
+     * instructions in a method have an index one greater than their linear-scan order predecessor,
+     * with the first instruction having an index of 0.
+     */
+    private static int opIdToIndex(int opId) {
+        return opId >> 1;
+    }
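+
+    /*
+     * Instruction ids are even, i.e. id == index * 2, so for example ids 0, 2 and 4 map to
+     * indices 0, 1 and 2. Conceptually, the odd positions in between are where spill and
+     * resolution moves can be placed without renumbering the instructions.
+     */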
+
+    /**
+     * Retrieves the {@link LIRInstruction} based on its {@linkplain LIRInstruction#id id}.
+     *
+     * @param opId an instruction {@linkplain LIRInstruction#id id}
+     * @return the instruction whose {@linkplain LIRInstruction#id id} equals {@code opId}
+     */
+    public LIRInstruction instructionForId(int opId) {
+        assert isEven(opId) : "opId not even";
+        LIRInstruction instr = opIdToInstructionMap[opIdToIndex(opId)];
+        assert instr.id() == opId;
+        return instr;
+    }
+
+    /**
+     * Gets the block containing a given instruction.
+     *
+     * @param opId an instruction {@linkplain LIRInstruction#id id}
+     * @return the block containing the instruction denoted by {@code opId}
+     */
+    public AbstractBlockBase<?> blockForId(int opId) {
+        assert opIdToBlockMap.length > 0 && opId >= 0 && opId <= maxOpId() + 1 : "opId out of range";
+        return opIdToBlockMap[opIdToIndex(opId)];
+    }
+
+    boolean isBlockBegin(int opId) {
+        return opId == 0 || blockForId(opId) != blockForId(opId - 1);
+    }
+
+    boolean coversBlockBegin(int opId1, int opId2) {
+        return blockForId(opId1) != blockForId(opId2);
+    }
+
+    /**
+     * Determines if an {@link LIRInstruction} destroys all caller-saved registers.
+     *
+     * @param opId an instruction {@linkplain LIRInstruction#id id}
+     * @return {@code true} if the instruction denoted by {@code opId} destroys all caller-saved
+     *         registers.
+     */
+    boolean hasCall(int opId) {
+        assert isEven(opId) : "opId not even";
+        return instructionForId(opId).destroysCallerSavedRegisters();
+    }
+
+    abstract static class IntervalPredicate {
+
+        abstract boolean apply(Interval i);
+    }
+
+    public boolean isProcessed(Value operand) {
+        return !isRegister(operand) || attributes(asRegister(operand)).isAllocatable();
+    }
+
+    // Phase 5: actual register allocation
+
+    private static boolean isSorted(Interval[] intervals) {
+        int from = -1;
+        for (Interval interval : intervals) {
+            assert interval != null;
+            assert from <= interval.from();
+            from = interval.from();
+        }
+        return true;
+    }
+
+    static Interval addToList(Interval first, Interval prev, Interval interval) {
+        Interval newFirst = first;
+        if (prev != null) {
+            prev.next = interval;
+        } else {
+            newFirst = interval;
+        }
+        return newFirst;
+    }
+
+    Interval.Pair createUnhandledLists(IntervalPredicate isList1, IntervalPredicate isList2) {
+        assert isSorted(sortedIntervals) : "interval list is not sorted";
+
+        Interval list1 = Interval.EndMarker;
+        Interval list2 = Interval.EndMarker;
+
+        Interval list1Prev = null;
+        Interval list2Prev = null;
+        Interval v;
+
+        int n = sortedIntervals.length;
+        for (int i = 0; i < n; i++) {
+            v = sortedIntervals[i];
+            if (v == null) {
+                continue;
+            }
+
+            if (isList1.apply(v)) {
+                list1 = addToList(list1, list1Prev, v);
+                list1Prev = v;
+            } else if (isList2 == null || isList2.apply(v)) {
+                list2 = addToList(list2, list2Prev, v);
+                list2Prev = v;
+            }
+        }
+
+        if (list1Prev != null) {
+            list1Prev.next = Interval.EndMarker;
+        }
+        if (list2Prev != null) {
+            list2Prev.next = Interval.EndMarker;
+        }
+
+        assert list1Prev == null || list1Prev.next == Interval.EndMarker : "linear list ends not with sentinel";
+        assert list2Prev == null || list2Prev.next == Interval.EndMarker : "linear list ends not with sentinel";
+
+        return new Interval.Pair(list1, list2);
+    }
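+
+    /*
+     * Typical usage (illustrative): partition the sorted intervals into fixed (precolored)
+     * and variable intervals before walking them:
+     *
+     *   Interval.Pair lists = createUnhandledLists(IS_PRECOLORED_INTERVAL, IS_VARIABLE_INTERVAL);
+     *   Interval fixed = lists.first;
+     *   Interval any = lists.second;
+     */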
+
+    protected void sortIntervalsBeforeAllocation() {
+        int sortedLen = 0;
+        for (Interval interval : intervals) {
+            if (interval != null) {
+                sortedLen++;
+            }
+        }
+
+        Interval[] sortedList = new Interval[sortedLen];
+        int sortedIdx = 0;
+        int sortedFromMax = -1;
+
+        // Special sorting algorithm: the original interval list is almost sorted, only some
+        // intervals are swapped. This insertion-based pass is therefore much faster than a full quicksort.
+        for (Interval interval : intervals) {
+            if (interval != null) {
+                int from = interval.from();
+
+                if (sortedFromMax <= from) {
+                    sortedList[sortedIdx++] = interval;
+                    sortedFromMax = interval.from();
+                } else {
+                    // the assumption that the intervals are already sorted failed,
+                    // so this interval must be sorted in manually
+                    int j;
+                    for (j = sortedIdx - 1; j >= 0 && from < sortedList[j].from(); j--) {
+                        sortedList[j + 1] = sortedList[j];
+                    }
+                    sortedList[j + 1] = interval;
+                    sortedIdx++;
+                }
+            }
+        }
+        sortedIntervals = sortedList;
+    }
+
+    void sortIntervalsAfterAllocation() {
+        if (firstDerivedIntervalIndex == -1) {
+            // no intervals have been added during allocation, so the sorted list is already up to date
+            return;
+        }
+
+        Interval[] oldList = sortedIntervals;
+        Interval[] newList = Arrays.copyOfRange(intervals, firstDerivedIntervalIndex, intervalsSize);
+        int oldLen = oldList.length;
+        int newLen = newList.length;
+
+        // conventional sort-algorithm for new intervals
+        Arrays.sort(newList, (Interval a, Interval b) -> a.from() - b.from());
+
+        // merge old and new list (both already sorted) into one combined list
+        Interval[] combinedList = new Interval[oldLen + newLen];
+        int oldIdx = 0;
+        int newIdx = 0;
+
+        while (oldIdx + newIdx < combinedList.length) {
+            if (newIdx >= newLen || (oldIdx < oldLen && oldList[oldIdx].from() <= newList[newIdx].from())) {
+                combinedList[oldIdx + newIdx] = oldList[oldIdx];
+                oldIdx++;
+            } else {
+                combinedList[oldIdx + newIdx] = newList[newIdx];
+                newIdx++;
+            }
+        }
+
+        sortedIntervals = combinedList;
+    }
+
+    // wrapper for Interval.splitChildAtOpId that performs a bailout in product mode
+    // instead of returning null
+    public Interval splitChildAtOpId(Interval interval, int opId, LIRInstruction.OperandMode mode) {
+        Interval result = interval.getSplitChildAtOpId(opId, mode, this);
+
+        if (result != null) {
+            if (Debug.isLogEnabled()) {
+                Debug.log("Split child at pos %d of interval %s is %s", opId, interval, result);
+            }
+            return result;
+        }
+
+        throw new BailoutException("LinearScan: interval is null");
+    }
+
+    static StackSlotValue canonicalSpillOpr(Interval interval) {
+        assert interval.spillSlot() != null : "canonical spill slot not set";
+        return interval.spillSlot();
+    }
+
+    boolean isMaterialized(AllocatableValue operand, int opId, OperandMode mode) {
+        Interval interval = intervalFor(operand);
+        assert interval != null : "interval must exist";
+
+        if (opId != -1) {
+            /*
+             * Operands are not changed when an interval is split during allocation, so search the
+             * right interval here.
+             */
+            interval = splitChildAtOpId(interval, opId, mode);
+        }
+
+        return isIllegal(interval.location()) && interval.canMaterialize();
+    }
+
+    boolean isCallerSave(Value operand) {
+        return attributes(asRegister(operand)).isCallerSave();
+    }
+
+    protected <B extends AbstractBlockBase<B>> void allocate(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder,
+                    SpillMoveFactory spillMoveFactory, RegisterAllocationConfig registerAllocationConfig) {
+
+        /*
+         * This is the point to enable debug logging for the whole register allocation.
+         */
+        try (Indent indent = Debug.logAndIndent("LinearScan allocate")) {
+            AllocationContext context = new AllocationContext(spillMoveFactory, registerAllocationConfig);
+
+            createLifetimeAnalysisPhase().apply(target, lirGenRes, codeEmittingOrder, linearScanOrder, context, false);
+
+            try (Scope s = Debug.scope("AfterLifetimeAnalysis", (Object) intervals)) {
+                sortIntervalsBeforeAllocation();
+
+                createRegisterAllocationPhase().apply(target, lirGenRes, codeEmittingOrder, linearScanOrder, context, false);
+
+                if (LinearScan.Options.LIROptLSRAOptimizeSpillPosition.getValue()) {
+                    createOptimizeSpillPositionPhase().apply(target, lirGenRes, codeEmittingOrder, linearScanOrder, context, false);
+                }
+                createResolveDataFlowPhase().apply(target, lirGenRes, codeEmittingOrder, linearScanOrder, context);
+
+                sortIntervalsAfterAllocation();
+
+                if (DetailedAsserts.getValue()) {
+                    verify();
+                }
+                beforeSpillMoveElimination();
+                createSpillMoveEliminationPhase().apply(target, lirGenRes, codeEmittingOrder, linearScanOrder, context);
+                createAssignLocationsPhase().apply(target, lirGenRes, codeEmittingOrder, linearScanOrder, context);
+
+                if (DetailedAsserts.getValue()) {
+                    verifyIntervals();
+                }
+            } catch (Throwable e) {
+                throw Debug.handle(e);
+            }
+        }
+    }
+
+    protected void beforeSpillMoveElimination() {
+    }
+
+    protected LinearScanLifetimeAnalysisPhase createLifetimeAnalysisPhase() {
+        return new LinearScanLifetimeAnalysisPhase(this);
+    }
+
+    protected LinearScanRegisterAllocationPhase createRegisterAllocationPhase() {
+        return new LinearScanRegisterAllocationPhase(this);
+    }
+
+    protected LinearScanOptimizeSpillPositionPhase createOptimizeSpillPositionPhase() {
+        return new LinearScanOptimizeSpillPositionPhase(this);
+    }
+
+    protected LinearScanResolveDataFlowPhase createResolveDataFlowPhase() {
+        return new LinearScanResolveDataFlowPhase(this);
+    }
+
+    protected LinearScanEliminateSpillMovePhase createSpillMoveEliminationPhase() {
+        return new LinearScanEliminateSpillMovePhase(this);
+    }
+
+    protected LinearScanAssignLocationsPhase createAssignLocationsPhase() {
+        return new LinearScanAssignLocationsPhase(this);
+    }
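+
+    /*
+     * The factory methods above exist so that subclasses can substitute specialized phases;
+     * the trace-based allocator variants in this package (e.g. TraceLinearScan) are expected
+     * to override them.
+     */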
+
+    public void printIntervals(String label) {
+        if (Debug.isLogEnabled()) {
+            try (Indent indent = Debug.logAndIndent("intervals %s", label)) {
+                for (Interval interval : intervals) {
+                    if (interval != null) {
+                        Debug.log("%s", interval.logString(this));
+                    }
+                }
+
+                try (Indent indent2 = Debug.logAndIndent("Basic Blocks")) {
+                    for (int i = 0; i < blockCount(); i++) {
+                        AbstractBlockBase<?> block = blockAt(i);
+                        Debug.log("B%d [%d, %d, %s] ", block.getId(), getFirstLirInstructionId(block), getLastLirInstructionId(block), block.getLoop());
+                    }
+                }
+            }
+        }
+        Debug.dump(Arrays.copyOf(intervals, intervalsSize), label);
+    }
+
+    public void printLir(String label, @SuppressWarnings("unused") boolean hirValid) {
+        Debug.dump(ir, label);
+    }
+
+    boolean verify() {
+        // (check that all intervals have a correct register and that no registers are overwritten)
+        verifyIntervals();
+
+        verifyRegisters();
+
+        Debug.log("no errors found");
+
+        return true;
+    }
+
+    private void verifyRegisters() {
+        // Enable this logging to get output for the verification process.
+        try (Indent indent = Debug.logAndIndent("verifying register allocation")) {
+            RegisterVerifier verifier = new RegisterVerifier(this);
+            verifier.verify(blockAt(0));
+        }
+    }
+
+    protected void verifyIntervals() {
+        try (Indent indent = Debug.logAndIndent("verifying intervals")) {
+            int len = intervalsSize;
+
+            for (int i = 0; i < len; i++) {
+                Interval i1 = intervals[i];
+                if (i1 == null) {
+                    continue;
+                }
+
+                i1.checkSplitChildren();
+
+                if (i1.operandNumber != i) {
+                    Debug.log("Interval %d is on position %d in list", i1.operandNumber, i);
+                    Debug.log(i1.logString(this));
+                    throw new JVMCIError("");
+                }
+
+                if (isVariable(i1.operand) && i1.kind().equals(LIRKind.Illegal)) {
+                    Debug.log("Interval %d has no type assigned", i1.operandNumber);
+                    Debug.log(i1.logString(this));
+                    throw new JVMCIError("");
+                }
+
+                if (i1.location() == null) {
+                    Debug.log("Interval %d has no register assigned", i1.operandNumber);
+                    Debug.log(i1.logString(this));
+                    throw new JVMCIError("");
+                }
+
+                if (i1.first() == Range.EndMarker) {
+                    Debug.log("Interval %d has no Range", i1.operandNumber);
+                    Debug.log(i1.logString(this));
+                    throw new JVMCIError("");
+                }
+
+                for (Range r = i1.first(); r != Range.EndMarker; r = r.next) {
+                    if (r.from >= r.to) {
+                        Debug.log("Interval %d has zero length range", i1.operandNumber);
+                        Debug.log(i1.logString(this));
+                        throw new JVMCIError("");
+                    }
+                }
+
+                for (int j = i + 1; j < len; j++) {
+                    Interval i2 = intervals[j];
+                    if (i2 == null) {
+                        continue;
+                    }
+
+                    // special intervals that are created in MoveResolver
+                    // -> ignore them because the range information has no meaning there
+                    if (i1.from() == 1 && i1.to() == 2) {
+                        continue;
+                    }
+                    if (i2.from() == 1 && i2.to() == 2) {
+                        continue;
+                    }
+                    Value l1 = i1.location();
+                    Value l2 = i2.location();
+                    if (i1.intersects(i2) && !isIllegal(l1) && (l1.equals(l2))) {
+                        if (DetailedAsserts.getValue()) {
+                            Debug.log("Intervals %d and %d overlap and have the same register assigned", i1.operandNumber, i2.operandNumber);
+                            Debug.log(i1.logString(this));
+                            Debug.log(i2.logString(this));
+                        }
+                        throw new BailoutException("");
+                    }
+                }
+            }
+        }
+    }
+
+    class CheckConsumer implements ValueConsumer {
+
+        boolean ok;
+        Interval curInterval;
+
+        @Override
+        public void visitValue(Value operand, OperandMode mode, EnumSet<OperandFlag> flags) {
+            if (isRegister(operand)) {
+                if (intervalFor(operand) == curInterval) {
+                    ok = true;
+                }
+            }
+        }
+    }
+
+    void verifyNoOopsInFixedIntervals() {
+        try (Indent indent = Debug.logAndIndent("verifying that no oops are in fixed intervals *")) {
+            CheckConsumer checkConsumer = new CheckConsumer();
+
+            Interval fixedIntervals;
+            Interval otherIntervals;
+            fixedIntervals = createUnhandledLists(IS_PRECOLORED_INTERVAL, null).first;
+            // to ensure that the walk continues until the last instruction id, add a dummy
+            // interval with a high operation id
+            otherIntervals = new Interval(Value.ILLEGAL, -1);
+            otherIntervals.addRange(Integer.MAX_VALUE - 2, Integer.MAX_VALUE - 1);
+            IntervalWalker iw = new IntervalWalker(this, fixedIntervals, otherIntervals);
+
+            for (AbstractBlockBase<?> block : sortedBlocks) {
+                List<LIRInstruction> instructions = ir.getLIRforBlock(block);
+
+                for (int j = 0; j < instructions.size(); j++) {
+                    LIRInstruction op = instructions.get(j);
+
+                    if (op.hasState()) {
+                        iw.walkBefore(op.id());
+                        boolean checkLive = true;
+
+                        /*
+                         * Make sure none of the fixed registers is live across an oopmap since we
+                         * can't handle that correctly.
+                         */
+                        if (checkLive) {
+                            for (Interval interval = iw.activeLists.get(RegisterBinding.Fixed); interval != Interval.EndMarker; interval = interval.next) {
+                                if (interval.currentTo() > op.id() + 1) {
+                                    /*
+                                     * This interval is live out of this op so make sure that this
+                                     * interval represents some value that's referenced by this op
+                                     * either as an input or output.
+                                     */
+                                    checkConsumer.curInterval = interval;
+                                    checkConsumer.ok = false;
+
+                                    op.visitEachInput(checkConsumer);
+                                    op.visitEachAlive(checkConsumer);
+                                    op.visitEachTemp(checkConsumer);
+                                    op.visitEachOutput(checkConsumer);
+
+                                    assert checkConsumer.ok : "fixed intervals should never be live across an oopmap point";
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    public LIR getLIR() {
+        return ir;
+    }
+
+    public FrameMapBuilder getFrameMapBuilder() {
+        return frameMapBuilder;
+    }
+
+    public List<? extends AbstractBlockBase<?>> sortedBlocks() {
+        return sortedBlocks;
+    }
+
+    public Register[] getRegisters() {
+        return registers;
+    }
+
+    public RegisterAllocationConfig getRegisterAllocationConfig() {
+        return regAllocConfig;
+    }
+
+    public boolean callKillsRegisters() {
+        return regAllocConfig.getRegisterConfig().areAllAllocatableRegistersCallerSaved();
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/LinearScanAssignLocationsPhase.java	Mon Aug 31 13:06:17 2015 +0200
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.trace;
+
+import static com.oracle.graal.compiler.common.GraalOptions.*;
+import static com.oracle.graal.lir.LIRValueUtil.*;
+import static jdk.internal.jvmci.code.ValueUtil.*;
+
+import java.util.*;
+
+import jdk.internal.jvmci.code.*;
+import jdk.internal.jvmci.meta.*;
+
+import com.oracle.graal.compiler.common.alloc.*;
+import com.oracle.graal.compiler.common.cfg.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.LIRInstruction.OperandFlag;
+import com.oracle.graal.lir.LIRInstruction.OperandMode;
+import com.oracle.graal.lir.StandardOp.MoveOp;
+import com.oracle.graal.lir.StandardOp.ValueMoveOp;
+import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
+import com.oracle.graal.lir.phases.*;
+
+/**
+ * Phase 7: Assign register numbers back to LIR.
+ */
+public class LinearScanAssignLocationsPhase extends AllocationPhase {
+
+    protected final LinearScan allocator;
+
+    public LinearScanAssignLocationsPhase(LinearScan allocator) {
+        this.allocator = allocator;
+    }
+
+    @Override
+    protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory,
+                    RegisterAllocationConfig registerAllocationConfig) {
+        assignLocations();
+    }
+
+    /**
+     * Assigns the allocated location for an LIR instruction operand back into the instruction.
+     *
+     * @param op current {@link LIRInstruction}
+     * @param operand an LIR instruction operand
+     * @param mode the usage mode for {@code operand} by the instruction
+     * @return the location assigned for the operand
+     */
+    protected Value colorLirOperand(LIRInstruction op, Variable operand, OperandMode mode) {
+        int opId = op.id();
+        Interval interval = allocator.intervalFor(operand);
+        assert interval != null : "interval must exist";
+
+        if (opId != -1) {
+            if (DetailedAsserts.getValue()) {
+                AbstractBlockBase<?> block = allocator.blockForId(opId);
+                if (block.getSuccessorCount() <= 1 && opId == allocator.getLastLirInstructionId(block)) {
+                    /*
+                     * Check if spill moves could have been appended at the end of this block, but
+                     * before the branch instruction, in which case the split child information for
+                     * this branch would be incorrect.
+                     */
+                    LIRInstruction instr = allocator.getLIR().getLIRforBlock(block).get(allocator.getLIR().getLIRforBlock(block).size() - 1);
+                    if (instr instanceof StandardOp.JumpOp) {
+                        if (allocator.getBlockData(block).liveOut.get(allocator.operandNumber(operand))) {
+                            assert false : String.format(
+                                            "can't get split child for the last branch of a block because the information would be incorrect (moves are inserted before the branch in resolveDataFlow) block=%s, instruction=%s, operand=%s",
+                                            block, instr, operand);
+                        }
+                    }
+                }
+            }
+
+            /*
+             * Operands are not changed when an interval is split during allocation, so search the
+             * right interval here.
+             */
+            interval = allocator.splitChildAtOpId(interval, opId, mode);
+        }
+
+        if (isIllegal(interval.location()) && interval.canMaterialize()) {
+            assert mode != OperandMode.DEF;
+            return new ConstantValue(interval.kind(), interval.getMaterializedValue());
+        }
+        return interval.location();
+    }
+
+    /**
+     * @param op
+     * @param operand
+     * @param valueMode
+     * @param flags
+     * @see InstructionValueProcedure#doValue(LIRInstruction, Value, OperandMode, EnumSet)
+     */
+    private Value debugInfoProcedure(LIRInstruction op, Value operand, OperandMode valueMode, EnumSet<OperandFlag> flags) {
+        if (isVirtualStackSlot(operand)) {
+            return operand;
+        }
+        int tempOpId = op.id();
+        OperandMode mode = OperandMode.USE;
+        AbstractBlockBase<?> block = allocator.blockForId(tempOpId);
+        if (block.getSuccessorCount() == 1 && tempOpId == allocator.getLastLirInstructionId(block)) {
+            /*
+             * Generating debug information for the last instruction of a block. If this instruction
+             * is a branch, spill moves are inserted before this branch and so the wrong operand
+             * would be returned (spill moves at block boundaries are not considered in the live
+             * ranges of intervals).
+             * 
+             * Solution: use the first opId of the branch target block instead.
+             */
+            final LIRInstruction instr = allocator.getLIR().getLIRforBlock(block).get(allocator.getLIR().getLIRforBlock(block).size() - 1);
+            if (instr instanceof StandardOp.JumpOp) {
+                if (allocator.getBlockData(block).liveOut.get(allocator.operandNumber(operand))) {
+                    tempOpId = allocator.getFirstLirInstructionId(block.getSuccessors().iterator().next());
+                    mode = OperandMode.DEF;
+                }
+            }
+        }
+
+        /*
+         * Get the current location of the operand. The operand must be live because debug
+         * information is considered when building the intervals. If the interval is not live,
+         * colorLirOperand will fail with an assertion error.
+         */
+        Value result = colorLirOperand(op, (Variable) operand, mode);
+        assert !allocator.hasCall(tempOpId) || isStackSlotValue(result) || isJavaConstant(result) || !allocator.isCallerSave(result) : "cannot have caller-save register operands at calls";
+        return result;
+    }
+
+    private void computeDebugInfo(final LIRInstruction op, LIRFrameState info) {
+        info.forEachState(op, this::debugInfoProcedure);
+    }
+
+    private void assignLocations(List<LIRInstruction> instructions) {
+        int numInst = instructions.size();
+        boolean hasDead = false;
+
+        for (int j = 0; j < numInst; j++) {
+            final LIRInstruction op = instructions.get(j);
+            if (op == null) {
+                /*
+                 * this can happen when spill-moves are removed in eliminateSpillMoves
+                 */
+                hasDead = true;
+            } else if (assignLocations(op)) {
+                instructions.set(j, null);
+                hasDead = true;
+            }
+        }
+
+        if (hasDead) {
+            // Remove null values from the list.
+            instructions.removeAll(Collections.singleton(null));
+        }
+    }
+
+    /**
+     * Assigns locations to the operands of an {@link LIRInstruction}.
+     *
+     * @param op The {@link LIRInstruction} that should be colored.
+     * @return {@code true} if the instruction should be deleted.
+     */
+    protected boolean assignLocations(LIRInstruction op) {
+        assert op != null;
+
+        InstructionValueProcedure assignProc = (inst, operand, mode, flags) -> isVariable(operand) ? colorLirOperand(inst, (Variable) operand, mode) : operand;
+        // remove useless moves
+        if (op instanceof MoveOp) {
+            AllocatableValue result = ((MoveOp) op).getResult();
+            if (isVariable(result) && allocator.isMaterialized(result, op.id(), OperandMode.DEF)) {
+                /*
+                 * This happens if a materializable interval is originally not spilled but then
+                 * kicked out in LinearScanWalker.splitForSpilling(). When kicking out such an
+                 * interval this move operation was already generated.
+                 */
+                return true;
+            }
+        }
+
+        op.forEachInput(assignProc);
+        op.forEachAlive(assignProc);
+        op.forEachTemp(assignProc);
+        op.forEachOutput(assignProc);
+
+        // compute reference map and debug information
+        op.forEachState((inst, state) -> computeDebugInfo(inst, state));
+
+        // remove useless moves
+        if (op instanceof ValueMoveOp) {
+            ValueMoveOp move = (ValueMoveOp) op;
+            if (move.getInput().equals(move.getResult())) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    private void assignLocations() {
+        try (Indent indent = Debug.logAndIndent("assign locations")) {
+            for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
+                try (Indent indent2 = Debug.logAndIndent("assign locations in block B%d", block.getId())) {
+                    assignLocations(allocator.getLIR().getLIRforBlock(block));
+                }
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/LinearScanEliminateSpillMovePhase.java	Mon Aug 31 13:06:17 2015 +0200
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.trace;
+
+import static com.oracle.graal.compiler.common.GraalOptions.*;
+import static com.oracle.graal.lir.LIRValueUtil.*;
+import static jdk.internal.jvmci.code.ValueUtil.*;
+
+import java.util.*;
+
+import jdk.internal.jvmci.code.*;
+import jdk.internal.jvmci.meta.*;
+
+import com.oracle.graal.compiler.common.alloc.*;
+import com.oracle.graal.compiler.common.cfg.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.StandardOp.LoadConstantOp;
+import com.oracle.graal.lir.StandardOp.MoveOp;
+import com.oracle.graal.lir.StandardOp.ValueMoveOp;
+import com.oracle.graal.lir.alloc.trace.Interval.SpillState;
+import com.oracle.graal.lir.alloc.trace.LinearScan.IntervalPredicate;
+import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
+import com.oracle.graal.lir.phases.*;
+
+public class LinearScanEliminateSpillMovePhase extends AllocationPhase {
+
+    private static final IntervalPredicate mustStoreAtDefinition = new LinearScan.IntervalPredicate() {
+
+        @Override
+        public boolean apply(Interval i) {
+            return i.isSplitParent() && i.spillState() == SpillState.StoreAtDefinition;
+        }
+    };
+
+    protected final LinearScan allocator;
+
+    protected LinearScanEliminateSpillMovePhase(LinearScan allocator) {
+        this.allocator = allocator;
+    }
+
+    @Override
+    protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory,
+                    RegisterAllocationConfig registerAllocationConfig) {
+        eliminateSpillMoves();
+    }
+
+    /**
+     * @return the index of the first instruction that is of interest for
+     *         {@link #eliminateSpillMoves()}
+     */
+    protected int firstInstructionOfInterest() {
+        // skip the first because it is always a label
+        return 1;
+    }
+
+    // called once before assignment of register numbers
+    void eliminateSpillMoves() {
+        try (Indent indent = Debug.logAndIndent("Eliminating unnecessary spill moves")) {
+
+            /*
+             * collect all intervals that must be stored after their definition. The list is sorted
+             * by Interval.spillDefinitionPos.
+             */
+            Interval interval;
+            interval = allocator.createUnhandledLists(mustStoreAtDefinition, null).first;
+            if (DetailedAsserts.getValue()) {
+                checkIntervals(interval);
+            }
+
+            LIRInsertionBuffer insertionBuffer = new LIRInsertionBuffer();
+            for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
+                try (Indent indent1 = Debug.logAndIndent("Handle %s", block)) {
+                    List<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(block);
+                    int numInst = instructions.size();
+
+                    // iterate all instructions of the block.
+                    for (int j = firstInstructionOfInterest(); j < numInst; j++) {
+                        LIRInstruction op = instructions.get(j);
+                        int opId = op.id();
+
+                        if (opId == -1) {
+                            MoveOp move = (MoveOp) op;
+                            /*
+                             * Remove move from register to stack if the stack slot is guaranteed to
+                             * be correct. Only moves that have been inserted by LinearScan can be
+                             * removed.
+                             */
+                            if (canEliminateSpillMove(block, move)) {
+                                /*
+                                 * Move target is a stack slot that is always correct, so eliminate
+                                 * instruction.
+                                 */
+                                if (Debug.isLogEnabled()) {
+                                    if (move instanceof ValueMoveOp) {
+                                        ValueMoveOp vmove = (ValueMoveOp) move;
+                                        Debug.log("eliminating move from interval %d (%s) to %d (%s) in block %s", allocator.operandNumber(vmove.getInput()), vmove.getInput(),
+                                                        allocator.operandNumber(vmove.getResult()), vmove.getResult(), block);
+                                    } else {
+                                        LoadConstantOp load = (LoadConstantOp) move;
+                                        Debug.log("eliminating constant load from %s to %d (%s) in block %s", load.getConstant(), allocator.operandNumber(load.getResult()), load.getResult(), block);
+                                    }
+                                }
+
+                                // null instructions are deleted later by the assign-locations phase
+                                instructions.set(j, null);
+                            }
+
+                        } else {
+                            /*
+                             * Insert move from register to stack just after the beginning of the
+                             * interval.
+                             */
+                            assert interval == Interval.EndMarker || interval.spillDefinitionPos() >= opId : "invalid order";
+                            assert interval == Interval.EndMarker || (interval.isSplitParent() && interval.spillState() == SpillState.StoreAtDefinition) : "invalid interval";
+
+                            while (interval != Interval.EndMarker && interval.spillDefinitionPos() == opId) {
+                                if (!interval.canMaterialize()) {
+                                    if (!insertionBuffer.initialized()) {
+                                        /*
+                                         * prepare insertion buffer (appended when all instructions
+                                         * in the block are processed)
+                                         */
+                                        insertionBuffer.init(instructions);
+                                    }
+
+                                    AllocatableValue fromLocation = interval.location();
+                                    AllocatableValue toLocation = LinearScan.canonicalSpillOpr(interval);
+                                    if (!fromLocation.equals(toLocation)) {
+
+                                        assert isRegister(fromLocation) : "from operand must be a register but is: " + fromLocation + " toLocation=" + toLocation + " spillState=" +
+                                                        interval.spillState();
+                                        assert isStackSlotValue(toLocation) : "to operand must be a stack slot";
+
+                                        LIRInstruction move = allocator.getSpillMoveFactory().createMove(toLocation, fromLocation);
+                                        insertionBuffer.append(j + 1, move);
+
+                                        if (Debug.isLogEnabled()) {
+                                            Debug.log("inserting move after definition of interval %d to stack slot %s at opId %d", interval.operandNumber, interval.spillSlot(), opId);
+                                        }
+                                    }
+                                }
+                                interval = interval.next;
+                            }
+                        }
+                    } // end of instruction iteration
+
+                    if (insertionBuffer.initialized()) {
+                        insertionBuffer.finish();
+                    }
+                }
+            } // end of block iteration
+
+            assert interval == Interval.EndMarker : "missed an interval";
+        }
+    }
+
+    /**
+     * Checks whether {@code move} is redundant and can be eliminated, i.e., whether its target
+     * interval is guaranteed to already hold the correct value in memory.
+     *
+     * @param block The block {@code move} is located in.
+     * @param move Spill move.
+     */
+    protected boolean canEliminateSpillMove(AbstractBlockBase<?> block, MoveOp move) {
+        assert isVariable(move.getResult()) : "LinearScan inserts only moves to variables: " + move;
+
+        Interval curInterval = allocator.intervalFor(move.getResult());
+
+        if (!isRegister(curInterval.location()) && curInterval.alwaysInMemory()) {
+            assert isStackSlotValue(curInterval.location()) : "Not a stack slot: " + curInterval.location();
+            return true;
+        }
+        return false;
+    }
+
+    private static void checkIntervals(Interval interval) {
+        Interval prev = null;
+        Interval temp = interval;
+        while (temp != Interval.EndMarker) {
+            assert temp.spillDefinitionPos() > 0 : "invalid spill definition pos";
+            if (prev != null) {
+                assert temp.from() >= prev.from() : "intervals not sorted";
+                assert temp.spillDefinitionPos() >= prev.spillDefinitionPos() : "when intervals are sorted by from, they must also be sorted by spillDefinitionPos";
+            }
+
+            assert temp.spillSlot() != null || temp.canMaterialize() : "interval has no spill slot assigned";
+            assert temp.spillDefinitionPos() >= temp.from() : "invalid order";
+            assert temp.spillDefinitionPos() <= temp.from() + 2 : "only intervals defined once at their start-pos can be optimized";
+
+            if (Debug.isLogEnabled()) {
+                Debug.log("interval %d (from %d to %d) must be stored at %d", temp.operandNumber, temp.from(), temp.to(), temp.spillDefinitionPos());
+            }
+
+            prev = temp;
+            temp = temp.next;
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/LinearScanLifetimeAnalysisPhase.java	Mon Aug 31 13:06:17 2015 +0200
@@ -0,0 +1,838 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.trace;
+
+import static com.oracle.graal.compiler.common.GraalOptions.*;
+import static com.oracle.graal.lir.LIRValueUtil.*;
+import static com.oracle.graal.lir.debug.LIRGenerationDebugContext.*;
+import static jdk.internal.jvmci.code.ValueUtil.*;
+
+import java.util.*;
+
+import jdk.internal.jvmci.code.*;
+import jdk.internal.jvmci.common.*;
+import jdk.internal.jvmci.meta.*;
+
+import com.oracle.graal.compiler.common.alloc.*;
+import com.oracle.graal.compiler.common.cfg.*;
+import com.oracle.graal.compiler.common.util.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.debug.Debug.Scope;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.LIRInstruction.OperandFlag;
+import com.oracle.graal.lir.LIRInstruction.OperandMode;
+import com.oracle.graal.lir.StandardOp.LoadConstantOp;
+import com.oracle.graal.lir.StandardOp.StackStoreOp;
+import com.oracle.graal.lir.StandardOp.ValueMoveOp;
+import com.oracle.graal.lir.alloc.trace.Interval.RegisterPriority;
+import com.oracle.graal.lir.alloc.trace.Interval.SpillState;
+import com.oracle.graal.lir.alloc.trace.LinearScan.BlockData;
+import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
+import com.oracle.graal.lir.phases.*;
+
+public class LinearScanLifetimeAnalysisPhase extends AllocationPhase {
+
+    protected final LinearScan allocator;
+
+    /**
+     * @param linearScan the linear scan allocator this phase operates on
+     */
+    protected LinearScanLifetimeAnalysisPhase(LinearScan linearScan) {
+        allocator = linearScan;
+    }
+
+    @Override
+    protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory,
+                    RegisterAllocationConfig registerAllocationConfig) {
+        numberInstructions();
+        allocator.printLir("Before register allocation", true);
+        computeLocalLiveSets();
+        computeGlobalLiveSets();
+        buildIntervals();
+    }
+
+    /**
+     * Two-dimensional bit map: bit {@code (operand, loop)} is set if the variable with the given
+     * operand number is used inside the loop.
+     */
+    private BitMap2D intervalInLoop;
+
+    boolean isIntervalInLoop(int interval, int loop) {
+        return intervalInLoop.at(interval, loop);
+    }
+
+    /**
+     * Numbers all instructions in all blocks. The numbering follows the
+     * {@linkplain ComputeBlockOrder linear scan order}.
+     */
+    protected void numberInstructions() {
+
+        allocator.initIntervals();
+
+        ValueConsumer setVariableConsumer = (value, mode, flags) -> {
+            if (isVariable(value)) {
+                allocator.getOrCreateInterval(asVariable(value));
+            }
+        };
+
+        // Assign IDs to LIR nodes and build a mapping, lirOps, from ID to LIRInstruction node.
+        int numInstructions = 0;
+        for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
+            numInstructions += allocator.getLIR().getLIRforBlock(block).size();
+        }
+
+        // initialize with correct length
+        allocator.initOpIdMaps(numInstructions);
+
+        int opId = 0;
+        int index = 0;
+        for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
+            allocator.initBlockData(block);
+
+            List<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(block);
+
+            int numInst = instructions.size();
+            for (int j = 0; j < numInst; j++) {
+                LIRInstruction op = instructions.get(j);
+                op.setId(opId);
+
+                allocator.putOpIdMaps(index, op, block);
+                assert allocator.instructionForId(opId) == op : "must match";
+
+                op.visitEachTemp(setVariableConsumer);
+                op.visitEachOutput(setVariableConsumer);
+
+                index++;
+                opId += 2; // instruction ids are numbered by two; odd ids stay free for inserted moves
+            }
+        }
+        assert index == numInstructions : "must match";
+        assert (index << 1) == opId : "must match: " + (index << 1);
+    }
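+
+    /*
+     * Illustration (hypothetical block, not taken from this change): three instructions in a
+     * block receive the ids 0, 2 and 4; the odd ids in between remain unused so that later
+     * phases can insert moves between two instructions without renumbering the LIR.
+     */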
+
+    /**
+     * Computes local live sets (i.e. {@link BlockData#liveGen} and {@link BlockData#liveKill})
+     * separately for each block.
+     */
+    void computeLocalLiveSets() {
+        int liveSize = allocator.liveSetSize();
+
+        intervalInLoop = new BitMap2D(allocator.operandSize(), allocator.numLoops());
+
+        // iterate all blocks
+        for (final AbstractBlockBase<?> block : allocator.sortedBlocks()) {
+            try (Indent indent = Debug.logAndIndent("compute local live sets for block %s", block)) {
+
+                final BitSet liveGen = new BitSet(liveSize);
+                final BitSet liveKill = new BitSet(liveSize);
+
+                List<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(block);
+                int numInst = instructions.size();
+
+                ValueConsumer useConsumer = (operand, mode, flags) -> {
+                    if (isVariable(operand)) {
+                        int operandNum = allocator.operandNumber(operand);
+                        if (!liveKill.get(operandNum)) {
+                            liveGen.set(operandNum);
+                            if (Debug.isLogEnabled()) {
+                                Debug.log("liveGen for operand %d(%s)", operandNum, operand);
+                            }
+                        }
+                        if (block.getLoop() != null) {
+                            intervalInLoop.setBit(operandNum, block.getLoop().getIndex());
+                        }
+                    }
+
+                    if (DetailedAsserts.getValue()) {
+                        verifyInput(block, liveKill, operand);
+                    }
+                };
+                ValueConsumer stateConsumer = (operand, mode, flags) -> {
+                    if (LinearScan.isVariableOrRegister(operand)) {
+                        int operandNum = allocator.operandNumber(operand);
+                        if (!liveKill.get(operandNum)) {
+                            liveGen.set(operandNum);
+                            if (Debug.isLogEnabled()) {
+                                Debug.log("liveGen in state for operand %d(%s)", operandNum, operand);
+                            }
+                        }
+                    }
+                };
+                ValueConsumer defConsumer = (operand, mode, flags) -> {
+                    if (isVariable(operand)) {
+                        int varNum = allocator.operandNumber(operand);
+                        liveKill.set(varNum);
+                        if (Debug.isLogEnabled()) {
+                            Debug.log("liveKill for operand %d(%s)", varNum, operand);
+                        }
+                        if (block.getLoop() != null) {
+                            intervalInLoop.setBit(varNum, block.getLoop().getIndex());
+                        }
+                    }
+
+                    if (DetailedAsserts.getValue()) {
+                        /*
+                         * Fixed intervals are never live at block boundaries, so they need not be
+                         * processed in live sets. Process them only in debug mode so that this can
+                         * be checked.
+                         */
+                        verifyTemp(liveKill, operand);
+                    }
+                };
+
+                // iterate all instructions of the block
+                for (int j = 0; j < numInst; j++) {
+                    final LIRInstruction op = instructions.get(j);
+
+                    try (Indent indent2 = Debug.logAndIndent("handle op %d: %s", op.id(), op)) {
+                        op.visitEachInput(useConsumer);
+                        op.visitEachAlive(useConsumer);
+                        /*
+                         * Add uses of live locals from interpreter's point of view for proper debug
+                         * information generation.
+                         */
+                        op.visitEachState(stateConsumer);
+                        op.visitEachTemp(defConsumer);
+                        op.visitEachOutput(defConsumer);
+                    }
+                } // end of instruction iteration
+
+                BlockData blockSets = allocator.getBlockData(block);
+                blockSets.liveGen = liveGen;
+                blockSets.liveKill = liveKill;
+                blockSets.liveIn = new BitSet(liveSize);
+                blockSets.liveOut = new BitSet(liveSize);
+
+                if (Debug.isLogEnabled()) {
+                    Debug.log("liveGen  B%d %s", block.getId(), blockSets.liveGen);
+                    Debug.log("liveKill B%d %s", block.getId(), blockSets.liveKill);
+                }
+
+            }
+        } // end of block iteration
+    }
+
+    private void verifyTemp(BitSet liveKill, Value operand) {
+        /*
+         * Fixed intervals are never live at block boundaries, so they need not be processed in live
+         * sets. Process them only in debug mode so that this can be checked.
+         */
+        if (isRegister(operand)) {
+            if (allocator.isProcessed(operand)) {
+                liveKill.set(allocator.operandNumber(operand));
+            }
+        }
+    }
+
+    private void verifyInput(AbstractBlockBase<?> block, BitSet liveKill, Value operand) {
+        /*
+         * Fixed intervals are never live at block boundaries, so they need not be processed in live
+         * sets. The assertions below verify this invariant. The entry block may have
+         * incoming values in registers, which is ok.
+         */
+        if (isRegister(operand) && block != allocator.getLIR().getControlFlowGraph().getStartBlock()) {
+            if (allocator.isProcessed(operand)) {
+                assert liveKill.get(allocator.operandNumber(operand)) : "using fixed register that is not defined in this block";
+            }
+        }
+    }
+
+    /**
+     * Performs a backward dataflow analysis to compute global live sets (i.e.
+     * {@link BlockData#liveIn} and {@link BlockData#liveOut}) for each block.
+     */
+    protected void computeGlobalLiveSets() {
+        try (Indent indent = Debug.logAndIndent("compute global live sets")) {
+            int numBlocks = allocator.blockCount();
+            boolean changeOccurred;
+            boolean changeOccurredInBlock;
+            int iterationCount = 0;
+            BitSet liveOut = new BitSet(allocator.liveSetSize()); // scratch set for calculations
+
+            /*
+             * Perform a backward dataflow analysis to compute liveOut and liveIn for each block.
+             * The loop is executed until a fixpoint is reached (no changes in an iteration).
+             */
+            do {
+                changeOccurred = false;
+
+                try (Indent indent2 = Debug.logAndIndent("new iteration %d", iterationCount)) {
+
+                    // iterate all blocks in reverse order
+                    for (int i = numBlocks - 1; i >= 0; i--) {
+                        AbstractBlockBase<?> block = allocator.blockAt(i);
+                        BlockData blockSets = allocator.getBlockData(block);
+
+                        changeOccurredInBlock = false;
+
+                        /* liveOut(block) is the union of liveIn(sux), for successors sux of block. */
+                        int n = block.getSuccessorCount();
+                        if (n > 0) {
+                            // block has successors
+                            liveOut.clear();
+                            for (AbstractBlockBase<?> successor : block.getSuccessors()) {
+                                liveOut.or(allocator.getBlockData(successor).liveIn);
+                            }
+
+                            if (!blockSets.liveOut.equals(liveOut)) {
+                                /*
+                                 * A change occurred. Swap the old and new live out sets to avoid
+                                 * copying.
+                                 */
+                                BitSet temp = blockSets.liveOut;
+                                blockSets.liveOut = liveOut;
+                                liveOut = temp;
+
+                                changeOccurred = true;
+                                changeOccurredInBlock = true;
+                            }
+                        }
+
+                        if (iterationCount == 0 || changeOccurredInBlock) {
+                            /*
+                             * liveIn(block) is the union of liveGen(block) with (liveOut(block) &
+                             * !liveKill(block)).
+                             * 
+                             * Note: liveIn has to be computed only in first iteration or if liveOut
+                             * has changed!
+                             */
+                            BitSet liveIn = blockSets.liveIn;
+                            liveIn.clear();
+                            liveIn.or(blockSets.liveOut);
+                            liveIn.andNot(blockSets.liveKill);
+                            liveIn.or(blockSets.liveGen);
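+
+                            /*
+                             * Worked example (hypothetical sets): with liveGen = {v1},
+                             * liveKill = {v2} and liveOut = {v2, v3}, this computes
+                             * liveIn = {v1} | ({v2, v3} & !{v2}) = {v1, v3}.
+                             */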
+
+                            if (Debug.isLogEnabled()) {
+                                Debug.log("block %d: livein = %s,  liveout = %s", block.getId(), liveIn, blockSets.liveOut);
+                            }
+                        }
+                    }
+                    iterationCount++;
+
+                    if (changeOccurred && iterationCount > 50) {
+                        throw new BailoutException("too many iterations in computeGlobalLiveSets");
+                    }
+                }
+            } while (changeOccurred);
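+
+            /*
+             * Termination note (standard dataflow argument, not specific to this change): the
+             * live sets only ever grow, so the fixpoint is reached after finitely many
+             * iterations; the iteration-count bailout above is a safety net, not an expected
+             * path.
+             */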
+
+            if (DetailedAsserts.getValue()) {
+                verifyLiveness();
+            }
+
+            // check that the liveIn set of the first block is empty
+            AbstractBlockBase<?> startBlock = allocator.getLIR().getControlFlowGraph().getStartBlock();
+            if (allocator.getBlockData(startBlock).liveIn.cardinality() != 0) {
+                if (DetailedAsserts.getValue()) {
+                    reportFailure(numBlocks);
+                }
+                // bailout if this occurs in product mode.
+                throw new JVMCIError("liveIn set of first block must be empty: " + allocator.getBlockData(startBlock).liveIn);
+            }
+        }
+    }
+
+    protected void reportFailure(int numBlocks) {
+        try (Scope s = Debug.forceLog()) {
+            try (Indent indent = Debug.logAndIndent("report failure")) {
+
+                BitSet startBlockLiveIn = allocator.getBlockData(allocator.getLIR().getControlFlowGraph().getStartBlock()).liveIn;
+                try (Indent indent2 = Debug.logAndIndent("Error: liveIn set of first block must be empty (when this fails, variables are used before they are defined):")) {
+                    for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) {
+                        Interval interval = allocator.intervalFor(operandNum);
+                        if (interval != null) {
+                            Value operand = interval.operand;
+                            Debug.log("var %d; operand=%s; node=%s", operandNum, operand, getSourceForOperandFromDebugContext(operand));
+                        } else {
+                            Debug.log("var %d; missing operand", operandNum);
+                        }
+                    }
+                }
+
+                // print some additional information to simplify debugging
+                for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) {
+                    Interval interval = allocator.intervalFor(operandNum);
+                    Value operand = null;
+                    Object valueForOperandFromDebugContext = null;
+                    if (interval != null) {
+                        operand = interval.operand;
+                        valueForOperandFromDebugContext = getSourceForOperandFromDebugContext(operand);
+                    }
+                    try (Indent indent2 = Debug.logAndIndent("---- Detailed information for var %d; operand=%s; node=%s ----", operandNum, operand, valueForOperandFromDebugContext)) {
+
+                        Deque<AbstractBlockBase<?>> definedIn = new ArrayDeque<>();
+                        HashSet<AbstractBlockBase<?>> usedIn = new HashSet<>();
+                        for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
+                            if (allocator.getBlockData(block).liveGen.get(operandNum)) {
+                                usedIn.add(block);
+                                try (Indent indent3 = Debug.logAndIndent("used in block B%d", block.getId())) {
+                                    for (LIRInstruction ins : allocator.getLIR().getLIRforBlock(block)) {
+                                        try (Indent indent4 = Debug.logAndIndent("%d: %s", ins.id(), ins)) {
+                                            ins.forEachState((liveStateOperand, mode, flags) -> {
+                                                Debug.log("operand=%s", liveStateOperand);
+                                                return liveStateOperand;
+                                            });
+                                        }
+                                    }
+                                }
+                            }
+                            if (allocator.getBlockData(block).liveKill.get(operandNum)) {
+                                definedIn.add(block);
+                                try (Indent indent3 = Debug.logAndIndent("defined in block B%d", block.getId())) {
+                                    for (LIRInstruction ins : allocator.getLIR().getLIRforBlock(block)) {
+                                        Debug.log("%d: %s", ins.id(), ins);
+                                    }
+                                }
+                            }
+                        }
+
+                        int[] hitCount = new int[numBlocks];
+
+                        while (!definedIn.isEmpty()) {
+                            AbstractBlockBase<?> block = definedIn.removeFirst();
+                            usedIn.remove(block);
+                            for (AbstractBlockBase<?> successor : block.getSuccessors()) {
+                                if (successor.isLoopHeader()) {
+                                    if (!block.isLoopEnd()) {
+                                        definedIn.add(successor);
+                                    }
+                                } else {
+                                    if (++hitCount[successor.getId()] == successor.getPredecessorCount()) {
+                                        definedIn.add(successor);
+                                    }
+                                }
+                            }
+                        }
+                        try (Indent indent3 = Debug.logAndIndent("**** offending usages are in: ")) {
+                            for (AbstractBlockBase<?> block : usedIn) {
+                                Debug.log("B%d", block.getId());
+                            }
+                        }
+                    }
+                }
+            }
+        } catch (Throwable e) {
+            throw Debug.handle(e);
+        }
+    }
+
+    protected void verifyLiveness() {
+        /*
+         * Check that fixed intervals are not live at block boundaries (the live sets must not
+         * contain entries for fixed registers).
+         */
+        for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
+            for (int j = 0; j <= allocator.maxRegisterNumber(); j++) {
+                assert !allocator.getBlockData(block).liveIn.get(j) : "liveIn  set of fixed register must be empty";
+                assert !allocator.getBlockData(block).liveOut.get(j) : "liveOut set of fixed register must be empty";
+                assert !allocator.getBlockData(block).liveGen.get(j) : "liveGen set of fixed register must be empty";
+            }
+        }
+    }
+
+    protected void addUse(AllocatableValue operand, int from, int to, RegisterPriority registerPriority, LIRKind kind) {
+        if (!allocator.isProcessed(operand)) {
+            return;
+        }
+
+        Interval interval = allocator.getOrCreateInterval(operand);
+        if (!kind.equals(LIRKind.Illegal)) {
+            interval.setKind(kind);
+        }
+
+        interval.addRange(from, to);
+
+        // Register use position at even instruction id.
+        interval.addUsePos(to & ~1, registerPriority);
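+        /*
+         * Example of the masking: a use at the odd position 7 is recorded at the even
+         * instruction id 6, since 7 & ~1 == 6 and use positions always refer to real
+         * instructions.
+         */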
+
+        if (Debug.isLogEnabled()) {
+            Debug.log("add use: %s, from %d to %d (%s)", interval, from, to, registerPriority.name());
+        }
+    }
+
+    protected void addTemp(AllocatableValue operand, int tempPos, RegisterPriority registerPriority, LIRKind kind) {
+        if (!allocator.isProcessed(operand)) {
+            return;
+        }
+
+        Interval interval = allocator.getOrCreateInterval(operand);
+        if (!kind.equals(LIRKind.Illegal)) {
+            interval.setKind(kind);
+        }
+
+        interval.addRange(tempPos, tempPos + 1);
+        interval.addUsePos(tempPos, registerPriority);
+        interval.addMaterializationValue(null);
+
+        if (Debug.isLogEnabled()) {
+            Debug.log("add temp: %s tempPos %d (%s)", interval, tempPos, RegisterPriority.MustHaveRegister.name());
+        }
+    }
+
+    protected void addDef(AllocatableValue operand, LIRInstruction op, RegisterPriority registerPriority, LIRKind kind) {
+        if (!allocator.isProcessed(operand)) {
+            return;
+        }
+        int defPos = op.id();
+
+        Interval interval = allocator.getOrCreateInterval(operand);
+        if (!kind.equals(LIRKind.Illegal)) {
+            interval.setKind(kind);
+        }
+
+        Range r = interval.first();
+        if (r.from <= defPos) {
+            /*
+             * Update the starting point (when a range is first created for a use, its start is the
+             * beginning of the current block until a def is encountered).
+             */
+            r.from = defPos;
+            interval.addUsePos(defPos, registerPriority);
+
+        } else {
+            /*
+             * Dead value - make a vacuous interval; also add the register priority for dead intervals.
+             */
+            interval.addRange(defPos, defPos + 1);
+            interval.addUsePos(defPos, registerPriority);
+            if (Debug.isLogEnabled()) {
+                Debug.log("Warning: def of operand %s at %d occurs without use", operand, defPos);
+            }
+        }
+
+        changeSpillDefinitionPos(op, operand, interval, defPos);
+        if (registerPriority == RegisterPriority.None && interval.spillState().ordinal() <= SpillState.StartInMemory.ordinal() && isStackSlot(operand)) {
+            // detection of method-parameters and roundfp-results
+            interval.setSpillState(SpillState.StartInMemory);
+        }
+        interval.addMaterializationValue(getMaterializedValue(op, operand, interval));
+
+        if (Debug.isLogEnabled()) {
+            Debug.log("add def: %s defPos %d (%s)", interval, defPos, registerPriority.name());
+        }
+    }
+
+    /**
+     * Optimizes moves related to incoming stack based arguments. The interval for the destination
+     * of such moves is assigned the stack slot (which is in the caller's frame) as its spill slot.
+     */
+    protected void handleMethodArguments(LIRInstruction op) {
+        if (op instanceof ValueMoveOp) {
+            ValueMoveOp move = (ValueMoveOp) op;
+            if (optimizeMethodArgument(move.getInput())) {
+                StackSlot slot = asStackSlot(move.getInput());
+                if (DetailedAsserts.getValue()) {
+                    assert op.id() > 0 : "invalid id";
+                    assert allocator.blockForId(op.id()).getPredecessorCount() == 0 : "move from stack must be in first block";
+                    assert isVariable(move.getResult()) : "result of move must be a variable";
+
+                    if (Debug.isLogEnabled()) {
+                        Debug.log("found move from stack slot %s to %s", slot, move.getResult());
+                    }
+                }
+
+                Interval interval = allocator.intervalFor(move.getResult());
+                interval.setSpillSlot(slot);
+                interval.assignLocation(slot);
+            }
+        } else if (op instanceof StackStoreOp) {
+            StackStoreOp store = (StackStoreOp) op;
+            StackSlot slot = asStackSlot(store.getStackSlot());
+            Interval interval = allocator.intervalFor(store.getResult());
+            interval.setSpillSlot(slot);
+            interval.setSpillState(SpillState.StartInMemory);
+        }
+    }
+
+    protected void addRegisterHint(final LIRInstruction op, final Value targetValue, OperandMode mode, EnumSet<OperandFlag> flags, final boolean hintAtDef) {
+        if (flags.contains(OperandFlag.HINT) && LinearScan.isVariableOrRegister(targetValue)) {
+
+            op.forEachRegisterHint(targetValue, mode, (registerHint, valueMode, valueFlags) -> {
+                if (LinearScan.isVariableOrRegister(registerHint)) {
+                    Interval from = allocator.getOrCreateInterval((AllocatableValue) registerHint);
+                    Interval to = allocator.getOrCreateInterval((AllocatableValue) targetValue);
+
+                    /* hints always point from def to use */
+                    if (hintAtDef) {
+                        to.setLocationHint(from);
+                    } else {
+                        from.setLocationHint(to);
+                    }
+                    if (Debug.isLogEnabled()) {
+                        Debug.log("operation at opId %d: added hint from interval %d to %d", op.id(), from.operandNumber, to.operandNumber);
+                    }
+
+                    return registerHint;
+                }
+                return null;
+            });
+        }
+    }
+
+    /**
+     * Updates the spill state and the spill definition position of {@code interval} for a
+     * definition of {@code operand} at {@code defPos}.
+     *
+     * @param op the instruction defining {@code operand}
+     * @param operand the operand whose interval is updated
+     */
+    protected void changeSpillDefinitionPos(LIRInstruction op, AllocatableValue operand, Interval interval, int defPos) {
+        assert interval.isSplitParent() : "can only be called for split parents";
+
+        switch (interval.spillState()) {
+            case NoDefinitionFound:
+                assert interval.spillDefinitionPos() == -1 : "must not be set before";
+                interval.setSpillDefinitionPos(defPos);
+                interval.setSpillState(SpillState.NoSpillStore);
+                break;
+
+            case NoSpillStore:
+                assert defPos <= interval.spillDefinitionPos() : "positions are processed in reverse order when intervals are created";
+                if (defPos < interval.spillDefinitionPos() - 2) {
+                    // second definition found, so no spill optimization possible for this interval
+                    interval.setSpillState(SpillState.NoOptimization);
+                } else {
+                    // two consecutive definitions (because of two-operand LIR form)
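+                    /*
+                     * Sketch (assumed two-operand form): "v = v + w" is emitted as a move
+                     * followed by the arithmetic operation, so the same variable is defined
+                     * at two consecutive ids and the spill store optimization stays possible.
+                     */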
+                    assert allocator.blockForId(defPos) == allocator.blockForId(interval.spillDefinitionPos()) : "block must be equal";
+                }
+                break;
+
+            case NoOptimization:
+                // nothing to do
+                break;
+
+            default:
+                throw new BailoutException("other states not allowed at this time");
+        }
+    }
+
+    private static boolean optimizeMethodArgument(Value value) {
+        /*
+         * Object method arguments that are passed on the stack are currently not optimized because
+         * this requires that the runtime visits method arguments during stack walking.
+         */
+        return isStackSlot(value) && asStackSlot(value).isInCallerFrame() && value.getKind() != Kind.Object;
+    }
+
+    /**
+     * Determines the register priority for an instruction's output/result operand.
+     */
+    protected RegisterPriority registerPriorityOfOutputOperand(LIRInstruction op) {
+        if (op instanceof ValueMoveOp) {
+            ValueMoveOp move = (ValueMoveOp) op;
+            if (optimizeMethodArgument(move.getInput())) {
+                return RegisterPriority.None;
+            }
+        } else if (op instanceof StackStoreOp) {
+            return RegisterPriority.ShouldHaveRegister;
+        }
+
+        // all other operands require a register
+        return RegisterPriority.MustHaveRegister;
+    }
+
+    /**
+     * Determines the priority with which an instruction's input operand will be allocated a
+     * register.
+     */
+    protected static RegisterPriority registerPriorityOfInputOperand(EnumSet<OperandFlag> flags) {
+        if (flags.contains(OperandFlag.STACK)) {
+            return RegisterPriority.ShouldHaveRegister;
+        }
+        // all other operands require a register
+        return RegisterPriority.MustHaveRegister;
+    }
+
+    protected void buildIntervals() {
+
+        try (Indent indent = Debug.logAndIndent("build intervals")) {
+            InstructionValueConsumer outputConsumer = (op, operand, mode, flags) -> {
+                if (LinearScan.isVariableOrRegister(operand)) {
+                    addDef((AllocatableValue) operand, op, registerPriorityOfOutputOperand(op), operand.getLIRKind());
+                    addRegisterHint(op, operand, mode, flags, true);
+                }
+            };
+
+            InstructionValueConsumer tempConsumer = (op, operand, mode, flags) -> {
+                if (LinearScan.isVariableOrRegister(operand)) {
+                    addTemp((AllocatableValue) operand, op.id(), RegisterPriority.MustHaveRegister, operand.getLIRKind());
+                    addRegisterHint(op, operand, mode, flags, false);
+                }
+            };
+
+            InstructionValueConsumer aliveConsumer = (op, operand, mode, flags) -> {
+                if (LinearScan.isVariableOrRegister(operand)) {
+                    RegisterPriority p = registerPriorityOfInputOperand(flags);
+                    int opId = op.id();
+                    int blockFrom = allocator.getFirstLirInstructionId(allocator.blockForId(opId));
+                    addUse((AllocatableValue) operand, blockFrom, opId + 1, p, operand.getLIRKind());
+                    addRegisterHint(op, operand, mode, flags, false);
+                }
+            };
+
+            InstructionValueConsumer inputConsumer = (op, operand, mode, flags) -> {
+                if (LinearScan.isVariableOrRegister(operand)) {
+                    int opId = op.id();
+                    int blockFrom = allocator.getFirstLirInstructionId(allocator.blockForId(opId));
+                    RegisterPriority p = registerPriorityOfInputOperand(flags);
+                    addUse((AllocatableValue) operand, blockFrom, opId, p, operand.getLIRKind());
+                    addRegisterHint(op, operand, mode, flags, false);
+                }
+            };
+
+            InstructionValueConsumer stateProc = (op, operand, mode, flags) -> {
+                if (LinearScan.isVariableOrRegister(operand)) {
+                    int opId = op.id();
+                    int blockFrom = allocator.getFirstLirInstructionId(allocator.blockForId(opId));
+                    addUse((AllocatableValue) operand, blockFrom, opId + 1, RegisterPriority.None, operand.getLIRKind());
+                }
+            };
+
+            // create a list with all caller-save registers (cpu, fpu, xmm)
+            Register[] callerSaveRegs = allocator.getRegisterAllocationConfig().getRegisterConfig().getCallerSaveRegisters();
+
+            // iterate all blocks in reverse order
+            for (int i = allocator.blockCount() - 1; i >= 0; i--) {
+
+                AbstractBlockBase<?> block = allocator.blockAt(i);
+                try (Indent indent2 = Debug.logAndIndent("handle block %d", block.getId())) {
+
+                    List<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(block);
+                    final int blockFrom = allocator.getFirstLirInstructionId(block);
+                    int blockTo = allocator.getLastLirInstructionId(block);
+
+                    assert blockFrom == instructions.get(0).id();
+                    assert blockTo == instructions.get(instructions.size() - 1).id();
+
+                    // Update intervals for operands live at the end of this block.
+                    BitSet live = allocator.getBlockData(block).liveOut;
+                    for (int operandNum = live.nextSetBit(0); operandNum >= 0; operandNum = live.nextSetBit(operandNum + 1)) {
+                        assert live.get(operandNum) : "should not stop here otherwise";
+                        AllocatableValue operand = allocator.intervalFor(operandNum).operand;
+                        if (Debug.isLogEnabled()) {
+                            Debug.log("live in %d: %s", operandNum, operand);
+                        }
+
+                        addUse(operand, blockFrom, blockTo + 2, RegisterPriority.None, LIRKind.Illegal);
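+                        /*
+                         * Note on the range end (ids step by two): blockTo is the id of the
+                         * block's last instruction, so blockTo + 2 keeps the interval live
+                         * across the block boundary into the successor.
+                         */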
+
+                        /*
+                         * Add special use positions for loop-end blocks when the interval is used
+                         * anywhere inside this loop. It's possible that the block was part of a
+                         * non-natural loop, so it might have an invalid loop index.
+                         */
+                        if (block.isLoopEnd() && block.getLoop() != null && isIntervalInLoop(operandNum, block.getLoop().getIndex())) {
+                            allocator.intervalFor(operandNum).addUsePos(blockTo + 1, RegisterPriority.LiveAtLoopEnd);
+                        }
+                    }
+
+                    /*
+                     * Iterate all instructions of the block in reverse order. Definitions of
+                     * intervals are processed before uses.
+                     */
+                    for (int j = instructions.size() - 1; j >= 0; j--) {
+                        final LIRInstruction op = instructions.get(j);
+                        final int opId = op.id();
+
+                        try (Indent indent3 = Debug.logAndIndent("handle inst %d: %s", opId, op)) {
+
+                            // add a temp range for each register if operation destroys
+                            // caller-save registers
+                            if (op.destroysCallerSavedRegisters()) {
+                                for (Register r : callerSaveRegs) {
+                                    if (allocator.attributes(r).isAllocatable()) {
+                                        addTemp(r.asValue(), opId, RegisterPriority.None, LIRKind.Illegal);
+                                    }
+                                }
+                                if (Debug.isLogEnabled()) {
+                                    Debug.log("operation destroys all caller-save registers");
+                                }
+                            }
+
+                            op.visitEachOutput(outputConsumer);
+                            op.visitEachTemp(tempConsumer);
+                            op.visitEachAlive(aliveConsumer);
+                            op.visitEachInput(inputConsumer);
+
+                            /*
+                             * Add uses of live locals from interpreter's point of view for proper
+                             * debug information generation. Treat these operands as temp values;
+                             * if the live range were extended up to a call site, the value would
+                             * otherwise have to be in a register at the call.
+                             */
+                            op.visitEachState(stateProc);
+
+                            // special steps for some instructions (especially moves)
+                            handleMethodArguments(op);
+
+                        }
+
+                    } // end of instruction iteration
+                }
+            } // end of block iteration
+
+            /*
+             * Add the range [0, 1] to all fixed intervals so that the register allocator does not
+             * need to handle unhandled fixed intervals.
+             */
+            for (Interval interval : allocator.intervals()) {
+                if (interval != null && isRegister(interval.operand)) {
+                    interval.addRange(0, 1);
+                }
+            }
+        }
+    }
+
+    /**
+     * Returns a value for an interval definition that can be used for re-materialization.
+     *
+     * @param op An instruction which defines a value
+     * @param operand The destination operand of the instruction
+     * @param interval The interval for this defined value.
+     * @return Returns the value which is moved by the instruction and which can be reused at all
+     *         reload locations in case the interval of this instruction is spilled. Currently this
+     *         can only be a {@link JavaConstant}.
+     */
+    protected static JavaConstant getMaterializedValue(LIRInstruction op, Value operand, Interval interval) {
+        if (op instanceof LoadConstantOp) {
+            LoadConstantOp move = (LoadConstantOp) op;
+            if (move.getConstant() instanceof JavaConstant) {
+                /*
+                 * Check if the interval has any uses which would accept a stack location (priority
+                 * == ShouldHaveRegister). Rematerialization of such intervals can result in a
+                 * degradation, because rematerialization always inserts a constant load, even if
+                 * the value is not needed in a register.
+                 */
+                Interval.UsePosList usePosList = interval.usePosList();
+                int numUsePos = usePosList.size();
+                for (int useIdx = 0; useIdx < numUsePos; useIdx++) {
+                    Interval.RegisterPriority priority = usePosList.registerPriority(useIdx);
+                    if (priority == Interval.RegisterPriority.ShouldHaveRegister) {
+                        return null;
+                    }
+                }
+                return (JavaConstant) move.getConstant();
+            }
+        }
+        return null;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/LinearScanOptimizeSpillPositionPhase.java	Mon Aug 31 13:06:17 2015 +0200
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.trace;
+
+import static com.oracle.graal.compiler.common.cfg.AbstractControlFlowGraph.*;
+import static jdk.internal.jvmci.code.ValueUtil.*;
+
+import java.util.*;
+
+import jdk.internal.jvmci.code.*;
+import jdk.internal.jvmci.meta.*;
+
+import com.oracle.graal.compiler.common.alloc.*;
+import com.oracle.graal.compiler.common.cfg.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.LIRInstruction.OperandMode;
+import com.oracle.graal.lir.alloc.trace.Interval.SpillState;
+import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
+import com.oracle.graal.lir.phases.*;
+
+public final class LinearScanOptimizeSpillPositionPhase extends AllocationPhase {
+
+    private static final DebugMetric betterSpillPos = Debug.metric("BetterSpillPosition");
+    private static final DebugMetric betterSpillPosWithLowerProbability = Debug.metric("BetterSpillPositionWithLowerProbability");
+
+    private final LinearScan allocator;
+
+    LinearScanOptimizeSpillPositionPhase(LinearScan allocator) {
+        this.allocator = allocator;
+    }
+
+    @Override
+    protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory,
+                    RegisterAllocationConfig registerAllocationConfig) {
+        optimizeSpillPosition();
+        allocator.printIntervals("After optimize spill position");
+    }
+
+    private void optimizeSpillPosition() {
+        try (Indent indent0 = Debug.logAndIndent("OptimizeSpillPositions")) {
+            LIRInsertionBuffer[] insertionBuffers = new LIRInsertionBuffer[allocator.getLIR().linearScanOrder().size()];
+            for (Interval interval : allocator.intervals()) {
+                optimizeInterval(insertionBuffers, interval);
+            }
+            for (LIRInsertionBuffer insertionBuffer : insertionBuffers) {
+                if (insertionBuffer != null) {
+                    assert insertionBuffer.initialized() : "Insertion buffer is non-null but not initialized!";
+                    insertionBuffer.finish();
+                }
+            }
+        }
+    }
+
+    private void optimizeInterval(LIRInsertionBuffer[] insertionBuffers, Interval interval) {
+        if (interval == null || !interval.isSplitParent() || interval.spillState() != SpillState.SpillInDominator) {
+            return;
+        }
+        AbstractBlockBase<?> defBlock = allocator.blockForId(interval.spillDefinitionPos());
+        AbstractBlockBase<?> spillBlock = null;
+        Interval firstSpillChild = null;
+        try (Indent indent = Debug.logAndIndent("interval %s (%s)", interval, defBlock)) {
+            for (Interval splitChild : interval.getSplitChildren()) {
+                if (isStackSlotValue(splitChild.location())) {
+                    if (firstSpillChild == null || splitChild.from() < firstSpillChild.from()) {
+                        firstSpillChild = splitChild;
+                    } else {
+                        assert firstSpillChild.from() < splitChild.from();
+                    }
+                    // iterate all blocks where the interval has use positions
+                    for (AbstractBlockBase<?> splitBlock : blocksForInterval(splitChild)) {
+                        if (dominates(defBlock, splitBlock)) {
+                            Debug.log("Split interval %s, block %s", splitChild, splitBlock);
+                            if (spillBlock == null) {
+                                spillBlock = splitBlock;
+                            } else {
+                                spillBlock = commonDominator(spillBlock, splitBlock);
+                                assert spillBlock != null;
+                            }
+                        }
+                    }
+                }
+            }
+            if (spillBlock == null) {
+                Debug.log("not spill interval found");
+                // no spill interval
+                interval.setSpillState(SpillState.StoreAtDefinition);
+                return;
+            }
+            Debug.log(3, "Spill block candidate (initial): %s", spillBlock);
+            // move out of loops
+            if (defBlock.getLoopDepth() < spillBlock.getLoopDepth()) {
+                spillBlock = moveSpillOutOfLoop(defBlock, spillBlock);
+            }
+            Debug.log(3, "Spill block candidate (after loop optimizaton): %s", spillBlock);
+
+            /*
+             * The spill block is the beginning of the first split child (i.e. the value is on
+             * the stack).
+             *
+             * The problem is that if the spill block has more than one predecessor, the values at
+             * the end of the predecessors might differ. Therefore, we would need a spill move in
+             * all predecessors. To avoid this we spill in the dominator.
+             */
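+            /*
+             * Sketch (hypothetical CFG): if blocks B1 and B2 both branch to B3 and the first
+             * spill child starts at B3, the value may sit in different locations at the ends
+             * of B1 and B2; storing once in B3's dominator avoids a move on every incoming
+             * edge.
+             */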
+            assert firstSpillChild != null;
+            if (!defBlock.equals(spillBlock) && spillBlock.equals(allocator.blockForId(firstSpillChild.from()))) {
+                AbstractBlockBase<?> dom = spillBlock.getDominator();
+                if (Debug.isLogEnabled()) {
+                    Debug.log("Spill block (%s) is the beginning of a spill child -> use dominator (%s)", spillBlock, dom);
+                }
+                spillBlock = dom;
+            }
+            if (defBlock.equals(spillBlock)) {
+                Debug.log(3, "Definition is the best choice: %s", defBlock);
+                // definition is the best choice
+                interval.setSpillState(SpillState.StoreAtDefinition);
+                return;
+            }
+            assert dominates(defBlock, spillBlock);
+            betterSpillPos.increment();
+            if (Debug.isLogEnabled()) {
+                Debug.log("Better spill position found (Block %s)", spillBlock);
+            }
+
+            if (defBlock.probability() <= spillBlock.probability()) {
+                Debug.log(3, "Definition has lower probability %s (%f) is lower than spill block %s (%f)", defBlock, defBlock.probability(), spillBlock, spillBlock.probability());
+                // better spill block has the same probability -> do nothing
+                interval.setSpillState(SpillState.StoreAtDefinition);
+                return;
+            }
+
+            LIRInsertionBuffer insertionBuffer = insertionBuffers[spillBlock.getId()];
+            if (insertionBuffer == null) {
+                insertionBuffer = new LIRInsertionBuffer();
+                insertionBuffers[spillBlock.getId()] = insertionBuffer;
+                insertionBuffer.init(allocator.getLIR().getLIRforBlock(spillBlock));
+            }
+            int spillOpId = allocator.getFirstLirInstructionId(spillBlock);
+            // insert spill move
+            AllocatableValue fromLocation = interval.getSplitChildAtOpId(spillOpId, OperandMode.DEF, allocator).location();
+            AllocatableValue toLocation = LinearScan.canonicalSpillOpr(interval);
+            LIRInstruction move = allocator.getSpillMoveFactory().createMove(toLocation, fromLocation);
+            Debug.log(3, "Insert spill move %s", move);
+            move.setId(LinearScan.DOMINATOR_SPILL_MOVE_ID);
+            /*
+             * We can use the insertion buffer directly because we always insert at position 1.
+             */
+            insertionBuffer.append(1, move);
+
+            betterSpillPosWithLowerProbability.increment();
+            interval.setSpillDefinitionPos(spillOpId);
+        }
+    }
+
+    /**
+     * Iterates over all {@link AbstractBlockBase blocks} that an interval spans.
+     */
+    private class IntervalBlockIterator implements Iterator<AbstractBlockBase<?>> {
+
+        Range range;
+        AbstractBlockBase<?> block;
+
+        public IntervalBlockIterator(Interval interval) {
+            range = interval.first();
+            block = allocator.blockForId(range.from);
+        }
+
+        public AbstractBlockBase<?> next() {
+            AbstractBlockBase<?> currentBlock = block;
+            int nextBlockIndex = block.getLinearScanNumber() + 1;
+            if (nextBlockIndex < allocator.sortedBlocks().size()) {
+                block = allocator.sortedBlocks().get(nextBlockIndex);
+                if (range.to <= allocator.getFirstLirInstructionId(block)) {
+                    range = range.next;
+                    if (range == Range.EndMarker) {
+                        block = null;
+                    } else {
+                        block = allocator.blockForId(range.from);
+                    }
+                }
+            } else {
+                block = null;
+            }
+            return currentBlock;
+        }
+
+        public boolean hasNext() {
+            return block != null;
+        }
+    }
+
+    private Iterable<AbstractBlockBase<?>> blocksForInterval(Interval interval) {
+        return new Iterable<AbstractBlockBase<?>>() {
+            public Iterator<AbstractBlockBase<?>> iterator() {
+                return new IntervalBlockIterator(interval);
+            }
+        };
+    }
+
+    private static AbstractBlockBase<?> moveSpillOutOfLoop(AbstractBlockBase<?> defBlock, AbstractBlockBase<?> spillBlock) {
+        int defLoopDepth = defBlock.getLoopDepth();
+        for (AbstractBlockBase<?> block = spillBlock.getDominator(); !defBlock.equals(block); block = block.getDominator()) {
+            assert block != null : "spill block not dominated by definition block?";
+            if (block.getLoopDepth() <= defLoopDepth) {
+                assert block.getLoopDepth() == defLoopDepth : "Cannot spill an interval outside of the loop where it is defined!";
+                return block;
+            }
+        }
+        return defBlock;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/LinearScanRegisterAllocationPhase.java	Mon Aug 31 13:06:17 2015 +0200
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.trace;
+
+import java.util.*;
+
+import jdk.internal.jvmci.code.*;
+
+import com.oracle.graal.compiler.common.alloc.*;
+import com.oracle.graal.compiler.common.cfg.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
+import com.oracle.graal.lir.phases.*;
+
+public final class LinearScanRegisterAllocationPhase extends AllocationPhase {
+
+    private final LinearScan allocator;
+
+    LinearScanRegisterAllocationPhase(LinearScan allocator) {
+        this.allocator = allocator;
+    }
+
+    @Override
+    protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory,
+                    RegisterAllocationConfig registerAllocationConfig) {
+        allocator.printIntervals("Before register allocation");
+        allocateRegisters();
+        allocator.printIntervals("After register allocation");
+    }
+
+    void allocateRegisters() {
+        try (Indent indent = Debug.logAndIndent("allocate registers")) {
+            Interval precoloredIntervals;
+            Interval notPrecoloredIntervals;
+
+            Interval.Pair result = allocator.createUnhandledLists(LinearScan.IS_PRECOLORED_INTERVAL, LinearScan.IS_VARIABLE_INTERVAL);
+            precoloredIntervals = result.first;
+            notPrecoloredIntervals = result.second;
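+            /*
+             * Reading of the two lists (inferred from the predicate names): precolored
+             * intervals are fixed to a physical register, while the variable intervals are
+             * the ones the walker below has to assign.
+             */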
+
+            // allocate cpu registers
+            LinearScanWalker lsw;
+            if (OptimizingLinearScanWalker.Options.LSRAOptimization.getValue()) {
+                lsw = new OptimizingLinearScanWalker(allocator, precoloredIntervals, notPrecoloredIntervals);
+            } else {
+                lsw = new LinearScanWalker(allocator, precoloredIntervals, notPrecoloredIntervals);
+            }
+            lsw.walk();
+            lsw.finishAllocation();
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/LinearScanResolveDataFlowPhase.java	Mon Aug 31 13:06:17 2015 +0200
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.trace;
+
+import static com.oracle.graal.compiler.common.GraalOptions.*;
+
+import java.util.*;
+
+import jdk.internal.jvmci.code.*;
+import com.oracle.graal.debug.*;
+
+import com.oracle.graal.compiler.common.alloc.*;
+import com.oracle.graal.compiler.common.cfg.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
+import com.oracle.graal.lir.phases.*;
+
+/**
+ * Phase 6: resolve data flow
+ *
+ * Insert moves at edges between blocks if intervals have been split.
+ */
+public class LinearScanResolveDataFlowPhase extends AllocationPhase {
+
+    protected final LinearScan allocator;
+
+    protected LinearScanResolveDataFlowPhase(LinearScan allocator) {
+        this.allocator = allocator;
+    }
+
+    @Override
+    protected <B extends AbstractBlockBase<B>> void run(TargetDescription target, LIRGenerationResult lirGenRes, List<B> codeEmittingOrder, List<B> linearScanOrder, SpillMoveFactory spillMoveFactory,
+                    RegisterAllocationConfig registerAllocationConfig) {
+        resolveDataFlow();
+    }
+
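+    /**
+     * Collects, for every variable live across the edge {@code fromBlock -> toBlock}, a move
+     * mapping whenever the split children covering the two block boundaries differ in location. If
+     * {@code midBlock} is non-null, it is an empty block between the two blocks that is being
+     * optimized away.
+     */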
+    protected void resolveCollectMappings(AbstractBlockBase<?> fromBlock, AbstractBlockBase<?> toBlock, AbstractBlockBase<?> midBlock, MoveResolver moveResolver) {
+        assert moveResolver.checkEmpty();
+        assert midBlock == null ||
+                        (midBlock.getPredecessorCount() == 1 && midBlock.getSuccessorCount() == 1 && midBlock.getPredecessors().get(0).equals(fromBlock) && midBlock.getSuccessors().get(0).equals(
+                                        toBlock));
+
+        int toBlockFirstInstructionId = allocator.getFirstLirInstructionId(toBlock);
+        int fromBlockLastInstructionId = allocator.getLastLirInstructionId(fromBlock) + 1;
+        int numOperands = allocator.operandSize();
+        BitSet liveAtEdge = allocator.getBlockData(toBlock).liveIn;
+
+        // visit all variables for which the liveAtEdge bit is set
+        for (int operandNum = liveAtEdge.nextSetBit(0); operandNum >= 0; operandNum = liveAtEdge.nextSetBit(operandNum + 1)) {
+            assert operandNum < numOperands : "live information set for nonexistent interval";
+            assert allocator.getBlockData(fromBlock).liveOut.get(operandNum) && allocator.getBlockData(toBlock).liveIn.get(operandNum) : "interval not live at this edge";
+
+            Interval fromInterval = allocator.splitChildAtOpId(allocator.intervalFor(operandNum), fromBlockLastInstructionId, LIRInstruction.OperandMode.DEF);
+            Interval toInterval = allocator.splitChildAtOpId(allocator.intervalFor(operandNum), toBlockFirstInstructionId, LIRInstruction.OperandMode.DEF);
+
+            if (fromInterval != toInterval && !fromInterval.location().equals(toInterval.location())) {
+                // need to insert move instruction
+                moveResolver.addMapping(fromInterval, toInterval);
+            }
+        }
+    }
+
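+    /**
+     * Determines where the resolving moves are inserted: at the end of {@code fromBlock} (before
+     * its branch) if it has at most one successor, otherwise at the beginning of {@code toBlock},
+     * whose only predecessor must then be {@code fromBlock} (critical edges are already split).
+     */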
+    void resolveFindInsertPos(AbstractBlockBase<?> fromBlock, AbstractBlockBase<?> toBlock, MoveResolver moveResolver) {
+        if (fromBlock.getSuccessorCount() <= 1) {
+            if (Debug.isLogEnabled()) {
+                Debug.log("inserting moves at end of fromBlock B%d", fromBlock.getId());
+            }
+
+            List<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(fromBlock);
+            LIRInstruction instr = instructions.get(instructions.size() - 1);
+            if (instr instanceof StandardOp.JumpOp) {
+                // insert moves before branch
+                moveResolver.setInsertPosition(instructions, instructions.size() - 1);
+            } else {
+                moveResolver.setInsertPosition(instructions, instructions.size());
+            }
+
+        } else {
+            if (Debug.isLogEnabled()) {
+                Debug.log("inserting moves at beginning of toBlock B%d", toBlock.getId());
+            }
+
+            if (DetailedAsserts.getValue()) {
+                assert allocator.getLIR().getLIRforBlock(fromBlock).get(0) instanceof StandardOp.LabelOp : "block does not start with a label";
+
+                /*
+                 * Because the number of predecessor edges matches the number of successor edges,
+                 * blocks which are reached by switch statements may have more than one
+                 * predecessor, but it is guaranteed that all predecessors are the same block.
+                 */
+                for (AbstractBlockBase<?> predecessor : toBlock.getPredecessors()) {
+                    assert fromBlock == predecessor : "all critical edges must be broken";
+                }
+            }
+
+            moveResolver.setInsertPosition(allocator.getLIR().getLIRforBlock(toBlock), 1);
+        }
+    }
+
+    /**
+     * Inserts necessary moves (spilling or reloading) at edges between blocks for intervals that
+     * have been split.
+     */
+    protected void resolveDataFlow() {
+        try (Indent indent = Debug.logAndIndent("resolve data flow")) {
+
+            MoveResolver moveResolver = allocator.createMoveResolver();
+            BitSet blockCompleted = new BitSet(allocator.blockCount());
+
+            optimizeEmptyBlocks(moveResolver, blockCompleted);
+
+            resolveDataFlow0(moveResolver, blockCompleted);
+
+        }
+    }
+
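+    /**
+     * Handles blocks consisting only of a label and a jump: the data flow is resolved directly
+     * between the single predecessor and the single successor, and any required moves are placed
+     * into the empty block itself.
+     */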
+    protected void optimizeEmptyBlocks(MoveResolver moveResolver, BitSet blockCompleted) {
+        for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
+
+            // check if block has only one predecessor and only one successor
+            if (block.getPredecessorCount() == 1 && block.getSuccessorCount() == 1) {
+                List<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(block);
+                assert instructions.get(0) instanceof StandardOp.LabelOp : "block must start with label";
+                assert instructions.get(instructions.size() - 1) instanceof StandardOp.JumpOp : "block with successor must end with unconditional jump";
+
+                // check if block is empty (only label and branch)
+                if (instructions.size() == 2) {
+                    AbstractBlockBase<?> pred = block.getPredecessors().iterator().next();
+                    AbstractBlockBase<?> sux = block.getSuccessors().iterator().next();
+
+                    // prevent optimization of two consecutive blocks
+                    if (!blockCompleted.get(pred.getLinearScanNumber()) && !blockCompleted.get(sux.getLinearScanNumber())) {
+                        if (Debug.isLogEnabled()) {
+                            Debug.log(" optimizing empty block B%d (pred: B%d, sux: B%d)", block.getId(), pred.getId(), sux.getId());
+                        }
+
+                        blockCompleted.set(block.getLinearScanNumber());
+
+                        /*
+                         * Directly resolve between pred and sux (without looking at the empty block
+                         * between).
+                         */
+                        resolveCollectMappings(pred, sux, block, moveResolver);
+                        if (moveResolver.hasMappings()) {
+                            moveResolver.setInsertPosition(instructions, 1);
+                            moveResolver.resolveAndAppendMoves();
+                        }
+                    }
+                }
+            }
+        }
+    }
+
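+    /**
+     * Resolves the data flow for all remaining edges. {@code alreadyResolved} guards against
+     * processing duplicate edges between the same pair of blocks (possible with switches).
+     */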
+    protected void resolveDataFlow0(MoveResolver moveResolver, BitSet blockCompleted) {
+        BitSet alreadyResolved = new BitSet(allocator.blockCount());
+        for (AbstractBlockBase<?> fromBlock : allocator.sortedBlocks()) {
+            if (!blockCompleted.get(fromBlock.getLinearScanNumber())) {
+                alreadyResolved.clear();
+                alreadyResolved.or(blockCompleted);
+
+                for (AbstractBlockBase<?> toBlock : fromBlock.getSuccessors()) {
+
+                    /*
+                     * Check for duplicate edges between the same blocks (can happen with switch
+                     * blocks).
+                     */
+                    if (!alreadyResolved.get(toBlock.getLinearScanNumber())) {
+                        if (Debug.isLogEnabled()) {
+                            Debug.log("processing edge between B%d and B%d", fromBlock.getId(), toBlock.getId());
+                        }
+
+                        alreadyResolved.set(toBlock.getLinearScanNumber());
+
+                        // collect all intervals that have been split between
+                        // fromBlock and toBlock
+                        resolveCollectMappings(fromBlock, toBlock, null, moveResolver);
+                        if (moveResolver.hasMappings()) {
+                            resolveFindInsertPos(fromBlock, toBlock, moveResolver);
+                            moveResolver.resolveAndAppendMoves();
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/LinearScanWalker.java	Mon Aug 31 13:06:17 2015 +0200
@@ -0,0 +1,1061 @@
+/*
+ * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.trace;
+
+import static com.oracle.graal.lir.LIRValueUtil.*;
+import static jdk.internal.jvmci.code.CodeUtil.*;
+import static jdk.internal.jvmci.code.ValueUtil.*;
+
+import java.util.*;
+
+import jdk.internal.jvmci.code.*;
+import jdk.internal.jvmci.meta.*;
+
+import com.oracle.graal.compiler.common.alloc.RegisterAllocationConfig.AllocatableRegisters;
+import com.oracle.graal.compiler.common.cfg.*;
+import com.oracle.graal.compiler.common.util.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.StandardOp.ValueMoveOp;
+import com.oracle.graal.lir.alloc.trace.Interval.RegisterBinding;
+import com.oracle.graal.lir.alloc.trace.Interval.RegisterPriority;
+import com.oracle.graal.lir.alloc.trace.Interval.SpillState;
+import com.oracle.graal.lir.alloc.trace.Interval.State;
+
+/**
+ * The actual allocation loop of the linear scan register allocator: walks the interval list sorted
+ * by start position and assigns a register, or a spill slot, to each interval as it becomes
+ * active.
+ */
+class LinearScanWalker extends IntervalWalker {
+
+    protected Register[] availableRegs;
+
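+    /**
+     * Per register number: {@code usePos} holds the next position at which the register is needed
+     * by another interval, {@code blockPos} the hard limit (imposed by fixed intervals) beyond
+     * which the register cannot be used at all.
+     */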
+    protected final int[] usePos;
+    protected final int[] blockPos;
+
+    protected List<Interval>[] spillIntervals;
+
+    private MoveResolver moveResolver; // for ordering spill moves
+
+    private int minReg;
+
+    private int maxReg;
+
+    /**
+     * Only 10% of the lists in {@link #spillIntervals} are actually used. But when they are used,
+     * they can grow quite long. The maximum length observed was 45 (all numbers taken from a
+     * bootstrap run of Graal). Therefore, we initialize {@link #spillIntervals} with this marker
+     * value, and allocate a "real" list only on demand in {@link #setUsePos}.
+     */
+    private static final List<Interval> EMPTY_LIST = new ArrayList<>(0);
+
+    // accessors mapped to same functions in class LinearScan
+    int blockCount() {
+        return allocator.blockCount();
+    }
+
+    AbstractBlockBase<?> blockAt(int idx) {
+        return allocator.blockAt(idx);
+    }
+
+    AbstractBlockBase<?> blockOfOpWithId(int opId) {
+        return allocator.blockForId(opId);
+    }
+
+    LinearScanWalker(LinearScan allocator, Interval unhandledFixedFirst, Interval unhandledAnyFirst) {
+        super(allocator, unhandledFixedFirst, unhandledAnyFirst);
+
+        moveResolver = allocator.createMoveResolver();
+        spillIntervals = Util.uncheckedCast(new List[allocator.getRegisters().length]);
+        for (int i = 0; i < allocator.getRegisters().length; i++) {
+            spillIntervals[i] = EMPTY_LIST;
+        }
+        usePos = new int[allocator.getRegisters().length];
+        blockPos = new int[allocator.getRegisters().length];
+    }
+
+    void initUseLists(boolean onlyProcessUsePos) {
+        for (Register register : availableRegs) {
+            int i = register.number;
+            usePos[i] = Integer.MAX_VALUE;
+
+            if (!onlyProcessUsePos) {
+                blockPos[i] = Integer.MAX_VALUE;
+                spillIntervals[i].clear();
+            }
+        }
+    }
+
+    int maxRegisterNumber() {
+        return maxReg;
+    }
+
+    int minRegisterNumber() {
+        return minReg;
+    }
+
+    boolean isRegisterInRange(int reg) {
+        return reg >= minRegisterNumber() && reg <= maxRegisterNumber();
+    }
+
+    void excludeFromUse(Interval i) {
+        Value location = i.location();
+        int i1 = asRegister(location).number;
+        if (isRegisterInRange(i1)) {
+            usePos[i1] = 0;
+        }
+    }
+
+    void setUsePos(Interval interval, int usePos, boolean onlyProcessUsePos) {
+        if (usePos != -1) {
+            assert usePos != 0 : "must use excludeFromUse to set usePos to 0";
+            int i = asRegister(interval.location()).number;
+            if (isRegisterInRange(i)) {
+                if (this.usePos[i] > usePos) {
+                    this.usePos[i] = usePos;
+                }
+                if (!onlyProcessUsePos) {
+                    List<Interval> list = spillIntervals[i];
+                    if (list == EMPTY_LIST) {
+                        list = new ArrayList<>(2);
+                        spillIntervals[i] = list;
+                    }
+                    list.add(interval);
+                }
+            }
+        }
+    }
+
+    void setBlockPos(Interval i, int blockPos) {
+        if (blockPos != -1) {
+            int reg = asRegister(i.location()).number;
+            if (isRegisterInRange(reg)) {
+                if (this.blockPos[reg] > blockPos) {
+                    this.blockPos[reg] = blockPos;
+                }
+                if (usePos[reg] > blockPos) {
+                    usePos[reg] = blockPos;
+                }
+            }
+        }
+    }
+
+    void freeExcludeActiveFixed() {
+        Interval interval = activeLists.get(RegisterBinding.Fixed);
+        while (interval != Interval.EndMarker) {
+            assert isRegister(interval.location()) : "active interval must have a register assigned";
+            excludeFromUse(interval);
+            interval = interval.next;
+        }
+    }
+
+    void freeExcludeActiveAny() {
+        Interval interval = activeLists.get(RegisterBinding.Any);
+        while (interval != Interval.EndMarker) {
+            assert isRegister(interval.location()) : "active interval must have a register assigned";
+            excludeFromUse(interval);
+            interval = interval.next;
+        }
+    }
+
+    void freeCollectInactiveFixed(Interval current) {
+        Interval interval = inactiveLists.get(RegisterBinding.Fixed);
+        while (interval != Interval.EndMarker) {
+            if (current.to() <= interval.currentFrom()) {
+                assert interval.currentIntersectsAt(current) == -1 : "must not intersect";
+                setUsePos(interval, interval.currentFrom(), true);
+            } else {
+                setUsePos(interval, interval.currentIntersectsAt(current), true);
+            }
+            interval = interval.next;
+        }
+    }
+
+    void freeCollectInactiveAny(Interval current) {
+        Interval interval = inactiveLists.get(RegisterBinding.Any);
+        while (interval != Interval.EndMarker) {
+            setUsePos(interval, interval.currentIntersectsAt(current), true);
+            interval = interval.next;
+        }
+    }
+
+    void freeCollectUnhandled(RegisterBinding kind, Interval current) {
+        Interval interval = unhandledLists.get(kind);
+        while (interval != Interval.EndMarker) {
+            setUsePos(interval, interval.intersectsAt(current), true);
+            if (kind == RegisterBinding.Fixed && current.to() <= interval.from()) {
+                setUsePos(interval, interval.from(), true);
+            }
+            interval = interval.next;
+        }
+    }
+
+    void spillExcludeActiveFixed() {
+        Interval interval = activeLists.get(RegisterBinding.Fixed);
+        while (interval != Interval.EndMarker) {
+            excludeFromUse(interval);
+            interval = interval.next;
+        }
+    }
+
+    void spillBlockUnhandledFixed(Interval current) {
+        Interval interval = unhandledLists.get(RegisterBinding.Fixed);
+        while (interval != Interval.EndMarker) {
+            setBlockPos(interval, interval.intersectsAt(current));
+            interval = interval.next;
+        }
+    }
+
+    void spillBlockInactiveFixed(Interval current) {
+        Interval interval = inactiveLists.get(RegisterBinding.Fixed);
+        while (interval != Interval.EndMarker) {
+            if (current.to() > interval.currentFrom()) {
+                setBlockPos(interval, interval.currentIntersectsAt(current));
+            } else {
+                assert interval.currentIntersectsAt(current) == -1 : "invalid optimization: intervals intersect";
+            }
+
+            interval = interval.next;
+        }
+    }
+
+    void spillCollectActiveAny(RegisterPriority registerPriority) {
+        Interval interval = activeLists.get(RegisterBinding.Any);
+        while (interval != Interval.EndMarker) {
+            setUsePos(interval, Math.min(interval.nextUsage(registerPriority, currentPosition), interval.to()), false);
+            interval = interval.next;
+        }
+    }
+
+    void spillCollectInactiveAny(Interval current) {
+        Interval interval = inactiveLists.get(RegisterBinding.Any);
+        while (interval != Interval.EndMarker) {
+            if (interval.currentIntersects(current)) {
+                setUsePos(interval, Math.min(interval.nextUsage(RegisterPriority.LiveAtLoopEnd, currentPosition), interval.to()), false);
+            }
+            interval = interval.next;
+        }
+    }
+
+    void insertMove(int operandId, Interval srcIt, Interval dstIt) {
+        // output all moves here. When source and target are equal, the move is
+        // optimized away later in assignRegNums
+
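+        // round an odd position up to the even opId of the following instruction (LIR
+        // instructions have even ids, odd positions denote the gaps between them)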
+        int opId = (operandId + 1) & ~1;
+        AbstractBlockBase<?> opBlock = allocator.blockForId(opId);
+        assert opId > 0 && allocator.blockForId(opId - 2) == opBlock : "cannot insert move at block boundary";
+
+        // calculate index of instruction inside instruction list of current block
+        // the minimal index (for a block with no spill moves) can be calculated because the
+        // numbering of instructions is known.
+        // When the block already contains spill moves, the index must be increased until the
+        // correct index is reached.
+        List<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(opBlock);
+        int index = (opId - instructions.get(0).id()) >> 1;
+        assert instructions.get(index).id() <= opId : "error in calculation";
+
+        while (instructions.get(index).id() != opId) {
+            index++;
+            assert 0 <= index && index < instructions.size() : "index out of bounds";
+        }
+        assert 1 <= index && index < instructions.size() : "index out of bounds";
+        assert instructions.get(index).id() == opId : "error in calculation";
+
+        // insert new instruction before instruction at position index
+        moveResolver.moveInsertPosition(instructions, index);
+        moveResolver.addMapping(srcIt, dstIt);
+    }
+
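+    // Prefer a split position at the boundary of the block with the smallest loop depth between
+    // minBlock and maxBlock, so that spill moves are placed outside of loops where possible.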
+    int findOptimalSplitPos(AbstractBlockBase<?> minBlock, AbstractBlockBase<?> maxBlock, int maxSplitPos) {
+        int fromBlockNr = minBlock.getLinearScanNumber();
+        int toBlockNr = maxBlock.getLinearScanNumber();
+
+        assert 0 <= fromBlockNr && fromBlockNr < blockCount() : "out of range";
+        assert 0 <= toBlockNr && toBlockNr < blockCount() : "out of range";
+        assert fromBlockNr < toBlockNr : "must cross block boundary";
+
+        // Try to split at the end of maxBlock. If this would be after
+        // maxSplitPos, then use the beginning of maxBlock.
+        int optimalSplitPos = allocator.getLastLirInstructionId(maxBlock) + 2;
+        if (optimalSplitPos > maxSplitPos) {
+            optimalSplitPos = allocator.getFirstLirInstructionId(maxBlock);
+        }
+
+        int minLoopDepth = maxBlock.getLoopDepth();
+        for (int i = toBlockNr - 1; i >= fromBlockNr; i--) {
+            AbstractBlockBase<?> cur = blockAt(i);
+
+            if (cur.getLoopDepth() < minLoopDepth) {
+                // block with lower loop depth found, so split at the end of this block
+                minLoopDepth = cur.getLoopDepth();
+                optimalSplitPos = allocator.getLastLirInstructionId(cur) + 2;
+            }
+        }
+        assert optimalSplitPos > allocator.maxOpId() || allocator.isBlockBegin(optimalSplitPos) : "algorithm must move split pos to block boundary";
+
+        return optimalSplitPos;
+    }
+
+    int findOptimalSplitPos(Interval interval, int minSplitPos, int maxSplitPos, boolean doLoopOptimization) {
+        int optimalSplitPos = -1;
+        if (minSplitPos == maxSplitPos) {
+            // trivial case, no optimization of split position possible
+            if (Debug.isLogEnabled()) {
+                Debug.log("min-pos and max-pos are equal, no optimization possible");
+            }
+            optimalSplitPos = minSplitPos;
+
+        } else {
+            assert minSplitPos < maxSplitPos : "must be true then";
+            assert minSplitPos > 0 : "cannot access minSplitPos - 1 otherwise";
+
+            // reason for using minSplitPos - 1: when the minimal split pos is exactly at the
+            // beginning of a block, then minSplitPos is also a possible split position.
+            // Use the block before as minBlock, because then minBlock.lastLirInstructionId() + 2 ==
+            // minSplitPos
+            AbstractBlockBase<?> minBlock = allocator.blockForId(minSplitPos - 1);
+
+            // reason for using maxSplitPos - 1: otherwise there would be an assert on failure
+            // when an interval ends at the end of the last block of the method
+            // (in this case, maxSplitPos == allocator().maxLirOpId() + 2, and there is no
+            // block at this opId)
+            AbstractBlockBase<?> maxBlock = allocator.blockForId(maxSplitPos - 1);
+
+            assert minBlock.getLinearScanNumber() <= maxBlock.getLinearScanNumber() : "invalid order";
+            if (minBlock == maxBlock) {
+                // split position cannot be moved to a block boundary, so split as late as possible
+                if (Debug.isLogEnabled()) {
+                    Debug.log("cannot move split pos to block boundary because minPos and maxPos are in same block");
+                }
+                optimalSplitPos = maxSplitPos;
+
+            } else {
+                if (interval.hasHoleBetween(maxSplitPos - 1, maxSplitPos) && !allocator.isBlockBegin(maxSplitPos)) {
+                    // Do not move the split position if the interval has a hole before maxSplitPos.
+                    // Intervals resulting from phi functions have more than one definition (marked
+                    // as mustHaveRegister), with a hole before each definition. When the register
+                    // is needed for the second definition, an earlier reloading is unnecessary.
+                    if (Debug.isLogEnabled()) {
+                        Debug.log("interval has hole just before maxSplitPos, so splitting at maxSplitPos");
+                    }
+                    optimalSplitPos = maxSplitPos;
+
+                } else {
+                    // search for the optimal block boundary between minSplitPos and maxSplitPos
+                    if (Debug.isLogEnabled()) {
+                        Debug.log("moving split pos to optimal block boundary between block B%d and B%d", minBlock.getId(), maxBlock.getId());
+                    }
+
+                    if (doLoopOptimization) {
+                        // Loop optimization: if a loop-end marker is found between the min- and
+                        // max-position, then split before this loop.
+                        int loopEndPos = interval.nextUsageExact(RegisterPriority.LiveAtLoopEnd, allocator.getLastLirInstructionId(minBlock) + 2);
+                        if (Debug.isLogEnabled()) {
+                            Debug.log("loop optimization: loop end found at pos %d", loopEndPos);
+                        }
+
+                        assert loopEndPos > minSplitPos : "invalid order";
+                        if (loopEndPos < maxSplitPos) {
+                            // A loop-end marker was found between the min- and max-position. If it
+                            // is not the end marker for the same loop as the min-position, move the
+                            // max-position to this loop block. Desired result: uses tagged as
+                            // shouldHaveRegister inside a loop cause a reloading of the interval
+                            // (normally, only mustHaveRegister causes a reloading).
+                            AbstractBlockBase<?> loopBlock = allocator.blockForId(loopEndPos);
+
+                            if (Debug.isLogEnabled()) {
+                                Debug.log("interval is used in loop that ends in block B%d, so trying to move maxBlock back from B%d to B%d", loopBlock.getId(), maxBlock.getId(), loopBlock.getId());
+                            }
+                            assert loopBlock != minBlock : "loopBlock and minBlock must be different because block boundary is needed between";
+
+                            int maxSpillPos = allocator.getLastLirInstructionId(loopBlock) + 2;
+                            optimalSplitPos = findOptimalSplitPos(minBlock, loopBlock, maxSpillPos);
+                            if (optimalSplitPos == maxSpillPos) {
+                                optimalSplitPos = -1;
+                                if (Debug.isLogEnabled()) {
+                                    Debug.log("loop optimization not necessary");
+                                }
+                            } else {
+                                if (Debug.isLogEnabled()) {
+                                    Debug.log("loop optimization successful");
+                                }
+                            }
+                        }
+                    }
+
+                    if (optimalSplitPos == -1) {
+                        // not calculated by loop optimization
+                        optimalSplitPos = findOptimalSplitPos(minBlock, maxBlock, maxSplitPos);
+                    }
+                }
+            }
+        }
+        if (Debug.isLogEnabled()) {
+            Debug.log("optimal split position: %d", optimalSplitPos);
+        }
+
+        return optimalSplitPos;
+    }
+
+    // Split an interval at the optimal position between minSplitPos and
+    // maxSplitPos in two parts:
+    // 1) the left part already has a location assigned
+    // 2) the right part is sorted into the unhandled list
+    void splitBeforeUsage(Interval interval, int minSplitPos, int maxSplitPos) {
+
+        try (Indent indent = Debug.logAndIndent("splitting interval %s between %d and %d", interval, minSplitPos, maxSplitPos)) {
+
+            assert interval.from() < minSplitPos : "cannot split at start of interval";
+            assert currentPosition < minSplitPos : "cannot split before current position";
+            assert minSplitPos <= maxSplitPos : "invalid order";
+            assert maxSplitPos <= interval.to() : "cannot split after end of interval";
+
+            int optimalSplitPos = findOptimalSplitPos(interval, minSplitPos, maxSplitPos, true);
+
+            assert minSplitPos <= optimalSplitPos && optimalSplitPos <= maxSplitPos : "out of range";
+            assert optimalSplitPos <= interval.to() : "cannot split after end of interval";
+            assert optimalSplitPos > interval.from() : "cannot split at start of interval";
+
+            if (optimalSplitPos == interval.to() && interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos) == Integer.MAX_VALUE) {
+                // the split position would be just before the end of the interval,
+                // so no split is necessary at all
+                if (Debug.isLogEnabled()) {
+                    Debug.log("no split necessary because optimal split position is at end of interval");
+                }
+                return;
+            }
+
+            // must calculate this before the actual split is performed and before split position is
+            // moved to odd opId
+            boolean moveNecessary = !allocator.isBlockBegin(optimalSplitPos) && !interval.hasHoleBetween(optimalSplitPos - 1, optimalSplitPos);
+
+            if (!allocator.isBlockBegin(optimalSplitPos)) {
+                // move position before actual instruction (odd opId)
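+                // e.g. an even position 10 becomes 9, an odd position 11 stays 11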
+                optimalSplitPos = (optimalSplitPos - 1) | 1;
+            }
+
+            if (Debug.isLogEnabled()) {
+                Debug.log("splitting at position %d", optimalSplitPos);
+            }
+
+            assert allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 1) : "split pos must be odd when not on block boundary";
+            assert !allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 0) : "split pos must be even on block boundary";
+
+            Interval splitPart = interval.split(optimalSplitPos, allocator);
+
+            splitPart.setInsertMoveWhenActivated(moveNecessary);
+
+            assert splitPart.from() >= currentPosition : "cannot append new interval before current walk position";
+            unhandledLists.addToListSortedByStartAndUsePositions(RegisterBinding.Any, splitPart);
+
+            if (Debug.isLogEnabled()) {
+                Debug.log("left interval  %s: %s", moveNecessary ? "      " : "", interval.logString(allocator));
+                Debug.log("right interval %s: %s", moveNecessary ? "(move)" : "", splitPart.logString(allocator));
+            }
+        }
+    }
+
+    // Split an interval at the optimal position between minSplitPos and
+    // maxSplitPos in two parts:
+    // 1) the left part already has a location assigned
+    // 2) the right part is always on the stack and therefore ignored in further processing
+
+    void splitForSpilling(Interval interval) {
+        // calculate allowed range of splitting position
+        int maxSplitPos = currentPosition;
+        int previousUsage = interval.previousUsage(RegisterPriority.ShouldHaveRegister, maxSplitPos);
+        if (previousUsage == currentPosition) {
+            /*
+             * If there is a usage with ShouldHaveRegister priority at the current position fall
+             * back to MustHaveRegister priority. This only happens if register priority was
+             * downgraded to MustHaveRegister in #allocLockedRegister.
+             */
+            previousUsage = interval.previousUsage(RegisterPriority.MustHaveRegister, maxSplitPos);
+        }
+        int minSplitPos = Math.max(previousUsage + 1, interval.from());
+
+        try (Indent indent = Debug.logAndIndent("splitting and spilling interval %s between %d and %d", interval, minSplitPos, maxSplitPos)) {
+
+            assert interval.state == State.Active : "why spill interval that is not active?";
+            assert interval.from() <= minSplitPos : "cannot split before start of interval";
+            assert minSplitPos <= maxSplitPos : "invalid order";
+            assert maxSplitPos < interval.to() : "cannot split at end of interval";
+            assert currentPosition < interval.to() : "interval must not end before current position";
+
+            if (minSplitPos == interval.from()) {
+                // the whole interval is never used, so spill it entirely to memory
+
+                try (Indent indent2 = Debug.logAndIndent("spilling entire interval because split pos is at beginning of interval (use positions: %d)", interval.usePosList().size())) {
+
+                    assert interval.firstUsage(RegisterPriority.MustHaveRegister) > currentPosition : String.format("interval %s must not have use position before currentPosition %d", interval,
+                                    currentPosition);
+
+                    allocator.assignSpillSlot(interval);
+                    handleSpillSlot(interval);
+                    changeSpillState(interval, minSplitPos);
+
+                    // Also kick parent intervals out of a register to memory when they have no use
+                    // position. This avoids a short interval in a register surrounded by intervals
+                    // in memory, which would cause useless moves from memory to register and back.
+                    Interval parent = interval;
+                    while (parent != null && parent.isSplitChild()) {
+                        parent = parent.getSplitChildBeforeOpId(parent.from());
+
+                        if (isRegister(parent.location())) {
+                            if (parent.firstUsage(RegisterPriority.ShouldHaveRegister) == Integer.MAX_VALUE) {
+                                // parent is never used, so kick it out of its assigned register
+                                if (Debug.isLogEnabled()) {
+                                    Debug.log("kicking out interval %d out of its register because it is never used", parent.operandNumber);
+                                }
+                                allocator.assignSpillSlot(parent);
+                                handleSpillSlot(parent);
+                            } else {
+                                // do not go further back because the register is actually used by
+                                // the interval
+                                parent = null;
+                            }
+                        }
+                    }
+                }
+
+            } else {
+                // search optimal split pos, split interval and spill only the right hand part
+                int optimalSplitPos = findOptimalSplitPos(interval, minSplitPos, maxSplitPos, false);
+
+                assert minSplitPos <= optimalSplitPos && optimalSplitPos <= maxSplitPos : "out of range";
+                assert optimalSplitPos < interval.to() : "cannot split at end of interval";
+                assert optimalSplitPos >= interval.from() : "cannot split before start of interval";
+
+                if (!allocator.isBlockBegin(optimalSplitPos)) {
+                    // move position before actual instruction (odd opId)
+                    optimalSplitPos = (optimalSplitPos - 1) | 1;
+                }
+
+                try (Indent indent2 = Debug.logAndIndent("splitting at position %d", optimalSplitPos)) {
+                    assert allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 1) : "split pos must be odd when not on block boundary";
+                    assert !allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 0) : "split pos must be even on block boundary";
+
+                    Interval spilledPart = interval.split(optimalSplitPos, allocator);
+                    allocator.assignSpillSlot(spilledPart);
+                    handleSpillSlot(spilledPart);
+                    changeSpillState(spilledPart, optimalSplitPos);
+
+                    if (!allocator.isBlockBegin(optimalSplitPos)) {
+                        if (Debug.isLogEnabled()) {
+                            Debug.log("inserting move from interval %d to %d", interval.operandNumber, spilledPart.operandNumber);
+                        }
+                        insertMove(optimalSplitPos, interval, spilledPart);
+                    }
+
+                    // the currentSplitChild is needed later when moves are inserted for reloading
+                    assert spilledPart.currentSplitChild() == interval : "overwriting wrong currentSplitChild";
+                    spilledPart.makeCurrentSplitChild();
+
+                    if (Debug.isLogEnabled()) {
+                        Debug.log("left interval: %s", interval.logString(allocator));
+                        Debug.log("spilled interval   : %s", spilledPart.logString(allocator));
+                    }
+                }
+            }
+        }
+    }
+
+    // called during register allocation
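+    // State machine: NoSpillStore -> OneSpillStore on the first spill; a further spill (or a
+    // spill in a deeper loop than the definition) upgrades to SpillInDominator or
+    // StoreAtDefinition, depending on LIROptLSRAOptimizeSpillPosition.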
+    private void changeSpillState(Interval interval, int spillPos) {
+        switch (interval.spillState()) {
+            case NoSpillStore: {
+                int defLoopDepth = allocator.blockForId(interval.spillDefinitionPos()).getLoopDepth();
+                int spillLoopDepth = allocator.blockForId(spillPos).getLoopDepth();
+
+                if (defLoopDepth < spillLoopDepth) {
+                    /*
+                     * The loop depth of the spilling position is higher than the loop depth at the
+                     * definition of the interval. Move write to memory out of loop.
+                     */
+                    if (LinearScan.Options.LIROptLSRAOptimizeSpillPosition.getValue()) {
+                        // find the best spill position in the dominator tree
+                        interval.setSpillState(SpillState.SpillInDominator);
+                    } else {
+                        // store at definition of the interval
+                        interval.setSpillState(SpillState.StoreAtDefinition);
+                    }
+                } else {
+                    /*
+                     * The interval is currently spilled only once, so for now there is no reason to
+                     * store the interval at the definition.
+                     */
+                    interval.setSpillState(SpillState.OneSpillStore);
+                }
+                break;
+            }
+
+            case OneSpillStore: {
+                if (LinearScan.Options.LIROptLSRAOptimizeSpillPosition.getValue()) {
+                    // the interval is spilled more than once
+                    interval.setSpillState(SpillState.SpillInDominator);
+                } else {
+                    // It is better to store it to memory at the definition.
+                    interval.setSpillState(SpillState.StoreAtDefinition);
+                }
+                break;
+            }
+
+            case SpillInDominator:
+            case StoreAtDefinition:
+            case StartInMemory:
+            case NoOptimization:
+            case NoDefinitionFound:
+                // nothing to do
+                break;
+
+            default:
+                throw new BailoutException("other states not allowed at this time");
+        }
+    }
+
+    /**
+     * This is called for every interval that is assigned to a stack slot.
+     */
+    protected void handleSpillSlot(Interval interval) {
+        assert interval.location() != null && (interval.canMaterialize() || isStackSlotValue(interval.location())) : "interval not assigned to a stack slot " + interval;
+        // Do nothing. Stack slots are not processed in this implementation.
+    }
+
+    void splitStackInterval(Interval interval) {
+        int minSplitPos = currentPosition + 1;
+        int maxSplitPos = Math.min(interval.firstUsage(RegisterPriority.ShouldHaveRegister), interval.to());
+
+        splitBeforeUsage(interval, minSplitPos, maxSplitPos);
+    }
+
+    void splitWhenPartialRegisterAvailable(Interval interval, int registerAvailableUntil) {
+        int minSplitPos = Math.max(interval.previousUsage(RegisterPriority.ShouldHaveRegister, registerAvailableUntil), interval.from() + 1);
+        splitBeforeUsage(interval, minSplitPos, registerAvailableUntil);
+    }
+
+    void splitAndSpillInterval(Interval interval) {
+        assert interval.state == State.Active || interval.state == State.Inactive : "other states not allowed";
+
+        int currentPos = currentPosition;
+        if (interval.state == State.Inactive) {
+            // the interval is currently inactive, so no spill slot is needed for now.
+            // when the split part is activated, the interval has a new chance to get a register,
+            // so in the best case no stack slot is necessary
+            assert interval.hasHoleBetween(currentPos - 1, currentPos + 1) : "interval can not be inactive otherwise";
+            splitBeforeUsage(interval, currentPos + 1, currentPos + 1);
+
+        } else {
+            // search the position where the interval must have a register and split
+            // at the optimal position before.
+            // The newly created part is added to the unhandled list and will get a register
+            // when it is activated
+            int minSplitPos = currentPos + 1;
+            int maxSplitPos = Math.min(interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos), interval.to());
+
+            splitBeforeUsage(interval, minSplitPos, maxSplitPos);
+
+            assert interval.nextUsage(RegisterPriority.MustHaveRegister, currentPos) == Integer.MAX_VALUE : "the remaining part is spilled to stack and therefore has no register";
+            splitForSpilling(interval);
+        }
+    }
+
+    boolean allocFreeRegister(Interval interval) {
+        try (Indent indent = Debug.logAndIndent("trying to find free register for %s", interval)) {
+
+            initUseLists(true);
+            freeExcludeActiveFixed();
+            freeExcludeActiveAny();
+            freeCollectInactiveFixed(interval);
+            freeCollectInactiveAny(interval);
+            // freeCollectUnhandled(fixedKind, cur);
+            assert unhandledLists.get(RegisterBinding.Fixed) == Interval.EndMarker : "must not have unhandled fixed intervals because all fixed intervals have a use at position 0";
+
+            // usePos contains the start of the next interval that has this register assigned
+            // (either as a fixed register or a normal allocated register in the past)
+            // only intervals overlapping with cur are processed, non-overlapping intervals can be
+            // ignored safely
+            if (Debug.isLogEnabled()) {
+                // Enable this logging to see all register states
+                try (Indent indent2 = Debug.logAndIndent("state of registers:")) {
+                    for (Register register : availableRegs) {
+                        int i = register.number;
+                        Debug.log("reg %d: usePos: %d", register.number, usePos[i]);
+                    }
+                }
+            }
+
+            Register hint = null;
+            Interval locationHint = interval.locationHint(true);
+            if (locationHint != null && locationHint.location() != null && isRegister(locationHint.location())) {
+                hint = asRegister(locationHint.location());
+                if (Debug.isLogEnabled()) {
+                    Debug.log("hint register %d from interval %s", hint.number, locationHint);
+                }
+            }
+            assert interval.location() == null : "register already assigned to interval";
+
+            // the register must be free at least until this position
+            int regNeededUntil = interval.from() + 1;
+            int intervalTo = interval.to();
+
+            boolean needSplit = false;
+            int splitPos = -1;
+
+            Register reg = null;
+            Register minFullReg = null;
+            Register maxPartialReg = null;
+
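+            // Selection heuristic: among registers free for the whole interval prefer the hint
+            // register, otherwise the one whose next use comes soonest; if none fits fully, take
+            // the partially free register that stays free the longest (forcing a split below).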
+            for (Register availableReg : availableRegs) {
+                int number = availableReg.number;
+                if (usePos[number] >= intervalTo) {
+                    // this register is free for the full interval
+                    if (minFullReg == null || availableReg.equals(hint) || (usePos[number] < usePos[minFullReg.number] && !minFullReg.equals(hint))) {
+                        minFullReg = availableReg;
+                    }
+                } else if (usePos[number] > regNeededUntil) {
+                    // this register is at least free until regNeededUntil
+                    if (maxPartialReg == null || availableReg.equals(hint) || (usePos[number] > usePos[maxPartialReg.number] && !maxPartialReg.equals(hint))) {
+                        maxPartialReg = availableReg;
+                    }
+                }
+            }
+
+            if (minFullReg != null) {
+                reg = minFullReg;
+            } else if (maxPartialReg != null) {
+                needSplit = true;
+                reg = maxPartialReg;
+            } else {
+                return false;
+            }
+
+            splitPos = usePos[reg.number];
+            interval.assignLocation(reg.asValue(interval.kind()));
+            if (Debug.isLogEnabled()) {
+                Debug.log("selected register %d", reg.number);
+            }
+
+            assert splitPos > 0 : "invalid splitPos";
+            if (needSplit) {
+                // register not available for full interval, so split it
+                splitWhenPartialRegisterAvailable(interval, splitPos);
+            }
+            // only return true if interval is completely assigned
+            return true;
+        }
+    }
+
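+    // Spill every interval collected in spillIntervals[reg] (i.e. every interval that currently
+    // occupies reg) so that the current interval can use the register.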
+    void splitAndSpillIntersectingIntervals(Register reg) {
+        assert reg != null : "no register assigned";
+
+        for (int i = 0; i < spillIntervals[reg.number].size(); i++) {
+            Interval interval = spillIntervals[reg.number].get(i);
+            removeFromList(interval);
+            splitAndSpillInterval(interval);
+        }
+    }
+
+    // Split an Interval and spill it to memory so that cur can be placed in a register
+    void allocLockedRegister(Interval interval) {
+        try (Indent indent = Debug.logAndIndent("alloc locked register: need to split and spill to get register for %s", interval)) {
+
+            // the register must be free at least until this position
+            int firstUsage = interval.firstUsage(RegisterPriority.MustHaveRegister);
+            int firstShouldHaveUsage = interval.firstUsage(RegisterPriority.ShouldHaveRegister);
+            int regNeededUntil = Math.min(firstUsage, interval.from() + 1);
+            int intervalTo = interval.to();
+            assert regNeededUntil >= 0 && regNeededUntil < Integer.MAX_VALUE : "interval has no use";
+
+            Register reg;
+            Register ignore;
+            /*
+             * In the common case we don't spill registers that have _any_ use position that is
+             * closer than the next use of the current interval, but if we can't spill the current
+             * interval we weaken this strategy and also allow spilling of intervals that have only
+             * non-mandatory requirements (no MustHaveRegister use position).
+             */
+            for (RegisterPriority registerPriority = RegisterPriority.LiveAtLoopEnd; true; registerPriority = RegisterPriority.MustHaveRegister) {
+                // collect current usage of registers
+                initUseLists(false);
+                spillExcludeActiveFixed();
+                // spillBlockUnhandledFixed(cur);
+                assert unhandledLists.get(RegisterBinding.Fixed) == Interval.EndMarker : "must not have unhandled fixed intervals because all fixed intervals have a use at position 0";
+                spillBlockInactiveFixed(interval);
+                spillCollectActiveAny(registerPriority);
+                spillCollectInactiveAny(interval);
+                if (Debug.isLogEnabled()) {
+                    printRegisterState();
+                }
+
+                reg = null;
+                ignore = interval.location() != null && isRegister(interval.location()) ? asRegister(interval.location()) : null;
+
+                for (Register availableReg : availableRegs) {
+                    int number = availableReg.number;
+                    if (availableReg.equals(ignore)) {
+                        // this register must be ignored
+                    } else if (usePos[number] > regNeededUntil) {
+                        if (reg == null || (usePos[number] > usePos[reg.number])) {
+                            reg = availableReg;
+                        }
+                    }
+                }
+
+                int regUsePos = (reg == null ? 0 : usePos[reg.number]);
+                if (regUsePos <= firstShouldHaveUsage) {
+                    if (Debug.isLogEnabled()) {
+                        Debug.log("able to spill current interval. firstUsage(register): %d, usePos: %d", firstUsage, regUsePos);
+                    }
+
+                    if (firstUsage <= interval.from() + 1) {
+                        if (registerPriority.equals(RegisterPriority.LiveAtLoopEnd)) {
+                            /*
+                             * Tool of last resort: we cannot spill the current interval, so we try
+                             * to spill an active interval that has a usage but does not require a
+                             * register.
+                             */
+                            Debug.log("retry with register priority must have register");
+                            continue;
+                        }
+                        String description = "cannot spill interval (" + interval + ") that is used in first instruction (possible reason: no register found) firstUsage=" + firstUsage +
+                                        ", interval.from()=" + interval.from() + "; already used candidates: " + Arrays.toString(availableRegs);
+                        /*
+                         * assign a reasonable register and do a bailout in product mode to avoid
+                         * errors
+                         */
+                        allocator.assignSpillSlot(interval);
+                        Debug.dump(allocator.getLIR(), description);
+                        allocator.printIntervals(description);
+                        throw new OutOfRegistersException("LinearScan: no register found", description);
+                    }
+
+                    splitAndSpillInterval(interval);
+                    return;
+                }
+                break;
+            }
+
+            boolean needSplit = blockPos[reg.number] <= intervalTo;
+
+            int splitPos = blockPos[reg.number];
+
+            if (Debug.isLogEnabled()) {
+                Debug.log("decided to use register %d", reg.number);
+            }
+            assert splitPos > 0 : "invalid splitPos";
+            assert needSplit || splitPos > interval.from() : "splitting interval at from";
+
+            interval.assignLocation(reg.asValue(interval.kind()));
+            if (needSplit) {
+                // register not available for the full interval, so split it
+                splitWhenPartialRegisterAvailable(interval, splitPos);
+            }
+
+            // perform splitting and spilling for all affected intervals
+            splitAndSpillIntersectingIntervals(reg);
+            return;
+        }
+    }
+
+    void printRegisterState() {
+        try (Indent indent2 = Debug.logAndIndent("state of registers:")) {
+            for (Register reg : availableRegs) {
+                int i = reg.number;
+                try (Indent indent3 = Debug.logAndIndent("reg %d: usePos: %d, blockPos: %d, intervals: ", i, usePos[i], blockPos[i])) {
+                    for (int j = 0; j < spillIntervals[i].size(); j++) {
+                        Debug.log("%s ", spillIntervals[i].get(j));
+                    }
+                }
+            }
+        }
+    }
+
+    boolean noAllocationPossible(Interval interval) {
+        if (allocator.callKillsRegisters()) {
+            // fast calculation of intervals that can never get a register because
+            // the next instruction is a call that blocks all registers
+            // Note: this only works if a call kills all registers
+
+            // check if this interval is the result of a split operation
+            // (an interval got a register until this position)
+            int pos = interval.from();
+            if (isOdd(pos)) {
+                // the current instruction is a call that blocks all registers
+                if (pos < allocator.maxOpId() && allocator.hasCall(pos + 1) && interval.to() > pos + 1) {
+                    if (Debug.isLogEnabled()) {
+                        Debug.log("free register cannot be available because all registers blocked by following call");
+                    }
+
+                    // safety check that there is really no register available
+                    assert !allocFreeRegister(interval) : "found a register for this interval";
+                    return true;
+                }
+            }
+        }
+        return false;
+    }
+
+    void initVarsForAlloc(Interval interval) {
+        AllocatableRegisters allocatableRegisters = allocator.getRegisterAllocationConfig().getAllocatableRegisters(interval.kind().getPlatformKind());
+        availableRegs = allocatableRegisters.allocatableRegisters;
+        minReg = allocatableRegisters.minRegisterNumber;
+        maxReg = allocatableRegisters.maxRegisterNumber;
+    }
+
+    static boolean isMove(LIRInstruction op, Interval from, Interval to) {
+        if (op instanceof ValueMoveOp) {
+            ValueMoveOp move = (ValueMoveOp) op;
+            if (isVariable(move.getInput()) && isVariable(move.getResult())) {
+                return move.getInput() != null && move.getInput().equals(from.operand) && move.getResult() != null && move.getResult().equals(to.operand);
+            }
+        }
+        return false;
+    }
+
+    // optimization (especially for phi functions of nested loops):
+    // assign same spill slot to non-intersecting intervals
+    void combineSpilledIntervals(Interval interval) {
+        if (interval.isSplitChild()) {
+            // optimization is only suitable for split parents
+            return;
+        }
+
+        Interval registerHint = interval.locationHint(false);
+        if (registerHint == null) {
+            // cur is not the target of a move, otherwise registerHint would be set
+            return;
+        }
+        assert registerHint.isSplitParent() : "register hint must be split parent";
+
+        if (interval.spillState() != SpillState.NoOptimization || registerHint.spillState() != SpillState.NoOptimization) {
+            // combining the stack slots for intervals where spill move optimization is applied
+            // is not beneficial and would cause problems
+            return;
+        }
+
+        int beginPos = interval.from();
+        int endPos = interval.to();
+        if (endPos > allocator.maxOpId() || isOdd(beginPos) || isOdd(endPos)) {
+            // safety check that lirOpWithId is allowed
+            return;
+        }
+
+        if (!isMove(allocator.instructionForId(beginPos), registerHint, interval) || !isMove(allocator.instructionForId(endPos), interval, registerHint)) {
+            // cur and registerHint are not connected with two moves
+            return;
+        }
+
+        Interval beginHint = registerHint.getSplitChildAtOpId(beginPos, LIRInstruction.OperandMode.USE, allocator);
+        Interval endHint = registerHint.getSplitChildAtOpId(endPos, LIRInstruction.OperandMode.DEF, allocator);
+        if (beginHint == endHint || beginHint.to() != beginPos || endHint.from() != endPos) {
+            // registerHint must be split; otherwise the re-writing of use positions does not work
+            return;
+        }
+
+        assert beginHint.location() != null : "must have location assigned";
+        assert endHint.location() == null : "must not have location assigned yet";
+        assert interval.firstUsage(RegisterPriority.MustHaveRegister) == beginPos : "must have use position at begin of interval because of move";
+        assert endHint.firstUsage(RegisterPriority.MustHaveRegister) == endPos : "must have use position at end of interval because of move";
+
+        if (isRegister(beginHint.location())) {
+            // registerHint is not spilled at beginPos, so it would not be beneficial
+            // to immediately spill cur
+            return;
+        }
+        assert registerHint.spillSlot() != null : "must be set when part of interval was spilled";
+
+        // modify intervals such that cur gets the same stack slot as registerHint
+        // delete use positions to prevent the intervals from getting a register at the beginning
+        interval.setSpillSlot(registerHint.spillSlot());
+        interval.removeFirstUsePos();
+        endHint.removeFirstUsePos();
+    }
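+    // Worked example (illustrative, operand names hypothetical): for a phi of a
+    // nested loop the LIR typically contains the move pair
+    //     beginPos: v1 -> v2   (registerHint = v1, interval = v2)
+    //     endPos:   v2 -> v1
+    // If v1 is spilled around this region, giving v2 the same spill slot and
+    // removing the use positions at beginPos/endPos means v2 never needs a
+    // register, and both moves degenerate to self-moves on the same stack slot.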
+
+    // allocate a physical register or memory location to an interval
+    @Override
+    protected boolean activateCurrent(Interval interval) {
+        boolean result = true;
+
+        try (Indent indent = Debug.logAndIndent("activating interval %s,  splitParent: %d", interval, interval.splitParent().operandNumber)) {
+
+            final Value operand = interval.operand;
+            if (interval.location() != null && isStackSlotValue(interval.location())) {
+                // activating an interval that has a stack slot assigned -> split it at the
+                // first use position (used for method parameters)
+                if (Debug.isLogEnabled()) {
+                    Debug.log("interval has spill slot assigned (method parameter) . split it before first use");
+                }
+                splitStackInterval(interval);
+                result = false;
+
+            } else {
+                if (interval.location() == null) {
+                    // interval has no register assigned -> normal allocation
+                    // (this is the normal case for most intervals)
+                    if (Debug.isLogEnabled()) {
+                        Debug.log("normal allocation of register");
+                    }
+
+                    // assign same spill slot to non-intersecting intervals
+                    combineSpilledIntervals(interval);
+
+                    initVarsForAlloc(interval);
+                    if (noAllocationPossible(interval) || !allocFreeRegister(interval)) {
+                        // no free register available.
+                        // split and spill another interval so that this interval gets a register
+                        allocLockedRegister(interval);
+                    }
+
+                    // spilled intervals need not be moved to the active list
+                    if (!isRegister(interval.location())) {
+                        result = false;
+                    }
+                }
+            }
+
+            // load spilled values that become active from stack slot to register
+            if (interval.insertMoveWhenActivated()) {
+                assert interval.isSplitChild();
+                assert interval.currentSplitChild() != null;
+                assert !interval.currentSplitChild().operand.equals(operand) : "cannot insert move between same interval";
+                if (Debug.isLogEnabled()) {
+                    Debug.log("Inserting move from interval %d to %d because insertMoveWhenActivated is set", interval.currentSplitChild().operandNumber, interval.operandNumber);
+                }
+
+                insertMove(interval.from(), interval.currentSplitChild(), interval);
+            }
+            interval.makeCurrentSplitChild();
+
+        }
+
+        return result; // true = interval is moved to active list
+    }
+
+    public void finishAllocation() {
+        // must be called when all intervals are allocated
+        moveResolver.resolveAndAppendMoves();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/MoveResolver.java	Mon Aug 31 13:06:17 2015 +0200
@@ -0,0 +1,471 @@
+/*
+ * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.trace;
+
+import static jdk.internal.jvmci.code.ValueUtil.*;
+
+import java.util.*;
+
+import jdk.internal.jvmci.code.*;
+import jdk.internal.jvmci.common.*;
+import jdk.internal.jvmci.meta.*;
+
+import com.oracle.graal.debug.*;
+import com.oracle.graal.lir.*;
+
+/**
+ * Resolves the parallel moves that arise when intervals change their location, e.g. at block
+ * boundaries or split positions: mappings are processed in an order in which no value is
+ * overwritten before it has been read, and cycles are broken by spilling one value to the stack.
+ */
+public class MoveResolver {
+
+    private final LinearScan allocator;
+
+    private int insertIdx;
+    private LIRInsertionBuffer insertionBuffer; // buffer where moves are inserted
+
+    private final List<Interval> mappingFrom;
+    private final List<Constant> mappingFromOpr;
+    private final List<Interval> mappingTo;
+    private boolean multipleReadsAllowed;
+    private final int[] registerBlocked;
+
+    protected void setValueBlocked(Value location, int direction) {
+        assert direction == 1 || direction == -1 : "direction must be 1 or -1";
+        if (isRegister(location)) {
+            registerBlocked[asRegister(location).number] += direction;
+        } else {
+            throw JVMCIError.shouldNotReachHere("unhandled value " + location);
+        }
+    }
+
+    protected Interval getMappingFrom(int i) {
+        return mappingFrom.get(i);
+    }
+
+    protected int mappingFromSize() {
+        return mappingFrom.size();
+    }
+
+    protected int valueBlocked(Value location) {
+        if (isRegister(location)) {
+            return registerBlocked[asRegister(location).number];
+        }
+        throw JVMCIError.shouldNotReachHere("unhandled value " + location);
+    }
+
+    void setMultipleReadsAllowed() {
+        multipleReadsAllowed = true;
+    }
+
+    protected boolean areMultipleReadsAllowed() {
+        return multipleReadsAllowed;
+    }
+
+    boolean hasMappings() {
+        return mappingFrom.size() > 0;
+    }
+
+    protected LinearScan getAllocator() {
+        return allocator;
+    }
+
+    protected MoveResolver(LinearScan allocator) {
+
+        this.allocator = allocator;
+        this.multipleReadsAllowed = false;
+        this.mappingFrom = new ArrayList<>(8);
+        this.mappingFromOpr = new ArrayList<>(8);
+        this.mappingTo = new ArrayList<>(8);
+        this.insertIdx = -1;
+        this.insertionBuffer = new LIRInsertionBuffer();
+        this.registerBlocked = new int[allocator.getRegisters().length];
+    }
+
+    protected boolean checkEmpty() {
+        assert mappingFrom.size() == 0 && mappingFromOpr.size() == 0 && mappingTo.size() == 0 : "list must be empty before and after processing";
+        for (int i = 0; i < getAllocator().getRegisters().length; i++) {
+            assert registerBlocked[i] == 0 : "register map must be empty before and after processing";
+        }
+        checkMultipleReads();
+        return true;
+    }
+
+    protected void checkMultipleReads() {
+        assert !areMultipleReadsAllowed() : "must have default value";
+    }
+
+    private boolean verifyBeforeResolve() {
+        assert mappingFrom.size() == mappingFromOpr.size() : "length must be equal";
+        assert mappingFrom.size() == mappingTo.size() : "length must be equal";
+        assert insertIdx != -1 : "insert position not set";
+
+        int i;
+        int j;
+        if (!areMultipleReadsAllowed()) {
+            for (i = 0; i < mappingFrom.size(); i++) {
+                for (j = i + 1; j < mappingFrom.size(); j++) {
+                    assert mappingFrom.get(i) == null || mappingFrom.get(i) != mappingFrom.get(j) : "cannot read from same interval twice";
+                }
+            }
+        }
+
+        for (i = 0; i < mappingTo.size(); i++) {
+            for (j = i + 1; j < mappingTo.size(); j++) {
+                assert mappingTo.get(i) != mappingTo.get(j) : "cannot write to same interval twice";
+            }
+        }
+
+        HashSet<Value> usedRegs = new HashSet<>();
+        if (!areMultipleReadsAllowed()) {
+            for (i = 0; i < mappingFrom.size(); i++) {
+                Interval interval = mappingFrom.get(i);
+                if (interval != null && !isIllegal(interval.location())) {
+                    boolean unique = usedRegs.add(interval.location());
+                    assert unique : "cannot read from same register twice";
+                }
+            }
+        }
+
+        usedRegs.clear();
+        for (i = 0; i < mappingTo.size(); i++) {
+            Interval interval = mappingTo.get(i);
+            if (isIllegal(interval.location())) {
+                // After insertion the location may become illegal, so don't check it since multiple
+                // intervals might be illegal.
+                continue;
+            }
+            boolean unique = usedRegs.add(interval.location());
+            assert unique : "cannot write to same register twice";
+        }
+
+        verifyStackSlotMapping();
+
+        return true;
+    }
+
+    protected void verifyStackSlotMapping() {
+        HashSet<Value> usedRegs = new HashSet<>();
+        for (int i = 0; i < mappingFrom.size(); i++) {
+            Interval interval = mappingFrom.get(i);
+            if (interval != null && !isRegister(interval.location())) {
+                usedRegs.add(interval.location());
+            }
+        }
+        for (int i = 0; i < mappingTo.size(); i++) {
+            Interval interval = mappingTo.get(i);
+            assert !usedRegs.contains(interval.location()) || checkIntervalLocation(mappingFrom.get(i), interval, mappingFromOpr.get(i)) : "stack slots used in mappingFrom must be disjoint to mappingTo";
+        }
+    }
+
+    private static boolean checkIntervalLocation(Interval from, Interval to, Constant fromOpr) {
+        if (from == null) {
+            return fromOpr != null;
+        } else {
+            return to.location().equals(from.location());
+        }
+    }
+
+    // mark the location of the interval as blocked
+    private void blockRegisters(Interval interval) {
+        Value location = interval.location();
+        if (mightBeBlocked(location)) {
+            assert areMultipleReadsAllowed() || valueBlocked(location) == 0 : "location already marked as used: " + location;
+            int direction = 1;
+            setValueBlocked(location, direction);
+            Debug.log("block %s", location);
+        }
+    }
+
+    // mark the location of the interval as unblocked
+    private void unblockRegisters(Interval interval) {
+        Value location = interval.location();
+        if (mightBeBlocked(location)) {
+            assert valueBlocked(location) > 0 : "location already marked as unused: " + location;
+            setValueBlocked(location, -1);
+            Debug.log("unblock %s", location);
+        }
+    }
+
+    /**
+     * Checks if the {@linkplain Interval#location() location} of {@code to} is not blocked or is
+     * only blocked by {@code from}.
+     */
+    private boolean safeToProcessMove(Interval from, Interval to) {
+        Value fromReg = from != null ? from.location() : null;
+
+        Value location = to.location();
+        if (mightBeBlocked(location)) {
+            if (valueBlocked(location) > 1 || (valueBlocked(location) == 1 && !isMoveToSelf(fromReg, location))) {
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+    protected boolean isMoveToSelf(Value from, Value to) {
+        assert to != null;
+        if (to.equals(from)) {
+            return true;
+        }
+        if (from != null && isRegister(from) && isRegister(to) && asRegister(from).equals(asRegister(to))) {
+            assert LIRKind.verifyMoveKinds(to.getLIRKind(), from.getLIRKind()) : String.format("Same register but Kind mismatch %s <- %s", to, from);
+            return true;
+        }
+        return false;
+    }
+
+    protected boolean mightBeBlocked(Value location) {
+        return isRegister(location);
+    }
+
+    private void createInsertionBuffer(List<LIRInstruction> list) {
+        assert !insertionBuffer.initialized() : "overwriting existing buffer";
+        insertionBuffer.init(list);
+    }
+
+    private void appendInsertionBuffer() {
+        if (insertionBuffer.initialized()) {
+            insertionBuffer.finish();
+        }
+        assert !insertionBuffer.initialized() : "must be uninitialized now";
+
+        insertIdx = -1;
+    }
+
+    private void insertMove(Interval fromInterval, Interval toInterval) {
+        assert !fromInterval.operand.equals(toInterval.operand) : "from and to interval equal: " + fromInterval;
+        assert LIRKind.verifyMoveKinds(toInterval.kind(), fromInterval.kind()) : "move between different types";
+        assert insertIdx != -1 : "must setup insert position first";
+
+        insertionBuffer.append(insertIdx, createMove(fromInterval.operand, toInterval.operand, fromInterval.location(), toInterval.location()));
+
+        if (Debug.isLogEnabled()) {
+            Debug.log("insert move from %s to %s at %d", fromInterval, toInterval, insertIdx);
+        }
+    }
+
+    /**
+     * @param fromOpr {@link Interval#operand operand} of the {@code from} interval
+     * @param toOpr {@link Interval#operand operand} of the {@code to} interval
+     * @param fromLocation {@link Interval#location() location} of the {@code from} interval
+     * @param toLocation {@link Interval#location() location} of the {@code to} interval
+     */
+    protected LIRInstruction createMove(AllocatableValue fromOpr, AllocatableValue toOpr, AllocatableValue fromLocation, AllocatableValue toLocation) {
+        return getAllocator().getSpillMoveFactory().createMove(toOpr, fromOpr);
+    }
+
+    private void insertMove(Constant fromOpr, Interval toInterval) {
+        assert insertIdx != -1 : "must setup insert position first";
+
+        AllocatableValue toOpr = toInterval.operand;
+        LIRInstruction move = getAllocator().getSpillMoveFactory().createLoad(toOpr, fromOpr);
+        insertionBuffer.append(insertIdx, move);
+
+        if (Debug.isLogEnabled()) {
+            Debug.log("insert move from value %s to %s at %d", fromOpr, toInterval, insertIdx);
+        }
+    }
+
+    private void resolveMappings() {
+        try (Indent indent = Debug.logAndIndent("resolveMapping")) {
+            assert verifyBeforeResolve();
+            if (Debug.isLogEnabled()) {
+                printMapping();
+            }
+
+            // Block all registers that are used as input operands of a move.
+            // When a register is blocked, no move to this register is emitted.
+            // This is necessary for detecting cycles in moves.
+            int i;
+            for (i = mappingFrom.size() - 1; i >= 0; i--) {
+                Interval fromInterval = mappingFrom.get(i);
+                if (fromInterval != null) {
+                    blockRegisters(fromInterval);
+                }
+            }
+
+            int spillCandidate = -1;
+            while (mappingFrom.size() > 0) {
+                boolean processedInterval = false;
+
+                for (i = mappingFrom.size() - 1; i >= 0; i--) {
+                    Interval fromInterval = mappingFrom.get(i);
+                    Interval toInterval = mappingTo.get(i);
+
+                    if (safeToProcessMove(fromInterval, toInterval)) {
+                        // this interval can be processed because target is free
+                        if (fromInterval != null) {
+                            insertMove(fromInterval, toInterval);
+                            unblockRegisters(fromInterval);
+                        } else {
+                            insertMove(mappingFromOpr.get(i), toInterval);
+                        }
+                        mappingFrom.remove(i);
+                        mappingFromOpr.remove(i);
+                        mappingTo.remove(i);
+
+                        processedInterval = true;
+                    } else if (fromInterval != null && isRegister(fromInterval.location())) {
+                        // this interval cannot be processed now because target is not free
+                        // it starts in a register, so it is a possible candidate for spilling
+                        spillCandidate = i;
+                    }
+                }
+
+                if (!processedInterval) {
+                    breakCycle(spillCandidate);
+                }
+            }
+        }
+
+        // reset to default value
+        multipleReadsAllowed = false;
+
+        // check that all intervals have been processed
+        assert checkEmpty();
+    }
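+    // Illustrative example: the mappings {r1 -> r2, r2 -> r1} form a cycle, so both
+    // targets remain blocked and no move is safe to process. breakCycle below then
+    // spills one source to its stack slot, yielding the move sequence
+    // r1 -> stack, r2 -> r1, stack -> r2.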
+
+    protected void breakCycle(int spillCandidate) {
+        // no move could be processed because there is a cycle in the move list
+        // (e.g. r1 -> r2, r2 -> r1), so one interval must be spilled to memory
+        assert spillCandidate != -1 : "no interval in register for spilling found";
+
+        // create a new spill interval and assign a stack slot to it
+        Interval fromInterval = mappingFrom.get(spillCandidate);
+        // do not allocate a new spill slot for the temporary interval, but
+        // use the spill slot assigned to fromInterval. Otherwise moves from
+        // one stack slot to another can happen (not allowed by LIRAssembler).
+        StackSlotValue spillSlot = fromInterval.spillSlot();
+        if (spillSlot == null) {
+            spillSlot = getAllocator().getFrameMapBuilder().allocateSpillSlot(fromInterval.kind());
+            fromInterval.setSpillSlot(spillSlot);
+        }
+        spillInterval(spillCandidate, fromInterval, spillSlot);
+    }
+
+    protected void spillInterval(int spillCandidate, Interval fromInterval, StackSlotValue spillSlot) {
+        assert mappingFrom.get(spillCandidate).equals(fromInterval);
+        Interval spillInterval = getAllocator().createDerivedInterval(fromInterval);
+        spillInterval.setKind(fromInterval.kind());
+
+        // add a dummy range because real position is difficult to calculate
+        // Note: this range is a special case when the integrity of the allocation is
+        // checked
+        spillInterval.addRange(1, 2);
+
+        spillInterval.assignLocation(spillSlot);
+
+        if (Debug.isLogEnabled()) {
+            Debug.log("created new Interval for spilling: %s", spillInterval);
+        }
+        blockRegisters(spillInterval);
+
+        // insert a move from register to stack and update the mapping
+        insertMove(fromInterval, spillInterval);
+        mappingFrom.set(spillCandidate, spillInterval);
+        unblockRegisters(fromInterval);
+    }
+
+    private void printMapping() {
+        try (Indent indent = Debug.logAndIndent("Mapping")) {
+            for (int i = mappingFrom.size() - 1; i >= 0; i--) {
+                Interval fromInterval = mappingFrom.get(i);
+                Interval toInterval = mappingTo.get(i);
+                String from;
+                Value to = toInterval.location();
+                if (fromInterval == null) {
+                    from = mappingFromOpr.get(i).toString();
+                } else {
+                    from = fromInterval.location().toString();
+                }
+                Debug.log("move %s <- %s", from, to);
+            }
+        }
+    }
+
+    void setInsertPosition(List<LIRInstruction> insertList, int insertIdx) {
+        assert this.insertIdx == -1 : "use moveInsertPosition instead of setInsertPosition when data already set";
+
+        createInsertionBuffer(insertList);
+        this.insertIdx = insertIdx;
+    }
+
+    void moveInsertPosition(List<LIRInstruction> newInsertList, int newInsertIdx) {
+        if (insertionBuffer.lirList() != null && (insertionBuffer.lirList() != newInsertList || this.insertIdx != newInsertIdx)) {
+            // insert position changed -> resolve current mappings
+            resolveMappings();
+        }
+
+        if (insertionBuffer.lirList() != newInsertList) {
+            // block changed -> append insertionBuffer because it is
+            // bound to a specific block, and create a new insertionBuffer
+            appendInsertionBuffer();
+            createInsertionBuffer(newInsertList);
+        }
+
+        this.insertIdx = newInsertIdx;
+    }
+
+    public void addMapping(Interval fromInterval, Interval toInterval) {
+
+        if (isIllegal(toInterval.location()) && toInterval.canMaterialize()) {
+            if (Debug.isLogEnabled()) {
+                Debug.log("no store to rematerializable interval %s needed", toInterval);
+            }
+            return;
+        }
+        if (isIllegal(fromInterval.location()) && fromInterval.canMaterialize()) {
+            // Instead of a reload, re-materialize the value
+            JavaConstant rematValue = fromInterval.getMaterializedValue();
+            addMapping(rematValue, toInterval);
+            return;
+        }
+        if (Debug.isLogEnabled()) {
+            Debug.log("add move mapping from %s to %s", fromInterval, toInterval);
+        }
+
+        assert !fromInterval.operand.equals(toInterval.operand) : "from and to interval equal: " + fromInterval;
+        assert LIRKind.verifyMoveKinds(toInterval.kind(), fromInterval.kind()) : String.format("Kind mismatch: %s vs. %s, from=%s, to=%s", fromInterval.kind(), toInterval.kind(), fromInterval,
+                        toInterval);
+        mappingFrom.add(fromInterval);
+        mappingFromOpr.add(null);
+        mappingTo.add(toInterval);
+    }
+
+    public void addMapping(Constant fromOpr, Interval toInterval) {
+        if (Debug.isLogEnabled()) {
+            Debug.log("add move mapping from %s to %s", fromOpr, toInterval);
+        }
+
+        mappingFrom.add(null);
+        mappingFromOpr.add(fromOpr);
+        mappingTo.add(toInterval);
+    }
+
+    void resolveAndAppendMoves() {
+        if (hasMappings()) {
+            resolveMappings();
+        }
+        appendInsertionBuffer();
+    }
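+    // Typical usage (sketch based on the methods above; names are illustrative):
+    //     moveResolver.setInsertPosition(instructions, insertIdx);
+    //     moveResolver.addMapping(fromInterval, toInterval);
+    //     moveResolver.resolveAndAppendMoves();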
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/OptimizingLinearScanWalker.java	Mon Aug 31 13:06:17 2015 +0200
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.trace;
+
+import static jdk.internal.jvmci.code.ValueUtil.*;
+import jdk.internal.jvmci.code.*;
+import jdk.internal.jvmci.meta.*;
+import jdk.internal.jvmci.options.*;
+
+import com.oracle.graal.compiler.common.cfg.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.debug.Debug.Scope;
+import com.oracle.graal.lir.alloc.trace.Interval.RegisterBinding;
+import com.oracle.graal.lir.alloc.trace.Interval.RegisterBindingLists;
+import com.oracle.graal.lir.alloc.trace.Interval.RegisterPriority;
+import com.oracle.graal.lir.alloc.trace.Interval.State;
+
+public class OptimizingLinearScanWalker extends LinearScanWalker {
+
+    public static class Options {
+        // @formatter:off
+        @Option(help = "Enable LSRA optimization", type = OptionType.Debug)
+        public static final OptionValue<Boolean> LSRAOptimization = new OptionValue<>(true);
+        @Option(help = "LSRA optimization: Only split but do not reassign", type = OptionType.Debug)
+        public static final OptionValue<Boolean> LSRAOptSplitOnly = new OptionValue<>(false);
+        // @formatter:on
+    }
+
+    OptimizingLinearScanWalker(LinearScan allocator, Interval unhandledFixedFirst, Interval unhandledAnyFirst) {
+        super(allocator, unhandledFixedFirst, unhandledAnyFirst);
+    }
+
+    @Override
+    protected void handleSpillSlot(Interval interval) {
+        assert interval.location() != null : "interval not assigned " + interval;
+        if (interval.canMaterialize()) {
+            assert !isStackSlotValue(interval.location()) : "interval can materialize but assigned to a stack slot " + interval;
+            return;
+        }
+        assert isStackSlotValue(interval.location()) : "interval not assigned to a stack slot " + interval;
+        try (Scope s1 = Debug.scope("LSRAOptimization")) {
+            Debug.log("adding stack to unhandled list %s", interval);
+            unhandledLists.addToListSortedByStartAndUsePositions(RegisterBinding.Stack, interval);
+        }
+    }
+
+    @SuppressWarnings("unused")
+    private static void printRegisterBindingList(RegisterBindingLists list, RegisterBinding binding) {
+        for (Interval interval = list.get(binding); interval != Interval.EndMarker; interval = interval.next) {
+            Debug.log("%s", interval);
+        }
+    }
+
+    @Override
+    void walk() {
+        try (Scope s = Debug.scope("OptimizingLinearScanWalker")) {
+            for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
+                optimizeBlock(block);
+            }
+        }
+        super.walk();
+    }
+
+    private void optimizeBlock(AbstractBlockBase<?> block) {
+        if (block.getPredecessorCount() == 1) {
+            int nextBlock = allocator.getFirstLirInstructionId(block);
+            try (Scope s1 = Debug.scope("LSRAOptimization")) {
+                Debug.log("next block: %s (%d)", block, nextBlock);
+            }
+            try (Indent indent0 = Debug.indent()) {
+                walkTo(nextBlock);
+
+                try (Scope s1 = Debug.scope("LSRAOptimization")) {
+                    boolean changed = true;
+                    // we need to do this because the active lists might change
+                    loop: while (changed) {
+                        changed = false;
+                        try (Indent indent1 = Debug.logAndIndent("Active intervals: (block %s [%d])", block, nextBlock)) {
+                            for (Interval active = activeLists.get(RegisterBinding.Any); active != Interval.EndMarker; active = active.next) {
+                                Debug.log("active   (any): %s", active);
+                                if (optimize(nextBlock, block, active, RegisterBinding.Any)) {
+                                    changed = true;
+                                    break loop;
+                                }
+                            }
+                            for (Interval active = activeLists.get(RegisterBinding.Stack); active != Interval.EndMarker; active = active.next) {
+                                Debug.log("active (stack): %s", active);
+                                if (optimize(nextBlock, block, active, RegisterBinding.Stack)) {
+                                    changed = true;
+                                    break loop;
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    private boolean optimize(int currentPos, AbstractBlockBase<?> currentBlock, Interval currentInterval, RegisterBinding binding) {
+        // BEGIN initialize and sanity checks
+        assert currentBlock != null : "block must not be null";
+        assert currentInterval != null : "interval must not be null";
+
+        assert currentBlock.getPredecessorCount() == 1 : "more than one predecessor -> optimization not possible";
+
+        if (!currentInterval.isSplitChild()) {
+            // interval is not a split child -> no need for optimization
+            return false;
+        }
+
+        if (currentInterval.from() == currentPos) {
+            // the interval starts at the current position so no need for splitting
+            return false;
+        }
+
+        // get current location
+        AllocatableValue currentLocation = currentInterval.location();
+        assert currentLocation != null : "active intervals must have a location assigned!";
+
+        // get the location at the end of the single predecessor
+        AbstractBlockBase<?> predecessorBlock = currentBlock.getPredecessors().get(0);
+        int predEndId = allocator.getLastLirInstructionId(predecessorBlock);
+        Interval predecessorInterval = currentInterval.getIntervalCoveringOpId(predEndId);
+        assert predecessorInterval != null : "variable not live at the end of the only predecessor! " + predecessorBlock + " -> " + currentBlock + " interval: " + currentInterval;
+        AllocatableValue predecessorLocation = predecessorInterval.location();
+        assert predecessorLocation != null : "handled intervals must have a location assigned!";
+
+        // END initialize and sanity checks
+
+        if (currentLocation.equals(predecessorLocation)) {
+            // locations are already equal -> nothing to optimize
+            return false;
+        }
+
+        if (!isStackSlotValue(predecessorLocation) && !isRegister(predecessorLocation)) {
+            assert predecessorInterval.canMaterialize();
+            // value is materialized -> no need for optimization
+            return false;
+        }
+
+        assert isStackSlotValue(currentLocation) || isRegister(currentLocation) : "current location not a register or stack slot " + currentLocation;
+
+        try (Indent indent = Debug.logAndIndent("location differs: %s vs. %s", predecessorLocation, currentLocation)) {
+            // split current interval at current position
+            Debug.log("splitting at position %d", currentPos);
+
+            assert allocator.isBlockBegin(currentPos) && ((currentPos & 1) == 0) : "split pos must be even when on block boundary";
+
+            Interval splitPart = currentInterval.split(currentPos, allocator);
+            activeLists.remove(binding, currentInterval);
+
+            assert splitPart.from() >= currentPosition : "cannot append new interval before current walk position";
+
+            // the currentSplitChild is needed later when moves are inserted for reloading
+            assert splitPart.currentSplitChild() == currentInterval : "overwriting wrong currentSplitChild";
+            splitPart.makeCurrentSplitChild();
+
+            if (Debug.isLogEnabled()) {
+                Debug.log("left interval  : %s", currentInterval.logString(allocator));
+                Debug.log("right interval : %s", splitPart.logString(allocator));
+            }
+
+            if (Options.LSRAOptSplitOnly.getValue()) {
+                // just add the split interval to the unhandled list
+                unhandledLists.addToListSortedByStartAndUsePositions(RegisterBinding.Any, splitPart);
+            } else {
+                if (isRegister(predecessorLocation)) {
+                    splitRegisterInterval(splitPart, asRegister(predecessorLocation));
+                } else {
+                    assert isStackSlotValue(predecessorLocation);
+                    Debug.log("assigning interval %s to %s", splitPart, predecessorLocation);
+                    splitPart.assignLocation(predecessorLocation);
+                    // activate interval
+                    activeLists.addToListSortedByCurrentFromPositions(RegisterBinding.Stack, splitPart);
+                    splitPart.state = State.Active;
+
+                    splitStackInterval(splitPart);
+                }
+            }
+        }
+        return true;
+    }
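+    // Illustrative example (register names hypothetical): suppose a variable sits in
+    // rax at the end of the single predecessor, but the walker placed the current
+    // split child in rbx. Splitting the interval at the block begin and steering the
+    // new split child back to rax (or to the predecessor's stack slot) makes both
+    // locations agree, so data-flow resolution needs no move on that edge.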
+
+    private void splitRegisterInterval(Interval interval, Register reg) {
+        // collect current usage of registers
+        initVarsForAlloc(interval);
+        initUseLists(false);
+        spillExcludeActiveFixed();
+        // spillBlockUnhandledFixed(cur);
+        assert unhandledLists.get(RegisterBinding.Fixed) == Interval.EndMarker : "must not have unhandled fixed intervals because all fixed intervals have a use at position 0";
+        spillBlockInactiveFixed(interval);
+        spillCollectActiveAny(RegisterPriority.LiveAtLoopEnd);
+        spillCollectInactiveAny(interval);
+
+        if (Debug.isLogEnabled()) {
+            try (Indent indent2 = Debug.logAndIndent("state of registers:")) {
+                for (Register register : availableRegs) {
+                    int i = register.number;
+                    try (Indent indent3 = Debug.logAndIndent("reg %d: usePos: %d, blockPos: %d, intervals: ", i, usePos[i], blockPos[i])) {
+                        for (int j = 0; j < spillIntervals[i].size(); j++) {
+                            Debug.log("%d ", spillIntervals[i].get(j).operandNumber);
+                        }
+                    }
+                }
+            }
+        }
+
+        // the register must be free at least until this position
+        boolean needSplit = blockPos[reg.number] <= interval.to();
+
+        int splitPos = blockPos[reg.number];
+
+        assert splitPos > 0 : "invalid splitPos";
+        assert needSplit || splitPos > interval.from() : "splitting interval at from";
+
+        Debug.log("assigning interval %s to %s", interval, reg);
+        interval.assignLocation(reg.asValue(interval.kind()));
+        if (needSplit) {
+            // register not available for the full interval, so split it
+            splitWhenPartialRegisterAvailable(interval, splitPos);
+        }
+
+        // perform splitting and spilling for all affected intervals
+        splitAndSpillIntersectingIntervals(reg);
+
+        // activate interval
+        activeLists.addToListSortedByCurrentFromPositions(RegisterBinding.Any, interval);
+        interval.state = State.Active;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/OutOfRegistersException.java	Mon Aug 31 13:06:17 2015 +0200
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.trace;
+
+import jdk.internal.jvmci.code.*;
+
+public class OutOfRegistersException extends BailoutException {
+
+    private static final long serialVersionUID = -3479786650143432195L;
+
+    private final String description;
+
+    public OutOfRegistersException(String msg) {
+        super(msg);
+        this.description = "";
+    }
+
+    public OutOfRegistersException(Throwable cause, String msg) {
+        super(cause, msg);
+        this.description = "";
+    }
+
+    public OutOfRegistersException(boolean permanent, String msg) {
+        super(permanent, msg);
+        this.description = "";
+    }
+
+    public OutOfRegistersException(String msg, String description) {
+        super(msg);
+        this.description = description;
+    }
+
+    public OutOfRegistersException(Throwable cause, String msg, String description) {
+        super(cause, msg);
+        this.description = description;
+    }
+
+    public OutOfRegistersException(boolean permanent, String msg, String description) {
+        super(permanent, msg);
+        this.description = description;
+    }
+
+    public String getDescription() {
+        return description;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/Range.java	Mon Aug 31 13:06:17 2015 +0200
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.trace;
+
+/**
+ * Represents a range of integers from a start (inclusive) to an end (exclusive).
+ */
+public final class Range {
+
+    public static final Range EndMarker = new Range(Integer.MAX_VALUE, Integer.MAX_VALUE, null);
+
+    /**
+     * The start of the range, inclusive.
+     */
+    public int from;
+
+    /**
+     * The end of the range, exclusive.
+     */
+    public int to;
+
+    /**
+     * A link to allow the range to be put into a singly linked list.
+     */
+    public Range next;
+
+    boolean intersects(Range r) {
+        return intersectsAt(r) != -1;
+    }
+
+    /**
+     * Creates a new range.
+     *
+     * @param from the start of the range, inclusive
+     * @param to the end of the range, exclusive
+     * @param next link to the next range in a linked list
+     */
+    Range(int from, int to, Range next) {
+        this.from = from;
+        this.to = to;
+        this.next = next;
+    }
+
+    int intersectsAt(Range other) {
+        Range r1 = this;
+        Range r2 = other;
+
+        assert r2 != null : "null ranges not allowed";
+        assert r1 != EndMarker && r2 != EndMarker : "empty ranges not allowed";
+
+        do {
+            if (r1.from < r2.from) {
+                if (r1.to <= r2.from) {
+                    r1 = r1.next;
+                    if (r1 == EndMarker) {
+                        return -1;
+                    }
+                } else {
+                    return r2.from;
+                }
+            } else {
+                if (r2.from < r1.from) {
+                    if (r2.to <= r1.from) {
+                        r2 = r2.next;
+                        if (r2 == EndMarker) {
+                            return -1;
+                        }
+                    } else {
+                        return r1.from;
+                    }
+            } else { // r1.from == r2.from
+                    if (r1.from == r1.to) {
+                        r1 = r1.next;
+                        if (r1 == EndMarker) {
+                            return -1;
+                        }
+                    } else {
+                        if (r2.from == r2.to) {
+                            r2 = r2.next;
+                            if (r2 == EndMarker) {
+                                return -1;
+                            }
+                        } else {
+                            return r1.from;
+                        }
+                    }
+                }
+            }
+        } while (true);
+    }
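+    // Illustrative example (single-element range lists): [4, 8) and [6, 10)
+    // intersect, so intersectsAt returns 6; [4, 6) and [6, 10) merely touch
+    // (ends are exclusive), so it returns -1.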
+
+    @Override
+    public String toString() {
+        return "[" + from + ", " + to + "]";
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/RegisterVerifier.java	Mon Aug 31 13:06:17 2015 +0200
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.trace;
+
+import static jdk.internal.jvmci.code.ValueUtil.*;
+
+import java.util.*;
+
+import jdk.internal.jvmci.code.*;
+import jdk.internal.jvmci.common.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.debug.Debug.Scope;
+import jdk.internal.jvmci.meta.*;
+
+import com.oracle.graal.compiler.common.cfg.*;
+import com.oracle.graal.compiler.common.util.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.LIRInstruction.OperandFlag;
+import com.oracle.graal.lir.LIRInstruction.OperandMode;
+
+/**
+ * Verifies the result of register allocation by walking all blocks and checking, for each
+ * instruction, that every register holds the interval that the data flow says it should hold.
+ */
+final class RegisterVerifier {
+
+    LinearScan allocator;
+    List<AbstractBlockBase<?>> workList; // all blocks that must be processed
+    ArrayMap<Interval[]> savedStates; // saved information of previous check
+
+    // simplified access to methods of LinearScan
+    Interval intervalAt(Value operand) {
+        return allocator.intervalFor(operand);
+    }
+
+    // currently, only registers are processed
+    int stateSize() {
+        return allocator.maxRegisterNumber() + 1;
+    }
+
+    // accessors
+    Interval[] stateForBlock(AbstractBlockBase<?> block) {
+        return savedStates.get(block.getId());
+    }
+
+    void setStateForBlock(AbstractBlockBase<?> block, Interval[] savedState) {
+        savedStates.put(block.getId(), savedState);
+    }
+
+    void addToWorkList(AbstractBlockBase<?> block) {
+        if (!workList.contains(block)) {
+            workList.add(block);
+        }
+    }
+
+    RegisterVerifier(LinearScan allocator) {
+        this.allocator = allocator;
+        this.workList = new ArrayList<>(16);
+        this.savedStates = new ArrayMap<>();
+    }
+
+    void verify(AbstractBlockBase<?> start) {
+        try (Scope s = Debug.scope("RegisterVerifier")) {
+            // setup input registers (method arguments) for first block
+            Interval[] inputState = new Interval[stateSize()];
+            setStateForBlock(start, inputState);
+            addToWorkList(start);
+
+            // main loop for verification
+            do {
+                AbstractBlockBase<?> block = workList.get(0);
+                workList.remove(0);
+
+                processBlock(block);
+            } while (!workList.isEmpty());
+        }
+    }
+
+    private void processBlock(AbstractBlockBase<?> block) {
+        try (Indent indent = Debug.logAndIndent("processBlock B%d", block.getId())) {
+            // must copy state because it is modified
+            Interval[] inputState = copy(stateForBlock(block));
+
+            try (Indent indent2 = Debug.logAndIndent("Input-State of intervals:")) {
+                printState(inputState);
+            }
+
+            // process all operations of the block
+            processOperations(block, inputState);
+
+            try (Indent indent2 = Debug.logAndIndent("Output-State of intervals:")) {
+                printState(inputState);
+            }
+
+            // iterate all successors
+            for (AbstractBlockBase<?> succ : block.getSuccessors()) {
+                processSuccessor(succ, inputState);
+            }
+        }
+    }
+
+    protected void printState(Interval[] inputState) {
+        for (int i = 0; i < stateSize(); i++) {
+            Register reg = allocator.getRegisters()[i];
+            assert reg.number == i;
+            if (inputState[i] != null) {
+                Debug.log(" %6s %4d  --  %s", reg, inputState[i].operandNumber, inputState[i]);
+            } else {
+                Debug.log(" %6s   __", reg);
+            }
+        }
+    }
+
+    private void processSuccessor(AbstractBlockBase<?> block, Interval[] inputState) {
+        Interval[] savedState = stateForBlock(block);
+
+        if (savedState != null) {
+            // this block was already processed before.
+            // check if new inputState is consistent with savedState
+
+            boolean savedStateCorrect = true;
+            for (int i = 0; i < stateSize(); i++) {
+                if (inputState[i] != savedState[i]) {
+                    // current inputState and previous savedState assume a different
+                    // interval in this register -> assume that this register is invalid
+                    if (savedState[i] != null) {
+                        // invalidate old calculation only if it assumed that
+                        // register was valid. when the register was already invalid,
+                        // then the old calculation was correct.
+                        savedStateCorrect = false;
+                        savedState[i] = null;
+
+                        Debug.log("processSuccessor B%d: invalidating slot %d", block.getId(), i);
+                    }
+                }
+            }
+
+            if (savedStateCorrect) {
+                // already processed block with correct inputState
+                Debug.log("processSuccessor B%d: previous visit already correct", block.getId());
+            } else {
+                // must re-visit this block
+                Debug.log("processSuccessor B%d: must re-visit because input state changed", block.getId());
+                addToWorkList(block);
+            }
+
+        } else {
+            // block was not processed before, so set initial inputState
+            Debug.log("processSuccessor B%d: initial visit", block.getId());
+
+            setStateForBlock(block, copy(inputState));
+            addToWorkList(block);
+        }
+    }
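+    // Illustrative example (register and interval names hypothetical): if a block is
+    // first reached with {rax = v1} and later from another path with {rax = v2}, the
+    // states disagree, so rax is invalidated (set to null) and the block is
+    // re-queued. Since entries only ever change from non-null to null, the
+    // iteration terminates at a fixpoint.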
+
+    static Interval[] copy(Interval[] inputState) {
+        return inputState.clone();
+    }
+
+    static void statePut(Interval[] inputState, Value location, Interval interval) {
+        if (location != null && isRegister(location)) {
+            Register reg = asRegister(location);
+            int regNum = reg.number;
+            if (interval != null) {
+                Debug.log("%s = %s", reg, interval.operand);
+            } else if (inputState[regNum] != null) {
+                Debug.log("%s = null", reg);
+            }
+
+            inputState[regNum] = interval;
+        }
+    }
+
+    static boolean checkState(AbstractBlockBase<?> block, LIRInstruction op, Interval[] inputState, Value operand, Value reg, Interval interval) {
+        if (reg != null && isRegister(reg)) {
+            if (inputState[asRegister(reg).number] != interval) {
+                throw new JVMCIError(
+                                "Error in register allocation: operation (%s) in block %s expected register %s (operand %s) to contain the value of interval %s but data-flow says it contains interval %s",
+                                op, block, reg, operand, interval, inputState[asRegister(reg).number]);
+            }
+        }
+        return true;
+    }
+
+    void processOperations(AbstractBlockBase<?> block, final Interval[] inputState) {
+        List<LIRInstruction> ops = allocator.getLIR().getLIRforBlock(block);
+        InstructionValueConsumer useConsumer = new InstructionValueConsumer() {
+
+            @Override
+            public void visitValue(LIRInstruction op, Value operand, OperandMode mode, EnumSet<OperandFlag> flags) {
+                // we skip spill moves inserted by the spill position optimization
+                if (LinearScan.isVariableOrRegister(operand) && allocator.isProcessed(operand) && op.id() != LinearScan.DOMINATOR_SPILL_MOVE_ID) {
+                    Interval interval = intervalAt(operand);
+                    if (op.id() != -1) {
+                        interval = interval.getSplitChildAtOpId(op.id(), mode, allocator);
+                    }
+
+                    assert checkState(block, op, inputState, interval.operand, interval.location(), interval.splitParent());
+                }
+            }
+        };
+
+        InstructionValueConsumer defConsumer = (op, operand, mode, flags) -> {
+            if (LinearScan.isVariableOrRegister(operand) && allocator.isProcessed(operand)) {
+                Interval interval = intervalAt(operand);
+                if (op.id() != -1) {
+                    interval = interval.getSplitChildAtOpId(op.id(), mode, allocator);
+                }
+
+                statePut(inputState, interval.location(), interval.splitParent());
+            }
+        };
+
+        // visit all instructions of the block
+        for (int i = 0; i < ops.size(); i++) {
+            final LIRInstruction op = ops.get(i);
+
+            if (Debug.isLogEnabled()) {
+                Debug.log("%s", op.toStringWithIdPrefix());
+            }
+
+            // check if input operands are correct
+            op.visitEachInput(useConsumer);
+            // invalidate all caller save registers at calls
+            if (op.destroysCallerSavedRegisters()) {
+                for (Register r : allocator.getRegisterAllocationConfig().getRegisterConfig().getCallerSaveRegisters()) {
+                    statePut(inputState, r.asValue(), null);
+                }
+            }
+            op.visitEachAlive(useConsumer);
+            // set temp operands (some operations use temp operands also as output operands, so
+            // they can't be set to null)
+            op.visitEachTemp(defConsumer);
+            // set output operands
+            op.visitEachOutput(defConsumer);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/SSAMoveResolver.java	Mon Aug 31 13:06:17 2015 +0200
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.trace;
+
+import static jdk.internal.jvmci.code.ValueUtil.*;
+
+import java.util.*;
+
+import jdk.internal.jvmci.code.*;
+import jdk.internal.jvmci.common.*;
+import jdk.internal.jvmci.meta.*;
+
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.framemap.*;
+
+public final class SSAMoveResolver extends MoveResolver {
+
+    private static final int STACK_SLOT_IN_CALLER_FRAME_IDX = -1;
+    private int[] stackBlocked;
+    private final int firstVirtualStackIndex;
+
+    public SSAMoveResolver(LinearScan allocator) {
+        super(allocator);
+        FrameMapBuilderTool frameMapBuilderTool = (FrameMapBuilderTool) allocator.getFrameMapBuilder();
+        FrameMap frameMap = frameMapBuilderTool.getFrameMap();
+        this.stackBlocked = new int[frameMapBuilderTool.getNumberOfStackSlots()];
+        this.firstVirtualStackIndex = !frameMap.frameNeedsAllocating() ? 0 : frameMap.currentFrameSize() + 1;
+    }
+
+    @Override
+    public boolean checkEmpty() {
+        for (int i = 0; i < stackBlocked.length; i++) {
+            assert stackBlocked[i] == 0 : "stack map must be empty before and after processing";
+        }
+        return super.checkEmpty();
+    }
+
+    @Override
+    protected void checkMultipleReads() {
+        // multiple reads are allowed in SSA LSRA
+    }
+
+    @Override
+    protected void verifyStackSlotMapping() {
+        // relax disjoint stack maps invariant
+    }
+
+    @Override
+    protected boolean areMultipleReadsAllowed() {
+        return true;
+    }
+
+    @Override
+    protected boolean mightBeBlocked(Value location) {
+        if (super.mightBeBlocked(location)) {
+            return true;
+        }
+        if (isStackSlotValue(location)) {
+            return true;
+        }
+        return false;
+    }
+
+    private int getStackArrayIndex(StackSlotValue stackSlotValue) {
+        if (isStackSlot(stackSlotValue)) {
+            return getStackArrayIndex(asStackSlot(stackSlotValue));
+        }
+        if (isVirtualStackSlot(stackSlotValue)) {
+            return getStackArrayIndex(asVirtualStackSlot(stackSlotValue));
+        }
+        throw JVMCIError.shouldNotReachHere("Unhandled StackSlotValue: " + stackSlotValue);
+    }
+
+    private int getStackArrayIndex(StackSlot stackSlot) {
+        int stackIdx;
+        if (stackSlot.isInCallerFrame()) {
+            // incoming stack arguments can be ignored
+            stackIdx = STACK_SLOT_IN_CALLER_FRAME_IDX;
+        } else {
+            assert stackSlot.getRawAddFrameSize() : "Unexpected stack slot: " + stackSlot;
+            int offset = -stackSlot.getRawOffset();
+            assert 0 <= offset && offset < firstVirtualStackIndex : String.format("Wrong stack slot offset: %d (first virtual stack slot index: %d)", offset, firstVirtualStackIndex);
+            stackIdx = offset;
+        }
+        return stackIdx;
+    }
+
+    private int getStackArrayIndex(VirtualStackSlot virtualStackSlot) {
+        return firstVirtualStackIndex + virtualStackSlot.getId();
+    }
+
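+    /**
+     * The blocking state is reference counted: a location is blocked with direction 1 while a
+     * pending move still reads it and released with direction -1 once that move has been
+     * emitted. The array is grown lazily because virtual stack slots are created on demand.
+     */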
+    @Override
+    protected void setValueBlocked(Value location, int direction) {
+        assert direction == 1 || direction == -1 : "direction must be 1 or -1";
+        if (isStackSlotValue(location)) {
+            int stackIdx = getStackArrayIndex(asStackSlotValue(location));
+            if (stackIdx == STACK_SLOT_IN_CALLER_FRAME_IDX) {
+                // incoming stack arguments can be ignored
+                return;
+            }
+            if (stackIdx >= stackBlocked.length) {
+                stackBlocked = Arrays.copyOf(stackBlocked, stackIdx + 1);
+            }
+            stackBlocked[stackIdx] += direction;
+        } else {
+            super.setValueBlocked(location, direction);
+        }
+    }
+
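+    /**
+     * Stack indices beyond the current array length have never been blocked and thus report 0.
+     */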
+    @Override
+    protected int valueBlocked(Value location) {
+        if (isStackSlotValue(location)) {
+            int stackIdx = getStackArrayIndex(asStackSlotValue(location));
+            if (stackIdx == STACK_SLOT_IN_CALLER_FRAME_IDX) {
+                // incoming stack arguments are always blocked (i.e. they cannot be written)
+                return 1;
+            }
+            if (stackIdx >= stackBlocked.length) {
+                return 0;
+            }
+            return stackBlocked[stackIdx];
+        }
+        return super.valueBlocked(location);
+    }
+
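+    /**
+     * Stack-to-stack moves are not plain moves on most targets, so they are delegated to the
+     * allocator's spill move factory, which knows how to emit them for the platform.
+     */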
+    @Override
+    protected LIRInstruction createMove(AllocatableValue fromOpr, AllocatableValue toOpr, AllocatableValue fromLocation, AllocatableValue toLocation) {
+        if (isStackSlotValue(toLocation) && isStackSlotValue(fromLocation)) {
+            return getAllocator().getSpillMoveFactory().createStackMove(toOpr, fromOpr);
+        }
+        return super.createMove(fromOpr, toOpr, fromLocation, toLocation);
+    }
+
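+    /**
+     * A {@code spillCandidate} of -1 indicates that no register source was found to spill; the
+     * remaining cycle then consists of stack locations only (asserted below) and is broken by
+     * moving an arbitrary source into a freshly allocated spill slot.
+     */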
+    @Override
+    protected void breakCycle(int spillCandidate) {
+        if (spillCandidate != -1) {
+            super.breakCycle(spillCandidate);
+            return;
+        }
+        assert mappingFromSize() > 1;
+        // Arbitrarily select the first entry for spilling.
+        int stackSpillCandidate = 0;
+        Interval fromInterval = getMappingFrom(stackSpillCandidate);
+        assert isStackSlotValue(fromInterval.location());
+        // allocate new stack slot
+        StackSlotValue spillSlot = getAllocator().getFrameMapBuilder().allocateSpillSlot(fromInterval.kind());
+        spillInterval(stackSpillCandidate, fromInterval, spillSlot);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/SSILinearScanEliminateSpillMovePhase.java	Mon Aug 31 13:06:17 2015 +0200
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.alloc.trace;
+
+import com.oracle.graal.compiler.common.cfg.*;
+import com.oracle.graal.lir.StandardOp.MoveOp;
+
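+/**
+ * Variant of {@link LinearScanEliminateSpillMovePhase} for LIR in SSI form: label instructions
+ * are also inspected because they define PHI values, and for now no spill moves are eliminated
+ * at all.
+ */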
+public class SSILinearScanEliminateSpillMovePhase extends LinearScanEliminateSpillMovePhase {
+
+    public SSILinearScanEliminateSpillMovePhase(LinearScan allocator) {
+        super(allocator);
+    }
+
+    @Override
+    protected int firstInstructionOfInterest() {
+        // also look at Labels as they define PHI values
+        return 0;
+    }
+
+    @Override
+    protected boolean canEliminateSpillMove(AbstractBlockBase<?> block, MoveOp move) {
+        // TODO (je) do not eliminate spill moves yet!
+        return false;
+    }
+}
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceGlobalMoveResolver.java	Mon Aug 31 18:42:38 2015 -0700
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceGlobalMoveResolver.java	Mon Aug 31 13:06:17 2015 +0200
@@ -33,7 +33,6 @@
 
 import com.oracle.graal.debug.*;
 import com.oracle.graal.lir.*;
-import com.oracle.graal.lir.alloc.lsra.*;
 import com.oracle.graal.lir.framemap.*;
 import com.oracle.graal.lir.gen.*;
 import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScan.java	Mon Aug 31 18:42:38 2015 -0700
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScan.java	Mon Aug 31 13:06:17 2015 +0200
@@ -37,9 +37,6 @@
 import com.oracle.graal.compiler.common.cfg.*;
 import com.oracle.graal.debug.*;
 import com.oracle.graal.debug.Debug.Scope;
-import com.oracle.graal.lir.alloc.lsra.*;
-import com.oracle.graal.lir.alloc.lsra.ssa.*;
-import com.oracle.graal.lir.alloc.lsra.ssi.*;
 import com.oracle.graal.lir.gen.*;
 import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;
 import com.oracle.graal.lir.phases.AllocationPhase.AllocationContext;
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanAssignLocationsPhase.java	Mon Aug 31 18:42:38 2015 -0700
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanAssignLocationsPhase.java	Mon Aug 31 13:06:17 2015 +0200
@@ -36,7 +36,6 @@
 import com.oracle.graal.lir.LIRInstruction.OperandFlag;
 import com.oracle.graal.lir.LIRInstruction.OperandMode;
 import com.oracle.graal.lir.StandardOp.BlockEndOp;
-import com.oracle.graal.lir.alloc.lsra.*;
 
 /**
  * Specialization of {@link LinearScanAssignLocationsPhase} that inserts
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanLifetimeAnalysisPhase.java	Mon Aug 31 18:42:38 2015 -0700
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanLifetimeAnalysisPhase.java	Mon Aug 31 13:06:17 2015 +0200
@@ -40,10 +40,9 @@
 import com.oracle.graal.lir.StandardOp.BlockEndOp;
 import com.oracle.graal.lir.StandardOp.LabelOp;
 import com.oracle.graal.lir.StandardOp.MoveOp;
-import com.oracle.graal.lir.alloc.lsra.*;
-import com.oracle.graal.lir.alloc.lsra.Interval.RegisterPriority;
-import com.oracle.graal.lir.alloc.lsra.Interval.SpillState;
-import com.oracle.graal.lir.alloc.lsra.LinearScan.BlockData;
+import com.oracle.graal.lir.alloc.trace.Interval.RegisterPriority;
+import com.oracle.graal.lir.alloc.trace.Interval.SpillState;
+import com.oracle.graal.lir.alloc.trace.LinearScan.BlockData;
 import com.oracle.graal.lir.ssi.*;
 
 public class TraceLinearScanLifetimeAnalysisPhase extends LinearScanLifetimeAnalysisPhase {
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanResolveDataFlowPhase.java	Mon Aug 31 18:42:38 2015 -0700
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceLinearScanResolveDataFlowPhase.java	Mon Aug 31 13:06:17 2015 +0200
@@ -32,7 +32,6 @@
 import com.oracle.graal.compiler.common.cfg.*;
 import com.oracle.graal.debug.*;
 import com.oracle.graal.lir.*;
-import com.oracle.graal.lir.alloc.lsra.*;
 import com.oracle.graal.lir.ssa.SSAUtil.PhiValueVisitor;
 import com.oracle.graal.lir.ssi.*;
 
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceSimpleLifetimeAnalysisPhase.java	Mon Aug 31 18:42:38 2015 -0700
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/trace/TraceSimpleLifetimeAnalysisPhase.java	Mon Aug 31 13:06:17 2015 +0200
@@ -34,9 +34,8 @@
 import com.oracle.graal.compiler.common.cfg.*;
 import com.oracle.graal.debug.*;
 import com.oracle.graal.lir.*;
-import com.oracle.graal.lir.alloc.lsra.*;
-import com.oracle.graal.lir.alloc.lsra.Interval.RegisterPriority;
-import com.oracle.graal.lir.alloc.lsra.Interval.SpillState;
+import com.oracle.graal.lir.alloc.trace.Interval.RegisterPriority;
+import com.oracle.graal.lir.alloc.trace.Interval.SpillState;
 import com.oracle.graal.lir.gen.*;
 import com.oracle.graal.lir.gen.LIRGeneratorTool.SpillMoveFactory;