# HG changeset patch # User Doug Simon # Date 1423223090 -3600 # Node ID d599eeab1b532695ad55e6185664dbadaa5bb51f # Parent 32c7a5a88523a07fb750bad3a8cc1393fb022e1d# Parent 39e99cf01468d46929237d49e15889eb0d491f6b Merge. diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.alloc/overview.html --- a/graal/com.oracle.graal.alloc/overview.html Fri Feb 06 12:17:20 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,36 +0,0 @@ - - - - - - - - -Documentation for the com.oracle.graal.alloc project. - - - diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.alloc/src/com/oracle/graal/alloc/ComputeBlockOrder.java --- a/graal/com.oracle.graal.alloc/src/com/oracle/graal/alloc/ComputeBlockOrder.java Fri Feb 06 12:17:20 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,265 +0,0 @@ -/* - * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -package com.oracle.graal.alloc; - -import java.util.*; - -import com.oracle.graal.compiler.common.cfg.*; - -/** - * Computes an ordering of the block that can be used by the linear scan register allocator and the - * machine code generator. The machine code generation order will start with the first block and - * produce a straight sequence always following the most likely successor. Then it will continue - * with the most likely path that was left out during this process. The process iteratively - * continues until all blocks are scheduled. Additionally, it is guaranteed that all blocks of a - * loop are scheduled before any block following the loop is scheduled. - * - * The machine code generator order includes reordering of loop headers such that the backward jump - * is a conditional jump if there is only one loop end block. Additionally, the target of loop - * backward jumps are always marked as aligned. Aligning the target of conditional jumps does not - * bring a measurable benefit and is therefore avoided to keep the code size small. - * - * The linear scan register allocator order has an additional mechanism that prevents merge nodes - * from being scheduled if there is at least one highly likely predecessor still unscheduled. This - * increases the probability that the merge node and the corresponding predecessor are more closely - * together in the schedule thus decreasing the probability for inserted phi moves. Also, the - * algorithm sets the linear scan order number of the block that corresponds to its index in the - * linear scan order. 
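The ordering strategy this javadoc describes can be sketched independently of the Graal types. Below is a minimal, self-contained approximation for illustration only: SimpleBlock and its fields are invented stand-ins for the real AbstractBlock API, and the loop-header rotation and merge-penalty refinements of the actual code are omitted.

```java
import java.util.*;

// Hypothetical, simplified stand-in for the real AbstractBlock<T> interface:
// only the pieces the ordering heuristic needs are modelled here.
final class SimpleBlock {
    final int id;
    final double probability;
    final int loopDepth;
    final List<SimpleBlock> successors = new ArrayList<>();

    SimpleBlock(int id, double probability, int loopDepth) {
        this.id = id;
        this.probability = probability;
        this.loopDepth = loopDepth;
    }
}

public class GreedyOrderDemo {

    // Greedy ordering in the spirit described above: repeatedly take the most
    // "important" open block (deeper loops first, then higher probability) from
    // a priority queue and follow its most likely unvisited successor for as
    // long as possible before returning to the queue.
    static List<Integer> order(int blockCount, SimpleBlock start) {
        List<Integer> order = new ArrayList<>();
        BitSet visited = new BitSet(blockCount);
        PriorityQueue<SimpleBlock> worklist = new PriorityQueue<>(
                        Comparator.comparingInt((SimpleBlock b) -> -b.loopDepth)
                                  .thenComparingDouble(b -> -b.probability));
        visited.set(start.id);
        worklist.add(start);
        while (!worklist.isEmpty()) {
            SimpleBlock block = worklist.poll();
            while (block != null) {
                order.add(block.id);
                // Pick the most likely unvisited successor and continue the straight path there.
                SimpleBlock next = null;
                for (SimpleBlock s : block.successors) {
                    if (!visited.get(s.id) && (next == null || s.probability > next.probability)) {
                        next = s;
                    }
                }
                if (next != null) {
                    visited.set(next.id);
                }
                // Everything else that became reachable goes onto the worklist for later.
                for (SimpleBlock s : block.successors) {
                    if (!visited.get(s.id)) {
                        visited.set(s.id);
                        worklist.add(s);
                    }
                }
                block = next;
            }
        }
        return order;
    }

    public static void main(String[] args) {
        // Simple diamond: start -> {then (90%), else (10%)} -> merge.
        SimpleBlock start = new SimpleBlock(0, 1.0, 0);
        SimpleBlock thenBlock = new SimpleBlock(1, 0.9, 0);
        SimpleBlock elseBlock = new SimpleBlock(2, 0.1, 0);
        SimpleBlock merge = new SimpleBlock(3, 1.0, 0);
        start.successors.addAll(Arrays.asList(thenBlock, elseBlock));
        thenBlock.successors.add(merge);
        elseBlock.successors.add(merge);

        System.out.println(order(4, start)); // [0, 1, 3, 2]: likely path first, cold branch last
    }
}
```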
- */ -public final class ComputeBlockOrder { - - /** - * The initial capacities of the worklists used for iteratively finding the block order. - */ - private static final int INITIAL_WORKLIST_CAPACITY = 10; - - /** - * Divisor used for degrading the probability of the current path versus unscheduled paths at a - * merge node when calculating the linear scan order. A high value means that predecessors of - * merge nodes are more likely to be scheduled before the merge node. - */ - private static final int PENALTY_VERSUS_UNSCHEDULED = 10; - - /** - * Computes the block order used for the linear scan register allocator. - * - * @return sorted list of blocks - */ - public static > List computeLinearScanOrder(int blockCount, T startBlock) { - List order = new ArrayList<>(); - BitSet visitedBlocks = new BitSet(blockCount); - PriorityQueue worklist = initializeWorklist(startBlock, visitedBlocks); - computeLinearScanOrder(order, worklist, visitedBlocks); - assert checkOrder(order, blockCount); - return order; - } - - /** - * Computes the block order used for code emission. - * - * @return sorted list of blocks - */ - public static > List computeCodeEmittingOrder(int blockCount, T startBlock) { - List order = new ArrayList<>(); - BitSet visitedBlocks = new BitSet(blockCount); - PriorityQueue worklist = initializeWorklist(startBlock, visitedBlocks); - computeCodeEmittingOrder(order, worklist, visitedBlocks); - assert checkOrder(order, blockCount); - return order; - } - - /** - * Iteratively adds paths to the code emission block order. - */ - private static > void computeCodeEmittingOrder(List order, PriorityQueue worklist, BitSet visitedBlocks) { - while (!worklist.isEmpty()) { - T nextImportantPath = worklist.poll(); - addPathToCodeEmittingOrder(nextImportantPath, order, worklist, visitedBlocks); - } - } - - /** - * Iteratively adds paths to the linear scan block order. - */ - private static > void computeLinearScanOrder(List order, PriorityQueue worklist, BitSet visitedBlocks) { - while (!worklist.isEmpty()) { - T nextImportantPath = worklist.poll(); - addPathToLinearScanOrder(nextImportantPath, order, worklist, visitedBlocks); - } - } - - /** - * Initializes the priority queue used for the work list of blocks and adds the start block. - */ - private static > PriorityQueue initializeWorklist(T startBlock, BitSet visitedBlocks) { - PriorityQueue result = new PriorityQueue<>(INITIAL_WORKLIST_CAPACITY, new BlockOrderComparator<>()); - result.add(startBlock); - visitedBlocks.set(startBlock.getId()); - return result; - } - - /** - * Add a linear path to the linear scan order greedily following the most likely successor. - */ - private static > void addPathToLinearScanOrder(T block, List order, PriorityQueue worklist, BitSet visitedBlocks) { - block.setLinearScanNumber(order.size()); - order.add(block); - T mostLikelySuccessor = findAndMarkMostLikelySuccessor(block, visitedBlocks); - enqueueSuccessors(block, worklist, visitedBlocks); - if (mostLikelySuccessor != null) { - if (!mostLikelySuccessor.isLoopHeader() && mostLikelySuccessor.getPredecessorCount() > 1) { - // We are at a merge. Check probabilities of predecessors that are not yet - // scheduled. - double unscheduledSum = 0.0; - for (T pred : mostLikelySuccessor.getPredecessors()) { - if (pred.getLinearScanNumber() == -1) { - unscheduledSum += pred.probability(); - } - } - - if (unscheduledSum > block.probability() / PENALTY_VERSUS_UNSCHEDULED) { - // Add this merge only after at least one additional predecessor gets scheduled. 
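The merge-deferral test above is easiest to see with concrete numbers. The following standalone sketch restates the same check with made-up probabilities: with PENALTY_VERSUS_UNSCHEDULED = 10, a path of probability 0.5 absorbs a merge immediately only if its still-unscheduled predecessors sum to at most 0.05.

```java
// Worked example of the merge-deferral test used in addPathToLinearScanOrder
// (the probability values are invented for illustration).
public class MergePenaltyDemo {
    private static final int PENALTY_VERSUS_UNSCHEDULED = 10; // same constant as in the class above

    static boolean deferMerge(double currentPathProbability, double[] unscheduledPredecessorProbabilities) {
        double unscheduledSum = 0.0;
        for (double p : unscheduledPredecessorProbabilities) {
            unscheduledSum += p;
        }
        // Defer the merge if the unscheduled predecessors together are still
        // "likely enough" compared to the (penalized) probability of the current path.
        return unscheduledSum > currentPathProbability / PENALTY_VERSUS_UNSCHEDULED;
    }

    public static void main(String[] args) {
        // Current path probability 0.5 -> threshold 0.05.
        System.out.println(deferMerge(0.5, new double[]{0.2, 0.01})); // true: 0.21 > 0.05, schedule a predecessor first
        System.out.println(deferMerge(0.5, new double[]{0.004}));     // false: 0.004 <= 0.05, append the merge now
    }
}
```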
- visitedBlocks.clear(mostLikelySuccessor.getId()); - return; - } - } - addPathToLinearScanOrder(mostLikelySuccessor, order, worklist, visitedBlocks); - } - } - - /** - * Add a linear path to the code emission order greedily following the most likely successor. - */ - private static > void addPathToCodeEmittingOrder(T initialBlock, List order, PriorityQueue worklist, BitSet visitedBlocks) { - T block = initialBlock; - while (block != null) { - // Skip loop headers if there is only a single loop end block to - // make the backward jump be a conditional jump. - if (!skipLoopHeader(block)) { - - // Align unskipped loop headers as they are the target of the backward jump. - if (block.isLoopHeader()) { - block.setAlign(true); - } - addBlock(block, order); - } - - Loop loop = block.getLoop(); - if (block.isLoopEnd() && skipLoopHeader(loop.getHeader())) { - - // This is the only loop end of a skipped loop header. - // Add the header immediately afterwards. - addBlock(loop.getHeader(), order); - - // Make sure the loop successors of the loop header are aligned - // as they are the target - // of the backward jump. - for (T successor : loop.getHeader().getSuccessors()) { - if (successor.getLoopDepth() == block.getLoopDepth()) { - successor.setAlign(true); - } - } - } - - T mostLikelySuccessor = findAndMarkMostLikelySuccessor(block, visitedBlocks); - enqueueSuccessors(block, worklist, visitedBlocks); - block = mostLikelySuccessor; - } - } - - /** - * Adds a block to the ordering. - */ - private static > void addBlock(T header, List order) { - assert !order.contains(header) : "Cannot insert block twice"; - order.add(header); - } - - /** - * Find the highest likely unvisited successor block of a given block. - */ - private static > T findAndMarkMostLikelySuccessor(T block, BitSet visitedBlocks) { - T result = null; - for (T successor : block.getSuccessors()) { - assert successor.probability() >= 0.0 : "Probabilities must be positive"; - if (!visitedBlocks.get(successor.getId()) && successor.getLoopDepth() >= block.getLoopDepth() && (result == null || successor.probability() >= result.probability())) { - result = successor; - } - } - if (result != null) { - visitedBlocks.set(result.getId()); - } - return result; - } - - /** - * Add successor blocks into the given work list if they are not already marked as visited. - */ - private static > void enqueueSuccessors(T block, PriorityQueue worklist, BitSet visitedBlocks) { - for (T successor : block.getSuccessors()) { - if (!visitedBlocks.get(successor.getId())) { - visitedBlocks.set(successor.getId()); - worklist.add(successor); - } - } - } - - /** - * Skip the loop header block if the loop consists of more than one block and it has only a - * single loop end block. - */ - private static > boolean skipLoopHeader(AbstractBlock block) { - return (block.isLoopHeader() && !block.isLoopEnd() && block.getLoop().numBackedges() == 1); - } - - /** - * Checks that the ordering contains the expected number of blocks. - */ - private static boolean checkOrder(List> order, int expectedBlockCount) { - assert order.size() == expectedBlockCount : String.format("Number of blocks in ordering (%d) does not match expected block count (%d)", order.size(), expectedBlockCount); - return true; - } - - /** - * Comparator for sorting blocks based on loop depth and probability. - */ - private static class BlockOrderComparator> implements Comparator { - - @Override - public int compare(T a, T b) { - // Loop blocks before any loop exit block. 
- int diff = b.getLoopDepth() - a.getLoopDepth(); - if (diff != 0) { - return diff; - } - - // Blocks with high probability before blocks with low probability. - if (a.probability() > b.probability()) { - return -1; - } else { - return 1; - } - } - } -} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.baseline/src/com/oracle/graal/baseline/BaselineBytecodeParser.java --- a/graal/com.oracle.graal.baseline/src/com/oracle/graal/baseline/BaselineBytecodeParser.java Fri Feb 06 12:17:20 2015 +0100 +++ b/graal/com.oracle.graal.baseline/src/com/oracle/graal/baseline/BaselineBytecodeParser.java Fri Feb 06 12:44:50 2015 +0100 @@ -26,11 +26,10 @@ import java.util.*; -import com.oracle.graal.alloc.*; import com.oracle.graal.api.code.*; import com.oracle.graal.api.meta.*; -import com.oracle.graal.compiler.alloc.*; import com.oracle.graal.compiler.common.*; +import com.oracle.graal.compiler.common.alloc.*; import com.oracle.graal.compiler.common.calc.*; import com.oracle.graal.compiler.common.cfg.*; import com.oracle.graal.compiler.gen.*; @@ -42,6 +41,7 @@ import com.oracle.graal.java.BciBlockMapping.LocalLiveness; import com.oracle.graal.lir.*; import com.oracle.graal.lir.StandardOp.BlockEndOp; +import com.oracle.graal.lir.alloc.lsra.*; import com.oracle.graal.lir.framemap.*; import com.oracle.graal.lir.gen.*; import com.oracle.graal.lir.stackslotalloc.*; diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java --- a/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java Fri Feb 06 12:17:20 2015 +0100 +++ b/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java Fri Feb 06 12:44:50 2015 +0100 @@ -38,6 +38,7 @@ import com.oracle.graal.compiler.common.*; import com.oracle.graal.compiler.common.calc.*; import com.oracle.graal.compiler.common.spi.*; +import com.oracle.graal.compiler.common.util.*; import com.oracle.graal.lir.*; import com.oracle.graal.lir.StandardOp.JumpOp; import com.oracle.graal.lir.amd64.*; diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/GraalOptions.java --- a/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/GraalOptions.java Fri Feb 06 12:17:20 2015 +0100 +++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/GraalOptions.java Fri Feb 06 12:44:50 2015 +0100 @@ -348,4 +348,15 @@ */ @Option(help = "", type = OptionType.Debug) public static final OptionValue SnippetCounters = new OptionValue<>(false); + + @Option(help = "Enable expensive assertions", type = OptionType.Debug) + public static final OptionValue DetailedAsserts = new StableOptionValue() { + @Override + protected Boolean initialValue() { + boolean enabled = false; + // turn detailed assertions on when the general assertions are on (misusing the assert keyword for this) + assert (enabled = true) == true; + return enabled; + } + }; } diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/alloc/ComputeBlockOrder.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/alloc/ComputeBlockOrder.java Fri Feb 06 12:44:50 2015 +0100 @@ -0,0 +1,265 @@ +/* + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
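The DetailedAsserts option added to GraalOptions.java above relies on a side effect inside an assert statement to detect whether the VM was started with assertions enabled. A minimal standalone sketch of that idiom (the class and method names here are invented for the demo):

```java
// Sketch of the assert-side-effect idiom used by the DetailedAsserts option:
// the assignment inside the assert only runs when assertions are enabled for
// this class, so the returned flag mirrors the -ea/-esa status.
public class AssertionsEnabledDemo {
    static boolean assertionsEnabled() {
        boolean enabled = false;
        // The condition has a deliberate side effect; it is evaluated only under -ea.
        assert (enabled = true) == true;
        return enabled;
    }

    public static void main(String[] args) {
        // Prints "true" when run with `java -ea AssertionsEnabledDemo`, "false" otherwise.
        System.out.println(assertionsEnabled());
    }
}
```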
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package com.oracle.graal.compiler.common.alloc; + +import java.util.*; + +import com.oracle.graal.compiler.common.cfg.*; + +/** + * Computes an ordering of the block that can be used by the linear scan register allocator and the + * machine code generator. The machine code generation order will start with the first block and + * produce a straight sequence always following the most likely successor. Then it will continue + * with the most likely path that was left out during this process. The process iteratively + * continues until all blocks are scheduled. Additionally, it is guaranteed that all blocks of a + * loop are scheduled before any block following the loop is scheduled. + * + * The machine code generator order includes reordering of loop headers such that the backward jump + * is a conditional jump if there is only one loop end block. Additionally, the target of loop + * backward jumps are always marked as aligned. Aligning the target of conditional jumps does not + * bring a measurable benefit and is therefore avoided to keep the code size small. + * + * The linear scan register allocator order has an additional mechanism that prevents merge nodes + * from being scheduled if there is at least one highly likely predecessor still unscheduled. This + * increases the probability that the merge node and the corresponding predecessor are more closely + * together in the schedule thus decreasing the probability for inserted phi moves. Also, the + * algorithm sets the linear scan order number of the block that corresponds to its index in the + * linear scan order. + */ +public final class ComputeBlockOrder { + + /** + * The initial capacities of the worklists used for iteratively finding the block order. + */ + private static final int INITIAL_WORKLIST_CAPACITY = 10; + + /** + * Divisor used for degrading the probability of the current path versus unscheduled paths at a + * merge node when calculating the linear scan order. A high value means that predecessors of + * merge nodes are more likely to be scheduled before the merge node. + */ + private static final int PENALTY_VERSUS_UNSCHEDULED = 10; + + /** + * Computes the block order used for the linear scan register allocator. 
+ * + * @return sorted list of blocks + */ + public static > List computeLinearScanOrder(int blockCount, T startBlock) { + List order = new ArrayList<>(); + BitSet visitedBlocks = new BitSet(blockCount); + PriorityQueue worklist = initializeWorklist(startBlock, visitedBlocks); + computeLinearScanOrder(order, worklist, visitedBlocks); + assert checkOrder(order, blockCount); + return order; + } + + /** + * Computes the block order used for code emission. + * + * @return sorted list of blocks + */ + public static > List computeCodeEmittingOrder(int blockCount, T startBlock) { + List order = new ArrayList<>(); + BitSet visitedBlocks = new BitSet(blockCount); + PriorityQueue worklist = initializeWorklist(startBlock, visitedBlocks); + computeCodeEmittingOrder(order, worklist, visitedBlocks); + assert checkOrder(order, blockCount); + return order; + } + + /** + * Iteratively adds paths to the code emission block order. + */ + private static > void computeCodeEmittingOrder(List order, PriorityQueue worklist, BitSet visitedBlocks) { + while (!worklist.isEmpty()) { + T nextImportantPath = worklist.poll(); + addPathToCodeEmittingOrder(nextImportantPath, order, worklist, visitedBlocks); + } + } + + /** + * Iteratively adds paths to the linear scan block order. + */ + private static > void computeLinearScanOrder(List order, PriorityQueue worklist, BitSet visitedBlocks) { + while (!worklist.isEmpty()) { + T nextImportantPath = worklist.poll(); + addPathToLinearScanOrder(nextImportantPath, order, worklist, visitedBlocks); + } + } + + /** + * Initializes the priority queue used for the work list of blocks and adds the start block. + */ + private static > PriorityQueue initializeWorklist(T startBlock, BitSet visitedBlocks) { + PriorityQueue result = new PriorityQueue<>(INITIAL_WORKLIST_CAPACITY, new BlockOrderComparator<>()); + result.add(startBlock); + visitedBlocks.set(startBlock.getId()); + return result; + } + + /** + * Add a linear path to the linear scan order greedily following the most likely successor. + */ + private static > void addPathToLinearScanOrder(T block, List order, PriorityQueue worklist, BitSet visitedBlocks) { + block.setLinearScanNumber(order.size()); + order.add(block); + T mostLikelySuccessor = findAndMarkMostLikelySuccessor(block, visitedBlocks); + enqueueSuccessors(block, worklist, visitedBlocks); + if (mostLikelySuccessor != null) { + if (!mostLikelySuccessor.isLoopHeader() && mostLikelySuccessor.getPredecessorCount() > 1) { + // We are at a merge. Check probabilities of predecessors that are not yet + // scheduled. + double unscheduledSum = 0.0; + for (T pred : mostLikelySuccessor.getPredecessors()) { + if (pred.getLinearScanNumber() == -1) { + unscheduledSum += pred.probability(); + } + } + + if (unscheduledSum > block.probability() / PENALTY_VERSUS_UNSCHEDULED) { + // Add this merge only after at least one additional predecessor gets scheduled. + visitedBlocks.clear(mostLikelySuccessor.getId()); + return; + } + } + addPathToLinearScanOrder(mostLikelySuccessor, order, worklist, visitedBlocks); + } + } + + /** + * Add a linear path to the code emission order greedily following the most likely successor. + */ + private static > void addPathToCodeEmittingOrder(T initialBlock, List order, PriorityQueue worklist, BitSet visitedBlocks) { + T block = initialBlock; + while (block != null) { + // Skip loop headers if there is only a single loop end block to + // make the backward jump be a conditional jump. 
+ if (!skipLoopHeader(block)) { + + // Align unskipped loop headers as they are the target of the backward jump. + if (block.isLoopHeader()) { + block.setAlign(true); + } + addBlock(block, order); + } + + Loop loop = block.getLoop(); + if (block.isLoopEnd() && skipLoopHeader(loop.getHeader())) { + + // This is the only loop end of a skipped loop header. + // Add the header immediately afterwards. + addBlock(loop.getHeader(), order); + + // Make sure the loop successors of the loop header are aligned + // as they are the target + // of the backward jump. + for (T successor : loop.getHeader().getSuccessors()) { + if (successor.getLoopDepth() == block.getLoopDepth()) { + successor.setAlign(true); + } + } + } + + T mostLikelySuccessor = findAndMarkMostLikelySuccessor(block, visitedBlocks); + enqueueSuccessors(block, worklist, visitedBlocks); + block = mostLikelySuccessor; + } + } + + /** + * Adds a block to the ordering. + */ + private static > void addBlock(T header, List order) { + assert !order.contains(header) : "Cannot insert block twice"; + order.add(header); + } + + /** + * Find the highest likely unvisited successor block of a given block. + */ + private static > T findAndMarkMostLikelySuccessor(T block, BitSet visitedBlocks) { + T result = null; + for (T successor : block.getSuccessors()) { + assert successor.probability() >= 0.0 : "Probabilities must be positive"; + if (!visitedBlocks.get(successor.getId()) && successor.getLoopDepth() >= block.getLoopDepth() && (result == null || successor.probability() >= result.probability())) { + result = successor; + } + } + if (result != null) { + visitedBlocks.set(result.getId()); + } + return result; + } + + /** + * Add successor blocks into the given work list if they are not already marked as visited. + */ + private static > void enqueueSuccessors(T block, PriorityQueue worklist, BitSet visitedBlocks) { + for (T successor : block.getSuccessors()) { + if (!visitedBlocks.get(successor.getId())) { + visitedBlocks.set(successor.getId()); + worklist.add(successor); + } + } + } + + /** + * Skip the loop header block if the loop consists of more than one block and it has only a + * single loop end block. + */ + private static > boolean skipLoopHeader(AbstractBlock block) { + return (block.isLoopHeader() && !block.isLoopEnd() && block.getLoop().numBackedges() == 1); + } + + /** + * Checks that the ordering contains the expected number of blocks. + */ + private static boolean checkOrder(List> order, int expectedBlockCount) { + assert order.size() == expectedBlockCount : String.format("Number of blocks in ordering (%d) does not match expected block count (%d)", order.size(), expectedBlockCount); + return true; + } + + /** + * Comparator for sorting blocks based on loop depth and probability. + */ + private static class BlockOrderComparator> implements Comparator { + + @Override + public int compare(T a, T b) { + // Loop blocks before any loop exit block. + int diff = b.getLoopDepth() - a.getLoopDepth(); + if (diff != 0) { + return diff; + } + + // Blocks with high probability before blocks with low probability. 
+ if (a.probability() > b.probability()) { + return -1; + } else { + return 1; + } + } + } +} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/ArrayMap.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/ArrayMap.java Fri Feb 06 12:44:50 2015 +0100 @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.compiler.common.util; + +/** + * The {@code ArrayMap} class implements an efficient one-level map which is implemented as an + * array. Note that because of the one-level array inside, this data structure performs best when + * the range of integer keys is small and densely used. Note that the implementation can handle + * arbitrary intervals, including negative numbers, up to intervals of size 2^31 - 1. + */ +public class ArrayMap { + + private static final int INITIAL_SIZE = 5; // how big the initial array should be + private static final int EXTRA = 2; // how far on the left or right of a new element to grow + + Object[] map; + int low; + + /** + * Constructs a new {@code ArrayMap} with no initial assumptions. + */ + public ArrayMap() { + } + + /** + * Constructs a new {@code ArrayMap} that initially covers the specified interval. Note that + * this map will automatically expand if necessary later. + * + * @param low the low index, inclusive + * @param high the high index, exclusive + */ + public ArrayMap(int low, int high) { + this.low = low; + this.map = new Object[high - low + 1]; + } + + /** + * Puts a new value in the map at the specified index. + * + * @param i the index at which to store the value + * @param value the value to store at the specified index + */ + public void put(int i, T value) { + int index = i - low; + if (map == null) { + // no map yet + map = new Object[INITIAL_SIZE]; + low = index - 2; + map[INITIAL_SIZE / 2] = value; + } else if (index < 0) { + // grow backwards + growBackward(i, value); + } else if (index >= map.length) { + // grow forwards + growForward(i, value); + } else { + // no growth necessary + map[index] = value; + } + } + + /** + * Gets the value at the specified index in the map. 
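ArrayMap, added above, grows in both directions, so negative keys work as long as the key interval stays reasonably dense. A small usage sketch, assuming the class as added in this changeset is on the classpath (its generic value-type parameter was lost in this rendering of the diff and is assumed here):

```java
import com.oracle.graal.compiler.common.util.ArrayMap;

public class ArrayMapDemo {
    public static void main(String[] args) {
        ArrayMap<String> map = new ArrayMap<>();
        map.put(3, "three");
        map.put(-2, "minus two");   // triggers growBackward: keys may be negative
        map.put(40, "forty");       // triggers growForward

        System.out.println(map.get(3));    // three
        System.out.println(map.get(-2));   // minus two
        System.out.println(map.get(100));  // null: outside the currently stored range
    }
}
```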
+ * + * @param i the index + * @return the value at the specified index; {@code null} if there is no value at the specified + * index, or if the index is out of the currently stored range + */ + public T get(int i) { + int index = i - low; + if (map == null || index < 0 || index >= map.length) { + return null; + } + Class type = null; + return Util.uncheckedCast(type, map[index]); + } + + public int length() { + return map.length; + } + + private void growBackward(int i, T value) { + int nlow = i - EXTRA; + Object[] nmap = new Object[low - nlow + map.length]; + System.arraycopy(map, 0, nmap, low - nlow, map.length); + map = nmap; + low = nlow; + map[i - low] = value; + } + + private void growForward(int i, T value) { + int nlen = i - low + 1 + EXTRA; + Object[] nmap = new Object[nlen]; + System.arraycopy(map, 0, nmap, 0, map.length); + map = nmap; + map[i - low] = value; + } +} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/ArraySet.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/ArraySet.java Fri Feb 06 12:44:50 2015 +0100 @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.compiler.common.util; + +import java.util.*; + +/** + * Mimic a set implementation with an ArrayList. Beneficial for small sets (compared to + * {@link HashSet}). + */ +public class ArraySet extends ArrayList implements Set { + private static final long serialVersionUID = 4476957522387436654L; + + public ArraySet() { + super(); + } + + public ArraySet(int i) { + super(i); + } + + public ArraySet(Collection c) { + super(c); + } + + @Override + public boolean add(E e) { + // avoid duplicated entries + if (contains(e)) { + return false; + } + return super.add(e); + } +} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/BitMap2D.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/BitMap2D.java Fri Feb 06 12:44:50 2015 +0100 @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
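ArraySet, shown above, trades HashSet's constant-time lookup for the low footprint and cheap iteration of ArrayList, deduplicating in add(E) via a linear contains() scan. Note that only add(E) is overridden, so insertions through add(int, E) or addAll could still introduce duplicates. A small usage sketch, assuming the class as added above (its element-type parameter was dropped in this rendering):

```java
import com.oracle.graal.compiler.common.util.ArraySet;

public class ArraySetDemo {
    public static void main(String[] args) {
        ArraySet<String> set = new ArraySet<>();
        set.add("a");
        set.add("b");
        set.add("a");                    // duplicate, rejected by the overridden add
        System.out.println(set);         // [a, b]
        System.out.println(set.size());  // 2
    }
}
```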
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.compiler.common.util; + +import java.util.*; + +/** + * This class implements a two-dimensional bitmap. + */ +public final class BitMap2D { + + private BitSet map; + private final int bitsPerSlot; + + private int bitIndex(int slotIndex, int bitWithinSlotIndex) { + return slotIndex * bitsPerSlot + bitWithinSlotIndex; + } + + private boolean verifyBitWithinSlotIndex(int index) { + assert index < bitsPerSlot : "index " + index + " is out of bounds " + bitsPerSlot; + return true; + } + + public BitMap2D(int sizeInSlots, int bitsPerSlot) { + map = new BitSet(sizeInSlots * bitsPerSlot); + this.bitsPerSlot = bitsPerSlot; + } + + public int sizeInBits() { + return map.size(); + } + + // Returns number of full slots that have been allocated + public int sizeInSlots() { + return map.size() / bitsPerSlot; + } + + public boolean isValidIndex(int slotIndex, int bitWithinSlotIndex) { + assert verifyBitWithinSlotIndex(bitWithinSlotIndex); + return (bitIndex(slotIndex, bitWithinSlotIndex) < sizeInBits()); + } + + public boolean at(int slotIndex, int bitWithinSlotIndex) { + assert verifyBitWithinSlotIndex(bitWithinSlotIndex); + return map.get(bitIndex(slotIndex, bitWithinSlotIndex)); + } + + public void setBit(int slotIndex, int bitWithinSlotIndex) { + assert verifyBitWithinSlotIndex(bitWithinSlotIndex); + map.set(bitIndex(slotIndex, bitWithinSlotIndex)); + } + + public void clearBit(int slotIndex, int bitWithinSlotIndex) { + assert verifyBitWithinSlotIndex(bitWithinSlotIndex); + map.clear(bitIndex(slotIndex, bitWithinSlotIndex)); + } + + public void atPutGrow(int slotIndex, int bitWithinSlotIndex, boolean value) { + int size = sizeInSlots(); + if (size <= slotIndex) { + while (size <= slotIndex) { + size *= 2; + } + BitSet newBitMap = new BitSet(size * bitsPerSlot); + newBitMap.or(map); + map = newBitMap; + } + + if (value) { + setBit(slotIndex, bitWithinSlotIndex); + } else { + clearBit(slotIndex, bitWithinSlotIndex); + } + } + + public void clear() { + map.clear(); + } +} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/IntList.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/IntList.java Fri Feb 06 12:44:50 2015 +0100 @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
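BitMap2D, added above, packs a fixed number of bits per "slot" into a single BitSet, for example one slot per block and one bit per local variable in a liveness computation (that pairing is only an illustration). A short usage sketch assuming the class as added in this changeset:

```java
import com.oracle.graal.compiler.common.util.BitMap2D;

public class BitMap2DDemo {
    public static void main(String[] args) {
        // 4 slots (e.g. blocks), 16 bits per slot (e.g. local variables).
        BitMap2D live = new BitMap2D(4, 16);
        live.setBit(2, 5);                    // bit 5 set in slot 2
        System.out.println(live.at(2, 5));    // true
        System.out.println(live.at(3, 5));    // false

        live.atPutGrow(10, 7, true);          // grows the map beyond the initial 4 slots
        System.out.println(live.at(10, 7));   // true
    }
}
```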
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.compiler.common.util; + +import java.util.*; + +/** + * An expandable and indexable list of {@code int}s. + * + * This class avoids the boxing/unboxing incurred by {@code ArrayList}. + */ +public final class IntList { + + private int[] array; + private int size; + + /** + * Creates an int list with a specified initial capacity. + * + * @param initialCapacity + */ + public IntList(int initialCapacity) { + array = new int[initialCapacity]; + } + + /** + * Creates an int list with a specified initial array. + * + * @param array the initial array used for the list (no copy is made) + * @param initialSize the initial {@linkplain #size() size} of the list (must be less than or + * equal to {@code array.length} + */ + public IntList(int[] array, int initialSize) { + assert initialSize <= array.length; + this.array = array; + this.size = initialSize; + } + + /** + * Makes a new int list by copying a range from a given int list. + * + * @param other the list from which a range of values is to be copied into the new list + * @param startIndex the index in {@code other} at which to start copying + * @param length the number of values to copy from {@code other} + * @return a new int list whose {@linkplain #size() size} and capacity is {@code length} + */ + public static IntList copy(IntList other, int startIndex, int length) { + return copy(other, startIndex, length, length); + } + + /** + * Makes a new int list by copying a range from a given int list. + * + * @param other the list from which a range of values is to be copied into the new list + * @param startIndex the index in {@code other} at which to start copying + * @param length the number of values to copy from {@code other} + * @param initialCapacity the initial capacity of the new int list (must be greater or equal to + * {@code length}) + * @return a new int list whose {@linkplain #size() size} is {@code length} + */ + public static IntList copy(IntList other, int startIndex, int length, int initialCapacity) { + assert initialCapacity >= length : "initialCapacity < length"; + int[] array = new int[initialCapacity]; + System.arraycopy(other.array, startIndex, array, 0, length); + return new IntList(array, length); + } + + public int size() { + return size; + } + + /** + * Appends a value to the end of this list, increasing its {@linkplain #size() size} by 1. 
+ * + * @param value the value to append + */ + public void add(int value) { + if (size == array.length) { + int newSize = (size * 3) / 2 + 1; + array = Arrays.copyOf(array, newSize); + } + array[size++] = value; + } + + /** + * Gets the value in this list at a given index. + * + * @param index the index of the element to return + * @throws IndexOutOfBoundsException if {@code index < 0 || index >= size()} + */ + public int get(int index) { + if (index >= size) { + throw new IndexOutOfBoundsException("Index: " + index + ", Size: " + size); + } + return array[index]; + } + + /** + * Sets the size of this list to 0. + */ + public void clear() { + size = 0; + } + + /** + * Sets a value at a given index in this list. + * + * @param index the index of the element to update + * @param value the new value of the element + * @throws IndexOutOfBoundsException if {@code index < 0 || index >= size()} + */ + public void set(int index, int value) { + if (index >= size) { + throw new IndexOutOfBoundsException("Index: " + index + ", Size: " + size); + } + array[index] = value; + } + + /** + * Adjusts the {@linkplain #size() size} of this int list. + * + * If {@code newSize < size()}, the size is changed to {@code newSize}. If + * {@code newSize > size()}, sufficient 0 elements are {@linkplain #add(int) added} until + * {@code size() == newSize}. + * + * @param newSize the new size of this int list + */ + public void setSize(int newSize) { + if (newSize < size) { + size = newSize; + } else if (newSize > size) { + array = Arrays.copyOf(array, newSize); + } + } + + @Override + public String toString() { + if (array.length == size) { + return Arrays.toString(array); + } + return Arrays.toString(Arrays.copyOf(array, size)); + } +} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/Util.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/Util.java Fri Feb 06 12:44:50 2015 +0100 @@ -0,0 +1,337 @@ +/* + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.compiler.common.util; + +import java.util.*; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.debug.*; + +/** + * The {@code Util} class contains a motley collection of utility methods used throughout the + * compiler. 
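A short usage sketch of the IntList class defined just above; the comments spell out the growth factor ((size * 3) / 2 + 1) used by add and the copy/truncate behaviour:

```java
import com.oracle.graal.compiler.common.util.IntList;

public class IntListDemo {
    public static void main(String[] args) {
        IntList list = new IntList(2);       // capacity 2, grows as needed
        list.add(10);
        list.add(20);
        list.add(30);                        // growth: new capacity (2 * 3) / 2 + 1 = 4
        System.out.println(list.get(2));     // 30
        System.out.println(list);            // [10, 20, 30]

        IntList tail = IntList.copy(list, 1, 2);
        System.out.println(tail);            // [20, 30]

        list.setSize(1);                     // truncates the logical size
        System.out.println(list);            // [10]
    }
}
```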
+ */ +public class Util { + + public static final int PRINTING_LINE_WIDTH = 40; + public static final char SECTION_CHARACTER = '*'; + public static final char SUB_SECTION_CHARACTER = '='; + public static final char SEPERATOR_CHARACTER = '-'; + + public static boolean replaceInList(T a, T b, List list) { + final int max = list.size(); + for (int i = 0; i < max; i++) { + if (list.get(i) == a) { + list.set(i, b); + return true; + } + } + return false; + } + + /** + * Statically cast an object to an arbitrary Object type. Dynamically checked. + */ + @SuppressWarnings("unchecked") + public static T uncheckedCast(@SuppressWarnings("unused") Class type, Object object) { + return (T) object; + } + + /** + * Statically cast an object to an arbitrary Object type. Dynamically checked. + */ + @SuppressWarnings("unchecked") + public static T uncheckedCast(Object object) { + return (T) object; + } + + /** + * Utility method to combine a base hash with the identity hash of one or more objects. + * + * @param hash the base hash + * @param x the object to add to the hash + * @return the combined hash + */ + public static int hash1(int hash, Object x) { + // always set at least one bit in case the hash wraps to zero + return 0x10000000 | (hash + 7 * System.identityHashCode(x)); + } + + /** + * Utility method to combine a base hash with the identity hash of one or more objects. + * + * @param hash the base hash + * @param x the first object to add to the hash + * @param y the second object to add to the hash + * @return the combined hash + */ + public static int hash2(int hash, Object x, Object y) { + // always set at least one bit in case the hash wraps to zero + return 0x20000000 | (hash + 7 * System.identityHashCode(x) + 11 * System.identityHashCode(y)); + } + + /** + * Utility method to combine a base hash with the identity hash of one or more objects. + * + * @param hash the base hash + * @param x the first object to add to the hash + * @param y the second object to add to the hash + * @param z the third object to add to the hash + * @return the combined hash + */ + public static int hash3(int hash, Object x, Object y, Object z) { + // always set at least one bit in case the hash wraps to zero + return 0x30000000 | (hash + 7 * System.identityHashCode(x) + 11 * System.identityHashCode(y) + 13 * System.identityHashCode(z)); + } + + /** + * Utility method to combine a base hash with the identity hash of one or more objects. 
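The hashN helpers above OR in a high bit so the combined identity hash can never be zero. One plausible way to exploit that guarantee, shown here purely as an illustration and not taken from the Graal sources, is to use 0 as a "not yet computed" sentinel when caching such a hash:

```java
import com.oracle.graal.compiler.common.util.Util;

public class IdentityHashDemo {
    private Object cachedA;
    private Object cachedB;
    private int cachedHash; // 0 means "not computed yet"

    // Lazily computed hash over the identity of two objects. Because Util.hash2
    // always sets bit 0x20000000, a legitimate result is never 0, so 0 is a safe
    // sentinel for "empty" in this demo.
    int hashFor(Object a, Object b) {
        if (cachedHash != 0 && a == cachedA && b == cachedB) {
            return cachedHash;
        }
        cachedA = a;
        cachedB = b;
        cachedHash = Util.hash2(17, a, b);
        return cachedHash;
    }

    public static void main(String[] args) {
        IdentityHashDemo demo = new IdentityHashDemo();
        Object x = new Object();
        Object y = new Object();
        int h1 = demo.hashFor(x, y);
        int h2 = demo.hashFor(x, y);       // second call is served from the cache
        System.out.println(h1 == h2);      // true
        System.out.println(h1 != 0);       // true: the high bit is always set
    }
}
```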
+ * + * @param hash the base hash + * @param x the first object to add to the hash + * @param y the second object to add to the hash + * @param z the third object to add to the hash + * @param w the fourth object to add to the hash + * @return the combined hash + */ + public static int hash4(int hash, Object x, Object y, Object z, Object w) { + // always set at least one bit in case the hash wraps to zero + return 0x40000000 | (hash + 7 * System.identityHashCode(x) + 11 * System.identityHashCode(y) + 13 * System.identityHashCode(z) + 17 * System.identityHashCode(w)); + } + + static { + assert CodeUtil.log2(2) == 1; + assert CodeUtil.log2(4) == 2; + assert CodeUtil.log2(8) == 3; + assert CodeUtil.log2(16) == 4; + assert CodeUtil.log2(32) == 5; + assert CodeUtil.log2(0x40000000) == 30; + + assert CodeUtil.log2(2L) == 1; + assert CodeUtil.log2(4L) == 2; + assert CodeUtil.log2(8L) == 3; + assert CodeUtil.log2(16L) == 4; + assert CodeUtil.log2(32L) == 5; + assert CodeUtil.log2(0x4000000000000000L) == 62; + + assert !CodeUtil.isPowerOf2(3); + assert !CodeUtil.isPowerOf2(5); + assert !CodeUtil.isPowerOf2(7); + assert !CodeUtil.isPowerOf2(-1); + + assert CodeUtil.isPowerOf2(2); + assert CodeUtil.isPowerOf2(4); + assert CodeUtil.isPowerOf2(8); + assert CodeUtil.isPowerOf2(16); + assert CodeUtil.isPowerOf2(32); + assert CodeUtil.isPowerOf2(64); + } + + /** + * Sets the element at a given position of a list and ensures that this position exists. If the + * list is current shorter than the position, intermediate positions are filled with a given + * value. + * + * @param list the list to put the element into + * @param pos the position at which to insert the element + * @param x the element that should be inserted + * @param filler the filler element that is used for the intermediate positions in case the list + * is shorter than pos + */ + public static void atPutGrow(List list, int pos, T x, T filler) { + if (list.size() < pos + 1) { + while (list.size() < pos + 1) { + list.add(filler); + } + assert list.size() == pos + 1; + } + + assert list.size() >= pos + 1; + list.set(pos, x); + } + + public static void breakpoint() { + // do nothing. + } + + public static void guarantee(boolean b, String string) { + if (!b) { + throw new BailoutException(string); + } + } + + public static void warning(String string) { + TTY.println("WARNING: " + string); + } + + public static int safeToInt(long l) { + assert (int) l == l; + return (int) l; + } + + public static int roundUp(int number, int mod) { + return ((number + mod - 1) / mod) * mod; + } + + public static void printSection(String name, char sectionCharacter) { + + String header = " " + name + " "; + int remainingCharacters = PRINTING_LINE_WIDTH - header.length(); + int leftPart = remainingCharacters / 2; + int rightPart = remainingCharacters - leftPart; + for (int i = 0; i < leftPart; i++) { + TTY.print(sectionCharacter); + } + + TTY.print(header); + + for (int i = 0; i < rightPart; i++) { + TTY.print(sectionCharacter); + } + + TTY.println(); + } + + /** + * Prints entries in a byte array as space separated hex values to {@link TTY}. + * + * @param address an address at which the bytes are located. This is used to print an address + * prefix per line of output. 
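Two of the small helpers above, atPutGrow and roundUp, in a quick usage sketch (the list contents and numbers are arbitrary):

```java
import java.util.ArrayList;
import java.util.List;

import com.oracle.graal.compiler.common.util.Util;

public class AtPutGrowDemo {
    public static void main(String[] args) {
        List<String> names = new ArrayList<>();
        names.add("zero");

        // Store at position 4; positions 1..3 are padded with the filler value.
        Util.atPutGrow(names, 4, "four", "?");
        System.out.println(names);              // [zero, ?, ?, ?, four]

        System.out.println(Util.roundUp(13, 8)); // 16: next multiple of 8
    }
}
```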
+ * @param array the array containing all the bytes to print + * @param bytesPerLine the number of values to print per line of output + */ + public static void printBytes(long address, byte[] array, int bytesPerLine) { + printBytes(address, array, 0, array.length, bytesPerLine); + } + + /** + * Prints entries in a byte array as space separated hex values to {@link TTY}. + * + * @param address an address at which the bytes are located. This is used to print an address + * prefix per line of output. + * @param array the array containing the bytes to print + * @param offset the offset in {@code array} of the values to print + * @param length the number of values from {@code array} print + * @param bytesPerLine the number of values to print per line of output + */ + public static void printBytes(long address, byte[] array, int offset, int length, int bytesPerLine) { + assert bytesPerLine > 0; + boolean newLine = true; + for (int i = 0; i < length; i++) { + if (newLine) { + TTY.print("%08x: ", address + i); + newLine = false; + } + TTY.print("%02x ", array[i]); + if (i % bytesPerLine == bytesPerLine - 1) { + TTY.println(); + newLine = true; + } + } + + if (length % bytesPerLine != bytesPerLine) { + TTY.println(); + } + } + + public static boolean isShiftCount(int x) { + return 0 <= x && x < 32; + } + + /** + * Determines if a given {@code int} value is the range of unsigned byte values. + */ + public static boolean isUByte(int x) { + return (x & 0xff) == x; + } + + /** + * Determines if a given {@code int} value is the range of signed byte values. + */ + public static boolean isByte(int x) { + return (byte) x == x; + } + + /** + * Determines if a given {@code long} value is the range of unsigned byte values. + */ + public static boolean isUByte(long x) { + return (x & 0xffL) == x; + } + + /** + * Determines if a given {@code long} value is the range of signed byte values. + */ + public static boolean isByte(long l) { + return (byte) l == l; + } + + /** + * Determines if a given {@code long} value is the range of unsigned int values. + */ + public static boolean isUInt(long x) { + return (x & 0xffffffffL) == x; + } + + /** + * Determines if a given {@code long} value is the range of signed int values. + */ + public static boolean isInt(long l) { + return (int) l == l; + } + + /** + * Determines if a given {@code int} value is the range of signed short values. + */ + public static boolean isShort(int x) { + return (short) x == x; + } + + public static boolean is32bit(long x) { + return -0x80000000L <= x && x < 0x80000000L; + } + + public static short safeToShort(int v) { + assert isShort(v); + return (short) v; + } + + /** + * Creates an array of integers of length "size", in which each number from 0 to (size - 1) + * occurs exactly once. The integers are sorted using the given comparator. This can be used to + * create a sorting for arrays that cannot be modified directly. + * + * @param size The size of the range to be sorted. + * @param comparator A comparator that is used to compare indexes. + * @return An array of integers that contains each number from 0 to (size - 1) exactly once, + * sorted using the comparator. 
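The signed and unsigned range predicates above differ in how they treat values with the top bit of the smaller width set; a few concrete probes make the distinction visible:

```java
import com.oracle.graal.compiler.common.util.Util;

public class RangePredicateDemo {
    public static void main(String[] args) {
        System.out.println(Util.isUByte(200));   // true : fits in an unsigned byte (0..255)
        System.out.println(Util.isByte(200));    // false: does not fit in a signed byte (-128..127)
        System.out.println(Util.isByte(-1));     // true
        System.out.println(Util.isUByte(-1));    // false

        System.out.println(Util.isInt(5_000_000_000L)); // false: needs more than 32 signed bits
        System.out.println(Util.isShiftCount(32));      // false: valid int shift counts are 0..31
    }
}
```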
+ */ + public static Integer[] createSortedPermutation(int size, Comparator comparator) { + Integer[] indexes = new Integer[size]; + for (int i = 0; i < size; i++) { + indexes[i] = i; + } + Arrays.sort(indexes, comparator); + return indexes; + } +} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/GraalCompiler.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/GraalCompiler.java Fri Feb 06 12:17:20 2015 +0100 +++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/GraalCompiler.java Fri Feb 06 12:44:50 2015 +0100 @@ -29,19 +29,19 @@ import java.util.*; -import com.oracle.graal.alloc.*; import com.oracle.graal.api.code.*; import com.oracle.graal.api.code.CompilationResult.ConstantReference; import com.oracle.graal.api.code.CompilationResult.DataPatch; import com.oracle.graal.api.meta.*; import com.oracle.graal.api.meta.ProfilingInfo.TriState; -import com.oracle.graal.compiler.alloc.*; +import com.oracle.graal.compiler.common.alloc.*; import com.oracle.graal.compiler.common.cfg.*; import com.oracle.graal.compiler.target.*; import com.oracle.graal.debug.*; import com.oracle.graal.debug.Debug.Scope; import com.oracle.graal.debug.internal.*; import com.oracle.graal.lir.*; +import com.oracle.graal.lir.alloc.lsra.*; import com.oracle.graal.lir.asm.*; import com.oracle.graal.lir.constopt.*; import com.oracle.graal.lir.framemap.*; diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/GraalDebugConfig.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/GraalDebugConfig.java Fri Feb 06 12:17:20 2015 +0100 +++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/GraalDebugConfig.java Fri Feb 06 12:44:50 2015 +0100 @@ -65,16 +65,6 @@ public static final OptionValue SuppressZeroDebugValues = new OptionValue<>(false); @Option(help = "Send Graal IR to dump handlers on error", type = OptionType.Debug) public static final OptionValue DumpOnError = new OptionValue<>(false); - @Option(help = "Enable expensive assertions", type = OptionType.Debug) - public static final OptionValue DetailedAsserts = new StableOptionValue() { - @Override - protected Boolean initialValue() { - boolean enabled = false; - // turn detailed assertions on when the general assertions are on (misusing the assert keyword for this) - assert (enabled = true) == true; - return enabled; - } - }; @Option(help = "Enable more verbose log output when available", type = OptionType.Debug) public static final OptionValue LogVerbose = new OptionValue<>(false); // @formatter:on diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/Interval.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/Interval.java Fri Feb 06 12:17:20 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1303 +0,0 @@ -/* - * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
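createSortedPermutation, whose implementation appears just above, sorts index positions rather than the data itself, which is useful when the underlying array must not be reordered. A usage sketch; the comparator parameter is assumed to be Comparator<Integer>, the type argument having been lost in this rendering of the diff:

```java
import java.util.Arrays;

import com.oracle.graal.compiler.common.util.Util;

public class SortedPermutationDemo {
    public static void main(String[] args) {
        double[] weights = {0.3, 1.5, 0.1, 0.9};

        // Indexes 0..3 sorted by ascending weight; `weights` itself is untouched.
        Integer[] byWeight = Util.createSortedPermutation(weights.length,
                        (i, j) -> Double.compare(weights[i], weights[j]));

        System.out.println(Arrays.toString(byWeight));  // [2, 0, 3, 1]
        System.out.println(Arrays.toString(weights));   // [0.3, 1.5, 0.1, 0.9]
    }
}
```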
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.compiler.alloc; - -import static com.oracle.graal.api.code.ValueUtil.*; -import static com.oracle.graal.compiler.GraalDebugConfig.*; -import static com.oracle.graal.lir.LIRValueUtil.*; - -import java.util.*; - -import com.oracle.graal.api.code.*; -import com.oracle.graal.api.meta.*; -import com.oracle.graal.compiler.common.*; -import com.oracle.graal.debug.*; -import com.oracle.graal.lir.*; -import com.oracle.graal.phases.util.*; - -/** - * Represents an interval in the {@linkplain LinearScan linear scan register allocator}. - */ -public final class Interval { - - /** - * A pair of intervals. - */ - static final class Pair { - - public final Interval first; - public final Interval second; - - public Pair(Interval first, Interval second) { - this.first = first; - this.second = second; - } - } - - /** - * A set of interval lists, one per {@linkplain RegisterBinding binding} type. - */ - static final class RegisterBindingLists { - - /** - * List of intervals whose binding is currently {@link RegisterBinding#Fixed}. - */ - public Interval fixed; - - /** - * List of intervals whose binding is currently {@link RegisterBinding#Any}. - */ - public Interval any; - - /** - * List of intervals whose binding is currently {@link RegisterBinding#Stack}. - */ - public Interval stack; - - public RegisterBindingLists(Interval fixed, Interval any, Interval stack) { - this.fixed = fixed; - this.any = any; - this.stack = stack; - } - - /** - * Gets the list for a specified binding. - * - * @param binding specifies the list to be returned - * @return the list of intervals whose binding is {@code binding} - */ - public Interval get(RegisterBinding binding) { - switch (binding) { - case Any: - return any; - case Fixed: - return fixed; - case Stack: - return stack; - } - throw GraalInternalError.shouldNotReachHere(); - } - - /** - * Sets the list for a specified binding. - * - * @param binding specifies the list to be replaced - * @param list a list of intervals whose binding is {@code binding} - */ - public void set(RegisterBinding binding, Interval list) { - assert list != null; - switch (binding) { - case Any: - any = list; - break; - case Fixed: - fixed = list; - break; - case Stack: - stack = list; - break; - } - } - - /** - * Adds an interval to a list sorted by {@linkplain Interval#currentFrom() current from} - * positions. 
- * - * @param binding specifies the list to be updated - * @param interval the interval to add - */ - public void addToListSortedByCurrentFromPositions(RegisterBinding binding, Interval interval) { - Interval list = get(binding); - Interval prev = null; - Interval cur = list; - while (cur.currentFrom() < interval.currentFrom()) { - prev = cur; - cur = cur.next; - } - Interval result = list; - if (prev == null) { - // add to head of list - result = interval; - } else { - // add before 'cur' - prev.next = interval; - } - interval.next = cur; - set(binding, result); - } - - /** - * Adds an interval to a list sorted by {@linkplain Interval#from() start} positions and - * {@linkplain Interval#firstUsage(RegisterPriority) first usage} positions. - * - * @param binding specifies the list to be updated - * @param interval the interval to add - */ - public void addToListSortedByStartAndUsePositions(RegisterBinding binding, Interval interval) { - Interval list = get(binding); - Interval prev = null; - Interval cur = list; - while (cur.from() < interval.from() || (cur.from() == interval.from() && cur.firstUsage(RegisterPriority.None) < interval.firstUsage(RegisterPriority.None))) { - prev = cur; - cur = cur.next; - } - if (prev == null) { - list = interval; - } else { - prev.next = interval; - } - interval.next = cur; - set(binding, list); - } - - /** - * Removes an interval from a list. - * - * @param binding specifies the list to be updated - * @param i the interval to remove - */ - public void remove(RegisterBinding binding, Interval i) { - Interval list = get(binding); - Interval prev = null; - Interval cur = list; - while (cur != i) { - assert cur != null && cur != Interval.EndMarker : "interval has not been found in list: " + i; - prev = cur; - cur = cur.next; - } - if (prev == null) { - set(binding, cur.next); - } else { - prev.next = cur.next; - } - } - } - - /** - * Constants denoting the register usage priority for an interval. The constants are declared in - * increasing order of priority are are used to optimize spilling when multiple overlapping - * intervals compete for limited registers. - */ - public enum RegisterPriority { - /** - * No special reason for an interval to be allocated a register. - */ - None, - - /** - * Priority level for intervals live at the end of a loop. - */ - LiveAtLoopEnd, - - /** - * Priority level for intervals that should be allocated to a register. - */ - ShouldHaveRegister, - - /** - * Priority level for intervals that must be allocated to a register. - */ - MustHaveRegister; - - public static final RegisterPriority[] VALUES = values(); - - /** - * Determines if this priority is higher than or equal to a given priority. - */ - public boolean greaterEqual(RegisterPriority other) { - return ordinal() >= other.ordinal(); - } - - /** - * Determines if this priority is lower than a given priority. - */ - public boolean lessThan(RegisterPriority other) { - return ordinal() < other.ordinal(); - } - } - - /** - * Constants denoting whether an interval is bound to a specific register. This models platform - * dependencies on register usage for certain instructions. - */ - enum RegisterBinding { - /** - * Interval is bound to a specific register as required by the platform. - */ - Fixed, - - /** - * Interval has no specific register requirements. - */ - Any, - - /** - * Interval is bound to a stack slot. 
- */ - Stack; - - public static final RegisterBinding[] VALUES = values(); - } - - /** - * Constants denoting the linear-scan states an interval may be in with respect to the - * {@linkplain Interval#from() start} {@code position} of the interval being processed. - */ - enum State { - /** - * An interval that starts after {@code position}. - */ - Unhandled, - - /** - * An interval that {@linkplain Interval#covers covers} {@code position} and has an assigned - * register. - */ - Active, - - /** - * An interval that starts before and ends after {@code position} but does not - * {@linkplain Interval#covers cover} it due to a lifetime hole. - */ - Inactive, - - /** - * An interval that ends before {@code position} or is spilled to memory. - */ - Handled; - } - - /** - * Constants used in optimization of spilling of an interval. - */ - enum SpillState { - /** - * Starting state of calculation: no definition found yet. - */ - NoDefinitionFound, - - /** - * One definition has already been found. Two consecutive definitions are treated as one - * (e.g. a consecutive move and add because of two-operand LIR form). The position of this - * definition is given by {@link Interval#spillDefinitionPos()}. - */ - NoSpillStore, - - /** - * One spill move has already been inserted. - */ - OneSpillStore, - - /** - * The interval is spilled multiple times or is spilled in a loop. Place the store somewhere - * on the dominator path between the definition and the usages. - */ - SpillInDominator, - - /** - * The interval should be stored immediately after its definition to prevent multiple - * redundant stores. - */ - StoreAtDefinition, - - /** - * The interval starts in memory (e.g. method parameter), so a store is never necessary. - */ - StartInMemory, - - /** - * The interval has more than one definition (e.g. resulting from phi moves), so stores to - * memory are not optimized. - */ - NoOptimization - } - - /** - * List of use positions. Each entry in the list records the use position and register priority - * associated with the use position. The entries in the list are in descending order of use - * position. - * - */ - public static final class UsePosList { - - private IntList list; - - /** - * Creates a use list. - * - * @param initialCapacity the initial capacity of the list in terms of entries - */ - public UsePosList(int initialCapacity) { - list = new IntList(initialCapacity * 2); - } - - private UsePosList(IntList list) { - this.list = list; - } - - /** - * Splits this list around a given position. All entries in this list with a use position - * greater or equal than {@code splitPos} are removed from this list and added to the - * returned list. - * - * @param splitPos the position for the split - * @return a use position list containing all entries removed from this list that have a use - * position greater or equal than {@code splitPos} - */ - public UsePosList splitAt(int splitPos) { - int i = size() - 1; - int len = 0; - while (i >= 0 && usePos(i) < splitPos) { - --i; - len += 2; - } - int listSplitIndex = (i + 1) * 2; - IntList childList = list; - list = IntList.copy(this.list, listSplitIndex, len); - childList.setSize(listSplitIndex); - UsePosList child = new UsePosList(childList); - return child; - } - - /** - * Gets the use position at a specified index in this list. 
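- * Entries are stored as flattened (use position, register priority) pairs in the backing {@link IntList}, so the use position of entry {@code index} is read from offset {@code index * 2}.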
- * - * @param index the index of the entry for which the use position is returned - * @return the use position of entry {@code index} in this list - */ - public int usePos(int index) { - return list.get(index << 1); - } - - /** - * Gets the register priority for the use position at a specified index in this list. - * - * @param index the index of the entry for which the register priority is returned - * @return the register priority of entry {@code index} in this list - */ - public RegisterPriority registerPriority(int index) { - return RegisterPriority.VALUES[list.get((index << 1) + 1)]; - } - - public void add(int usePos, RegisterPriority registerPriority) { - assert list.size() == 0 || usePos(size() - 1) > usePos; - list.add(usePos); - list.add(registerPriority.ordinal()); - } - - public int size() { - return list.size() >> 1; - } - - public void removeLowestUsePos() { - list.setSize(list.size() - 2); - } - - public void setRegisterPriority(int index, RegisterPriority registerPriority) { - list.set(index * 2, registerPriority.ordinal()); - } - - @Override - public String toString() { - StringBuilder buf = new StringBuilder("["); - for (int i = size() - 1; i >= 0; --i) { - if (buf.length() != 1) { - buf.append(", "); - } - RegisterPriority prio = registerPriority(i); - buf.append(usePos(i)).append(" -> ").append(prio.ordinal()).append(':').append(prio); - } - return buf.append("]").toString(); - } - } - - /** - * The {@linkplain RegisterValue register} or {@linkplain Variable variable} for this interval - * prior to register allocation. - */ - public final AllocatableValue operand; - - /** - * The operand number for this interval's {@linkplain #operand operand}. - */ - public final int operandNumber; - - /** - * The {@linkplain RegisterValue register} or {@linkplain StackSlot spill slot} assigned to this - * interval. In case of a spilled interval which is re-materialized this is - * {@link Value#ILLEGAL}. - */ - private AllocatableValue location; - - /** - * The stack slot to which all splits of this interval are spilled if necessary. - */ - private StackSlotValue spillSlot; - - /** - * The kind of this interval. - */ - private LIRKind kind; - - /** - * The head of the list of ranges describing this interval. This list is sorted by - * {@linkplain LIRInstruction#id instruction ids}. - */ - private Range first; - - /** - * List of (use-positions, register-priorities) pairs, sorted by use-positions. - */ - private UsePosList usePosList; - - /** - * Iterator used to traverse the ranges of an interval. - */ - private Range current; - - /** - * Link to next interval in a sorted list of intervals that ends with {@link #EndMarker}. - */ - Interval next; - - /** - * The linear-scan state of this interval. - */ - State state; - - private int cachedTo; // cached value: to of last range (-1: not cached) - - /** - * The interval from which this one is derived. If this is a {@linkplain #isSplitParent() split - * parent}, it points to itself. - */ - private Interval splitParent; - - /** - * List of all intervals that are split off from this interval. This is only used if this is a - * {@linkplain #isSplitParent() split parent}. - */ - private List splitChildren = Collections.emptyList(); - - /** - * Current split child that has been active or inactive last (always stored in split parents). - */ - private Interval currentSplitChild; - - /** - * Specifies if move is inserted between currentSplitChild and this interval when interval gets - * active the first time. 
- */ - private boolean insertMoveWhenActivated; - - /** - * For spill move optimization. - */ - private SpillState spillState; - - /** - * Position where this interval is defined (if defined only once). - */ - private int spillDefinitionPos; - - /** - * This interval should be assigned the same location as the hint interval. - */ - private Interval locationHint; - - /** - * The value with which a spilled child interval can be re-materialized. Currently this must be - * a Constant. - */ - private JavaConstant materializedValue; - - /** - * The number of times {@link #addMaterializationValue(JavaConstant)} is called. - */ - private int numMaterializationValuesAdded; - - void assignLocation(AllocatableValue newLocation) { - if (isRegister(newLocation)) { - assert this.location == null : "cannot re-assign location for " + this; - if (newLocation.getLIRKind().equals(LIRKind.Illegal) && !kind.equals(LIRKind.Illegal)) { - this.location = asRegister(newLocation).asValue(kind); - return; - } - } else if (isIllegal(newLocation)) { - assert canMaterialize(); - } else { - assert this.location == null || isRegister(this.location) || (isVirtualStackSlot(this.location) && isStackSlot(newLocation)) : "cannot re-assign location for " + this; - assert isStackSlotValue(newLocation); - assert !newLocation.getLIRKind().equals(LIRKind.Illegal); - assert newLocation.getLIRKind().equals(this.kind); - } - this.location = newLocation; - } - - /** - * Gets the {@linkplain RegisterValue register} or {@linkplain StackSlot spill slot} assigned to - * this interval. - */ - public AllocatableValue location() { - return location; - } - - public LIRKind kind() { - assert !isRegister(operand) : "cannot access type for fixed interval"; - return kind; - } - - void setKind(LIRKind kind) { - assert isRegister(operand) || this.kind().equals(LIRKind.Illegal) || this.kind().equals(kind) : "overwriting existing type"; - this.kind = kind; - } - - public Range first() { - return first; - } - - int from() { - return first.from; - } - - int to() { - if (cachedTo == -1) { - cachedTo = calcTo(); - } - assert cachedTo == calcTo() : "invalid cached value"; - return cachedTo; - } - - int numUsePositions() { - return usePosList.size(); - } - - void setLocationHint(Interval interval) { - locationHint = interval; - } - - boolean isSplitParent() { - return splitParent == this; - } - - boolean isSplitChild() { - return splitParent != this; - } - - /** - * Gets the split parent for this interval. - */ - public Interval splitParent() { - assert splitParent.isSplitParent() : "not a split parent: " + this; - return splitParent; - } - - /** - * Gets the canonical spill slot for this interval. 
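- * The slot is stored in the {@linkplain #splitParent() split parent}, so it is shared by all split children of this interval.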
- */ - StackSlotValue spillSlot() { - return splitParent().spillSlot; - } - - void setSpillSlot(StackSlotValue slot) { - assert splitParent().spillSlot == null || (isVirtualStackSlot(splitParent().spillSlot) && isStackSlot(slot)) : "connot overwrite existing spill slot"; - splitParent().spillSlot = slot; - } - - Interval currentSplitChild() { - return splitParent().currentSplitChild; - } - - void makeCurrentSplitChild() { - splitParent().currentSplitChild = this; - } - - boolean insertMoveWhenActivated() { - return insertMoveWhenActivated; - } - - void setInsertMoveWhenActivated(boolean b) { - insertMoveWhenActivated = b; - } - - // for spill optimization - public SpillState spillState() { - return splitParent().spillState; - } - - int spillDefinitionPos() { - return splitParent().spillDefinitionPos; - } - - void setSpillState(SpillState state) { - assert state.ordinal() >= spillState().ordinal() : "state cannot decrease"; - splitParent().spillState = state; - } - - void setSpillDefinitionPos(int pos) { - assert spillState() == SpillState.SpillInDominator || spillDefinitionPos() == -1 : "cannot set the position twice"; - splitParent().spillDefinitionPos = pos; - } - - // returns true if this interval has a shadow copy on the stack that is always correct - boolean alwaysInMemory() { - return (splitParent().spillState == SpillState.SpillInDominator || splitParent().spillState == SpillState.StoreAtDefinition || splitParent().spillState == SpillState.StartInMemory) && - !canMaterialize(); - } - - void removeFirstUsePos() { - usePosList.removeLowestUsePos(); - } - - // test intersection - boolean intersects(Interval i) { - return first.intersects(i.first); - } - - int intersectsAt(Interval i) { - return first.intersectsAt(i.first); - } - - // range iteration - void rewindRange() { - current = first; - } - - void nextRange() { - assert this != EndMarker : "not allowed on sentinel"; - current = current.next; - } - - int currentFrom() { - return current.from; - } - - int currentTo() { - return current.to; - } - - boolean currentAtEnd() { - return current == Range.EndMarker; - } - - boolean currentIntersects(Interval it) { - return current.intersects(it.current); - } - - int currentIntersectsAt(Interval it) { - return current.intersectsAt(it.current); - } - - /** - * Sentinel interval to denote the end of an interval list. - */ - static final Interval EndMarker = new Interval(Value.ILLEGAL, -1); - - Interval(AllocatableValue operand, int operandNumber) { - assert operand != null; - this.operand = operand; - this.operandNumber = operandNumber; - if (isRegister(operand)) { - location = operand; - } else { - assert isIllegal(operand) || isVariable(operand); - } - this.kind = LIRKind.Illegal; - this.first = Range.EndMarker; - this.usePosList = new UsePosList(4); - this.current = Range.EndMarker; - this.next = EndMarker; - this.cachedTo = -1; - this.spillState = SpillState.NoDefinitionFound; - this.spillDefinitionPos = -1; - splitParent = this; - currentSplitChild = this; - } - - /** - * Sets the value which is used for re-materialization. - */ - void addMaterializationValue(JavaConstant value) { - if (numMaterializationValuesAdded == 0) { - materializedValue = value; - } else { - // Interval is defined on multiple places -> no materialization is possible. - materializedValue = null; - } - numMaterializationValuesAdded++; - } - - /** - * Returns true if this interval can be re-materialized when spilled. This means that no - * spill-moves are needed. 
Instead of restore-moves the {@link #materializedValue} is restored. - */ - public boolean canMaterialize() { - return getMaterializedValue() != null; - } - - /** - * Returns a value which can be moved to a register instead of a restore-move from stack. - */ - public JavaConstant getMaterializedValue() { - return splitParent().materializedValue; - } - - int calcTo() { - assert first != Range.EndMarker : "interval has no range"; - - Range r = first; - while (r.next != Range.EndMarker) { - r = r.next; - } - return r.to; - } - - // consistency check of split-children - boolean checkSplitChildren() { - if (!splitChildren.isEmpty()) { - assert isSplitParent() : "only split parents can have children"; - - for (int i = 0; i < splitChildren.size(); i++) { - Interval i1 = splitChildren.get(i); - - assert i1.splitParent() == this : "not a split child of this interval"; - assert i1.kind().equals(kind()) : "must be equal for all split children"; - assert (i1.spillSlot() == null && spillSlot == null) || i1.spillSlot().equals(spillSlot()) : "must be equal for all split children"; - - for (int j = i + 1; j < splitChildren.size(); j++) { - Interval i2 = splitChildren.get(j); - - assert !i1.operand.equals(i2.operand) : "same register number"; - - if (i1.from() < i2.from()) { - assert i1.to() <= i2.from() && i1.to() < i2.to() : "intervals overlapping"; - } else { - assert i2.from() < i1.from() : "intervals start at same opId"; - assert i2.to() <= i1.from() && i2.to() < i1.to() : "intervals overlapping"; - } - } - } - } - - return true; - } - - public Interval locationHint(boolean searchSplitChild) { - if (!searchSplitChild) { - return locationHint; - } - - if (locationHint != null) { - assert locationHint.isSplitParent() : "ony split parents are valid hint registers"; - - if (locationHint.location != null && isRegister(locationHint.location)) { - return locationHint; - } else if (!locationHint.splitChildren.isEmpty()) { - // search the first split child that has a register assigned - int len = locationHint.splitChildren.size(); - for (int i = 0; i < len; i++) { - Interval interval = locationHint.splitChildren.get(i); - if (interval.location != null && isRegister(interval.location)) { - return interval; - } - } - } - } - - // no hint interval found that has a register assigned - return null; - } - - Interval getSplitChildAtOpId(int opId, LIRInstruction.OperandMode mode, LinearScan allocator) { - assert isSplitParent() : "can only be called for split parents"; - assert opId >= 0 : "invalid opId (method cannot be called for spill moves)"; - - if (splitChildren.isEmpty()) { - assert this.covers(opId, mode) : this + " does not cover " + opId; - return this; - } else { - Interval result = null; - int len = splitChildren.size(); - - // in outputMode, the end of the interval (opId == cur.to()) is not valid - int toOffset = (mode == LIRInstruction.OperandMode.DEF ? 
0 : 1); - - int i; - for (i = 0; i < len; i++) { - Interval cur = splitChildren.get(i); - if (cur.from() <= opId && opId < cur.to() + toOffset) { - if (i > 0) { - // exchange current split child to start of list (faster access for next - // call) - Util.atPutGrow(splitChildren, i, splitChildren.get(0), null); - Util.atPutGrow(splitChildren, 0, cur, null); - } - - // interval found - result = cur; - break; - } - } - - assert checkSplitChild(result, opId, allocator, toOffset, mode); - return result; - } - } - - private boolean checkSplitChild(Interval result, int opId, LinearScan allocator, int toOffset, LIRInstruction.OperandMode mode) { - if (result == null) { - // this is an error - StringBuilder msg = new StringBuilder(this.toString()).append(" has no child at ").append(opId); - if (!splitChildren.isEmpty()) { - Interval firstChild = splitChildren.get(0); - Interval lastChild = splitChildren.get(splitChildren.size() - 1); - msg.append(" (first = ").append(firstChild).append(", last = ").append(lastChild).append(")"); - } - throw new GraalInternalError("Linear Scan Error: %s", msg); - } - - if (!splitChildren.isEmpty()) { - for (Interval interval : splitChildren) { - if (interval != result && interval.from() <= opId && opId < interval.to() + toOffset) { - TTY.println(String.format("two valid result intervals found for opId %d: %d and %d", opId, result.operandNumber, interval.operandNumber)); - TTY.println(result.logString(allocator)); - TTY.println(interval.logString(allocator)); - throw new BailoutException("two valid result intervals found"); - } - } - } - assert result.covers(opId, mode) : "opId not covered by interval"; - return true; - } - - // returns the interval that covers the given opId or null if there is none - Interval getIntervalCoveringOpId(int opId) { - assert opId >= 0 : "invalid opId"; - assert opId < to() : "can only look into the past"; - - if (opId >= from()) { - return this; - } - - Interval parent = splitParent(); - Interval result = null; - - assert !parent.splitChildren.isEmpty() : "no split children available"; - int len = parent.splitChildren.size(); - - for (int i = len - 1; i >= 0; i--) { - Interval cur = parent.splitChildren.get(i); - if (cur.from() <= opId && opId < cur.to()) { - assert result == null : "covered by multiple split children " + result + " and " + cur; - result = cur; - } - } - - return result; - } - - // returns the last split child that ends before the given opId - Interval getSplitChildBeforeOpId(int opId) { - assert opId >= 0 : "invalid opId"; - - Interval parent = splitParent(); - Interval result = null; - - assert !parent.splitChildren.isEmpty() : "no split children available"; - int len = parent.splitChildren.size(); - - for (int i = len - 1; i >= 0; i--) { - Interval cur = parent.splitChildren.get(i); - if (cur.to() <= opId && (result == null || result.to() < cur.to())) { - result = cur; - } - } - - assert result != null : "no split child found"; - return result; - } - - // checks if opId is covered by any split child - boolean splitChildCovers(int opId, LIRInstruction.OperandMode mode) { - assert isSplitParent() : "can only be called for split parents"; - assert opId >= 0 : "invalid opId (method can not be called for spill moves)"; - - if (splitChildren.isEmpty()) { - // simple case if interval was not split - return covers(opId, mode); - - } else { - // extended case: check all split children - int len = splitChildren.size(); - for (int i = 0; i < len; i++) { - Interval cur = splitChildren.get(i); - if (cur.covers(opId, mode)) { - 
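- // found a split child whose range covers opId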
return true; - } - } - return false; - } - } - - private RegisterPriority adaptPriority(RegisterPriority priority) { - /* - * In case of re-materialized values we require that use-operands are registers, because we - * don't have the value in a stack location. (Note that ShouldHaveRegister means that the - * operand can also be a StackSlot). - */ - if (priority == RegisterPriority.ShouldHaveRegister && canMaterialize()) { - return RegisterPriority.MustHaveRegister; - } - return priority; - } - - // Note: use positions are sorted descending . first use has highest index - int firstUsage(RegisterPriority minRegisterPriority) { - assert isVariable(operand) : "cannot access use positions for fixed intervals"; - - for (int i = usePosList.size() - 1; i >= 0; --i) { - RegisterPriority registerPriority = adaptPriority(usePosList.registerPriority(i)); - if (registerPriority.greaterEqual(minRegisterPriority)) { - return usePosList.usePos(i); - } - } - return Integer.MAX_VALUE; - } - - int nextUsage(RegisterPriority minRegisterPriority, int from) { - assert isVariable(operand) : "cannot access use positions for fixed intervals"; - - for (int i = usePosList.size() - 1; i >= 0; --i) { - int usePos = usePosList.usePos(i); - if (usePos >= from && adaptPriority(usePosList.registerPriority(i)).greaterEqual(minRegisterPriority)) { - return usePos; - } - } - return Integer.MAX_VALUE; - } - - int nextUsageExact(RegisterPriority exactRegisterPriority, int from) { - assert isVariable(operand) : "cannot access use positions for fixed intervals"; - - for (int i = usePosList.size() - 1; i >= 0; --i) { - int usePos = usePosList.usePos(i); - if (usePos >= from && adaptPriority(usePosList.registerPriority(i)) == exactRegisterPriority) { - return usePos; - } - } - return Integer.MAX_VALUE; - } - - int previousUsage(RegisterPriority minRegisterPriority, int from) { - assert isVariable(operand) : "cannot access use positions for fixed intervals"; - - int prev = 0; - for (int i = usePosList.size() - 1; i >= 0; --i) { - int usePos = usePosList.usePos(i); - if (usePos > from) { - return prev; - } - if (adaptPriority(usePosList.registerPriority(i)).greaterEqual(minRegisterPriority)) { - prev = usePos; - } - } - return prev; - } - - void addUsePos(int pos, RegisterPriority registerPriority) { - assert covers(pos, LIRInstruction.OperandMode.USE) : "use position not covered by live range"; - - // do not add use positions for precolored intervals because they are never used - if (registerPriority != RegisterPriority.None && isVariable(operand)) { - if (DetailedAsserts.getValue()) { - for (int i = 0; i < usePosList.size(); i++) { - assert pos <= usePosList.usePos(i) : "already added a use-position with lower position"; - if (i > 0) { - assert usePosList.usePos(i) < usePosList.usePos(i - 1) : "not sorted descending"; - } - } - } - - // Note: addUse is called in descending order, so list gets sorted - // automatically by just appending new use positions - int len = usePosList.size(); - if (len == 0 || usePosList.usePos(len - 1) > pos) { - usePosList.add(pos, registerPriority); - } else if (usePosList.registerPriority(len - 1).lessThan(registerPriority)) { - assert usePosList.usePos(len - 1) == pos : "list not sorted correctly"; - usePosList.setRegisterPriority(len - 1, registerPriority); - } - } - } - - void addRange(int from, int to) { - assert from < to : "invalid range"; - assert first() == Range.EndMarker || to < first().next.from : "not inserting at begin of interval"; - assert from <= first().to : "not inserting at begin of 
interval"; - - if (first.from <= to) { - assert first != Range.EndMarker; - // join intersecting ranges - first.from = Math.min(from, first().from); - first.to = Math.max(to, first().to); - } else { - // insert new range - first = new Range(from, to, first()); - } - } - - Interval newSplitChild(LinearScan allocator) { - // allocate new interval - Interval parent = splitParent(); - Interval result = allocator.createDerivedInterval(parent); - result.setKind(kind()); - - result.splitParent = parent; - result.setLocationHint(parent); - - // insert new interval in children-list of parent - if (parent.splitChildren.isEmpty()) { - assert isSplitParent() : "list must be initialized at first split"; - - // Create new non-shared list - parent.splitChildren = new ArrayList<>(4); - parent.splitChildren.add(this); - } - parent.splitChildren.add(result); - - return result; - } - - /** - * Splits this interval at a specified position and returns the remainder as a new child - * interval of this interval's {@linkplain #splitParent() parent} interval. - *

- * - * When an interval is split, a bi-directional link is established between the original - * parent interval and the child intervals that are split off from this interval. - * When a split child is split again, the newly created interval is a direct child of the original - * parent. That is, there is no tree of split children stored, just a flat list. All split - * children are spilled to the same {@linkplain #spillSlot spill slot}. - * - * @param splitPos the position at which to split this interval - * @param allocator the register allocator context - * @return the child interval split off from this interval - */ - Interval split(int splitPos, LinearScan allocator) { - assert isVariable(operand) : "cannot split fixed intervals"; - - // allocate new interval - Interval result = newSplitChild(allocator); - - // split the ranges - Range prev = null; - Range cur = first; - while (cur != Range.EndMarker && cur.to <= splitPos) { - prev = cur; - cur = cur.next; - } - assert cur != Range.EndMarker : "split interval after end of last range"; - - if (cur.from < splitPos) { - result.first = new Range(splitPos, cur.to, cur.next); - cur.to = splitPos; - cur.next = Range.EndMarker; - - } else { - assert prev != null : "split before start of first range"; - result.first = cur; - prev.next = Range.EndMarker; - } - result.current = result.first; - cachedTo = -1; // clear cached value - - // split list of use positions - result.usePosList = usePosList.splitAt(splitPos); - - if (DetailedAsserts.getValue()) { - for (int i = 0; i < usePosList.size(); i++) { - assert usePosList.usePos(i) < splitPos; - } - for (int i = 0; i < result.usePosList.size(); i++) { - assert result.usePosList.usePos(i) >= splitPos; - } - } - return result; - } - - /** - * Splits this interval at a specified position and returns the head as a new interval (this - * interval is the tail). 
- * - * Currently, only the first range can be split, and the new interval must not have split - * positions - */ - Interval splitFromStart(int splitPos, LinearScan allocator) { - assert isVariable(operand) : "cannot split fixed intervals"; - assert splitPos > from() && splitPos < to() : "can only split inside interval"; - assert splitPos > first.from && splitPos <= first.to : "can only split inside first range"; - assert firstUsage(RegisterPriority.None) > splitPos : "can not split when use positions are present"; - - // allocate new interval - Interval result = newSplitChild(allocator); - - // the new interval has only one range (checked by assertion above, - // so the splitting of the ranges is very simple - result.addRange(first.from, splitPos); - - if (splitPos == first.to) { - assert first.next != Range.EndMarker : "must not be at end"; - first = first.next; - } else { - first.from = splitPos; - } - - return result; - } - - // returns true if the opId is inside the interval - boolean covers(int opId, LIRInstruction.OperandMode mode) { - Range cur = first; - - while (cur != Range.EndMarker && cur.to < opId) { - cur = cur.next; - } - if (cur != Range.EndMarker) { - assert cur.to != cur.next.from : "ranges not separated"; - - if (mode == LIRInstruction.OperandMode.DEF) { - return cur.from <= opId && opId < cur.to; - } else { - return cur.from <= opId && opId <= cur.to; - } - } - return false; - } - - // returns true if the interval has any hole between holeFrom and holeTo - // (even if the hole has only the length 1) - boolean hasHoleBetween(int holeFrom, int holeTo) { - assert holeFrom < holeTo : "check"; - assert from() <= holeFrom && holeTo <= to() : "index out of interval"; - - Range cur = first; - while (cur != Range.EndMarker) { - assert cur.to < cur.next.from : "no space between ranges"; - - // hole-range starts before this range . hole - if (holeFrom < cur.from) { - return true; - - // hole-range completely inside this range . no hole - } else { - if (holeTo <= cur.to) { - return false; - - // overlapping of hole-range with this range . hole - } else { - if (holeFrom <= cur.to) { - return true; - } - } - } - - cur = cur.next; - } - - return false; - } - - @Override - public String toString() { - String from = "?"; - String to = "?"; - if (first != null && first != Range.EndMarker) { - from = String.valueOf(from()); - // to() may cache a computed value, modifying the current object, which is a bad idea - // for a printing function. Compute it directly instead. - to = String.valueOf(calcTo()); - } - String locationString = this.location == null ? "" : "@" + this.location; - return operandNumber + ":" + operand + (isRegister(operand) ? "" : locationString) + "[" + from + "," + to + "]"; - } - - /** - * Gets the use position information for this interval. - */ - public UsePosList usePosList() { - return usePosList; - } - - /** - * Gets a single line string for logging the details of this interval to a log stream. 
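- * The line lists the operand, its assigned location (if any), the location hints, the ranges, the use positions and the spill state of this interval.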
- * - * @param allocator the register allocator context - */ - public String logString(LinearScan allocator) { - StringBuilder buf = new StringBuilder(100); - buf.append(operandNumber).append(':').append(operand).append(' '); - if (!isRegister(operand)) { - if (location != null) { - buf.append("location{").append(location).append("} "); - } - } - - buf.append("hints{").append(splitParent.operandNumber); - Interval hint = locationHint(false); - if (hint != null && hint.operandNumber != splitParent.operandNumber) { - buf.append(", ").append(hint.operandNumber); - } - buf.append("} ranges{"); - - // print ranges - Range cur = first; - while (cur != Range.EndMarker) { - if (cur != first) { - buf.append(", "); - } - buf.append(cur); - cur = cur.next; - assert cur != null : "range list not closed with range sentinel"; - } - buf.append("} uses{"); - - // print use positions - int prev = 0; - for (int i = usePosList.size() - 1; i >= 0; --i) { - assert prev < usePosList.usePos(i) : "use positions not sorted"; - if (i != usePosList.size() - 1) { - buf.append(", "); - } - buf.append(usePosList.usePos(i)).append(':').append(usePosList.registerPriority(i)); - prev = usePosList.usePos(i); - } - buf.append("} spill-state{").append(spillState()).append("}"); - if (canMaterialize()) { - buf.append(" (remat:").append(getMaterializedValue().toString()).append(")"); - } - return buf.toString(); - } - - List getSplitChildren() { - return Collections.unmodifiableList(splitChildren); - } -} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/IntervalWalker.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/IntervalWalker.java Fri Feb 06 12:17:20 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,284 +0,0 @@ -/* - * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.compiler.alloc; - -import com.oracle.graal.compiler.alloc.Interval.RegisterBinding; -import com.oracle.graal.compiler.alloc.Interval.RegisterBindingLists; -import com.oracle.graal.compiler.alloc.Interval.State; -import com.oracle.graal.debug.*; - -/** - */ -public class IntervalWalker { - - protected final LinearScan allocator; - - /** - * Sorted list of intervals, not live before the current position. - */ - protected RegisterBindingLists unhandledLists; - - /** - * Sorted list of intervals, live at the current position. 
- */ - protected RegisterBindingLists activeLists; - - /** - * Sorted list of intervals in a life time hole at the current position. - */ - protected RegisterBindingLists inactiveLists; - - /** - * The current position (intercept point through the intervals). - */ - protected int currentPosition; - - /** - * The binding of the current interval being processed. - */ - protected RegisterBinding currentBinding; - - /** - * Processes the {@code currentInterval} interval in an attempt to allocate a physical register - * to it and thus allow it to be moved to a list of {@linkplain #activeLists active} intervals. - * - * @return {@code true} if a register was allocated to the {@code currentInterval} interval - */ - protected boolean activateCurrent(@SuppressWarnings({"unused"}) Interval currentInterval) { - return true; - } - - void walkBefore(int lirOpId) { - walkTo(lirOpId - 1); - } - - void walk() { - walkTo(Integer.MAX_VALUE); - } - - /** - * Creates a new interval walker. - * - * @param allocator the register allocator context - * @param unhandledFixed the list of unhandled {@linkplain RegisterBinding#Fixed fixed} - * intervals - * @param unhandledAny the list of unhandled {@linkplain RegisterBinding#Any non-fixed} - * intervals - */ - IntervalWalker(LinearScan allocator, Interval unhandledFixed, Interval unhandledAny) { - this.allocator = allocator; - - unhandledLists = new RegisterBindingLists(unhandledFixed, unhandledAny, Interval.EndMarker); - activeLists = new RegisterBindingLists(Interval.EndMarker, Interval.EndMarker, Interval.EndMarker); - inactiveLists = new RegisterBindingLists(Interval.EndMarker, Interval.EndMarker, Interval.EndMarker); - currentPosition = -1; - } - - protected void removeFromList(Interval interval) { - if (interval.state == State.Active) { - activeLists.remove(RegisterBinding.Any, interval); - } else { - assert interval.state == State.Inactive : "invalid state"; - inactiveLists.remove(RegisterBinding.Any, interval); - } - } - - private void walkTo(State state, int from) { - assert state == State.Active || state == State.Inactive : "wrong state"; - for (RegisterBinding binding : RegisterBinding.VALUES) { - Interval prevprev = null; - Interval prev = (state == State.Active) ? 
activeLists.get(binding) : inactiveLists.get(binding); - Interval next = prev; - while (next.currentFrom() <= from) { - Interval cur = next; - next = cur.next; - - boolean rangeHasChanged = false; - while (cur.currentTo() <= from) { - cur.nextRange(); - rangeHasChanged = true; - } - - // also handle move from inactive list to active list - rangeHasChanged = rangeHasChanged || (state == State.Inactive && cur.currentFrom() <= from); - - if (rangeHasChanged) { - // remove cur from list - if (prevprev == null) { - if (state == State.Active) { - activeLists.set(binding, next); - } else { - inactiveLists.set(binding, next); - } - } else { - prevprev.next = next; - } - prev = next; - if (cur.currentAtEnd()) { - // move to handled state (not maintained as a list) - cur.state = State.Handled; - intervalMoved(cur, state, State.Handled); - } else if (cur.currentFrom() <= from) { - // sort into active list - activeLists.addToListSortedByCurrentFromPositions(binding, cur); - cur.state = State.Active; - if (prev == cur) { - assert state == State.Active : "check"; - prevprev = prev; - prev = cur.next; - } - intervalMoved(cur, state, State.Active); - } else { - // sort into inactive list - inactiveLists.addToListSortedByCurrentFromPositions(binding, cur); - cur.state = State.Inactive; - if (prev == cur) { - assert state == State.Inactive : "check"; - prevprev = prev; - prev = cur.next; - } - intervalMoved(cur, state, State.Inactive); - } - } else { - prevprev = prev; - prev = cur.next; - } - } - } - } - - /** - * Get the next interval from {@linkplain #unhandledLists} which starts before or at - * {@code toOpId}. The returned interval is removed and {@link #currentBinding} is set. - * - * @postcondition all intervals in {@linkplain #unhandledLists} start after {@code toOpId}. - * - * @return The next interval or null if there is no {@linkplain #unhandledLists unhandled} - * interval at position {@code toOpId}. - */ - private Interval nextInterval(int toOpId) { - RegisterBinding binding; - Interval any = unhandledLists.any; - Interval fixed = unhandledLists.fixed; - - if (any != Interval.EndMarker) { - // intervals may start at same position . prefer fixed interval - binding = fixed != Interval.EndMarker && fixed.from() <= any.from() ? RegisterBinding.Fixed : RegisterBinding.Any; - - assert binding == RegisterBinding.Fixed && fixed.from() <= any.from() || binding == RegisterBinding.Any && any.from() <= fixed.from() : "wrong interval!!!"; - assert any == Interval.EndMarker || fixed == Interval.EndMarker || any.from() != fixed.from() || binding == RegisterBinding.Fixed : "if fixed and any-Interval start at same position, fixed must be processed first"; - - } else if (fixed != Interval.EndMarker) { - binding = RegisterBinding.Fixed; - } else { - return null; - } - Interval currentInterval = unhandledLists.get(binding); - - if (toOpId < currentInterval.from()) { - return null; - } - - currentBinding = binding; - unhandledLists.set(binding, currentInterval.next); - currentInterval.next = Interval.EndMarker; - currentInterval.rewindRange(); - return currentInterval; - } - - /** - * Walk up to {@code toOpId}. - * - * @postcondition {@link #currentPosition} is set to {@code toOpId}, {@link #activeLists} and - * {@link #inactiveLists} are populated and {@link Interval#state}s are up to - * date. 
- */ - protected void walkTo(int toOpId) { - assert currentPosition <= toOpId : "can not walk backwards"; - for (Interval currentInterval = nextInterval(toOpId); currentInterval != null; currentInterval = nextInterval(toOpId)) { - int opId = currentInterval.from(); - - // set currentPosition prior to call of walkTo - currentPosition = opId; - - // update unhandled stack intervals - updateUnhandledStackIntervals(opId); - - // call walkTo even if currentPosition == id - walkTo(State.Active, opId); - walkTo(State.Inactive, opId); - - try (Indent indent = Debug.logAndIndent("walk to op %d", opId)) { - currentInterval.state = State.Active; - if (activateCurrent(currentInterval)) { - activeLists.addToListSortedByCurrentFromPositions(currentBinding, currentInterval); - intervalMoved(currentInterval, State.Unhandled, State.Active); - } - } - } - // set currentPosition prior to call of walkTo - currentPosition = toOpId; - - if (currentPosition <= allocator.maxOpId()) { - // update unhandled stack intervals - updateUnhandledStackIntervals(toOpId); - - // call walkTo if still in range - walkTo(State.Active, toOpId); - walkTo(State.Inactive, toOpId); - } - } - - private void intervalMoved(Interval interval, State from, State to) { - // intervalMoved() is called whenever an interval moves from one interval list to another. - // In the implementation of this method it is prohibited to move the interval to any list. - if (Debug.isLogEnabled()) { - Debug.log("interval moved from %s to %s: %s", from, to, interval.logString(allocator)); - } - } - - /** - * Move {@linkplain #unhandledLists unhandled} stack intervals to - * {@linkplain IntervalWalker #activeLists active}. - * - * Note that for {@linkplain RegisterBinding#Fixed fixed} and {@linkplain RegisterBinding#Any - * any} intervals this is done in {@link #nextInterval(int)}. - */ - private void updateUnhandledStackIntervals(int opId) { - Interval currentInterval = unhandledLists.get(RegisterBinding.Stack); - while (currentInterval != Interval.EndMarker && currentInterval.from() <= opId) { - Interval next = currentInterval.next; - if (currentInterval.to() > opId) { - currentInterval.state = State.Active; - activeLists.addToListSortedByCurrentFromPositions(RegisterBinding.Stack, currentInterval); - intervalMoved(currentInterval, State.Unhandled, State.Active); - } else { - currentInterval.state = State.Handled; - intervalMoved(currentInterval, State.Unhandled, State.Handled); - } - currentInterval = next; - } - unhandledLists.set(RegisterBinding.Stack, currentInterval); - } - -} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScan.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScan.java Fri Feb 06 12:17:20 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,2197 +0,0 @@ -/* - * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.compiler.alloc; - -import static com.oracle.graal.api.code.CodeUtil.*; -import static com.oracle.graal.api.code.ValueUtil.*; -import static com.oracle.graal.compiler.GraalDebugConfig.*; -import static com.oracle.graal.compiler.common.cfg.AbstractControlFlowGraph.*; -import static com.oracle.graal.lir.LIRValueUtil.*; - -import java.util.*; - -import com.oracle.graal.alloc.*; -import com.oracle.graal.api.code.*; -import com.oracle.graal.api.meta.*; -import com.oracle.graal.compiler.alloc.Interval.RegisterBinding; -import com.oracle.graal.compiler.alloc.Interval.RegisterPriority; -import com.oracle.graal.compiler.alloc.Interval.SpillState; -import com.oracle.graal.compiler.common.*; -import com.oracle.graal.compiler.common.cfg.*; -import com.oracle.graal.compiler.gen.*; -import com.oracle.graal.debug.*; -import com.oracle.graal.debug.Debug.Scope; -import com.oracle.graal.lir.*; -import com.oracle.graal.lir.LIRInstruction.OperandFlag; -import com.oracle.graal.lir.LIRInstruction.OperandMode; -import com.oracle.graal.lir.StandardOp.MoveOp; -import com.oracle.graal.lir.framemap.*; -import com.oracle.graal.lir.gen.*; -import com.oracle.graal.nodes.*; -import com.oracle.graal.options.*; -import com.oracle.graal.phases.util.*; - -/** - * An implementation of the linear scan register allocator algorithm described in "Optimized Interval Splitting in a Linear Scan Register Allocator" by Christian Wimmer and - * Hanspeter Moessenboeck. - */ -public final class LinearScan { - - final TargetDescription target; - final LIRGenerationResult res; - final LIR ir; - final FrameMapBuilder frameMapBuilder; - final RegisterAttributes[] registerAttributes; - final Register[] registers; - - final boolean callKillsRegisters; - - public static final int DOMINATOR_SPILL_MOVE_ID = -2; - private static final int SPLIT_INTERVALS_CAPACITY_RIGHT_SHIFT = 1; - - public static class Options { - // @formatter:off - @Option(help = "Enable spill position optimization", type = OptionType.Debug) - public static final OptionValue LSRAOptimizeSpillPosition = new OptionValue<>(true); - // @formatter:on - } - - public static class BlockData { - - /** - * Bit map specifying which operands are live upon entry to this block. These are values - * used in this block or any of its successors where such value are not defined in this - * block. The bit index of an operand is its {@linkplain LinearScan#operandNumber(Value) - * operand number}. - */ - public BitSet liveIn; - - /** - * Bit map specifying which operands are live upon exit from this block. These are values - * used in a successor block that are either defined in this block or were live upon entry - * to this block. The bit index of an operand is its - * {@linkplain LinearScan#operandNumber(Value) operand number}. - */ - public BitSet liveOut; - - /** - * Bit map specifying which operands are used (before being defined) in this block. That is, - * these are the values that are live upon entry to the block. The bit index of an operand - * is its {@linkplain LinearScan#operandNumber(Value) operand number}. 
- */ - public BitSet liveGen; - - /** - * Bit map specifying which operands are defined/overwritten in this block. The bit index of - * an operand is its {@linkplain LinearScan#operandNumber(Value) operand number}. - */ - public BitSet liveKill; - } - - public final BlockMap blockData; - - /** - * List of blocks in linear-scan order. This is only correct as long as the CFG does not change. - */ - final List> sortedBlocks; - - /** - * Map from {@linkplain #operandNumber(Value) operand numbers} to intervals. - */ - Interval[] intervals; - - /** - * The number of valid entries in {@link #intervals}. - */ - int intervalsSize; - - /** - * The index of the first entry in {@link #intervals} for a - * {@linkplain #createDerivedInterval(Interval) derived interval}. - */ - int firstDerivedIntervalIndex = -1; - - /** - * Intervals sorted by {@link Interval#from()}. - */ - Interval[] sortedIntervals; - - /** - * Map from an instruction {@linkplain LIRInstruction#id id} to the instruction. Entries should - * be retrieved with {@link #instructionForId(int)} as the id is not simply an index into this - * array. - */ - LIRInstruction[] opIdToInstructionMap; - - /** - * Map from an instruction {@linkplain LIRInstruction#id id} to the {@linkplain AbstractBlock - * block} containing the instruction. Entries should be retrieved with {@link #blockForId(int)} - * as the id is not simply an index into this array. - */ - AbstractBlock[] opIdToBlockMap; - - /** - * Bit set for each variable that is contained in each loop. - */ - BitMap2D intervalInLoop; - - /** - * The {@linkplain #operandNumber(Value) number} of the first variable operand allocated. - */ - private final int firstVariableNumber; - - public LinearScan(TargetDescription target, LIRGenerationResult res) { - this.target = target; - this.res = res; - this.ir = res.getLIR(); - this.frameMapBuilder = res.getFrameMapBuilder(); - this.sortedBlocks = ir.linearScanOrder(); - this.registerAttributes = frameMapBuilder.getRegisterConfig().getAttributesMap(); - - this.registers = target.arch.getRegisters(); - this.firstVariableNumber = registers.length; - this.blockData = new BlockMap<>(ir.getControlFlowGraph()); - - // If all allocatable registers are caller saved, then no registers are live across a call - // site. The register allocator can save time not trying to find a register at a call site. - this.callKillsRegisters = this.frameMapBuilder.getRegisterConfig().areAllAllocatableRegistersCallerSaved(); - } - - public int getFirstLirInstructionId(AbstractBlock block) { - int result = ir.getLIRforBlock(block).get(0).id(); - assert result >= 0; - return result; - } - - public int getLastLirInstructionId(AbstractBlock block) { - List instructions = ir.getLIRforBlock(block); - int result = instructions.get(instructions.size() - 1).id(); - assert result >= 0; - return result; - } - - public static boolean isVariableOrRegister(Value value) { - return isVariable(value) || isRegister(value); - } - - /** - * Converts an operand (variable or register) to an index in a flat address space covering all - * the {@linkplain Variable variables} and {@linkplain RegisterValue registers} being processed - * by this allocator. - */ - private int operandNumber(Value operand) { - if (isRegister(operand)) { - int number = asRegister(operand).number; - assert number < firstVariableNumber; - return number; - } - assert isVariable(operand) : operand; - return firstVariableNumber + ((Variable) operand).index; - } - - /** - * Gets the number of operands. 
This value will increase by 1 for each new variable. - */ - private int operandSize() { - return firstVariableNumber + ir.numVariables(); - } - - /** - * Gets the highest operand number for a register operand. This value will never change. - */ - public int maxRegisterNumber() { - return firstVariableNumber - 1; - } - - static final IntervalPredicate IS_PRECOLORED_INTERVAL = new IntervalPredicate() { - - @Override - public boolean apply(Interval i) { - return isRegister(i.operand); - } - }; - - static final IntervalPredicate IS_VARIABLE_INTERVAL = new IntervalPredicate() { - - @Override - public boolean apply(Interval i) { - return isVariable(i.operand); - } - }; - - static final IntervalPredicate IS_STACK_INTERVAL = new IntervalPredicate() { - - @Override - public boolean apply(Interval i) { - return !isRegister(i.operand); - } - }; - - /** - * Gets an object describing the attributes of a given register according to this register - * configuration. - */ - RegisterAttributes attributes(Register reg) { - return registerAttributes[reg.number]; - } - - void assignSpillSlot(Interval interval) { - // assign the canonical spill slot of the parent (if a part of the interval - // is already spilled) or allocate a new spill slot - if (interval.canMaterialize()) { - interval.assignLocation(Value.ILLEGAL); - } else if (interval.spillSlot() != null) { - interval.assignLocation(interval.spillSlot()); - } else { - VirtualStackSlot slot = frameMapBuilder.allocateSpillSlot(interval.kind()); - interval.setSpillSlot(slot); - interval.assignLocation(slot); - } - } - - /** - * Creates a new interval. - * - * @param operand the operand for the interval - * @return the created interval - */ - Interval createInterval(AllocatableValue operand) { - assert isLegal(operand); - int operandNumber = operandNumber(operand); - Interval interval = new Interval(operand, operandNumber); - assert operandNumber < intervalsSize; - assert intervals[operandNumber] == null; - intervals[operandNumber] = interval; - return interval; - } - - /** - * Creates an interval as a result of splitting or spilling another interval. - * - * @param source an interval being split or spilled - * @return a new interval derived from {@code source} - */ - Interval createDerivedInterval(Interval source) { - if (firstDerivedIntervalIndex == -1) { - firstDerivedIntervalIndex = intervalsSize; - } - if (intervalsSize == intervals.length) { - intervals = Arrays.copyOf(intervals, intervals.length + (intervals.length >> SPLIT_INTERVALS_CAPACITY_RIGHT_SHIFT)); - } - intervalsSize++; - Variable variable = new Variable(source.kind(), ir.nextVariable()); - - Interval interval = createInterval(variable); - assert intervals[intervalsSize - 1] == interval; - return interval; - } - - // access to block list (sorted in linear scan order) - int blockCount() { - return sortedBlocks.size(); - } - - AbstractBlock blockAt(int index) { - return sortedBlocks.get(index); - } - - /** - * Gets the size of the {@link BlockData#liveIn} and {@link BlockData#liveOut} sets for a basic - * block. These sets do not include any operands allocated as a result of creating - * {@linkplain #createDerivedInterval(Interval) derived intervals}. - */ - int liveSetSize() { - return firstDerivedIntervalIndex == -1 ? 
operandSize() : firstDerivedIntervalIndex; - } - - int numLoops() { - return ir.getControlFlowGraph().getLoops().size(); - } - - boolean isIntervalInLoop(int interval, int loop) { - return intervalInLoop.at(interval, loop); - } - - Interval intervalFor(int operandNumber) { - return intervals[operandNumber]; - } - - Interval intervalFor(Value operand) { - int operandNumber = operandNumber(operand); - assert operandNumber < intervalsSize; - return intervals[operandNumber]; - } - - Interval getOrCreateInterval(AllocatableValue operand) { - Interval ret = intervalFor(operand); - if (ret == null) { - return createInterval(operand); - } else { - return ret; - } - } - - /** - * Gets the highest instruction id allocated by this object. - */ - int maxOpId() { - assert opIdToInstructionMap.length > 0 : "no operations"; - return (opIdToInstructionMap.length - 1) << 1; - } - - /** - * Converts an {@linkplain LIRInstruction#id instruction id} to an instruction index. All LIR - * instructions in a method have an index one greater than their linear-scan order predecesor - * with the first instruction having an index of 0. - */ - static int opIdToIndex(int opId) { - return opId >> 1; - } - - /** - * Retrieves the {@link LIRInstruction} based on its {@linkplain LIRInstruction#id id}. - * - * @param opId an instruction {@linkplain LIRInstruction#id id} - * @return the instruction whose {@linkplain LIRInstruction#id} {@code == id} - */ - LIRInstruction instructionForId(int opId) { - assert isEven(opId) : "opId not even"; - LIRInstruction instr = opIdToInstructionMap[opIdToIndex(opId)]; - assert instr.id() == opId; - return instr; - } - - /** - * Gets the block containing a given instruction. - * - * @param opId an instruction {@linkplain LIRInstruction#id id} - * @return the block containing the instruction denoted by {@code opId} - */ - AbstractBlock blockForId(int opId) { - assert opIdToBlockMap.length > 0 && opId >= 0 && opId <= maxOpId() + 1 : "opId out of range"; - return opIdToBlockMap[opIdToIndex(opId)]; - } - - boolean isBlockBegin(int opId) { - return opId == 0 || blockForId(opId) != blockForId(opId - 1); - } - - boolean coversBlockBegin(int opId1, int opId2) { - return blockForId(opId1) != blockForId(opId2); - } - - /** - * Determines if an {@link LIRInstruction} destroys all caller saved registers. - * - * @param opId an instruction {@linkplain LIRInstruction#id id} - * @return {@code true} if the instruction denoted by {@code id} destroys all caller saved - * registers. - */ - boolean hasCall(int opId) { - assert isEven(opId) : "opId not even"; - return instructionForId(opId).destroysCallerSavedRegisters(); - } - - /** - * Eliminates moves from register to stack if the stack slot is known to be correct. 
- */ - void changeSpillDefinitionPos(Interval interval, int defPos) { - assert interval.isSplitParent() : "can only be called for split parents"; - - switch (interval.spillState()) { - case NoDefinitionFound: - assert interval.spillDefinitionPos() == -1 : "must not be set before"; - interval.setSpillDefinitionPos(defPos); - interval.setSpillState(SpillState.NoSpillStore); - break; - - case NoSpillStore: - assert defPos <= interval.spillDefinitionPos() : "positions are processed in reverse order when intervals are created"; - if (defPos < interval.spillDefinitionPos() - 2) { - // second definition found, so no spill optimization possible for this interval - interval.setSpillState(SpillState.NoOptimization); - } else { - // two consecutive definitions (because of two-operand LIR form) - assert blockForId(defPos) == blockForId(interval.spillDefinitionPos()) : "block must be equal"; - } - break; - - case NoOptimization: - // nothing to do - break; - - default: - throw new BailoutException("other states not allowed at this time"); - } - } - - // called during register allocation - void changeSpillState(Interval interval, int spillPos) { - switch (interval.spillState()) { - case NoSpillStore: { - int defLoopDepth = blockForId(interval.spillDefinitionPos()).getLoopDepth(); - int spillLoopDepth = blockForId(spillPos).getLoopDepth(); - - if (defLoopDepth < spillLoopDepth) { - // the loop depth of the spilling position is higher than the loop depth - // at the definition of the interval, so move the write to memory out of the loop. - if (Options.LSRAOptimizeSpillPosition.getValue()) { - // find the best spill position in the dominator tree - interval.setSpillState(SpillState.SpillInDominator); - } else { - // store at definition of the interval - interval.setSpillState(SpillState.StoreAtDefinition); - } - } else { - // the interval is currently spilled only once, so for now there is no - // reason to store the interval at the definition - interval.setSpillState(SpillState.OneSpillStore); - } - break; - } - - case OneSpillStore: { - if (Options.LSRAOptimizeSpillPosition.getValue()) { - // the interval is spilled more than once - interval.setSpillState(SpillState.SpillInDominator); - } else { - // it is better to store it to - // memory at the definition - interval.setSpillState(SpillState.StoreAtDefinition); - } - break; - } - - case SpillInDominator: - case StoreAtDefinition: - case StartInMemory: - case NoOptimization: - case NoDefinitionFound: - // nothing to do - break; - - default: - throw new BailoutException("other states not allowed at this time"); - } - } - - abstract static class IntervalPredicate { - - abstract boolean apply(Interval i); - } - - private static final IntervalPredicate mustStoreAtDefinition = new IntervalPredicate() { - - @Override - public boolean apply(Interval i) { - return i.isSplitParent() && i.spillState() == SpillState.StoreAtDefinition; - } - }; - - // called once before assignment of register numbers - void eliminateSpillMoves() { - try (Indent indent = Debug.logAndIndent("Eliminating unnecessary spill moves")) { - - // collect all intervals that must be stored after their definition. 
- // the list is sorted by Interval.spillDefinitionPos - Interval interval; - interval = createUnhandledLists(mustStoreAtDefinition, null).first; - if (DetailedAsserts.getValue()) { - checkIntervals(interval); - } - - LIRInsertionBuffer insertionBuffer = new LIRInsertionBuffer(); - for (AbstractBlock block : sortedBlocks) { - List instructions = ir.getLIRforBlock(block); - int numInst = instructions.size(); - - // iterate all instructions of the block. skip the first - // because it is always a label - for (int j = 1; j < numInst; j++) { - LIRInstruction op = instructions.get(j); - int opId = op.id(); - - if (opId == -1) { - MoveOp move = (MoveOp) op; - // remove move from register to stack if the stack slot is guaranteed to be - // correct. - // only moves that have been inserted by LinearScan can be removed. - assert isVariable(move.getResult()) : "LinearScan inserts only moves to variables"; - - Interval curInterval = intervalFor(move.getResult()); - - if (!isRegister(curInterval.location()) && curInterval.alwaysInMemory()) { - // move target is a stack slot that is always correct, so eliminate - // instruction - if (Debug.isLogEnabled()) { - Debug.log("eliminating move from interval %d to %d", operandNumber(move.getInput()), operandNumber(move.getResult())); - } - // null-instructions are deleted by assignRegNum - instructions.set(j, null); - } - - } else { - // insert move from register to stack just after - // the beginning of the interval - assert interval == Interval.EndMarker || interval.spillDefinitionPos() >= opId : "invalid order"; - assert interval == Interval.EndMarker || (interval.isSplitParent() && interval.spillState() == SpillState.StoreAtDefinition) : "invalid interval"; - - while (interval != Interval.EndMarker && interval.spillDefinitionPos() == opId) { - if (!interval.canMaterialize()) { - if (!insertionBuffer.initialized()) { - // prepare insertion buffer (appended when all instructions in - // the block are processed) - insertionBuffer.init(instructions); - } - - AllocatableValue fromLocation = interval.location(); - AllocatableValue toLocation = canonicalSpillOpr(interval); - - assert isRegister(fromLocation) : "from operand must be a register but is: " + fromLocation + " toLocation=" + toLocation + " spillState=" + interval.spillState(); - assert isStackSlotValue(toLocation) : "to operand must be a stack slot"; - - insertionBuffer.append(j + 1, ir.getSpillMoveFactory().createMove(toLocation, fromLocation)); - - Debug.log("inserting move after definition of interval %d to stack slot %s at opId %d", interval.operandNumber, interval.spillSlot(), opId); - } - interval = interval.next; - } - } - } // end of instruction iteration - - if (insertionBuffer.initialized()) { - insertionBuffer.finish(); - } - } // end of block iteration - - assert interval == Interval.EndMarker : "missed an interval"; - } - } - - private static void checkIntervals(Interval interval) { - Interval prev = null; - Interval temp = interval; - while (temp != Interval.EndMarker) { - assert temp.spillDefinitionPos() > 0 : "invalid spill definition pos"; - if (prev != null) { - assert temp.from() >= prev.from() : "intervals not sorted"; - assert temp.spillDefinitionPos() >= prev.spillDefinitionPos() : "when intervals are sorted by from : then they must also be sorted by spillDefinitionPos"; - } - - assert temp.spillSlot() != null || temp.canMaterialize() : "interval has no spill slot assigned"; - assert temp.spillDefinitionPos() >= temp.from() : "invalid order"; - assert temp.spillDefinitionPos() <= 
temp.from() + 2 : "only intervals defined once at their start-pos can be optimized"; - - Debug.log("interval %d (from %d to %d) must be stored at %d", temp.operandNumber, temp.from(), temp.to(), temp.spillDefinitionPos()); - - prev = temp; - temp = temp.next; - } - } - - /** - * Numbers all instructions in all blocks. The numbering follows the - * {@linkplain ComputeBlockOrder linear scan order}. - */ - void numberInstructions() { - - intervalsSize = operandSize(); - intervals = new Interval[intervalsSize + (intervalsSize >> SPLIT_INTERVALS_CAPACITY_RIGHT_SHIFT)]; - - ValueConsumer setVariableConsumer = (value, mode, flags) -> { - if (isVariable(value)) { - getOrCreateInterval(asVariable(value)); - } - }; - - // Assign IDs to LIR nodes and build a mapping, lirOps, from ID to LIRInstruction node. - int numInstructions = 0; - for (AbstractBlock block : sortedBlocks) { - numInstructions += ir.getLIRforBlock(block).size(); - } - - // initialize with correct length - opIdToInstructionMap = new LIRInstruction[numInstructions]; - opIdToBlockMap = new AbstractBlock[numInstructions]; - - int opId = 0; - int index = 0; - for (AbstractBlock block : sortedBlocks) { - blockData.put(block, new BlockData()); - - List instructions = ir.getLIRforBlock(block); - - int numInst = instructions.size(); - for (int j = 0; j < numInst; j++) { - LIRInstruction op = instructions.get(j); - op.setId(opId); - - opIdToInstructionMap[index] = op; - opIdToBlockMap[index] = block; - assert instructionForId(opId) == op : "must match"; - - op.visitEachTemp(setVariableConsumer); - op.visitEachOutput(setVariableConsumer); - - index++; - opId += 2; // numbering of lirOps by two - } - } - assert index == numInstructions : "must match"; - assert (index << 1) == opId : "must match: " + (index << 1); - } - - /** - * Computes local live sets (i.e. {@link BlockData#liveGen} and {@link BlockData#liveKill}) - * separately for each block. 
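 *
 * Editor's illustration (hypothetical block, not from the original file): for a block containing
 *
 *     v2 = v0 + 1      // v0 is used before any definition in this block
 *     v1 = v2 * v0     // v2 was defined above, so it does not enter liveGen
 *
 * the resulting sets are liveGen = {v0} and liveKill = {v1, v2}.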
- */ - void computeLocalLiveSets() { - int liveSize = liveSetSize(); - - intervalInLoop = new BitMap2D(operandSize(), numLoops()); - - // iterate all blocks - for (final AbstractBlock block : sortedBlocks) { - try (Indent indent = Debug.logAndIndent("compute local live sets for block %d", block.getId())) { - - final BitSet liveGen = new BitSet(liveSize); - final BitSet liveKill = new BitSet(liveSize); - - List instructions = ir.getLIRforBlock(block); - int numInst = instructions.size(); - - ValueConsumer useConsumer = (operand, mode, flags) -> { - if (isVariable(operand)) { - int operandNum = operandNumber(operand); - if (!liveKill.get(operandNum)) { - liveGen.set(operandNum); - Debug.log("liveGen for operand %d", operandNum); - } - if (block.getLoop() != null) { - intervalInLoop.setBit(operandNum, block.getLoop().getIndex()); - } - } - - if (DetailedAsserts.getValue()) { - verifyInput(block, liveKill, operand); - } - }; - ValueConsumer stateConsumer = (operand, mode, flags) -> { - if (isVariableOrRegister(operand)) { - int operandNum = operandNumber(operand); - if (!liveKill.get(operandNum)) { - liveGen.set(operandNum); - Debug.log("liveGen in state for operand %d", operandNum); - } - } - }; - ValueConsumer defConsumer = (operand, mode, flags) -> { - if (isVariable(operand)) { - int varNum = operandNumber(operand); - liveKill.set(varNum); - Debug.log("liveKill for operand %d", varNum); - if (block.getLoop() != null) { - intervalInLoop.setBit(varNum, block.getLoop().getIndex()); - } - } - - if (DetailedAsserts.getValue()) { - // fixed intervals are never live at block boundaries, so - // they need not be processed in live sets - // process them only in debug mode so that this can be checked - verifyTemp(liveKill, operand); - } - }; - - // iterate all instructions of the block - for (int j = 0; j < numInst; j++) { - final LIRInstruction op = instructions.get(j); - - try (Indent indent2 = Debug.logAndIndent("handle op %d", op.id())) { - op.visitEachInput(useConsumer); - op.visitEachAlive(useConsumer); - // Add uses of live locals from interpreter's point of view for proper debug - // information generation - op.visitEachState(stateConsumer); - op.visitEachTemp(defConsumer); - op.visitEachOutput(defConsumer); - } - } // end of instruction iteration - - BlockData blockSets = blockData.get(block); - blockSets.liveGen = liveGen; - blockSets.liveKill = liveKill; - blockSets.liveIn = new BitSet(liveSize); - blockSets.liveOut = new BitSet(liveSize); - - Debug.log("liveGen B%d %s", block.getId(), blockSets.liveGen); - Debug.log("liveKill B%d %s", block.getId(), blockSets.liveKill); - - } - } // end of block iteration - } - - private void verifyTemp(BitSet liveKill, Value operand) { - // fixed intervals are never live at block boundaries, so - // they need not be processed in live sets - // process them only in debug mode so that this can be checked - if (isRegister(operand)) { - if (isProcessed(operand)) { - liveKill.set(operandNumber(operand)); - } - } - } - - private void verifyInput(AbstractBlock block, BitSet liveKill, Value operand) { - // fixed intervals are never live at block boundaries, so - // they need not be processed in live sets. - // this is checked by these assertions to be sure about it. - // the entry block may have incoming - // values in registers, which is ok. 
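        /*
         * Editor's illustration (hypothetical register/variable names, not part of the original
         * file): in any block other than the entry block a fixed register may only be read after
         * it was written in the same block, e.g.
         *
         *     B3:  rax = ...          // def of the fixed register in this block
         *          ... = rax + v7     // ok, defined above
         *
         * whereas the entry block may read rax directly because it can hold an incoming argument.
         */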
- if (isRegister(operand) && block != ir.getControlFlowGraph().getStartBlock()) { - if (isProcessed(operand)) { - assert liveKill.get(operandNumber(operand)) : "using fixed register that is not defined in this block"; - } - } - } - - /** - * Performs a backward dataflow analysis to compute global live sets (i.e. - * {@link BlockData#liveIn} and {@link BlockData#liveOut}) for each block. - */ - void computeGlobalLiveSets() { - try (Indent indent = Debug.logAndIndent("compute global live sets")) { - int numBlocks = blockCount(); - boolean changeOccurred; - boolean changeOccurredInBlock; - int iterationCount = 0; - BitSet liveOut = new BitSet(liveSetSize()); // scratch set for calculations - - // Perform a backward dataflow analysis to compute liveOut and liveIn for each block. - // The loop is executed until a fixpoint is reached (no changes in an iteration) - do { - changeOccurred = false; - - try (Indent indent2 = Debug.logAndIndent("new iteration %d", iterationCount)) { - - // iterate all blocks in reverse order - for (int i = numBlocks - 1; i >= 0; i--) { - AbstractBlock block = blockAt(i); - BlockData blockSets = blockData.get(block); - - changeOccurredInBlock = false; - - // liveOut(block) is the union of liveIn(sux), for successors sux of block - int n = block.getSuccessorCount(); - if (n > 0) { - liveOut.clear(); - // block has successors - if (n > 0) { - for (AbstractBlock successor : block.getSuccessors()) { - liveOut.or(blockData.get(successor).liveIn); - } - } - - if (!blockSets.liveOut.equals(liveOut)) { - // A change occurred. Swap the old and new live out - // sets to avoid copying. - BitSet temp = blockSets.liveOut; - blockSets.liveOut = liveOut; - liveOut = temp; - - changeOccurred = true; - changeOccurredInBlock = true; - } - } - - if (iterationCount == 0 || changeOccurredInBlock) { - // liveIn(block) is the union of liveGen(block) with (liveOut(block) & - // !liveKill(block)) - // note: liveIn has to be computed only in first iteration - // or if liveOut has changed! - BitSet liveIn = blockSets.liveIn; - liveIn.clear(); - liveIn.or(blockSets.liveOut); - liveIn.andNot(blockSets.liveKill); - liveIn.or(blockSets.liveGen); - - Debug.log("block %d: livein = %s, liveout = %s", block.getId(), liveIn, blockSets.liveOut); - } - } - iterationCount++; - - if (changeOccurred && iterationCount > 50) { - throw new BailoutException("too many iterations in computeGlobalLiveSets"); - } - } - } while (changeOccurred); - - if (DetailedAsserts.getValue()) { - verifyLiveness(); - } - - // check that the liveIn set of the first block is empty - AbstractBlock startBlock = ir.getControlFlowGraph().getStartBlock(); - if (blockData.get(startBlock).liveIn.cardinality() != 0) { - if (DetailedAsserts.getValue()) { - reportFailure(numBlocks); - } - // bailout if this occurs in product mode. 
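                /*
                 * Editor's note (illustrative, not part of the original file): an operand in the
                 * liveIn set of the start block is live on entry to the method, i.e. it is read
                 * on some path before any write, e.g.
                 *
                 *     B0 (start):  ... = v5   // v5 appears in liveGen(B0) but in no liveKill set
                 *
                 * which means the LIR uses an undefined value, hence the bailout below.
                 */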
- throw new GraalInternalError("liveIn set of first block must be empty: " + blockData.get(startBlock).liveIn); - } - } - } - - private static NodeLIRBuilder getNodeLIRGeneratorFromDebugContext() { - if (Debug.isEnabled()) { - NodeLIRBuilder lirGen = Debug.contextLookup(NodeLIRBuilder.class); - assert lirGen != null; - return lirGen; - } - return null; - } - - private static ValueNode getValueForOperandFromDebugContext(Value value) { - NodeLIRBuilder gen = getNodeLIRGeneratorFromDebugContext(); - if (gen != null) { - return gen.valueForOperand(value); - } - return null; - } - - private void reportFailure(int numBlocks) { - try (Scope s = Debug.forceLog()) { - try (Indent indent = Debug.logAndIndent("report failure")) { - - BitSet startBlockLiveIn = blockData.get(ir.getControlFlowGraph().getStartBlock()).liveIn; - try (Indent indent2 = Debug.logAndIndent("Error: liveIn set of first block must be empty (when this fails, variables are used before they are defined):")) { - for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) { - Interval interval = intervalFor(operandNum); - if (interval != null) { - Value operand = interval.operand; - Debug.log("var %d; operand=%s; node=%s", operandNum, operand, getValueForOperandFromDebugContext(operand)); - } else { - Debug.log("var %d; missing operand", operandNum); - } - } - } - - // print some additional information to simplify debugging - for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) { - Interval interval = intervalFor(operandNum); - Value operand = null; - ValueNode valueForOperandFromDebugContext = null; - if (interval != null) { - operand = interval.operand; - valueForOperandFromDebugContext = getValueForOperandFromDebugContext(operand); - } - try (Indent indent2 = Debug.logAndIndent("---- Detailed information for var %d; operand=%s; node=%s ----", operandNum, operand, valueForOperandFromDebugContext)) { - - Deque> definedIn = new ArrayDeque<>(); - HashSet> usedIn = new HashSet<>(); - for (AbstractBlock block : sortedBlocks) { - if (blockData.get(block).liveGen.get(operandNum)) { - usedIn.add(block); - try (Indent indent3 = Debug.logAndIndent("used in block B%d", block.getId())) { - for (LIRInstruction ins : ir.getLIRforBlock(block)) { - try (Indent indent4 = Debug.logAndIndent("%d: %s", ins.id(), ins)) { - ins.forEachState((liveStateOperand, mode, flags) -> { - Debug.log("operand=%s", liveStateOperand); - return liveStateOperand; - }); - } - } - } - } - if (blockData.get(block).liveKill.get(operandNum)) { - definedIn.add(block); - try (Indent indent3 = Debug.logAndIndent("defined in block B%d", block.getId())) { - for (LIRInstruction ins : ir.getLIRforBlock(block)) { - Debug.log("%d: %s", ins.id(), ins); - } - } - } - } - - int[] hitCount = new int[numBlocks]; - - while (!definedIn.isEmpty()) { - AbstractBlock block = definedIn.removeFirst(); - usedIn.remove(block); - for (AbstractBlock successor : block.getSuccessors()) { - if (successor.isLoopHeader()) { - if (!block.isLoopEnd()) { - definedIn.add(successor); - } - } else { - if (++hitCount[successor.getId()] == successor.getPredecessorCount()) { - definedIn.add(successor); - } - } - } - } - try (Indent indent3 = Debug.logAndIndent("**** offending usages are in: ")) { - for (AbstractBlock block : usedIn) { - Debug.log("B%d", block.getId()); - } - } - } - } - } - } catch (Throwable e) { - throw Debug.handle(e); - } - } - - private void 
verifyLiveness() { - // check that fixed intervals are not live at block boundaries - // (live set must be empty at fixed intervals) - for (AbstractBlock block : sortedBlocks) { - for (int j = 0; j <= maxRegisterNumber(); j++) { - assert !blockData.get(block).liveIn.get(j) : "liveIn set of fixed register must be empty"; - assert !blockData.get(block).liveOut.get(j) : "liveOut set of fixed register must be empty"; - assert !blockData.get(block).liveGen.get(j) : "liveGen set of fixed register must be empty"; - } - } - } - - void addUse(AllocatableValue operand, int from, int to, RegisterPriority registerPriority, LIRKind kind) { - if (!isProcessed(operand)) { - return; - } - - Interval interval = getOrCreateInterval(operand); - if (!kind.equals(LIRKind.Illegal)) { - interval.setKind(kind); - } - - interval.addRange(from, to); - - // Register use position at even instruction id. - interval.addUsePos(to & ~1, registerPriority); - - Debug.log("add use: %s, from %d to %d (%s)", interval, from, to, registerPriority.name()); - } - - void addTemp(AllocatableValue operand, int tempPos, RegisterPriority registerPriority, LIRKind kind) { - if (!isProcessed(operand)) { - return; - } - - Interval interval = getOrCreateInterval(operand); - if (!kind.equals(LIRKind.Illegal)) { - interval.setKind(kind); - } - - interval.addRange(tempPos, tempPos + 1); - interval.addUsePos(tempPos, registerPriority); - interval.addMaterializationValue(null); - - Debug.log("add temp: %s tempPos %d (%s)", interval, tempPos, RegisterPriority.MustHaveRegister.name()); - } - - boolean isProcessed(Value operand) { - return !isRegister(operand) || attributes(asRegister(operand)).isAllocatable(); - } - - void addDef(AllocatableValue operand, LIRInstruction op, RegisterPriority registerPriority, LIRKind kind) { - if (!isProcessed(operand)) { - return; - } - int defPos = op.id(); - - Interval interval = getOrCreateInterval(operand); - if (!kind.equals(LIRKind.Illegal)) { - interval.setKind(kind); - } - - Range r = interval.first(); - if (r.from <= defPos) { - // Update the starting point (when a range is first created for a use, its - // start is the beginning of the current block until a def is encountered.) - r.from = defPos; - interval.addUsePos(defPos, registerPriority); - - } else { - // Dead value - make vacuous interval - // also add register priority for dead intervals - interval.addRange(defPos, defPos + 1); - interval.addUsePos(defPos, registerPriority); - Debug.log("Warning: def of operand %s at %d occurs without use", operand, defPos); - } - - changeSpillDefinitionPos(interval, defPos); - if (registerPriority == RegisterPriority.None && interval.spillState().ordinal() <= SpillState.StartInMemory.ordinal()) { - // detection of method-parameters and roundfp-results - interval.setSpillState(SpillState.StartInMemory); - } - interval.addMaterializationValue(LinearScan.getMaterializedValue(op, operand, interval)); - - Debug.log("add def: %s defPos %d (%s)", interval, defPos, registerPriority.name()); - } - - /** - * Determines the register priority for an instruction's output/result operand. - */ - static RegisterPriority registerPriorityOfOutputOperand(LIRInstruction op) { - if (op instanceof MoveOp) { - MoveOp move = (MoveOp) op; - if (optimizeMethodArgument(move.getInput())) { - return RegisterPriority.None; - } - } - - // all other operands require a register - return RegisterPriority.MustHaveRegister; - } - - /** - * Determines the priority which with an instruction's input operand will be allocated a - * register. 
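 *
 * Editor's illustration (not part of the original file): an input whose OperandFlag.STACK flag
 * is set can be consumed directly from a stack slot, so it only "should" have a register and may
 * stay spilled under register pressure; all other inputs "must" have one:
 *
 *     registerPriorityOfInputOperand(EnumSet.of(OperandFlag.STACK))     // ShouldHaveRegister
 *     registerPriorityOfInputOperand(EnumSet.noneOf(OperandFlag.class)) // MustHaveRegister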
- */ - static RegisterPriority registerPriorityOfInputOperand(EnumSet flags) { - if (flags.contains(OperandFlag.STACK)) { - return RegisterPriority.ShouldHaveRegister; - } - // all other operands require a register - return RegisterPriority.MustHaveRegister; - } - - private static boolean optimizeMethodArgument(Value value) { - /* - * Object method arguments that are passed on the stack are currently not optimized because - * this requires that the runtime visits method arguments during stack walking. - */ - return isStackSlot(value) && asStackSlot(value).isInCallerFrame() && value.getKind() != Kind.Object; - } - - /** - * Optimizes moves related to incoming stack based arguments. The interval for the destination - * of such moves is assigned the stack slot (which is in the caller's frame) as its spill slot. - */ - void handleMethodArguments(LIRInstruction op) { - if (op instanceof MoveOp) { - MoveOp move = (MoveOp) op; - if (optimizeMethodArgument(move.getInput())) { - StackSlot slot = asStackSlot(move.getInput()); - if (DetailedAsserts.getValue()) { - assert op.id() > 0 : "invalid id"; - assert blockForId(op.id()).getPredecessorCount() == 0 : "move from stack must be in first block"; - assert isVariable(move.getResult()) : "result of move must be a variable"; - - Debug.log("found move from stack slot %s to %s", slot, move.getResult()); - } - - Interval interval = intervalFor(move.getResult()); - interval.setSpillSlot(slot); - interval.assignLocation(slot); - } - } - } - - void addRegisterHint(final LIRInstruction op, final Value targetValue, OperandMode mode, EnumSet flags, final boolean hintAtDef) { - if (flags.contains(OperandFlag.HINT) && isVariableOrRegister(targetValue)) { - - op.forEachRegisterHint(targetValue, mode, (registerHint, valueMode, valueFlags) -> { - if (isVariableOrRegister(registerHint)) { - Interval from = getOrCreateInterval((AllocatableValue) registerHint); - Interval to = getOrCreateInterval((AllocatableValue) targetValue); - - /* hints always point from def to use */ - if (hintAtDef) { - to.setLocationHint(from); - } else { - from.setLocationHint(to); - } - Debug.log("operation at opId %d: added hint from interval %d to %d", op.id(), from.operandNumber, to.operandNumber); - - return registerHint; - } - return null; - }); - } - } - - void buildIntervals() { - - try (Indent indent = Debug.logAndIndent("build intervals")) { - InstructionValueConsumer outputConsumer = (op, operand, mode, flags) -> { - if (isVariableOrRegister(operand)) { - addDef((AllocatableValue) operand, op, registerPriorityOfOutputOperand(op), operand.getLIRKind()); - addRegisterHint(op, operand, mode, flags, true); - } - }; - - InstructionValueConsumer tempConsumer = (op, operand, mode, flags) -> { - if (isVariableOrRegister(operand)) { - addTemp((AllocatableValue) operand, op.id(), RegisterPriority.MustHaveRegister, operand.getLIRKind()); - addRegisterHint(op, operand, mode, flags, false); - } - }; - - InstructionValueConsumer aliveConsumer = (op, operand, mode, flags) -> { - if (isVariableOrRegister(operand)) { - RegisterPriority p = registerPriorityOfInputOperand(flags); - int opId = op.id(); - int blockFrom = getFirstLirInstructionId((blockForId(opId))); - addUse((AllocatableValue) operand, blockFrom, opId + 1, p, operand.getLIRKind()); - addRegisterHint(op, operand, mode, flags, false); - } - }; - - InstructionValueConsumer inputConsumer = (op, operand, mode, flags) -> { - if (isVariableOrRegister(operand)) { - int opId = op.id(); - int blockFrom = 
getFirstLirInstructionId((blockForId(opId))); - RegisterPriority p = registerPriorityOfInputOperand(flags); - addUse((AllocatableValue) operand, blockFrom, opId, p, operand.getLIRKind()); - addRegisterHint(op, operand, mode, flags, false); - } - }; - - InstructionValueConsumer stateProc = (op, operand, mode, flags) -> { - if (isVariableOrRegister(operand)) { - int opId = op.id(); - int blockFrom = getFirstLirInstructionId((blockForId(opId))); - addUse((AllocatableValue) operand, blockFrom, opId + 1, RegisterPriority.None, operand.getLIRKind()); - } - }; - - // create a list with all caller-save registers (cpu, fpu, xmm) - Register[] callerSaveRegs = frameMapBuilder.getRegisterConfig().getCallerSaveRegisters(); - - // iterate all blocks in reverse order - for (int i = blockCount() - 1; i >= 0; i--) { - - AbstractBlock block = blockAt(i); - try (Indent indent2 = Debug.logAndIndent("handle block %d", block.getId())) { - - List instructions = ir.getLIRforBlock(block); - final int blockFrom = getFirstLirInstructionId(block); - int blockTo = getLastLirInstructionId(block); - - assert blockFrom == instructions.get(0).id(); - assert blockTo == instructions.get(instructions.size() - 1).id(); - - // Update intervals for operands live at the end of this block; - BitSet live = blockData.get(block).liveOut; - for (int operandNum = live.nextSetBit(0); operandNum >= 0; operandNum = live.nextSetBit(operandNum + 1)) { - assert live.get(operandNum) : "should not stop here otherwise"; - AllocatableValue operand = intervalFor(operandNum).operand; - Debug.log("live in %d: %s", operandNum, operand); - - addUse(operand, blockFrom, blockTo + 2, RegisterPriority.None, LIRKind.Illegal); - - // add special use positions for loop-end blocks when the - // interval is used anywhere inside this loop. It's possible - // that the block was part of a non-natural loop, so it might - // have an invalid loop index. - if (block.isLoopEnd() && block.getLoop() != null && isIntervalInLoop(operandNum, block.getLoop().getIndex())) { - intervalFor(operandNum).addUsePos(blockTo + 1, RegisterPriority.LiveAtLoopEnd); - } - } - - // iterate all instructions of the block in reverse order. - // definitions of intervals are processed before uses - for (int j = instructions.size() - 1; j >= 0; j--) { - final LIRInstruction op = instructions.get(j); - final int opId = op.id(); - - try (Indent indent3 = Debug.logAndIndent("handle inst %d: %s", opId, op)) { - - // add a temp range for each register if operation destroys - // caller-save registers - if (op.destroysCallerSavedRegisters()) { - for (Register r : callerSaveRegs) { - if (attributes(r).isAllocatable()) { - addTemp(r.asValue(), opId, RegisterPriority.None, LIRKind.Illegal); - } - } - Debug.log("operation destroys all caller-save registers"); - } - - op.visitEachOutput(outputConsumer); - op.visitEachTemp(tempConsumer); - op.visitEachAlive(aliveConsumer); - op.visitEachInput(inputConsumer); - - // Add uses of live locals from interpreter's point of view for proper - // debug information generation - // Treat these operands as temp values (if the live range is extended - // to a call site, the value would be in a register at - // the call otherwise) - op.visitEachState(stateProc); - - // special steps for some instructions (especially moves) - handleMethodArguments(op); - - } - - } // end of instruction iteration - } - } // end of block iteration - - // add the range [0, 1] to all fixed intervals. 
- // the register allocator need not handle unhandled fixed intervals - for (Interval interval : intervals) { - if (interval != null && isRegister(interval.operand)) { - interval.addRange(0, 1); - } - } - } - } - - // * Phase 5: actual register allocation - - private static boolean isSorted(Interval[] intervals) { - int from = -1; - for (Interval interval : intervals) { - assert interval != null; - assert from <= interval.from(); - from = interval.from(); - } - return true; - } - - static Interval addToList(Interval first, Interval prev, Interval interval) { - Interval newFirst = first; - if (prev != null) { - prev.next = interval; - } else { - newFirst = interval; - } - return newFirst; - } - - Interval.Pair createUnhandledLists(IntervalPredicate isList1, IntervalPredicate isList2) { - assert isSorted(sortedIntervals) : "interval list is not sorted"; - - Interval list1 = Interval.EndMarker; - Interval list2 = Interval.EndMarker; - - Interval list1Prev = null; - Interval list2Prev = null; - Interval v; - - int n = sortedIntervals.length; - for (int i = 0; i < n; i++) { - v = sortedIntervals[i]; - if (v == null) { - continue; - } - - if (isList1.apply(v)) { - list1 = addToList(list1, list1Prev, v); - list1Prev = v; - } else if (isList2 == null || isList2.apply(v)) { - list2 = addToList(list2, list2Prev, v); - list2Prev = v; - } - } - - if (list1Prev != null) { - list1Prev.next = Interval.EndMarker; - } - if (list2Prev != null) { - list2Prev.next = Interval.EndMarker; - } - - assert list1Prev == null || list1Prev.next == Interval.EndMarker : "linear list ends not with sentinel"; - assert list2Prev == null || list2Prev.next == Interval.EndMarker : "linear list ends not with sentinel"; - - return new Interval.Pair(list1, list2); - } - - void sortIntervalsBeforeAllocation() { - int sortedLen = 0; - for (Interval interval : intervals) { - if (interval != null) { - sortedLen++; - } - } - - Interval[] sortedList = new Interval[sortedLen]; - int sortedIdx = 0; - int sortedFromMax = -1; - - // special sorting algorithm: the original interval-list is almost sorted, - // only some intervals are swapped. 
So this is much faster than a complete QuickSort - for (Interval interval : intervals) { - if (interval != null) { - int from = interval.from(); - - if (sortedFromMax <= from) { - sortedList[sortedIdx++] = interval; - sortedFromMax = interval.from(); - } else { - // the assumption that the intervals are already sorted failed, - // so this interval must be sorted in manually - int j; - for (j = sortedIdx - 1; j >= 0 && from < sortedList[j].from(); j--) { - sortedList[j + 1] = sortedList[j]; - } - sortedList[j + 1] = interval; - sortedIdx++; - } - } - } - sortedIntervals = sortedList; - } - - void sortIntervalsAfterAllocation() { - if (firstDerivedIntervalIndex == -1) { - // no intervals have been added during allocation, so sorted list is already up to date - return; - } - - Interval[] oldList = sortedIntervals; - Interval[] newList = Arrays.copyOfRange(intervals, firstDerivedIntervalIndex, intervalsSize); - int oldLen = oldList.length; - int newLen = newList.length; - - // conventional sort-algorithm for new intervals - Arrays.sort(newList, (Interval a, Interval b) -> a.from() - b.from()); - - // merge old and new list (both already sorted) into one combined list - Interval[] combinedList = new Interval[oldLen + newLen]; - int oldIdx = 0; - int newIdx = 0; - - while (oldIdx + newIdx < combinedList.length) { - if (newIdx >= newLen || (oldIdx < oldLen && oldList[oldIdx].from() <= newList[newIdx].from())) { - combinedList[oldIdx + newIdx] = oldList[oldIdx]; - oldIdx++; - } else { - combinedList[oldIdx + newIdx] = newList[newIdx]; - newIdx++; - } - } - - sortedIntervals = combinedList; - } - - public void allocateRegisters() { - try (Indent indent = Debug.logAndIndent("allocate registers")) { - Interval precoloredIntervals; - Interval notPrecoloredIntervals; - - Interval.Pair result = createUnhandledLists(IS_PRECOLORED_INTERVAL, IS_VARIABLE_INTERVAL); - precoloredIntervals = result.first; - notPrecoloredIntervals = result.second; - - // allocate cpu registers - LinearScanWalker lsw; - if (OptimizingLinearScanWalker.Options.LSRAOptimization.getValue()) { - lsw = new OptimizingLinearScanWalker(this, precoloredIntervals, notPrecoloredIntervals); - } else { - lsw = new LinearScanWalker(this, precoloredIntervals, notPrecoloredIntervals); - } - lsw.walk(); - lsw.finishAllocation(); - } - } - - // * Phase 6: resolve data flow - // (insert moves at edges between blocks if intervals have been split) - - // wrapper for Interval.splitChildAtOpId that performs a bailout in product mode - // instead of returning null - Interval splitChildAtOpId(Interval interval, int opId, LIRInstruction.OperandMode mode) { - Interval result = interval.getSplitChildAtOpId(opId, mode, this); - - if (result != null) { - Debug.log("Split child at pos %d of interval %s is %s", opId, interval, result); - return result; - } - - throw new BailoutException("LinearScan: interval is null"); - } - - Interval intervalAtBlockBegin(AbstractBlock block, int operandNumber) { - return splitChildAtOpId(intervalFor(operandNumber), getFirstLirInstructionId(block), LIRInstruction.OperandMode.DEF); - } - - Interval intervalAtBlockEnd(AbstractBlock block, int operandNumber) { - return splitChildAtOpId(intervalFor(operandNumber), getLastLirInstructionId(block) + 1, LIRInstruction.OperandMode.DEF); - } - - void resolveCollectMappings(AbstractBlock fromBlock, AbstractBlock toBlock, MoveResolver moveResolver) { - assert moveResolver.checkEmpty(); - - int numOperands = operandSize(); - BitSet liveAtEdge = blockData.get(toBlock).liveIn; - - // visit 
all variables for which the liveAtEdge bit is set - for (int operandNum = liveAtEdge.nextSetBit(0); operandNum >= 0; operandNum = liveAtEdge.nextSetBit(operandNum + 1)) { - assert operandNum < numOperands : "live information set for non-existing interval"; - assert blockData.get(fromBlock).liveOut.get(operandNum) && blockData.get(toBlock).liveIn.get(operandNum) : "interval not live at this edge"; - - Interval fromInterval = intervalAtBlockEnd(fromBlock, operandNum); - Interval toInterval = intervalAtBlockBegin(toBlock, operandNum); - - if (fromInterval != toInterval && !fromInterval.location().equals(toInterval.location())) { - // need to insert move instruction - moveResolver.addMapping(fromInterval, toInterval); - } - } - } - - void resolveFindInsertPos(AbstractBlock fromBlock, AbstractBlock toBlock, MoveResolver moveResolver) { - if (fromBlock.getSuccessorCount() <= 1) { - Debug.log("inserting moves at end of fromBlock B%d", fromBlock.getId()); - - List instructions = ir.getLIRforBlock(fromBlock); - LIRInstruction instr = instructions.get(instructions.size() - 1); - if (instr instanceof StandardOp.JumpOp) { - // insert moves before branch - moveResolver.setInsertPosition(instructions, instructions.size() - 1); - } else { - moveResolver.setInsertPosition(instructions, instructions.size()); - } - - } else { - Debug.log("inserting moves at beginning of toBlock B%d", toBlock.getId()); - - if (DetailedAsserts.getValue()) { - assert ir.getLIRforBlock(fromBlock).get(0) instanceof StandardOp.LabelOp : "block does not start with a label"; - - // because the number of predecessor edges matches the number of - // successor edges, blocks which are reached by switch statements - // may have more than one predecessor, but it is guaranteed - // that all predecessors will be the same. - for (AbstractBlock predecessor : toBlock.getPredecessors()) { - assert fromBlock == predecessor : "all critical edges must be broken"; - } - } - - moveResolver.setInsertPosition(ir.getLIRforBlock(toBlock), 1); - } - } - - /** - * Inserts necessary moves (spilling or reloading) at edges between blocks for intervals that - * have been split.
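 *
 * Editor's illustration (hypothetical locations, not from the original file): if an interval is
 * in rax at the end of block B2 but its split child covering block B4 lives in stack:16, the
 * edge B2 -> B4 needs the resolving move
 *
 *     stack:16 = rax
 *
 * which resolveFindInsertPos places either before the jump at the end of B2 or after the label
 * of B4.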
- */ - void resolveDataFlow() { - try (Indent indent = Debug.logAndIndent("resolve data flow")) { - - int numBlocks = blockCount(); - MoveResolver moveResolver = new MoveResolver(this); - BitSet blockCompleted = new BitSet(numBlocks); - BitSet alreadyResolved = new BitSet(numBlocks); - - for (AbstractBlock block : sortedBlocks) { - - // check if block has only one predecessor and only one successor - if (block.getPredecessorCount() == 1 && block.getSuccessorCount() == 1) { - List instructions = ir.getLIRforBlock(block); - assert instructions.get(0) instanceof StandardOp.LabelOp : "block must start with label"; - assert instructions.get(instructions.size() - 1) instanceof StandardOp.JumpOp : "block with successor must end with unconditional jump"; - - // check if block is empty (only label and branch) - if (instructions.size() == 2) { - AbstractBlock pred = block.getPredecessors().iterator().next(); - AbstractBlock sux = block.getSuccessors().iterator().next(); - - // prevent optimization of two consecutive blocks - if (!blockCompleted.get(pred.getLinearScanNumber()) && !blockCompleted.get(sux.getLinearScanNumber())) { - Debug.log(" optimizing empty block B%d (pred: B%d, sux: B%d)", block.getId(), pred.getId(), sux.getId()); - - blockCompleted.set(block.getLinearScanNumber()); - - // directly resolve between pred and sux (without looking - // at the empty block - // between) - resolveCollectMappings(pred, sux, moveResolver); - if (moveResolver.hasMappings()) { - moveResolver.setInsertPosition(instructions, 1); - moveResolver.resolveAndAppendMoves(); - } - } - } - } - } - - for (AbstractBlock fromBlock : sortedBlocks) { - if (!blockCompleted.get(fromBlock.getLinearScanNumber())) { - alreadyResolved.clear(); - alreadyResolved.or(blockCompleted); - - for (AbstractBlock toBlock : fromBlock.getSuccessors()) { - - // check for duplicate edges between the same blocks (can happen with switch - // blocks) - if (!alreadyResolved.get(toBlock.getLinearScanNumber())) { - Debug.log("processing edge between B%d and B%d", fromBlock.getId(), toBlock.getId()); - - alreadyResolved.set(toBlock.getLinearScanNumber()); - - // collect all intervals that have been split between - // fromBlock and toBlock - resolveCollectMappings(fromBlock, toBlock, moveResolver); - if (moveResolver.hasMappings()) { - resolveFindInsertPos(fromBlock, toBlock, moveResolver); - moveResolver.resolveAndAppendMoves(); - } - } - } - } - } - } - } - - // * Phase 7: assign register numbers back to LIR - // (includes computation of debug information and oop maps) - - static StackSlotValue canonicalSpillOpr(Interval interval) { - assert interval.spillSlot() != null : "canonical spill slot not set"; - return interval.spillSlot(); - } - - /** - * Assigns the allocated location for an LIR instruction operand back into the instruction. - * - * @param operand an LIR instruction operand - * @param opId the id of the LIR instruction using {@code operand} - * @param mode the usage mode for {@code operand} by the instruction - * @return the location assigned for the operand - */ - private Value colorLirOperand(Variable operand, int opId, OperandMode mode) { - Interval interval = intervalFor(operand); - assert interval != null : "interval must exist"; - - if (opId != -1) { - if (DetailedAsserts.getValue()) { - AbstractBlock block = blockForId(opId); - if (block.getSuccessorCount() <= 1 && opId == getLastLirInstructionId(block)) { - // check if spill moves could have been appended at the end of this block, but - // before the branch instruction. 
So the split child information for this branch - // would - // be incorrect. - LIRInstruction instr = ir.getLIRforBlock(block).get(ir.getLIRforBlock(block).size() - 1); - if (instr instanceof StandardOp.JumpOp) { - if (blockData.get(block).liveOut.get(operandNumber(operand))) { - assert false : "can't get split child for the last branch of a block because the information would be incorrect (moves are inserted before the branch in resolveDataFlow)"; - } - } - } - } - - // operands are not changed when an interval is split during allocation, - // so search the right interval here - interval = splitChildAtOpId(interval, opId, mode); - } - - if (isIllegal(interval.location()) && interval.canMaterialize()) { - assert mode != OperandMode.DEF; - return interval.getMaterializedValue(); - } - return interval.location(); - } - - private boolean isMaterialized(AllocatableValue operand, int opId, OperandMode mode) { - Interval interval = intervalFor(operand); - assert interval != null : "interval must exist"; - - if (opId != -1) { - // operands are not changed when an interval is split during allocation, - // so search the right interval here - interval = splitChildAtOpId(interval, opId, mode); - } - - return isIllegal(interval.location()) && interval.canMaterialize(); - } - - protected IntervalWalker initIntervalWalker(IntervalPredicate predicate) { - // setup lists of potential oops for walking - Interval oopIntervals; - Interval nonOopIntervals; - - oopIntervals = createUnhandledLists(predicate, null).first; - - // intervals that have no oops inside need not to be processed. - // to ensure a walking until the last instruction id, add a dummy interval - // with a high operation id - nonOopIntervals = new Interval(Value.ILLEGAL, -1); - nonOopIntervals.addRange(Integer.MAX_VALUE - 2, Integer.MAX_VALUE - 1); - - return new IntervalWalker(this, oopIntervals, nonOopIntervals); - } - - private boolean isCallerSave(Value operand) { - return attributes(asRegister(operand)).isCallerSave(); - } - - /** - * @param op - * @param operand - * @param valueMode - * @param flags - * @see InstructionValueProcedure#doValue(LIRInstruction, Value, OperandMode, EnumSet) - */ - private Value debugInfoProcedure(LIRInstruction op, Value operand, OperandMode valueMode, EnumSet flags) { - if (isVirtualStackSlot(operand)) { - return operand; - } - int tempOpId = op.id(); - OperandMode mode = OperandMode.USE; - AbstractBlock block = blockForId(tempOpId); - if (block.getSuccessorCount() == 1 && tempOpId == getLastLirInstructionId(block)) { - // generating debug information for the last instruction of a block. - // if this instruction is a branch, spill moves are inserted before this branch - // and so the wrong operand would be returned (spill moves at block boundaries - // are not - // considered in the live ranges of intervals) - // Solution: use the first opId of the branch target block instead. 
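        /*
         * Editor's illustration (hypothetical ids, not part of the original file): if block B1
         * ends with a jump at opId 30 and resolveDataFlow later inserts "stack:8 = rax" in front
         * of that jump, the interval data still reports rax at opId 30; querying at the first
         * opId of the successor block instead yields the stack slot that is actually valid
         * across the edge.
         */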
- final LIRInstruction instr = ir.getLIRforBlock(block).get(ir.getLIRforBlock(block).size() - 1); - if (instr instanceof StandardOp.JumpOp) { - if (blockData.get(block).liveOut.get(operandNumber(operand))) { - tempOpId = getFirstLirInstructionId(block.getSuccessors().iterator().next()); - mode = OperandMode.DEF; - } - } - } - - // Get current location of operand - // The operand must be live because debug information is considered when building - // the intervals - // if the interval is not live, colorLirOperand will cause an assert on failure - Value result = colorLirOperand((Variable) operand, tempOpId, mode); - assert !hasCall(tempOpId) || isStackSlotValue(result) || isConstant(result) || !isCallerSave(result) : "cannot have caller-save register operands at calls"; - return result; - } - - private void computeDebugInfo(final LIRInstruction op, LIRFrameState info) { - info.forEachState(op, this::debugInfoProcedure); - } - - private void assignLocations(List instructions) { - int numInst = instructions.size(); - boolean hasDead = false; - - InstructionValueProcedure assignProc = (op, operand, mode, flags) -> isVariable(operand) ? colorLirOperand((Variable) operand, op.id(), mode) : operand; - for (int j = 0; j < numInst; j++) { - final LIRInstruction op = instructions.get(j); - if (op == null) { // this can happen when spill-moves are removed in eliminateSpillMoves - hasDead = true; - continue; - } - - // remove useless moves - MoveOp move = null; - if (op instanceof MoveOp) { - move = (MoveOp) op; - AllocatableValue result = move.getResult(); - if (isVariable(result) && isMaterialized(result, op.id(), OperandMode.DEF)) { - /* - * This happens if a materializable interval is originally not spilled but then - * kicked out in LinearScanWalker.splitForSpilling(). When kicking out such an - * interval this move operation was already generated. - */ - instructions.set(j, null); - hasDead = true; - continue; - } - } - - op.forEachInput(assignProc); - op.forEachAlive(assignProc); - op.forEachTemp(assignProc); - op.forEachOutput(assignProc); - - // compute reference map and debug information - op.forEachState((inst, state) -> computeDebugInfo(inst, state)); - - // remove useless moves - if (move != null) { - if (move.getInput().equals(move.getResult())) { - instructions.set(j, null); - hasDead = true; - } - } - } - - if (hasDead) { - // Remove null values from the list. - instructions.removeAll(Collections.singleton(null)); - } - } - - private void assignLocations() { - try (Indent indent = Debug.logAndIndent("assign locations")) { - for (AbstractBlock block : sortedBlocks) { - try (Indent indent2 = Debug.logAndIndent("assign locations in block B%d", block.getId())) { - assignLocations(ir.getLIRforBlock(block)); - } - } - } - } - - public static void allocate(TargetDescription target, LIRGenerationResult res) { - new LinearScan(target, res).allocate(); - } - - private void allocate() { - - /* - * This is the point to enable debug logging for the whole register allocation. 
- */ - try (Indent indent = Debug.logAndIndent("LinearScan allocate")) { - - try (Scope s = Debug.scope("LifetimeAnalysis")) { - numberInstructions(); - printLir("Before register allocation", true); - computeLocalLiveSets(); - computeGlobalLiveSets(); - buildIntervals(); - sortIntervalsBeforeAllocation(); - } catch (Throwable e) { - throw Debug.handle(e); - } - - try (Scope s = Debug.scope("RegisterAllocation")) { - printIntervals("Before register allocation"); - allocateRegisters(); - } catch (Throwable e) { - throw Debug.handle(e); - } - - if (Options.LSRAOptimizeSpillPosition.getValue()) { - try (Scope s = Debug.scope("OptimizeSpillPosition")) { - optimizeSpillPosition(); - } catch (Throwable e) { - throw Debug.handle(e); - } - } - - try (Scope s = Debug.scope("ResolveDataFlow")) { - resolveDataFlow(); - } catch (Throwable e) { - throw Debug.handle(e); - } - - try (Scope s = Debug.scope("DebugInfo")) { - printIntervals("After register allocation"); - printLir("After register allocation", true); - - sortIntervalsAfterAllocation(); - - if (DetailedAsserts.getValue()) { - verify(); - } - - try (Scope s1 = Debug.scope("EliminateSpillMove")) { - eliminateSpillMoves(); - } catch (Throwable e) { - throw Debug.handle(e); - } - printLir("After spill move elimination", true); - - try (Scope s1 = Debug.scope("AssignLocations")) { - assignLocations(); - } catch (Throwable e) { - throw Debug.handle(e); - } - - if (DetailedAsserts.getValue()) { - verifyIntervals(); - } - } catch (Throwable e) { - throw Debug.handle(e); - } - - printLir("After register number assignment", true); - } - } - - private DebugMetric betterSpillPos = Debug.metric("BetterSpillPosition"); - private DebugMetric betterSpillPosWithLowerProbability = Debug.metric("BetterSpillPositionWithLowerProbability"); - - private void optimizeSpillPosition() { - LIRInsertionBuffer[] insertionBuffers = new LIRInsertionBuffer[ir.linearScanOrder().size()]; - for (Interval interval : intervals) { - if (interval != null && interval.isSplitParent() && interval.spillState() == SpillState.SpillInDominator) { - AbstractBlock defBlock = blockForId(interval.spillDefinitionPos()); - AbstractBlock spillBlock = null; - Interval firstSpillChild = null; - try (Indent indent = Debug.logAndIndent("interval %s (%s)", interval, defBlock)) { - for (Interval splitChild : interval.getSplitChildren()) { - if (isStackSlotValue(splitChild.location())) { - if (firstSpillChild == null || splitChild.from() < firstSpillChild.from()) { - firstSpillChild = splitChild; - } else { - assert firstSpillChild.from() < splitChild.from(); - } - // iterate all blocks where the interval has use positions - for (AbstractBlock splitBlock : blocksForInterval(splitChild)) { - if (dominates(defBlock, splitBlock)) { - Debug.log("Split interval %s, block %s", splitChild, splitBlock); - if (spillBlock == null) { - spillBlock = splitBlock; - } else { - spillBlock = commonDominator(spillBlock, splitBlock); - assert spillBlock != null; - } - } - } - } - } - if (spillBlock == null) { - // no spill interval - interval.setSpillState(SpillState.StoreAtDefinition); - } else { - // move out of loops - if (defBlock.getLoopDepth() < spillBlock.getLoopDepth()) { - spillBlock = moveSpillOutOfLoop(defBlock, spillBlock); - } - - /* - * If the spill block is the begin of the first split child (aka the value - * is on the stack) spill in the dominator. 
- */ - assert firstSpillChild != null; - if (!defBlock.equals(spillBlock) && spillBlock.equals(blockForId(firstSpillChild.from()))) { - AbstractBlock dom = spillBlock.getDominator(); - Debug.log("Spill block (%s) is the beginning of a spill child -> use dominator (%s)", spillBlock, dom); - spillBlock = dom; - } - - if (!defBlock.equals(spillBlock)) { - assert dominates(defBlock, spillBlock); - betterSpillPos.increment(); - Debug.log("Better spill position found (Block %s)", spillBlock); - - if (defBlock.probability() <= spillBlock.probability()) { - // better spill block has the same probability -> do nothing - interval.setSpillState(SpillState.StoreAtDefinition); - } else { - LIRInsertionBuffer insertionBuffer = insertionBuffers[spillBlock.getId()]; - if (insertionBuffer == null) { - insertionBuffer = new LIRInsertionBuffer(); - insertionBuffers[spillBlock.getId()] = insertionBuffer; - insertionBuffer.init(ir.getLIRforBlock(spillBlock)); - } - int spillOpId = getFirstLirInstructionId(spillBlock); - // insert spill move - AllocatableValue fromLocation = interval.getSplitChildAtOpId(spillOpId, OperandMode.DEF, this).location(); - AllocatableValue toLocation = canonicalSpillOpr(interval); - LIRInstruction move = ir.getSpillMoveFactory().createMove(toLocation, fromLocation); - move.setId(DOMINATOR_SPILL_MOVE_ID); - /* - * We can use the insertion buffer directly because we always insert - * at position 1. - */ - insertionBuffer.append(1, move); - - betterSpillPosWithLowerProbability.increment(); - interval.setSpillDefinitionPos(spillOpId); - } - } else { - // definition is the best choice - interval.setSpillState(SpillState.StoreAtDefinition); - } - } - } - } - } - for (LIRInsertionBuffer insertionBuffer : insertionBuffers) { - if (insertionBuffer != null) { - assert insertionBuffer.initialized() : "Insertion buffer is nonnull but not initialized!"; - insertionBuffer.finish(); - } - } - } - - /** - * Iterate over all {@link AbstractBlock blocks} of an interval. 
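 *
 * Editor's note: used through blocksForInterval below, e.g. (as in optimizeSpillPosition above)
 *
 *     for (AbstractBlock block : blocksForInterval(interval)) {
 *         // visits every block covered by one of the interval's ranges, in linear-scan order
 *     }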
- */ - private class IntervalBlockIterator implements Iterator> { - - Range range; - AbstractBlock block; - - public IntervalBlockIterator(Interval interval) { - range = interval.first(); - block = blockForId(range.from); - } - - public AbstractBlock next() { - AbstractBlock currentBlock = block; - int nextBlockIndex = block.getLinearScanNumber() + 1; - if (nextBlockIndex < sortedBlocks.size()) { - block = sortedBlocks.get(nextBlockIndex); - if (range.to <= getFirstLirInstructionId(block)) { - range = range.next; - if (range == Range.EndMarker) { - block = null; - } else { - block = blockForId(range.from); - } - } - } else { - block = null; - } - return currentBlock; - } - - public boolean hasNext() { - return block != null; - } - } - - private Iterable> blocksForInterval(Interval interval) { - return new Iterable>() { - public Iterator> iterator() { - return new IntervalBlockIterator(interval); - } - }; - } - - private static AbstractBlock moveSpillOutOfLoop(AbstractBlock defBlock, AbstractBlock spillBlock) { - int defLoopDepth = defBlock.getLoopDepth(); - for (AbstractBlock block = spillBlock.getDominator(); !defBlock.equals(block); block = block.getDominator()) { - assert block != null : "spill block not dominated by definition block?"; - if (block.getLoopDepth() <= defLoopDepth) { - assert block.getLoopDepth() == defLoopDepth : "Cannot spill an interval outside of the loop where it is defined!"; - return block; - } - } - return defBlock; - } - - void printIntervals(String label) { - if (Debug.isLogEnabled()) { - try (Indent indent = Debug.logAndIndent("intervals %s", label)) { - for (Interval interval : intervals) { - if (interval != null) { - Debug.log("%s", interval.logString(this)); - } - } - - try (Indent indent2 = Debug.logAndIndent("Basic Blocks")) { - for (int i = 0; i < blockCount(); i++) { - AbstractBlock block = blockAt(i); - Debug.log("B%d [%d, %d, %s] ", block.getId(), getFirstLirInstructionId(block), getLastLirInstructionId(block), block.getLoop()); - } - } - } - } - Debug.dump(Arrays.copyOf(intervals, intervalsSize), label); - } - - void printLir(String label, @SuppressWarnings("unused") boolean hirValid) { - Debug.dump(ir, label); - } - - boolean verify() { - // (check that all intervals have a correct register and that no registers are overwritten) - verifyIntervals(); - - verifyRegisters(); - - Debug.log("no errors found"); - - return true; - } - - private void verifyRegisters() { - // Enable this logging to get output for the verification process. 
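        /*
         * Editor's sketch (assumption: the same Debug API that reportFailure above uses): the
         * verification below could be wrapped in a forced-log scope to obtain its output even
         * when logging is otherwise disabled, e.g.
         *
         *     try (Scope s = Debug.forceLog()) {
         *         // run the register verifier with logging forced on
         *     }
         */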
- try (Indent indent = Debug.logAndIndent("verifying register allocation")) { - RegisterVerifier verifier = new RegisterVerifier(this); - verifier.verify(blockAt(0)); - } - } - - void verifyIntervals() { - try (Indent indent = Debug.logAndIndent("verifying intervals")) { - int len = intervalsSize; - - for (int i = 0; i < len; i++) { - Interval i1 = intervals[i]; - if (i1 == null) { - continue; - } - - i1.checkSplitChildren(); - - if (i1.operandNumber != i) { - Debug.log("Interval %d is on position %d in list", i1.operandNumber, i); - Debug.log(i1.logString(this)); - throw new GraalInternalError(""); - } - - if (isVariable(i1.operand) && i1.kind().equals(LIRKind.Illegal)) { - Debug.log("Interval %d has no type assigned", i1.operandNumber); - Debug.log(i1.logString(this)); - throw new GraalInternalError(""); - } - - if (i1.location() == null) { - Debug.log("Interval %d has no register assigned", i1.operandNumber); - Debug.log(i1.logString(this)); - throw new GraalInternalError(""); - } - - if (i1.first() == Range.EndMarker) { - Debug.log("Interval %d has no Range", i1.operandNumber); - Debug.log(i1.logString(this)); - throw new GraalInternalError(""); - } - - for (Range r = i1.first(); r != Range.EndMarker; r = r.next) { - if (r.from >= r.to) { - Debug.log("Interval %d has zero length range", i1.operandNumber); - Debug.log(i1.logString(this)); - throw new GraalInternalError(""); - } - } - - for (int j = i + 1; j < len; j++) { - Interval i2 = intervals[j]; - if (i2 == null) { - continue; - } - - // special intervals that are created in MoveResolver - // . ignore them because the range information has no meaning there - if (i1.from() == 1 && i1.to() == 2) { - continue; - } - if (i2.from() == 1 && i2.to() == 2) { - continue; - } - Value l1 = i1.location(); - Value l2 = i2.location(); - if (i1.intersects(i2) && !isIllegal(l1) && (l1.equals(l2))) { - if (DetailedAsserts.getValue()) { - Debug.log("Intervals %d and %d overlap and have the same register assigned", i1.operandNumber, i2.operandNumber); - Debug.log(i1.logString(this)); - Debug.log(i2.logString(this)); - } - throw new BailoutException(""); - } - } - } - } - } - - class CheckConsumer implements ValueConsumer { - - boolean ok; - Interval curInterval; - - @Override - public void visitValue(Value operand, OperandMode mode, EnumSet flags) { - if (isRegister(operand)) { - if (intervalFor(operand) == curInterval) { - ok = true; - } - } - } - } - - void verifyNoOopsInFixedIntervals() { - try (Indent indent = Debug.logAndIndent("verifying that no oops are in fixed intervals *")) { - CheckConsumer checkConsumer = new CheckConsumer(); - - Interval fixedIntervals; - Interval otherIntervals; - fixedIntervals = createUnhandledLists(IS_PRECOLORED_INTERVAL, null).first; - // to ensure a walking until the last instruction id, add a dummy interval - // with a high operation id - otherIntervals = new Interval(Value.ILLEGAL, -1); - otherIntervals.addRange(Integer.MAX_VALUE - 2, Integer.MAX_VALUE - 1); - IntervalWalker iw = new IntervalWalker(this, fixedIntervals, otherIntervals); - - for (AbstractBlock block : sortedBlocks) { - List instructions = ir.getLIRforBlock(block); - - for (int j = 0; j < instructions.size(); j++) { - LIRInstruction op = instructions.get(j); - - if (op.hasState()) { - iw.walkBefore(op.id()); - boolean checkLive = true; - - // Make sure none of the fixed registers is live across an - // oopmap since we can't handle that correctly. 
- if (checkLive) { - for (Interval interval = iw.activeLists.get(RegisterBinding.Fixed); interval != Interval.EndMarker; interval = interval.next) { - if (interval.currentTo() > op.id() + 1) { - // This interval is live out of this op so make sure - // that this interval represents some value that's - // referenced by this op either as an input or output. - checkConsumer.curInterval = interval; - checkConsumer.ok = false; - - op.visitEachInput(checkConsumer); - op.visitEachAlive(checkConsumer); - op.visitEachTemp(checkConsumer); - op.visitEachOutput(checkConsumer); - - assert checkConsumer.ok : "fixed intervals should never be live across an oopmap point"; - } - } - } - } - } - } - } - } - - /** - * Returns a value for a interval definition, which can be used for re-materialization. - * - * @param op An instruction which defines a value - * @param operand The destination operand of the instruction - * @param interval The interval for this defined value. - * @return Returns the value which is moved to the instruction and which can be reused at all - * reload-locations in case the interval of this instruction is spilled. Currently this - * can only be a {@link JavaConstant}. - */ - public static JavaConstant getMaterializedValue(LIRInstruction op, Value operand, Interval interval) { - if (op instanceof MoveOp) { - MoveOp move = (MoveOp) op; - if (move.getInput() instanceof JavaConstant) { - /* - * Check if the interval has any uses which would accept an stack location (priority - * == ShouldHaveRegister). Rematerialization of such intervals can result in a - * degradation, because rematerialization always inserts a constant load, even if - * the value is not needed in a register. - */ - Interval.UsePosList usePosList = interval.usePosList(); - int numUsePos = usePosList.size(); - for (int useIdx = 0; useIdx < numUsePos; useIdx++) { - Interval.RegisterPriority priority = usePosList.registerPriority(useIdx); - if (priority == Interval.RegisterPriority.ShouldHaveRegister) { - return null; - } - } - return (JavaConstant) move.getInput(); - } - } - return null; - } -} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScanWalker.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScanWalker.java Fri Feb 06 12:17:20 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,903 +0,0 @@ -/* - * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ -package com.oracle.graal.compiler.alloc; - -import static com.oracle.graal.api.code.CodeUtil.*; -import static com.oracle.graal.api.code.ValueUtil.*; -import static com.oracle.graal.lir.LIRValueUtil.*; - -import java.util.*; - -import com.oracle.graal.api.code.*; -import com.oracle.graal.api.meta.*; -import com.oracle.graal.compiler.alloc.Interval.RegisterBinding; -import com.oracle.graal.compiler.alloc.Interval.RegisterPriority; -import com.oracle.graal.compiler.alloc.Interval.SpillState; -import com.oracle.graal.compiler.alloc.Interval.State; -import com.oracle.graal.compiler.common.cfg.*; -import com.oracle.graal.debug.*; -import com.oracle.graal.lir.*; -import com.oracle.graal.lir.StandardOp.MoveOp; -import com.oracle.graal.phases.util.*; - -/** - */ -class LinearScanWalker extends IntervalWalker { - - protected Register[] availableRegs; - - protected final int[] usePos; - protected final int[] blockPos; - - protected List[] spillIntervals; - - private MoveResolver moveResolver; // for ordering spill moves - - /** - * Only 10% of the lists in {@link #spillIntervals} are actually used. But when they are used, - * they can grow quite long. The maximum length observed was 45 (all numbers taken from a - * bootstrap run of Graal). Therefore, we initialize {@link #spillIntervals} with this marker - * value, and allocate a "real" list only on demand in {@link #setUsePos}. - */ - private static final List EMPTY_LIST = new ArrayList<>(0); - - // accessors mapped to same functions in class LinearScan - int blockCount() { - return allocator.blockCount(); - } - - AbstractBlock blockAt(int idx) { - return allocator.blockAt(idx); - } - - AbstractBlock blockOfOpWithId(int opId) { - return allocator.blockForId(opId); - } - - LinearScanWalker(LinearScan allocator, Interval unhandledFixedFirst, Interval unhandledAnyFirst) { - super(allocator, unhandledFixedFirst, unhandledAnyFirst); - - moveResolver = new MoveResolver(allocator); - spillIntervals = Util.uncheckedCast(new List[allocator.registers.length]); - for (int i = 0; i < allocator.registers.length; i++) { - spillIntervals[i] = EMPTY_LIST; - } - usePos = new int[allocator.registers.length]; - blockPos = new int[allocator.registers.length]; - } - - void initUseLists(boolean onlyProcessUsePos) { - for (Register register : availableRegs) { - int i = register.number; - usePos[i] = Integer.MAX_VALUE; - - if (!onlyProcessUsePos) { - blockPos[i] = Integer.MAX_VALUE; - spillIntervals[i].clear(); - } - } - } - - void excludeFromUse(Interval i) { - Value location = i.location(); - int i1 = asRegister(location).number; - if (i1 >= availableRegs[0].number && i1 <= availableRegs[availableRegs.length - 1].number) { - usePos[i1] = 0; - } - } - - void setUsePos(Interval interval, int usePos, boolean onlyProcessUsePos) { - if (usePos != -1) { - assert usePos != 0 : "must use excludeFromUse to set usePos to 0"; - int i = asRegister(interval.location()).number; - if (i >= availableRegs[0].number && i <= availableRegs[availableRegs.length - 1].number) { - if (this.usePos[i] > usePos) { - this.usePos[i] = usePos; - } - if (!onlyProcessUsePos) { - List list = spillIntervals[i]; - if (list == EMPTY_LIST) { - list = new ArrayList<>(2); - spillIntervals[i] = list; - } - list.add(interval); - } - } - } - } - - void setBlockPos(Interval i, int blockPos) { - if (blockPos != -1) { - int reg = asRegister(i.location()).number; - if (reg >= availableRegs[0].number && reg <= availableRegs[availableRegs.length - 1].number) { - if (this.blockPos[reg] > blockPos) { - 
this.blockPos[reg] = blockPos; - } - if (usePos[reg] > blockPos) { - usePos[reg] = blockPos; - } - } - } - } - - void freeExcludeActiveFixed() { - Interval interval = activeLists.get(RegisterBinding.Fixed); - while (interval != Interval.EndMarker) { - assert isRegister(interval.location()) : "active interval must have a register assigned"; - excludeFromUse(interval); - interval = interval.next; - } - } - - void freeExcludeActiveAny() { - Interval interval = activeLists.get(RegisterBinding.Any); - while (interval != Interval.EndMarker) { - assert isRegister(interval.location()) : "active interval must have a register assigned"; - excludeFromUse(interval); - interval = interval.next; - } - } - - void freeCollectInactiveFixed(Interval current) { - Interval interval = inactiveLists.get(RegisterBinding.Fixed); - while (interval != Interval.EndMarker) { - if (current.to() <= interval.currentFrom()) { - assert interval.currentIntersectsAt(current) == -1 : "must not intersect"; - setUsePos(interval, interval.currentFrom(), true); - } else { - setUsePos(interval, interval.currentIntersectsAt(current), true); - } - interval = interval.next; - } - } - - void freeCollectInactiveAny(Interval current) { - Interval interval = inactiveLists.get(RegisterBinding.Any); - while (interval != Interval.EndMarker) { - setUsePos(interval, interval.currentIntersectsAt(current), true); - interval = interval.next; - } - } - - void freeCollectUnhandled(RegisterBinding kind, Interval current) { - Interval interval = unhandledLists.get(kind); - while (interval != Interval.EndMarker) { - setUsePos(interval, interval.intersectsAt(current), true); - if (kind == RegisterBinding.Fixed && current.to() <= interval.from()) { - setUsePos(interval, interval.from(), true); - } - interval = interval.next; - } - } - - void spillExcludeActiveFixed() { - Interval interval = activeLists.get(RegisterBinding.Fixed); - while (interval != Interval.EndMarker) { - excludeFromUse(interval); - interval = interval.next; - } - } - - void spillBlockUnhandledFixed(Interval current) { - Interval interval = unhandledLists.get(RegisterBinding.Fixed); - while (interval != Interval.EndMarker) { - setBlockPos(interval, interval.intersectsAt(current)); - interval = interval.next; - } - } - - void spillBlockInactiveFixed(Interval current) { - Interval interval = inactiveLists.get(RegisterBinding.Fixed); - while (interval != Interval.EndMarker) { - if (current.to() > interval.currentFrom()) { - setBlockPos(interval, interval.currentIntersectsAt(current)); - } else { - assert interval.currentIntersectsAt(current) == -1 : "invalid optimization: intervals intersect"; - } - - interval = interval.next; - } - } - - void spillCollectActiveAny() { - Interval interval = activeLists.get(RegisterBinding.Any); - while (interval != Interval.EndMarker) { - setUsePos(interval, Math.min(interval.nextUsage(RegisterPriority.LiveAtLoopEnd, currentPosition), interval.to()), false); - interval = interval.next; - } - } - - void spillCollectInactiveAny(Interval current) { - Interval interval = inactiveLists.get(RegisterBinding.Any); - while (interval != Interval.EndMarker) { - if (interval.currentIntersects(current)) { - setUsePos(interval, Math.min(interval.nextUsage(RegisterPriority.LiveAtLoopEnd, currentPosition), interval.to()), false); - } - interval = interval.next; - } - } - - void insertMove(int operandId, Interval srcIt, Interval dstIt) { - // output all moves here. 
When source and target are equal, the move is - // optimized away later in assignRegNums - - int opId = (operandId + 1) & ~1; - AbstractBlock opBlock = allocator.blockForId(opId); - assert opId > 0 && allocator.blockForId(opId - 2) == opBlock : "cannot insert move at block boundary"; - - // calculate index of instruction inside instruction list of current block - // the minimal index (for a block with no spill moves) can be calculated because the - // numbering of instructions is known. - // When the block already contains spill moves, the index must be increased until the - // correct index is reached. - List instructions = allocator.ir.getLIRforBlock(opBlock); - int index = (opId - instructions.get(0).id()) >> 1; - assert instructions.get(index).id() <= opId : "error in calculation"; - - while (instructions.get(index).id() != opId) { - index++; - assert 0 <= index && index < instructions.size() : "index out of bounds"; - } - assert 1 <= index && index < instructions.size() : "index out of bounds"; - assert instructions.get(index).id() == opId : "error in calculation"; - - // insert new instruction before instruction at position index - moveResolver.moveInsertPosition(instructions, index); - moveResolver.addMapping(srcIt, dstIt); - } - - int findOptimalSplitPos(AbstractBlock minBlock, AbstractBlock maxBlock, int maxSplitPos) { - int fromBlockNr = minBlock.getLinearScanNumber(); - int toBlockNr = maxBlock.getLinearScanNumber(); - - assert 0 <= fromBlockNr && fromBlockNr < blockCount() : "out of range"; - assert 0 <= toBlockNr && toBlockNr < blockCount() : "out of range"; - assert fromBlockNr < toBlockNr : "must cross block boundary"; - - // Try to split at end of maxBlock. If this would be after - // maxSplitPos, then use the begin of maxBlock - int optimalSplitPos = allocator.getLastLirInstructionId(maxBlock) + 2; - if (optimalSplitPos > maxSplitPos) { - optimalSplitPos = allocator.getFirstLirInstructionId(maxBlock); - } - - int minLoopDepth = maxBlock.getLoopDepth(); - for (int i = toBlockNr - 1; i >= fromBlockNr; i--) { - AbstractBlock cur = blockAt(i); - - if (cur.getLoopDepth() < minLoopDepth) { - // block with lower loop-depth found . split at the end of this block - minLoopDepth = cur.getLoopDepth(); - optimalSplitPos = allocator.getLastLirInstructionId(cur) + 2; - } - } - assert optimalSplitPos > allocator.maxOpId() || allocator.isBlockBegin(optimalSplitPos) : "algorithm must move split pos to block boundary"; - - return optimalSplitPos; - } - - int findOptimalSplitPos(Interval interval, int minSplitPos, int maxSplitPos, boolean doLoopOptimization) { - int optimalSplitPos = -1; - if (minSplitPos == maxSplitPos) { - // trivial case, no optimization of split position possible - Debug.log("min-pos and max-pos are equal, no optimization possible"); - optimalSplitPos = minSplitPos; - - } else { - assert minSplitPos < maxSplitPos : "must be true then"; - assert minSplitPos > 0 : "cannot access minSplitPos - 1 otherwise"; - - // reason for using minSplitPos - 1: when the minimal split pos is exactly at the - // beginning of a block, then minSplitPos is also a possible split position. 
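The block-level findOptimalSplitPos above walks the blocks between the two boundaries and prefers to split at the end of the block with the smallest loop depth, falling back to the begin of maxBlock when the end would overshoot maxSplitPos; this keeps spill moves out of the most deeply nested code. A condensed sketch of the same search over a simplified Block interface (getLoopDepth, firstOpId and lastOpId are stand-ins, not the Graal CFG API):

final class SplitPosSketch {
    interface Block {
        int getLoopDepth();
        int firstOpId();
        int lastOpId();
    }

    static int findOptimalSplitPos(Block[] blocks, int fromBlockNr, int toBlockNr, int maxSplitPos) {
        Block maxBlock = blocks[toBlockNr];
        // Try the end of maxBlock first; if that is past maxSplitPos, fall back to its begin.
        int optimalSplitPos = maxBlock.lastOpId() + 2;
        if (optimalSplitPos > maxSplitPos) {
            optimalSplitPos = maxBlock.firstOpId();
        }
        int minLoopDepth = maxBlock.getLoopDepth();
        for (int i = toBlockNr - 1; i >= fromBlockNr; i--) {
            Block cur = blocks[i];
            if (cur.getLoopDepth() < minLoopDepth) {
                // A less deeply nested block was found: split at its end so the
                // spill moves stay out of the hotter, more deeply nested code.
                minLoopDepth = cur.getLoopDepth();
                optimalSplitPos = cur.lastOpId() + 2;
            }
        }
        return optimalSplitPos;
    }
}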
- // Use the block before as minBlock, because then minBlock.lastLirInstructionId() + 2 == - // minSplitPos - AbstractBlock minBlock = allocator.blockForId(minSplitPos - 1); - - // reason for using maxSplitPos - 1: otherwise there would be an assert on failure - // when an interval ends at the end of the last block of the method - // (in this case, maxSplitPos == allocator().maxLirOpId() + 2, and there is no - // block at this opId) - AbstractBlock maxBlock = allocator.blockForId(maxSplitPos - 1); - - assert minBlock.getLinearScanNumber() <= maxBlock.getLinearScanNumber() : "invalid order"; - if (minBlock == maxBlock) { - // split position cannot be moved to block boundary : so split as late as possible - Debug.log("cannot move split pos to block boundary because minPos and maxPos are in same block"); - optimalSplitPos = maxSplitPos; - - } else { - if (interval.hasHoleBetween(maxSplitPos - 1, maxSplitPos) && !allocator.isBlockBegin(maxSplitPos)) { - // Do not move split position if the interval has a hole before maxSplitPos. - // Intervals resulting from Phi-Functions have more than one definition (marked - // as mustHaveRegister) with a hole before each definition. When the register is - // needed - // for the second definition : an earlier reloading is unnecessary. - Debug.log("interval has hole just before maxSplitPos, so splitting at maxSplitPos"); - optimalSplitPos = maxSplitPos; - - } else { - // seach optimal block boundary between minSplitPos and maxSplitPos - Debug.log("moving split pos to optimal block boundary between block B%d and B%d", minBlock.getId(), maxBlock.getId()); - - if (doLoopOptimization) { - // Loop optimization: if a loop-end marker is found between min- and - // max-position : - // then split before this loop - int loopEndPos = interval.nextUsageExact(RegisterPriority.LiveAtLoopEnd, allocator.getLastLirInstructionId(minBlock) + 2); - Debug.log("loop optimization: loop end found at pos %d", loopEndPos); - - assert loopEndPos > minSplitPos : "invalid order"; - if (loopEndPos < maxSplitPos) { - // loop-end marker found between min- and max-position - // if it is not the end marker for the same loop as the min-position : - // then move - // the max-position to this loop block. 
- // Desired result: uses tagged as shouldHaveRegister inside a loop cause - // a reloading - // of the interval (normally, only mustHaveRegister causes a reloading) - AbstractBlock loopBlock = allocator.blockForId(loopEndPos); - - Debug.log("interval is used in loop that ends in block B%d, so trying to move maxBlock back from B%d to B%d", loopBlock.getId(), maxBlock.getId(), loopBlock.getId()); - assert loopBlock != minBlock : "loopBlock and minBlock must be different because block boundary is needed between"; - - optimalSplitPos = findOptimalSplitPos(minBlock, loopBlock, allocator.getLastLirInstructionId(loopBlock) + 2); - if (optimalSplitPos == allocator.getLastLirInstructionId(loopBlock) + 2) { - optimalSplitPos = -1; - Debug.log("loop optimization not necessary"); - } else { - Debug.log("loop optimization successful"); - } - } - } - - if (optimalSplitPos == -1) { - // not calculated by loop optimization - optimalSplitPos = findOptimalSplitPos(minBlock, maxBlock, maxSplitPos); - } - } - } - } - Debug.log("optimal split position: %d", optimalSplitPos); - - return optimalSplitPos; - } - - // split an interval at the optimal position between minSplitPos and - // maxSplitPos in two parts: - // 1) the left part has already a location assigned - // 2) the right part is sorted into to the unhandled-list - void splitBeforeUsage(Interval interval, int minSplitPos, int maxSplitPos) { - - try (Indent indent = Debug.logAndIndent("splitting interval %s between %d and %d", interval, minSplitPos, maxSplitPos)) { - - assert interval.from() < minSplitPos : "cannot split at start of interval"; - assert currentPosition < minSplitPos : "cannot split before current position"; - assert minSplitPos <= maxSplitPos : "invalid order"; - assert maxSplitPos <= interval.to() : "cannot split after end of interval"; - - int optimalSplitPos = findOptimalSplitPos(interval, minSplitPos, maxSplitPos, true); - - assert minSplitPos <= optimalSplitPos && optimalSplitPos <= maxSplitPos : "out of range"; - assert optimalSplitPos <= interval.to() : "cannot split after end of interval"; - assert optimalSplitPos > interval.from() : "cannot split at start of interval"; - - if (optimalSplitPos == interval.to() && interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos) == Integer.MAX_VALUE) { - // the split position would be just before the end of the interval - // . 
no split at all necessary - Debug.log("no split necessary because optimal split position is at end of interval"); - return; - } - - // must calculate this before the actual split is performed and before split position is - // moved to odd opId - boolean moveNecessary = !allocator.isBlockBegin(optimalSplitPos) && !interval.hasHoleBetween(optimalSplitPos - 1, optimalSplitPos); - - if (!allocator.isBlockBegin(optimalSplitPos)) { - // move position before actual instruction (odd opId) - optimalSplitPos = (optimalSplitPos - 1) | 1; - } - - Debug.log("splitting at position %d", optimalSplitPos); - - assert allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 1) : "split pos must be odd when not on block boundary"; - assert !allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 0) : "split pos must be even on block boundary"; - - Interval splitPart = interval.split(optimalSplitPos, allocator); - - splitPart.setInsertMoveWhenActivated(moveNecessary); - - assert splitPart.from() >= currentPosition : "cannot append new interval before current walk position"; - unhandledLists.addToListSortedByStartAndUsePositions(RegisterBinding.Any, splitPart); - - if (Debug.isLogEnabled()) { - Debug.log("left interval %s: %s", moveNecessary ? " " : "", interval.logString(allocator)); - Debug.log("right interval %s: %s", moveNecessary ? "(move)" : "", splitPart.logString(allocator)); - } - } - } - - // split an interval at the optimal position between minSplitPos and - // maxSplitPos in two parts: - // 1) the left part has already a location assigned - // 2) the right part is always on the stack and therefore ignored in further processing - - void splitForSpilling(Interval interval) { - // calculate allowed range of splitting position - int maxSplitPos = currentPosition; - int minSplitPos = Math.max(interval.previousUsage(RegisterPriority.ShouldHaveRegister, maxSplitPos) + 1, interval.from()); - - try (Indent indent = Debug.logAndIndent("splitting and spilling interval %s between %d and %d", interval, minSplitPos, maxSplitPos)) { - - assert interval.state == State.Active : "why spill interval that is not active?"; - assert interval.from() <= minSplitPos : "cannot split before start of interval"; - assert minSplitPos <= maxSplitPos : "invalid order"; - assert maxSplitPos < interval.to() : "cannot split at end end of interval"; - assert currentPosition < interval.to() : "interval must not end before current position"; - - if (minSplitPos == interval.from()) { - // the whole interval is never used, so spill it entirely to memory - - try (Indent indent2 = Debug.logAndIndent("spilling entire interval because split pos is at beginning of interval (use positions: %d)", interval.usePosList().size())) { - - assert interval.firstUsage(RegisterPriority.ShouldHaveRegister) > currentPosition : "interval must not have use position before currentPosition"; - - allocator.assignSpillSlot(interval); - handleSpillSlot(interval); - allocator.changeSpillState(interval, minSplitPos); - - // Also kick parent intervals out of register to memory when they have no use - // position. This avoids short interval in register surrounded by intervals in - // memory . 
avoid useless moves from memory to register and back - Interval parent = interval; - while (parent != null && parent.isSplitChild()) { - parent = parent.getSplitChildBeforeOpId(parent.from()); - - if (isRegister(parent.location())) { - if (parent.firstUsage(RegisterPriority.ShouldHaveRegister) == Integer.MAX_VALUE) { - // parent is never used, so kick it out of its assigned register - Debug.log("kicking out interval %d out of its register because it is never used", parent.operandNumber); - allocator.assignSpillSlot(parent); - handleSpillSlot(parent); - } else { - // do not go further back because the register is actually used by - // the interval - parent = null; - } - } - } - } - - } else { - // search optimal split pos, split interval and spill only the right hand part - int optimalSplitPos = findOptimalSplitPos(interval, minSplitPos, maxSplitPos, false); - - assert minSplitPos <= optimalSplitPos && optimalSplitPos <= maxSplitPos : "out of range"; - assert optimalSplitPos < interval.to() : "cannot split at end of interval"; - assert optimalSplitPos >= interval.from() : "cannot split before start of interval"; - - if (!allocator.isBlockBegin(optimalSplitPos)) { - // move position before actual instruction (odd opId) - optimalSplitPos = (optimalSplitPos - 1) | 1; - } - - try (Indent indent2 = Debug.logAndIndent("splitting at position %d", optimalSplitPos)) { - assert allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 1) : "split pos must be odd when not on block boundary"; - assert !allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 0) : "split pos must be even on block boundary"; - - Interval spilledPart = interval.split(optimalSplitPos, allocator); - allocator.assignSpillSlot(spilledPart); - handleSpillSlot(spilledPart); - allocator.changeSpillState(spilledPart, optimalSplitPos); - - if (!allocator.isBlockBegin(optimalSplitPos)) { - Debug.log("inserting move from interval %d to %d", interval.operandNumber, spilledPart.operandNumber); - insertMove(optimalSplitPos, interval, spilledPart); - } - - // the currentSplitChild is needed later when moves are inserted for reloading - assert spilledPart.currentSplitChild() == interval : "overwriting wrong currentSplitChild"; - spilledPart.makeCurrentSplitChild(); - - if (Debug.isLogEnabled()) { - Debug.log("left interval: %s", interval.logString(allocator)); - Debug.log("spilled interval : %s", spilledPart.logString(allocator)); - } - } - } - } - } - - /** - * This is called for every interval that is assigned to a stack slot. - */ - protected void handleSpillSlot(Interval interval) { - assert interval.location() != null && (interval.canMaterialize() || isStackSlotValue(interval.location())) : "interval not assigned to a stack slot " + interval; - // Do nothing. Stack slots are not processed in this implementation. 
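Both splitting paths above normalize the chosen position to the LIR numbering convention: real instructions carry even ids and the odd id in between is reserved for moves the allocator inserts, so a split position that does not fall on a block begin is shifted to the odd id just before the instruction. A tiny sketch of that adjustment (moveBeforeInstruction and the isBlockBegin flag are illustrative names only):

final class SplitPosAdjust {
    /** Instruction ids are even; odd ids belong to moves the allocator inserts
     *  between instructions. A split that is not at a block boundary is moved
     *  to the odd position just before the instruction, e.g. 12 -> 11, 13 -> 13. */
    static int moveBeforeInstruction(int splitPos, boolean isBlockBegin) {
        if (isBlockBegin) {
            return splitPos;           // block boundaries keep their even id
        }
        return (splitPos - 1) | 1;
    }
}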
- } - - void splitStackInterval(Interval interval) { - int minSplitPos = currentPosition + 1; - int maxSplitPos = Math.min(interval.firstUsage(RegisterPriority.ShouldHaveRegister), interval.to()); - - splitBeforeUsage(interval, minSplitPos, maxSplitPos); - } - - void splitWhenPartialRegisterAvailable(Interval interval, int registerAvailableUntil) { - int minSplitPos = Math.max(interval.previousUsage(RegisterPriority.ShouldHaveRegister, registerAvailableUntil), interval.from() + 1); - splitBeforeUsage(interval, minSplitPos, registerAvailableUntil); - } - - void splitAndSpillInterval(Interval interval) { - assert interval.state == State.Active || interval.state == State.Inactive : "other states not allowed"; - - int currentPos = currentPosition; - if (interval.state == State.Inactive) { - // the interval is currently inactive, so no spill slot is needed for now. - // when the split part is activated, the interval has a new chance to get a register, - // so in the best case no stack slot is necessary - assert interval.hasHoleBetween(currentPos - 1, currentPos + 1) : "interval can not be inactive otherwise"; - splitBeforeUsage(interval, currentPos + 1, currentPos + 1); - - } else { - // search the position where the interval must have a register and split - // at the optimal position before. - // The new created part is added to the unhandled list and will get a register - // when it is activated - int minSplitPos = currentPos + 1; - int maxSplitPos = Math.min(interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos), interval.to()); - - splitBeforeUsage(interval, minSplitPos, maxSplitPos); - - assert interval.nextUsage(RegisterPriority.MustHaveRegister, currentPos) == Integer.MAX_VALUE : "the remaining part is spilled to stack and therefore has no register"; - splitForSpilling(interval); - } - } - - boolean allocFreeRegister(Interval interval) { - try (Indent indent = Debug.logAndIndent("trying to find free register for %s", interval)) { - - initUseLists(true); - freeExcludeActiveFixed(); - freeExcludeActiveAny(); - freeCollectInactiveFixed(interval); - freeCollectInactiveAny(interval); - // freeCollectUnhandled(fixedKind, cur); - assert unhandledLists.get(RegisterBinding.Fixed) == Interval.EndMarker : "must not have unhandled fixed intervals because all fixed intervals have a use at position 0"; - - // usePos contains the start of the next interval that has this register assigned - // (either as a fixed register or a normal allocated register in the past) - // only intervals overlapping with cur are processed, non-overlapping invervals can be - // ignored safely - if (Debug.isLogEnabled()) { - // Enable this logging to see all register states - try (Indent indent2 = Debug.logAndIndent("state of registers:")) { - for (Register register : availableRegs) { - int i = register.number; - Debug.log("reg %d: usePos: %d", register.number, usePos[i]); - } - } - } - - Register hint = null; - Interval locationHint = interval.locationHint(true); - if (locationHint != null && locationHint.location() != null && isRegister(locationHint.location())) { - hint = asRegister(locationHint.location()); - Debug.log("hint register %d from interval %s", hint.number, locationHint); - } - assert interval.location() == null : "register already assigned to interval"; - - // the register must be free at least until this position - int regNeededUntil = interval.from() + 1; - int intervalTo = interval.to(); - - boolean needSplit = false; - int splitPos = -1; - - Register reg = null; - Register minFullReg = null; - 
Register maxPartialReg = null; - - for (int i = 0; i < availableRegs.length; ++i) { - Register availableReg = availableRegs[i]; - int number = availableReg.number; - if (usePos[number] >= intervalTo) { - // this register is free for the full interval - if (minFullReg == null || availableReg.equals(hint) || (usePos[number] < usePos[minFullReg.number] && !minFullReg.equals(hint))) { - minFullReg = availableReg; - } - } else if (usePos[number] > regNeededUntil) { - // this register is at least free until regNeededUntil - if (maxPartialReg == null || availableReg.equals(hint) || (usePos[number] > usePos[maxPartialReg.number] && !maxPartialReg.equals(hint))) { - maxPartialReg = availableReg; - } - } - } - - if (minFullReg != null) { - reg = minFullReg; - } else if (maxPartialReg != null) { - needSplit = true; - reg = maxPartialReg; - } else { - return false; - } - - splitPos = usePos[reg.number]; - interval.assignLocation(reg.asValue(interval.kind())); - Debug.log("selected register %d", reg.number); - - assert splitPos > 0 : "invalid splitPos"; - if (needSplit) { - // register not available for full interval, so split it - splitWhenPartialRegisterAvailable(interval, splitPos); - } - // only return true if interval is completely assigned - return true; - } - } - - void splitAndSpillIntersectingIntervals(Register reg) { - assert reg != null : "no register assigned"; - - for (int i = 0; i < spillIntervals[reg.number].size(); i++) { - Interval interval = spillIntervals[reg.number].get(i); - removeFromList(interval); - splitAndSpillInterval(interval); - } - } - - // Split an Interval and spill it to memory so that cur can be placed in a register - void allocLockedRegister(Interval interval) { - try (Indent indent = Debug.logAndIndent("alloc locked register: need to split and spill to get register for %s", interval)) { - - // collect current usage of registers - initUseLists(false); - spillExcludeActiveFixed(); - // spillBlockUnhandledFixed(cur); - assert unhandledLists.get(RegisterBinding.Fixed) == Interval.EndMarker : "must not have unhandled fixed intervals because all fixed intervals have a use at position 0"; - spillBlockInactiveFixed(interval); - spillCollectActiveAny(); - spillCollectInactiveAny(interval); - - if (Debug.isLogEnabled()) { - try (Indent indent2 = Debug.logAndIndent("state of registers:")) { - for (Register reg : availableRegs) { - int i = reg.number; - try (Indent indent3 = Debug.logAndIndent("reg %d: usePos: %d, blockPos: %d, intervals: ", i, usePos[i], blockPos[i])) { - for (int j = 0; j < spillIntervals[i].size(); j++) { - Debug.log("%d ", spillIntervals[i].get(j).operandNumber); - } - } - } - } - } - - // the register must be free at least until this position - int firstUsage = interval.firstUsage(RegisterPriority.MustHaveRegister); - int regNeededUntil = Math.min(firstUsage, interval.from() + 1); - int intervalTo = interval.to(); - assert regNeededUntil > 0 && regNeededUntil < Integer.MAX_VALUE : "interval has no use"; - - Register reg = null; - Register ignore = interval.location() != null && isRegister(interval.location()) ? asRegister(interval.location()) : null; - for (Register availableReg : availableRegs) { - int number = availableReg.number; - if (availableReg.equals(ignore)) { - // this register must be ignored - } else if (usePos[number] > regNeededUntil) { - if (reg == null || (usePos[number] > usePos[reg.number])) { - reg = availableReg; - } - } - } - - int regUsePos = (reg == null ? 
0 : usePos[reg.number]); - if (regUsePos <= firstUsage) { - Debug.log("able to spill current interval. firstUsage(register): %d, usePos: %d", firstUsage, regUsePos); - - if (firstUsage <= interval.from() + 1) { - assert false : "cannot spill interval that is used in first instruction (possible reason: no register found) firstUsage=" + firstUsage + ", interval.from()=" + interval.from(); - // assign a reasonable register and do a bailout in product mode to avoid errors - allocator.assignSpillSlot(interval); - throw new BailoutException("LinearScan: no register found"); - } - - splitAndSpillInterval(interval); - return; - } - - boolean needSplit = blockPos[reg.number] <= intervalTo; - - int splitPos = blockPos[reg.number]; - - Debug.log("decided to use register %d", reg.number); - assert splitPos > 0 : "invalid splitPos"; - assert needSplit || splitPos > interval.from() : "splitting interval at from"; - - interval.assignLocation(reg.asValue(interval.kind())); - if (needSplit) { - // register not available for full interval : so split it - splitWhenPartialRegisterAvailable(interval, splitPos); - } - - // perform splitting and spilling for all affected intervals - splitAndSpillIntersectingIntervals(reg); - } - } - - boolean noAllocationPossible(Interval interval) { - if (allocator.callKillsRegisters) { - // fast calculation of intervals that can never get a register because the - // the next instruction is a call that blocks all registers - // Note: this only works if a call kills all registers - - // check if this interval is the result of a split operation - // (an interval got a register until this position) - int pos = interval.from(); - if (isOdd(pos)) { - // the current instruction is a call that blocks all registers - if (pos < allocator.maxOpId() && allocator.hasCall(pos + 1) && interval.to() > pos + 1) { - Debug.log("free register cannot be available because all registers blocked by following call"); - - // safety check that there is really no register available - assert !allocFreeRegister(interval) : "found a register for this interval"; - return true; - } - } - } - return false; - } - - void initVarsForAlloc(Interval interval) { - availableRegs = allocator.frameMapBuilder.getRegisterConfig().getAllocatableRegisters(interval.kind().getPlatformKind()); - } - - static boolean isMove(LIRInstruction op, Interval from, Interval to) { - if (op instanceof MoveOp) { - MoveOp move = (MoveOp) op; - if (isVariable(move.getInput()) && isVariable(move.getResult())) { - return move.getInput() != null && move.getInput().equals(from.operand) && move.getResult() != null && move.getResult().equals(to.operand); - } - } - return false; - } - - // optimization (especially for phi functions of nested loops): - // assign same spill slot to non-intersecting intervals - void combineSpilledIntervals(Interval interval) { - if (interval.isSplitChild()) { - // optimization is only suitable for split parents - return; - } - - Interval registerHint = interval.locationHint(false); - if (registerHint == null) { - // cur is not the target of a move : otherwise registerHint would be set - return; - } - assert registerHint.isSplitParent() : "register hint must be split parent"; - - if (interval.spillState() != SpillState.NoOptimization || registerHint.spillState() != SpillState.NoOptimization) { - // combining the stack slots for intervals where spill move optimization is applied - // is not benefitial and would cause problems - return; - } - - int beginPos = interval.from(); - int endPos = interval.to(); - if (endPos 
> allocator.maxOpId() || isOdd(beginPos) || isOdd(endPos)) { - // safety check that lirOpWithId is allowed - return; - } - - if (!isMove(allocator.instructionForId(beginPos), registerHint, interval) || !isMove(allocator.instructionForId(endPos), interval, registerHint)) { - // cur and registerHint are not connected with two moves - return; - } - - Interval beginHint = registerHint.getSplitChildAtOpId(beginPos, LIRInstruction.OperandMode.USE, allocator); - Interval endHint = registerHint.getSplitChildAtOpId(endPos, LIRInstruction.OperandMode.DEF, allocator); - if (beginHint == endHint || beginHint.to() != beginPos || endHint.from() != endPos) { - // registerHint must be split : otherwise the re-writing of use positions does not work - return; - } - - assert beginHint.location() != null : "must have register assigned"; - assert endHint.location() == null : "must not have register assigned"; - assert interval.firstUsage(RegisterPriority.MustHaveRegister) == beginPos : "must have use position at begin of interval because of move"; - assert endHint.firstUsage(RegisterPriority.MustHaveRegister) == endPos : "must have use position at begin of interval because of move"; - - if (isRegister(beginHint.location())) { - // registerHint is not spilled at beginPos : so it would not be benefitial to - // immediately spill cur - return; - } - assert registerHint.spillSlot() != null : "must be set when part of interval was spilled"; - - // modify intervals such that cur gets the same stack slot as registerHint - // delete use positions to prevent the intervals to get a register at beginning - interval.setSpillSlot(registerHint.spillSlot()); - interval.removeFirstUsePos(); - endHint.removeFirstUsePos(); - } - - // allocate a physical register or memory location to an interval - @Override - protected boolean activateCurrent(Interval interval) { - boolean result = true; - - try (Indent indent = Debug.logAndIndent("activating interval %s, splitParent: %d", interval, interval.splitParent().operandNumber)) { - - final Value operand = interval.operand; - if (interval.location() != null && isStackSlotValue(interval.location())) { - // activating an interval that has a stack slot assigned . split it at first use - // position - // used for method parameters - Debug.log("interval has spill slot assigned (method parameter) . split it before first use"); - splitStackInterval(interval); - result = false; - - } else { - if (interval.location() == null) { - // interval has not assigned register . normal allocation - // (this is the normal case for most intervals) - Debug.log("normal allocation of register"); - - // assign same spill slot to non-intersecting intervals - combineSpilledIntervals(interval); - - initVarsForAlloc(interval); - if (noAllocationPossible(interval) || !allocFreeRegister(interval)) { - // no empty register available. 
- // split and spill another interval so that this interval gets a register - allocLockedRegister(interval); - } - - // spilled intervals need not be move to active-list - if (!isRegister(interval.location())) { - result = false; - } - } - } - - // load spilled values that become active from stack slot to register - if (interval.insertMoveWhenActivated()) { - assert interval.isSplitChild(); - assert interval.currentSplitChild() != null; - assert !interval.currentSplitChild().operand.equals(operand) : "cannot insert move between same interval"; - Debug.log("Inserting move from interval %d to %d because insertMoveWhenActivated is set", interval.currentSplitChild().operandNumber, interval.operandNumber); - - insertMove(interval.from(), interval.currentSplitChild(), interval); - } - interval.makeCurrentSplitChild(); - - } - - return result; // true = interval is moved to active list - } - - public void finishAllocation() { - // must be called when all intervals are allocated - moveResolver.resolveAndAppendMoves(); - } -} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LocationMarker.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LocationMarker.java Fri Feb 06 12:17:20 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,221 +0,0 @@ -/* - * Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.compiler.alloc; - -import static com.oracle.graal.api.code.ValueUtil.*; - -import java.util.*; - -import com.oracle.graal.api.code.*; -import com.oracle.graal.api.meta.*; -import com.oracle.graal.compiler.common.cfg.*; -import com.oracle.graal.debug.*; -import com.oracle.graal.lir.*; -import com.oracle.graal.lir.LIRInstruction.OperandFlag; -import com.oracle.graal.lir.LIRInstruction.OperandMode; -import com.oracle.graal.lir.framemap.*; -import com.oracle.graal.options.*; - -public final class LocationMarker { - - public static class Options { - // @formatter:off - @Option(help = "Use decoupled pass for location marking (instead of using LSRA marking)", type = OptionType.Debug) - public static final OptionValue UseLocationMarker = new OptionValue<>(true); - // @formatter:on - } - - /** - * Mark all live references for a frame state. The frame state use this information to build the - * OOP maps. 
- */ - public static void markLocations(LIR lir, FrameMap frameMap) { - new LocationMarker(lir, frameMap).build(); - } - - private final LIR lir; - private final FrameMap frameMap; - private final RegisterAttributes[] registerAttributes; - private final BlockMap liveInMap; - private final BlockMap liveOutMap; - - private LocationMarker(LIR lir, FrameMap frameMap) { - this.lir = lir; - this.frameMap = frameMap; - this.registerAttributes = frameMap.getRegisterConfig().getAttributesMap(); - liveInMap = new BlockMap<>(lir.getControlFlowGraph()); - liveOutMap = new BlockMap<>(lir.getControlFlowGraph()); - } - - private void build() { - Deque> worklist = new ArrayDeque<>(); - for (int i = lir.getControlFlowGraph().getBlocks().size() - 1; i >= 0; i--) { - worklist.add(lir.getControlFlowGraph().getBlocks().get(i)); - } - for (AbstractBlock block : lir.getControlFlowGraph().getBlocks()) { - liveInMap.put(block, frameMap.initReferenceMap(true)); - } - while (!worklist.isEmpty()) { - AbstractBlock block = worklist.poll(); - processBlock(block, worklist); - } - // finish states - for (AbstractBlock block : lir.getControlFlowGraph().getBlocks()) { - List instructions = lir.getLIRforBlock(block); - for (int i = instructions.size() - 1; i >= 0; i--) { - LIRInstruction inst = instructions.get(i); - inst.forEachState((op, info) -> info.finish(op, frameMap)); - } - - } - } - - /** - * Merge outSet with in-set of successors. - */ - private boolean updateOutBlock(AbstractBlock block) { - ReferenceMap union = frameMap.initReferenceMap(true); - block.getSuccessors().forEach(succ -> union.updateUnion(liveInMap.get(succ))); - ReferenceMap outSet = liveOutMap.get(block); - // check if changed - if (outSet == null || !union.equals(outSet)) { - liveOutMap.put(block, union); - return true; - } - return false; - } - - private void processBlock(AbstractBlock block, Deque> worklist) { - if (updateOutBlock(block)) { - try (Indent indent = Debug.logAndIndent("handle block %s", block)) { - BlockClosure closure = new BlockClosure(liveOutMap.get(block).clone()); - List instructions = lir.getLIRforBlock(block); - for (int i = instructions.size() - 1; i >= 0; i--) { - LIRInstruction inst = instructions.get(i); - closure.processInstructionBottomUp(inst); - } - liveInMap.put(block, closure.getCurrentSet()); - worklist.addAll(block.getPredecessors()); - } - } - } - - private static final EnumSet REGISTER_FLAG_SET = EnumSet.of(OperandFlag.REG); - private static final LIRKind REFERENCE_KIND = LIRKind.reference(Kind.Object); - - private void forEachDestroyedCallerSavedRegister(LIRInstruction op, ValueConsumer consumer) { - if (op.destroysCallerSavedRegisters()) { - for (Register reg : frameMap.getRegisterConfig().getCallerSaveRegisters()) { - consumer.visitValue(reg.asValue(REFERENCE_KIND), OperandMode.TEMP, REGISTER_FLAG_SET); - } - } - } - - private final class BlockClosure { - private final ReferenceMap currentSet; - - private BlockClosure(ReferenceMap set) { - currentSet = set; - } - - private ReferenceMap getCurrentSet() { - return currentSet; - } - - /** - * Process all values of an instruction bottom-up, i.e. definitions before usages. Values - * that start or end at the current operation are not included. 
- */ - private void processInstructionBottomUp(LIRInstruction op) { - try (Indent indent = Debug.logAndIndent("handle op %d, %s", op.id(), op)) { - // kills - op.visitEachTemp(this::defConsumer); - op.visitEachOutput(this::defConsumer); - forEachDestroyedCallerSavedRegister(op, this::defConsumer); - - // gen - values that are considered alive for this state - op.visitEachAlive(this::useConsumer); - op.visitEachState(this::useConsumer); - // mark locations - op.forEachState((inst, info) -> markLocation(inst, info, this.getCurrentSet())); - // gen - op.visitEachInput(this::useConsumer); - } - } - - /** - * @see InstructionValueConsumer - * @param operand - * @param mode - * @param flags - */ - private void useConsumer(Value operand, OperandMode mode, EnumSet flags) { - LIRKind kind = operand.getLIRKind(); - if (shouldProcessValue(operand) && !kind.isValue() && !kind.isDerivedReference()) { - // no need to insert values and derived reference - Debug.log("set operand: %s", operand); - frameMap.setReference(operand, currentSet); - } - } - - /** - * @see InstructionValueConsumer - * @param operand - * @param mode - * @param flags - */ - private void defConsumer(Value operand, OperandMode mode, EnumSet flags) { - if (shouldProcessValue(operand)) { - Debug.log("clear operand: %s", operand); - frameMap.clearReference(operand, currentSet); - } else { - assert isIllegal(operand) || operand.getPlatformKind() != Kind.Illegal || mode == OperandMode.TEMP : String.format("Illegal PlatformKind is only allowed for TEMP mode: %s, %s", - operand, mode); - } - } - - protected boolean shouldProcessValue(Value operand) { - return (isRegister(operand) && attributes(asRegister(operand)).isAllocatable() || isStackSlot(operand)) && operand.getPlatformKind() != Kind.Illegal; - } - } - - /** - * This method does the actual marking. - */ - private void markLocation(LIRInstruction op, LIRFrameState info, ReferenceMap refMap) { - if (!info.hasDebugInfo()) { - info.initDebugInfo(frameMap, !op.destroysCallerSavedRegisters() || !frameMap.getRegisterConfig().areAllAllocatableRegistersCallerSaved()); - } - info.updateUnion(refMap); - } - - /** - * Gets an object describing the attributes of a given register according to this register - * configuration. - * - * @see LinearScan#attributes - */ - private RegisterAttributes attributes(Register reg) { - return registerAttributes[reg.number]; - } -} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/MoveResolver.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/MoveResolver.java Fri Feb 06 12:17:20 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,364 +0,0 @@ -/* - * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
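The LocationMarker removed above is a conventional backward dataflow fixed point: a block's out-state is the union of its successors' in-states, the block is re-evaluated only when that union changes, and its predecessors are pushed back onto the worklist on every change, seeded with all blocks in reverse order. The skeleton below restates that control structure over plain BitSets instead of ReferenceMaps; Block and computeLiveIn are simplified stand-ins for the Graal types:

import java.util.ArrayDeque;
import java.util.BitSet;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class BackwardLiveness {
    interface Block {
        List<Block> getSuccessors();
        List<Block> getPredecessors();
        /** Transfer function: compute the block's live-in set from its live-out set. */
        BitSet computeLiveIn(BitSet liveOut);
    }

    static Map<Block, BitSet> solve(List<Block> blocks) {
        Map<Block, BitSet> liveIn = new HashMap<>();
        Map<Block, BitSet> liveOut = new HashMap<>();
        for (Block b : blocks) {
            liveIn.put(b, new BitSet());
        }
        // Seed in reverse order so successors tend to be visited before their predecessors.
        Deque<Block> worklist = new ArrayDeque<>();
        for (int i = blocks.size() - 1; i >= 0; i--) {
            worklist.add(blocks.get(i));
        }
        while (!worklist.isEmpty()) {
            Block block = worklist.poll();
            BitSet union = new BitSet();
            for (Block succ : block.getSuccessors()) {
                union.or(liveIn.get(succ));
            }
            if (union.equals(liveOut.get(block))) {
                continue;              // out-set unchanged, nothing to propagate
            }
            liveOut.put(block, union);
            liveIn.put(block, block.computeLiveIn(union));
            worklist.addAll(block.getPredecessors());
        }
        return liveIn;
    }
}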
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.compiler.alloc; - -import static com.oracle.graal.api.code.ValueUtil.*; - -import java.util.*; - -import com.oracle.graal.api.code.*; -import com.oracle.graal.api.meta.*; -import com.oracle.graal.debug.*; -import com.oracle.graal.lir.*; - -/** - */ -final class MoveResolver { - - private final LinearScan allocator; - - private int insertIdx; - private LIRInsertionBuffer insertionBuffer; // buffer where moves are inserted - - private final List mappingFrom; - private final List mappingFromOpr; - private final List mappingTo; - private boolean multipleReadsAllowed; - private final int[] registerBlocked; - - private int registerBlocked(int reg) { - return registerBlocked[reg]; - } - - private void setRegisterBlocked(int reg, int direction) { - assert direction == 1 || direction == -1 : "out of bounds"; - registerBlocked[reg] += direction; - } - - void setMultipleReadsAllowed() { - multipleReadsAllowed = true; - } - - boolean hasMappings() { - return mappingFrom.size() > 0; - } - - MoveResolver(LinearScan allocator) { - - this.allocator = allocator; - this.multipleReadsAllowed = false; - this.mappingFrom = new ArrayList<>(8); - this.mappingFromOpr = new ArrayList<>(8); - this.mappingTo = new ArrayList<>(8); - this.insertIdx = -1; - this.insertionBuffer = new LIRInsertionBuffer(); - this.registerBlocked = new int[allocator.registers.length]; - assert checkEmpty(); - } - - boolean checkEmpty() { - assert mappingFrom.size() == 0 && mappingFromOpr.size() == 0 && mappingTo.size() == 0 : "list must be empty before and after processing"; - for (int i = 0; i < allocator.registers.length; i++) { - assert registerBlocked(i) == 0 : "register map must be empty before and after processing"; - } - assert !multipleReadsAllowed : "must have default value"; - return true; - } - - private boolean verifyBeforeResolve() { - assert mappingFrom.size() == mappingFromOpr.size() : "length must be equal"; - assert mappingFrom.size() == mappingTo.size() : "length must be equal"; - assert insertIdx != -1 : "insert position not set"; - - int i; - int j; - if (!multipleReadsAllowed) { - for (i = 0; i < mappingFrom.size(); i++) { - for (j = i + 1; j < mappingFrom.size(); j++) { - assert mappingFrom.get(i) == null || mappingFrom.get(i) != mappingFrom.get(j) : "cannot read from same interval twice"; - } - } - } - - for (i = 0; i < mappingTo.size(); i++) { - for (j = i + 1; j < mappingTo.size(); j++) { - assert mappingTo.get(i) != mappingTo.get(j) : "cannot write to same interval twice"; - } - } - - HashSet usedRegs = new HashSet<>(); - if (!multipleReadsAllowed) { - for (i = 0; i < mappingFrom.size(); i++) { - Interval interval = mappingFrom.get(i); - if (interval != null && !isIllegal(interval.location())) { - boolean unique = usedRegs.add(interval.location()); - assert unique : "cannot read from same register twice"; - } - } - } - - usedRegs.clear(); - for (i = 0; i < mappingTo.size(); i++) { - Interval interval = mappingTo.get(i); - if (isIllegal(interval.location())) { - // After insertion the location may become illegal, so don't check it since multiple - // intervals might be illegal. 
- continue; - } - boolean unique = usedRegs.add(interval.location()); - assert unique : "cannot write to same register twice"; - } - - usedRegs.clear(); - for (i = 0; i < mappingFrom.size(); i++) { - Interval interval = mappingFrom.get(i); - if (interval != null && !isRegister(interval.location())) { - usedRegs.add(interval.location()); - } - } - for (i = 0; i < mappingTo.size(); i++) { - Interval interval = mappingTo.get(i); - assert !usedRegs.contains(interval.location()) || interval.location().equals(mappingFrom.get(i).location()) : "stack slots used in mappingFrom must be disjoint to mappingTo"; - } - - return true; - } - - // mark assignedReg and assignedRegHi of the interval as blocked - private void blockRegisters(Interval interval) { - Value location = interval.location(); - if (isRegister(location)) { - int reg = asRegister(location).number; - assert multipleReadsAllowed || registerBlocked(reg) == 0 : "register already marked as used"; - setRegisterBlocked(reg, 1); - } - } - - // mark assignedReg and assignedRegHi of the interval as unblocked - private void unblockRegisters(Interval interval) { - Value location = interval.location(); - if (isRegister(location)) { - int reg = asRegister(location).number; - assert registerBlocked(reg) > 0 : "register already marked as unused"; - setRegisterBlocked(reg, -1); - } - } - - /** - * Checks if the {@linkplain Interval#location() location} of {@code to} is not blocked or is - * only blocked by {@code from}. - */ - private boolean safeToProcessMove(Interval from, Interval to) { - Value fromReg = from != null ? from.location() : null; - - Value reg = to.location(); - if (isRegister(reg)) { - if (registerBlocked(asRegister(reg).number) > 1 || (registerBlocked(asRegister(reg).number) == 1 && !reg.equals(fromReg))) { - return false; - } - } - - return true; - } - - private void createInsertionBuffer(List list) { - assert !insertionBuffer.initialized() : "overwriting existing buffer"; - insertionBuffer.init(list); - } - - private void appendInsertionBuffer() { - if (insertionBuffer.initialized()) { - insertionBuffer.finish(); - } - assert !insertionBuffer.initialized() : "must be uninitialized now"; - - insertIdx = -1; - } - - private void insertMove(Interval fromInterval, Interval toInterval) { - assert !fromInterval.operand.equals(toInterval.operand) : "from and to interval equal: " + fromInterval; - assert fromInterval.kind().equals(toInterval.kind()) : "move between different types"; - assert insertIdx != -1 : "must setup insert position first"; - - AllocatableValue fromOpr = fromInterval.operand; - AllocatableValue toOpr = toInterval.operand; - - insertionBuffer.append(insertIdx, allocator.ir.getSpillMoveFactory().createMove(toOpr, fromOpr)); - - Debug.log("insert move from %s to %s at %d", fromInterval, toInterval, insertIdx); - } - - private void insertMove(Value fromOpr, Interval toInterval) { - assert fromOpr.getLIRKind().equals(toInterval.kind()) : "move between different types"; - assert insertIdx != -1 : "must setup insert position first"; - - AllocatableValue toOpr = toInterval.operand; - insertionBuffer.append(insertIdx, allocator.ir.getSpillMoveFactory().createMove(toOpr, fromOpr)); - - Debug.log("insert move from value %s to %s at %d", fromOpr, toInterval, insertIdx); - } - - private void resolveMappings() { - assert verifyBeforeResolve(); - - // Block all registers that are used as input operands of a move. - // When a register is blocked, no move to this register is emitted. - // This is necessary for detecting cycles in moves. 
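The resolution loop that follows is a parallel-move resolver: every pending move increments a blocked counter on its source register, a move may be emitted once its target register has no outstanding readers, and when nothing can be emitted only cycles remain, so one source is diverted through a spill slot to break the cycle. A reduced register-only sketch of the same scheme (Move, resolve, and the pseudo-register -1 used as the temporary are illustrative stand-ins; constants and stack operands are left out):

import java.util.ArrayList;
import java.util.List;

final class ParallelMoveSketch {

    static final class Move {
        final int from;   // source register number, or -1 for the temporary slot
        final int to;     // destination register number

        Move(int from, int to) {
            this.from = from;
            this.to = to;
        }
    }

    /**
     * Orders parallel register moves so that no source is overwritten before it
     * has been read. Cycles are broken by diverting one source through a
     * temporary location (pseudo-register -1, standing in for a spill slot).
     */
    static List<Move> resolve(List<Move> pending, int registerCount) {
        int[] blocked = new int[registerCount];            // outstanding readers per register
        for (Move m : pending) {
            blocked[m.from]++;
        }
        List<Move> mapping = new ArrayList<>(pending);
        List<Move> emitted = new ArrayList<>();
        while (!mapping.isEmpty()) {
            boolean progress = false;
            for (int i = mapping.size() - 1; i >= 0; i--) {
                Move m = mapping.get(i);
                // Safe when nobody still has to read the target register, or the
                // only outstanding reader is this move itself (from == to).
                if (blocked[m.to] == 0 || (blocked[m.to] == 1 && m.to == m.from)) {
                    emitted.add(m);
                    if (m.from >= 0) {
                        blocked[m.from]--;
                    }
                    mapping.remove(i);
                    progress = true;
                }
            }
            if (!progress) {
                // Only cycles remain: save one source to the temporary and let the
                // stalled move read from there, unblocking its old source register.
                Move victim = mapping.remove(mapping.size() - 1);
                emitted.add(new Move(victim.from, -1));
                blocked[victim.from]--;
                mapping.add(new Move(-1, victim.to));
            }
        }
        return emitted;
    }
}

For the classic two-element cycle r1 -> r2, r2 -> r1 this produces r2 -> temp, r1 -> r2, temp -> r1, which is the same shape the spill-interval based resolution above generates.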
- int i; - for (i = mappingFrom.size() - 1; i >= 0; i--) { - Interval fromInterval = mappingFrom.get(i); - if (fromInterval != null) { - blockRegisters(fromInterval); - } - } - - int spillCandidate = -1; - while (mappingFrom.size() > 0) { - boolean processedInterval = false; - - for (i = mappingFrom.size() - 1; i >= 0; i--) { - Interval fromInterval = mappingFrom.get(i); - Interval toInterval = mappingTo.get(i); - - if (safeToProcessMove(fromInterval, toInterval)) { - // this interval can be processed because target is free - if (fromInterval != null) { - insertMove(fromInterval, toInterval); - unblockRegisters(fromInterval); - } else { - insertMove(mappingFromOpr.get(i), toInterval); - } - mappingFrom.remove(i); - mappingFromOpr.remove(i); - mappingTo.remove(i); - - processedInterval = true; - } else if (fromInterval != null && isRegister(fromInterval.location())) { - // this interval cannot be processed now because target is not free - // it starts in a register, so it is a possible candidate for spilling - spillCandidate = i; - } - } - - if (!processedInterval) { - // no move could be processed because there is a cycle in the move list - // (e.g. r1 . r2, r2 . r1), so one interval must be spilled to memory - assert spillCandidate != -1 : "no interval in register for spilling found"; - - // create a new spill interval and assign a stack slot to it - Interval fromInterval = mappingFrom.get(spillCandidate); - Interval spillInterval = allocator.createDerivedInterval(fromInterval); - spillInterval.setKind(fromInterval.kind()); - - // add a dummy range because real position is difficult to calculate - // Note: this range is a special case when the integrity of the allocation is - // checked - spillInterval.addRange(1, 2); - - // do not allocate a new spill slot for temporary interval, but - // use spill slot assigned to fromInterval. Otherwise moves from - // one stack slot to another can happen (not allowed by LIRAssembler - StackSlotValue spillSlot = fromInterval.spillSlot(); - if (spillSlot == null) { - spillSlot = allocator.frameMapBuilder.allocateSpillSlot(spillInterval.kind()); - fromInterval.setSpillSlot(spillSlot); - } - spillInterval.assignLocation(spillSlot); - - Debug.log("created new Interval for spilling: %s", spillInterval); - - // insert a move from register to stack and update the mapping - insertMove(fromInterval, spillInterval); - mappingFrom.set(spillCandidate, spillInterval); - unblockRegisters(fromInterval); - } - } - - // reset to default value - multipleReadsAllowed = false; - - // check that all intervals have been processed - assert checkEmpty(); - } - - void setInsertPosition(List insertList, int insertIdx) { - assert this.insertIdx == -1 : "use moveInsertPosition instead of setInsertPosition when data already set"; - - createInsertionBuffer(insertList); - this.insertIdx = insertIdx; - } - - void moveInsertPosition(List newInsertList, int newInsertIdx) { - if (insertionBuffer.lirList() != null && (insertionBuffer.lirList() != newInsertList || this.insertIdx != newInsertIdx)) { - // insert position changed . resolve current mappings - resolveMappings(); - } - - if (insertionBuffer.lirList() != newInsertList) { - // block changed . 
append insertionBuffer because it is - // bound to a specific block and create a new insertionBuffer - appendInsertionBuffer(); - createInsertionBuffer(newInsertList); - } - - this.insertIdx = newInsertIdx; - } - - void addMapping(Interval fromInterval, Interval toInterval) { - - if (isIllegal(toInterval.location()) && toInterval.canMaterialize()) { - Debug.log("no store to rematerializable interval %s needed", toInterval); - return; - } - if (isIllegal(fromInterval.location()) && fromInterval.canMaterialize()) { - // Instead of a reload, re-materialize the value - Value rematValue = fromInterval.getMaterializedValue(); - addMapping(rematValue, toInterval); - return; - } - Debug.log("add move mapping from %s to %s", fromInterval, toInterval); - - assert !fromInterval.operand.equals(toInterval.operand) : "from and to interval equal: " + fromInterval; - assert fromInterval.kind().equals(toInterval.kind()); - mappingFrom.add(fromInterval); - mappingFromOpr.add(Value.ILLEGAL); - mappingTo.add(toInterval); - } - - void addMapping(Value fromOpr, Interval toInterval) { - Debug.log("add move mapping from %s to %s", fromOpr, toInterval); - - assert isConstant(fromOpr) : "only for constants"; - - mappingFrom.add(null); - mappingFromOpr.add(fromOpr); - mappingTo.add(toInterval); - } - - void resolveAndAppendMoves() { - if (hasMappings()) { - resolveMappings(); - } - appendInsertionBuffer(); - } -} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/OptimizingLinearScanWalker.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/OptimizingLinearScanWalker.java Fri Feb 06 12:17:20 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,251 +0,0 @@ -/* - * Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
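addMapping above also folds rematerialization into move resolution: a store into a target interval that has no location but can rematerialize is dropped, and a reload from such a source becomes a constant move instead of a stack load. A small sketch of that decision, with IntervalView as a stand-in for the parts of Interval it needs (hasLocation approximates the isIllegal(location()) test in the original):

import java.util.List;

final class MappingSketch {
    interface IntervalView {
        boolean hasLocation();        // a register or stack slot is currently assigned
        boolean canMaterialize();     // the value can be recreated from a constant
        Object materializedValue();   // the constant to re-emit instead of a reload
    }

    /**
     * A store into a rematerializable, locationless target is skipped entirely,
     * and a load from a rematerializable, locationless source is turned into a
     * constant move rather than a reload from the stack.
     */
    static void addMapping(IntervalView from, IntervalView to, List<Object> sources, List<IntervalView> targets) {
        if (!to.hasLocation() && to.canMaterialize()) {
            return;                                  // no store needed
        }
        if (!from.hasLocation() && from.canMaterialize()) {
            sources.add(from.materializedValue());   // re-emit the constant
        } else {
            sources.add(from);
        }
        targets.add(to);
    }
}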
- */ -package com.oracle.graal.compiler.alloc; - -import static com.oracle.graal.api.code.ValueUtil.*; - -import com.oracle.graal.api.code.*; -import com.oracle.graal.api.meta.*; -import com.oracle.graal.compiler.alloc.Interval.RegisterBinding; -import com.oracle.graal.compiler.alloc.Interval.RegisterBindingLists; -import com.oracle.graal.compiler.alloc.Interval.State; -import com.oracle.graal.compiler.common.cfg.*; -import com.oracle.graal.debug.*; -import com.oracle.graal.debug.Debug.Scope; -import com.oracle.graal.options.*; - -public class OptimizingLinearScanWalker extends LinearScanWalker { - - public static class Options { - // @formatter:off - @Option(help = "Enable LSRA optimization", type = OptionType.Debug) - public static final OptionValue LSRAOptimization = new OptionValue<>(true); - @Option(help = "LSRA optimization: Only split but do not reassign", type = OptionType.Debug) - public static final OptionValue LSRAOptSplitOnly = new OptionValue<>(false); - // @formatter:on - } - - OptimizingLinearScanWalker(LinearScan allocator, Interval unhandledFixedFirst, Interval unhandledAnyFirst) { - super(allocator, unhandledFixedFirst, unhandledAnyFirst); - } - - @Override - protected void handleSpillSlot(Interval interval) { - assert interval.location() != null : "interval not assigned " + interval; - if (interval.canMaterialize()) { - assert !isStackSlotValue(interval.location()) : "interval can materialize but assigned to a stack slot " + interval; - return; - } - assert isStackSlotValue(interval.location()) : "interval not assigned to a stack slot " + interval; - try (Scope s1 = Debug.scope("LSRAOptimization")) { - Debug.log("adding stack to unhandled list %s", interval); - unhandledLists.addToListSortedByStartAndUsePositions(RegisterBinding.Stack, interval); - } - } - - @SuppressWarnings("unused") - private static void printRegisterBindingList(RegisterBindingLists list, RegisterBinding binding) { - for (Interval interval = list.get(binding); interval != Interval.EndMarker; interval = interval.next) { - Debug.log("%s", interval); - } - } - - @Override - void walk() { - try (Scope s = Debug.scope("OptimizingLinearScanWalker")) { - for (AbstractBlock block : allocator.sortedBlocks) { - optimizeBlock(block); - } - } - super.walk(); - } - - private void optimizeBlock(AbstractBlock block) { - if (block.getPredecessorCount() == 1) { - int nextBlock = allocator.getFirstLirInstructionId(block); - try (Scope s1 = Debug.scope("LSRAOptimization")) { - Debug.log("next block: %s (%d)", block, nextBlock); - } - try (Indent indent0 = Debug.indent()) { - walkTo(nextBlock); - - try (Scope s1 = Debug.scope("LSRAOptimization")) { - boolean changed = true; - // we need to do this because the active lists might change - loop: while (changed) { - changed = false; - try (Indent indent1 = Debug.logAndIndent("Active intervals: (block %s [%d])", block, nextBlock)) { - for (Interval active = activeLists.get(RegisterBinding.Any); active != Interval.EndMarker; active = active.next) { - Debug.log("active (any): %s", active); - if (optimize(nextBlock, block, active, RegisterBinding.Any)) { - changed = true; - break loop; - } - } - for (Interval active = activeLists.get(RegisterBinding.Stack); active != Interval.EndMarker; active = active.next) { - Debug.log("active (stack): %s", active); - if (optimize(nextBlock, block, active, RegisterBinding.Stack)) { - changed = true; - break loop; - } - } - } - } - } - } - } - } - - private boolean optimize(int currentPos, AbstractBlock currentBlock, Interval currentInterval, 
RegisterBinding binding) { - // BEGIN initialize and sanity checks - assert currentBlock != null : "block must not be null"; - assert currentInterval != null : "interval must not be null"; - - assert currentBlock.getPredecessorCount() == 1 : "more than one predecessors -> optimization not possible"; - - if (!currentInterval.isSplitChild()) { - // interval is not a split child -> no need for optimization - return false; - } - - if (currentInterval.from() == currentPos) { - // the interval starts at the current position so no need for splitting - return false; - } - - // get current location - AllocatableValue currentLocation = currentInterval.location(); - assert currentLocation != null : "active intervals must have a location assigned!"; - - // get predecessor stuff - AbstractBlock predecessorBlock = currentBlock.getPredecessors().get(0); - int predEndId = allocator.getLastLirInstructionId(predecessorBlock); - Interval predecessorInterval = currentInterval.getIntervalCoveringOpId(predEndId); - assert predecessorInterval != null : "variable not live at the end of the only predecessor! " + predecessorBlock + " -> " + currentBlock + " interval: " + currentInterval; - AllocatableValue predecessorLocation = predecessorInterval.location(); - assert predecessorLocation != null : "handled intervals must have a location assigned!"; - - // END initialize and sanity checks - - if (currentLocation.equals(predecessorLocation)) { - // locations are already equal -> nothing to optimize - return false; - } - - if (!isStackSlotValue(predecessorLocation) && !isRegister(predecessorLocation)) { - assert predecessorInterval.canMaterialize(); - // value is materialized -> no need for optimization - return false; - } - - assert isStackSlotValue(currentLocation) || isRegister(currentLocation) : "current location not a register or stack slot " + currentLocation; - - try (Indent indent = Debug.logAndIndent("location differs: %s vs. 
%s", predecessorLocation, currentLocation)) { - // split current interval at current position - Debug.log("splitting at position %d", currentPos); - - assert allocator.isBlockBegin(currentPos) && ((currentPos & 1) == 0) : "split pos must be even when on block boundary"; - - Interval splitPart = currentInterval.split(currentPos, allocator); - activeLists.remove(binding, currentInterval); - - assert splitPart.from() >= currentPosition : "cannot append new interval before current walk position"; - - // the currentSplitChild is needed later when moves are inserted for reloading - assert splitPart.currentSplitChild() == currentInterval : "overwriting wrong currentSplitChild"; - splitPart.makeCurrentSplitChild(); - - if (Debug.isLogEnabled()) { - Debug.log("left interval : %s", currentInterval.logString(allocator)); - Debug.log("right interval : %s", splitPart.logString(allocator)); - } - - if (Options.LSRAOptSplitOnly.getValue()) { - // just add the split interval to the unhandled list - unhandledLists.addToListSortedByStartAndUsePositions(RegisterBinding.Any, splitPart); - } else { - if (isRegister(predecessorLocation)) { - splitRegisterInterval(splitPart, asRegister(predecessorLocation)); - } else { - assert isStackSlotValue(predecessorLocation); - Debug.log("assigning interval %s to %s", splitPart, predecessorLocation); - splitPart.assignLocation(predecessorLocation); - // activate interval - activeLists.addToListSortedByCurrentFromPositions(RegisterBinding.Stack, splitPart); - splitPart.state = State.Active; - - splitStackInterval(splitPart); - } - } - } - return true; - } - - private void splitRegisterInterval(Interval interval, Register reg) { - // collect current usage of registers - initVarsForAlloc(interval); - initUseLists(false); - spillExcludeActiveFixed(); - // spillBlockUnhandledFixed(cur); - assert unhandledLists.get(RegisterBinding.Fixed) == Interval.EndMarker : "must not have unhandled fixed intervals because all fixed intervals have a use at position 0"; - spillBlockInactiveFixed(interval); - spillCollectActiveAny(); - spillCollectInactiveAny(interval); - - if (Debug.isLogEnabled()) { - try (Indent indent2 = Debug.logAndIndent("state of registers:")) { - for (Register register : availableRegs) { - int i = register.number; - try (Indent indent3 = Debug.logAndIndent("reg %d: usePos: %d, blockPos: %d, intervals: ", i, usePos[i], blockPos[i])) { - for (int j = 0; j < spillIntervals[i].size(); j++) { - Debug.log("%d ", spillIntervals[i].get(j).operandNumber); - } - } - } - } - } - - // the register must be free at least until this position - boolean needSplit = blockPos[reg.number] <= interval.to(); - - int splitPos = blockPos[reg.number]; - - assert splitPos > 0 : "invalid splitPos"; - assert needSplit || splitPos > interval.from() : "splitting interval at from"; - - Debug.log("assigning interval %s to %s", interval, reg); - interval.assignLocation(reg.asValue(interval.kind())); - if (needSplit) { - // register not available for full interval : so split it - splitWhenPartialRegisterAvailable(interval, splitPos); - } - - // perform splitting and spilling for all affected intervals - splitAndSpillIntersectingIntervals(reg); - - // activate interval - activeLists.addToListSortedByCurrentFromPositions(RegisterBinding.Any, interval); - interval.state = State.Active; - - } -} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/Range.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/Range.java Fri Feb 06 
12:17:20 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,116 +0,0 @@ -/* - * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.compiler.alloc; - -/** - * Represents a range of integers from a start (inclusive) to an end (exclusive. - */ -public final class Range { - - public static final Range EndMarker = new Range(Integer.MAX_VALUE, Integer.MAX_VALUE, null); - - /** - * The start of the range, inclusive. - */ - public int from; - - /** - * The end of the range, exclusive. - */ - public int to; - - /** - * A link to allow the range to be put into a singly linked list. - */ - public Range next; - - boolean intersects(Range r) { - return intersectsAt(r) != -1; - } - - /** - * Creates a new range. - * - * @param from the start of the range, inclusive - * @param to the end of the range, exclusive - * @param next link to the next range in a linked list - */ - Range(int from, int to, Range next) { - this.from = from; - this.to = to; - this.next = next; - } - - int intersectsAt(Range other) { - Range r1 = this; - Range r2 = other; - - assert r2 != null : "null ranges not allowed"; - assert r1 != EndMarker && r2 != EndMarker : "empty ranges not allowed"; - - do { - if (r1.from < r2.from) { - if (r1.to <= r2.from) { - r1 = r1.next; - if (r1 == EndMarker) { - return -1; - } - } else { - return r2.from; - } - } else { - if (r2.from < r1.from) { - if (r2.to <= r1.from) { - r2 = r2.next; - if (r2 == EndMarker) { - return -1; - } - } else { - return r1.from; - } - } else { // r1.from() == r2.from() - if (r1.from == r1.to) { - r1 = r1.next; - if (r1 == EndMarker) { - return -1; - } - } else { - if (r2.from == r2.to) { - r2 = r2.next; - if (r2 == EndMarker) { - return -1; - } - } else { - return r1.from; - } - } - } - } - } while (true); - } - - @Override - public String toString() { - return "[" + from + ", " + to + "]"; - } -} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/RegisterVerifier.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/RegisterVerifier.java Fri Feb 06 12:17:20 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,240 +0,0 @@ -/* - * Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.compiler.alloc; - -import static com.oracle.graal.api.code.ValueUtil.*; - -import java.util.*; - -import com.oracle.graal.api.code.*; -import com.oracle.graal.api.meta.*; -import com.oracle.graal.compiler.common.*; -import com.oracle.graal.compiler.common.cfg.*; -import com.oracle.graal.debug.*; -import com.oracle.graal.lir.*; -import com.oracle.graal.lir.LIRInstruction.OperandFlag; -import com.oracle.graal.lir.LIRInstruction.OperandMode; -import com.oracle.graal.phases.util.*; - -/** - */ -final class RegisterVerifier { - - LinearScan allocator; - List> workList; // all blocks that must be processed - ArrayMap savedStates; // saved information of previous check - - // simplified access to methods of LinearScan - Interval intervalAt(Value operand) { - return allocator.intervalFor(operand); - } - - // currently, only registers are processed - int stateSize() { - return allocator.maxRegisterNumber() + 1; - } - - // accessors - Interval[] stateForBlock(AbstractBlock block) { - return savedStates.get(block.getId()); - } - - void setStateForBlock(AbstractBlock block, Interval[] savedState) { - savedStates.put(block.getId(), savedState); - } - - void addToWorkList(AbstractBlock block) { - if (!workList.contains(block)) { - workList.add(block); - } - } - - RegisterVerifier(LinearScan allocator) { - this.allocator = allocator; - workList = new ArrayList<>(16); - this.savedStates = new ArrayMap<>(); - - } - - void verify(AbstractBlock start) { - // setup input registers (method arguments) for first block - Interval[] inputState = new Interval[stateSize()]; - setStateForBlock(start, inputState); - addToWorkList(start); - - // main loop for verification - do { - AbstractBlock block = workList.get(0); - workList.remove(0); - - processBlock(block); - } while (!workList.isEmpty()); - } - - private void processBlock(AbstractBlock block) { - try (Indent indent = Debug.logAndIndent("processBlock B%d", block.getId())) { - // must copy state because it is modified - Interval[] inputState = copy(stateForBlock(block)); - - try (Indent indent2 = Debug.logAndIndent("Input-State of intervals:")) { - for (int i = 0; i < stateSize(); i++) { - if (inputState[i] != null) { - Debug.log(" %4d", inputState[i].operandNumber); - } else { - Debug.log(" __"); - } - } - } - - // process all operations of the block - processOperations(allocator.ir.getLIRforBlock(block), inputState); - - // iterate all successors - for (AbstractBlock succ : block.getSuccessors()) { - processSuccessor(succ, inputState); - } - } - } - - private void processSuccessor(AbstractBlock block, Interval[] inputState) { 
- Interval[] savedState = stateForBlock(block); - - if (savedState != null) { - // this block was already processed before. - // check if new inputState is consistent with savedState - - boolean savedStateCorrect = true; - for (int i = 0; i < stateSize(); i++) { - if (inputState[i] != savedState[i]) { - // current inputState and previous savedState assume a different - // interval in this register -> assume that this register is invalid - if (savedState[i] != null) { - // invalidate old calculation only if it assumed that - // register was valid. when the register was already invalid, - // then the old calculation was correct. - savedStateCorrect = false; - savedState[i] = null; - - Debug.log("processSuccessor B%d: invalidating slot %d", block.getId(), i); - } - } - } - - if (savedStateCorrect) { - // already processed block with correct inputState - Debug.log("processSuccessor B%d: previous visit already correct", block.getId()); - } else { - // must re-visit this block - Debug.log("processSuccessor B%d: must re-visit because input state changed", block.getId()); - addToWorkList(block); - } - - } else { - // block was not processed before, so set initial inputState - Debug.log("processSuccessor B%d: initial visit", block.getId()); - - setStateForBlock(block, copy(inputState)); - addToWorkList(block); - } - } - - static Interval[] copy(Interval[] inputState) { - return inputState.clone(); - } - - static void statePut(Interval[] inputState, Value location, Interval interval) { - if (location != null && isRegister(location)) { - Register reg = asRegister(location); - int regNum = reg.number; - if (interval != null) { - Debug.log("%s = %s", reg, interval.operand); - } else if (inputState[regNum] != null) { - Debug.log("%s = null", reg); - } - - inputState[regNum] = interval; - } - } - - static boolean checkState(Interval[] inputState, Value reg, Interval interval) { - if (reg != null && isRegister(reg)) { - if (inputState[asRegister(reg).number] != interval) { - throw new GraalInternalError("!! 
Error in register allocation: register %s does not contain interval %s but interval %s", reg, interval.operand, inputState[asRegister(reg).number]); - } - } - return true; - } - - void processOperations(List ops, final Interval[] inputState) { - InstructionValueConsumer useConsumer = new InstructionValueConsumer() { - - @Override - public void visitValue(LIRInstruction op, Value operand, OperandMode mode, EnumSet flags) { - // we skip spill moves inserted by the spill position optimization - if (LinearScan.isVariableOrRegister(operand) && allocator.isProcessed(operand) && op.id() != LinearScan.DOMINATOR_SPILL_MOVE_ID) { - Interval interval = intervalAt(operand); - if (op.id() != -1) { - interval = interval.getSplitChildAtOpId(op.id(), mode, allocator); - } - - assert checkState(inputState, interval.location(), interval.splitParent()); - } - } - }; - - InstructionValueConsumer defConsumer = (op, operand, mode, flags) -> { - if (LinearScan.isVariableOrRegister(operand) && allocator.isProcessed(operand)) { - Interval interval = intervalAt(operand); - if (op.id() != -1) { - interval = interval.getSplitChildAtOpId(op.id(), mode, allocator); - } - - statePut(inputState, interval.location(), interval.splitParent()); - } - }; - - // visit all instructions of the block - for (int i = 0; i < ops.size(); i++) { - final LIRInstruction op = ops.get(i); - - if (Debug.isLogEnabled()) { - Debug.log("%s", op.toStringWithIdPrefix()); - } - - // check if input operands are correct - op.visitEachInput(useConsumer); - // invalidate all caller save registers at calls - if (op.destroysCallerSavedRegisters()) { - for (Register r : allocator.frameMapBuilder.getRegisterConfig().getCallerSaveRegisters()) { - statePut(inputState, r.asValue(), null); - } - } - op.visitEachAlive(useConsumer); - // set temp operands (some operations use temp operands also as output operands, so - // can't set them null) - op.visitEachTemp(defConsumer); - // set output operands - op.visitEachOutput(defConsumer); - } - } -} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/gen/NodeLIRBuilder.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/gen/NodeLIRBuilder.java Fri Feb 06 12:17:20 2015 +0100 +++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/gen/NodeLIRBuilder.java Fri Feb 06 12:44:50 2015 +0100 @@ -42,6 +42,7 @@ import com.oracle.graal.graph.*; import com.oracle.graal.lir.*; import com.oracle.graal.lir.StandardOp.JumpOp; +import com.oracle.graal.lir.debug.*; import com.oracle.graal.lir.gen.*; import com.oracle.graal.lir.gen.LIRGenerator.Options; import com.oracle.graal.nodes.*; @@ -81,7 +82,7 @@ @MatchableNode(nodeClass = XorNode.class, inputs = {"x", "y"}, commutative = true) @MatchableNode(nodeClass = PiNode.class, inputs = {"object"}) @MatchableNode(nodeClass = ConstantLocationNode.class, shareable = true) -public abstract class NodeLIRBuilder implements NodeLIRBuilderTool { +public abstract class NodeLIRBuilder implements NodeLIRBuilderTool, LIRGenerationDebugContext { private final NodeMap nodeOperands; private final DebugInfoBuilder debugInfoBuilder; @@ -143,6 +144,11 @@ } @Override + public Object getSourceForOperand(Value value) { + return valueForOperand(value); + } + + @Override public Value setResult(ValueNode x, Value operand) { assert (!isRegister(operand) || !gen.attributes(asRegister(operand)).isAllocatable()); assert nodeOperands != null && (nodeOperands.get(x) == null || nodeOperands.get(x) instanceof ComplexMatchValue) : "operand 
cannot be set twice"; diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/Interval.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/Interval.java Fri Feb 06 12:44:50 2015 +0100 @@ -0,0 +1,1303 @@ +/* + * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.lir.alloc.lsra; + +import static com.oracle.graal.api.code.ValueUtil.*; +import static com.oracle.graal.compiler.common.GraalOptions.*; +import static com.oracle.graal.lir.LIRValueUtil.*; + +import java.util.*; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.compiler.common.*; +import com.oracle.graal.compiler.common.util.*; +import com.oracle.graal.debug.*; +import com.oracle.graal.lir.*; + +/** + * Represents an interval in the {@linkplain LinearScan linear scan register allocator}. + */ +public final class Interval { + + /** + * A pair of intervals. + */ + static final class Pair { + + public final Interval first; + public final Interval second; + + public Pair(Interval first, Interval second) { + this.first = first; + this.second = second; + } + } + + /** + * A set of interval lists, one per {@linkplain RegisterBinding binding} type. + */ + static final class RegisterBindingLists { + + /** + * List of intervals whose binding is currently {@link RegisterBinding#Fixed}. + */ + public Interval fixed; + + /** + * List of intervals whose binding is currently {@link RegisterBinding#Any}. + */ + public Interval any; + + /** + * List of intervals whose binding is currently {@link RegisterBinding#Stack}. + */ + public Interval stack; + + public RegisterBindingLists(Interval fixed, Interval any, Interval stack) { + this.fixed = fixed; + this.any = any; + this.stack = stack; + } + + /** + * Gets the list for a specified binding. + * + * @param binding specifies the list to be returned + * @return the list of intervals whose binding is {@code binding} + */ + public Interval get(RegisterBinding binding) { + switch (binding) { + case Any: + return any; + case Fixed: + return fixed; + case Stack: + return stack; + } + throw GraalInternalError.shouldNotReachHere(); + } + + /** + * Sets the list for a specified binding. 
+ * + * @param binding specifies the list to be replaced + * @param list a list of intervals whose binding is {@code binding} + */ + public void set(RegisterBinding binding, Interval list) { + assert list != null; + switch (binding) { + case Any: + any = list; + break; + case Fixed: + fixed = list; + break; + case Stack: + stack = list; + break; + } + } + + /** + * Adds an interval to a list sorted by {@linkplain Interval#currentFrom() current from} + * positions. + * + * @param binding specifies the list to be updated + * @param interval the interval to add + */ + public void addToListSortedByCurrentFromPositions(RegisterBinding binding, Interval interval) { + Interval list = get(binding); + Interval prev = null; + Interval cur = list; + while (cur.currentFrom() < interval.currentFrom()) { + prev = cur; + cur = cur.next; + } + Interval result = list; + if (prev == null) { + // add to head of list + result = interval; + } else { + // add before 'cur' + prev.next = interval; + } + interval.next = cur; + set(binding, result); + } + + /** + * Adds an interval to a list sorted by {@linkplain Interval#from() start} positions and + * {@linkplain Interval#firstUsage(RegisterPriority) first usage} positions. + * + * @param binding specifies the list to be updated + * @param interval the interval to add + */ + public void addToListSortedByStartAndUsePositions(RegisterBinding binding, Interval interval) { + Interval list = get(binding); + Interval prev = null; + Interval cur = list; + while (cur.from() < interval.from() || (cur.from() == interval.from() && cur.firstUsage(RegisterPriority.None) < interval.firstUsage(RegisterPriority.None))) { + prev = cur; + cur = cur.next; + } + if (prev == null) { + list = interval; + } else { + prev.next = interval; + } + interval.next = cur; + set(binding, list); + } + + /** + * Removes an interval from a list. + * + * @param binding specifies the list to be updated + * @param i the interval to remove + */ + public void remove(RegisterBinding binding, Interval i) { + Interval list = get(binding); + Interval prev = null; + Interval cur = list; + while (cur != i) { + assert cur != null && cur != Interval.EndMarker : "interval has not been found in list: " + i; + prev = cur; + cur = cur.next; + } + if (prev == null) { + set(binding, cur.next); + } else { + prev.next = cur.next; + } + } + } + + /** + * Constants denoting the register usage priority for an interval. The constants are declared in + * increasing order of priority are are used to optimize spilling when multiple overlapping + * intervals compete for limited registers. + */ + public enum RegisterPriority { + /** + * No special reason for an interval to be allocated a register. + */ + None, + + /** + * Priority level for intervals live at the end of a loop. + */ + LiveAtLoopEnd, + + /** + * Priority level for intervals that should be allocated to a register. + */ + ShouldHaveRegister, + + /** + * Priority level for intervals that must be allocated to a register. + */ + MustHaveRegister; + + public static final RegisterPriority[] VALUES = values(); + + /** + * Determines if this priority is higher than or equal to a given priority. + */ + public boolean greaterEqual(RegisterPriority other) { + return ordinal() >= other.ordinal(); + } + + /** + * Determines if this priority is lower than a given priority. + */ + public boolean lessThan(RegisterPriority other) { + return ordinal() < other.ordinal(); + } + } + + /** + * Constants denoting whether an interval is bound to a specific register. 
This models platform + * dependencies on register usage for certain instructions. + */ + enum RegisterBinding { + /** + * Interval is bound to a specific register as required by the platform. + */ + Fixed, + + /** + * Interval has no specific register requirements. + */ + Any, + + /** + * Interval is bound to a stack slot. + */ + Stack; + + public static final RegisterBinding[] VALUES = values(); + } + + /** + * Constants denoting the linear-scan states an interval may be in with respect to the + * {@linkplain Interval#from() start} {@code position} of the interval being processed. + */ + enum State { + /** + * An interval that starts after {@code position}. + */ + Unhandled, + + /** + * An interval that {@linkplain Interval#covers covers} {@code position} and has an assigned + * register. + */ + Active, + + /** + * An interval that starts before and ends after {@code position} but does not + * {@linkplain Interval#covers cover} it due to a lifetime hole. + */ + Inactive, + + /** + * An interval that ends before {@code position} or is spilled to memory. + */ + Handled; + } + + /** + * Constants used in optimization of spilling of an interval. + */ + enum SpillState { + /** + * Starting state of calculation: no definition found yet. + */ + NoDefinitionFound, + + /** + * One definition has already been found. Two consecutive definitions are treated as one + * (e.g. a consecutive move and add because of two-operand LIR form). The position of this + * definition is given by {@link Interval#spillDefinitionPos()}. + */ + NoSpillStore, + + /** + * One spill move has already been inserted. + */ + OneSpillStore, + + /** + * The interval is spilled multiple times or is spilled in a loop. Place the store somewhere + * on the dominator path between the definition and the usages. + */ + SpillInDominator, + + /** + * The interval should be stored immediately after its definition to prevent multiple + * redundant stores. + */ + StoreAtDefinition, + + /** + * The interval starts in memory (e.g. method parameter), so a store is never necessary. + */ + StartInMemory, + + /** + * The interval has more than one definition (e.g. resulting from phi moves), so stores to + * memory are not optimized. + */ + NoOptimization + } + + /** + * List of use positions. Each entry in the list records the use position and register priority + * associated with the use position. The entries in the list are in descending order of use + * position. + * + */ + public static final class UsePosList { + + private IntList list; + + /** + * Creates a use list. + * + * @param initialCapacity the initial capacity of the list in terms of entries + */ + public UsePosList(int initialCapacity) { + list = new IntList(initialCapacity * 2); + } + + private UsePosList(IntList list) { + this.list = list; + } + + /** + * Splits this list around a given position. All entries in this list with a use position + * greater or equal than {@code splitPos} are removed from this list and added to the + * returned list. 
+ * + * @param splitPos the position for the split + * @return a use position list containing all entries removed from this list that have a use + * position greater or equal than {@code splitPos} + */ + public UsePosList splitAt(int splitPos) { + int i = size() - 1; + int len = 0; + while (i >= 0 && usePos(i) < splitPos) { + --i; + len += 2; + } + int listSplitIndex = (i + 1) * 2; + IntList childList = list; + list = IntList.copy(this.list, listSplitIndex, len); + childList.setSize(listSplitIndex); + UsePosList child = new UsePosList(childList); + return child; + } + + /** + * Gets the use position at a specified index in this list. + * + * @param index the index of the entry for which the use position is returned + * @return the use position of entry {@code index} in this list + */ + public int usePos(int index) { + return list.get(index << 1); + } + + /** + * Gets the register priority for the use position at a specified index in this list. + * + * @param index the index of the entry for which the register priority is returned + * @return the register priority of entry {@code index} in this list + */ + public RegisterPriority registerPriority(int index) { + return RegisterPriority.VALUES[list.get((index << 1) + 1)]; + } + + public void add(int usePos, RegisterPriority registerPriority) { + assert list.size() == 0 || usePos(size() - 1) > usePos; + list.add(usePos); + list.add(registerPriority.ordinal()); + } + + public int size() { + return list.size() >> 1; + } + + public void removeLowestUsePos() { + list.setSize(list.size() - 2); + } + + public void setRegisterPriority(int index, RegisterPriority registerPriority) { + list.set(index * 2, registerPriority.ordinal()); + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder("["); + for (int i = size() - 1; i >= 0; --i) { + if (buf.length() != 1) { + buf.append(", "); + } + RegisterPriority prio = registerPriority(i); + buf.append(usePos(i)).append(" -> ").append(prio.ordinal()).append(':').append(prio); + } + return buf.append("]").toString(); + } + } + + /** + * The {@linkplain RegisterValue register} or {@linkplain Variable variable} for this interval + * prior to register allocation. + */ + public final AllocatableValue operand; + + /** + * The operand number for this interval's {@linkplain #operand operand}. + */ + public final int operandNumber; + + /** + * The {@linkplain RegisterValue register} or {@linkplain StackSlot spill slot} assigned to this + * interval. In case of a spilled interval which is re-materialized this is + * {@link Value#ILLEGAL}. + */ + private AllocatableValue location; + + /** + * The stack slot to which all splits of this interval are spilled if necessary. + */ + private StackSlotValue spillSlot; + + /** + * The kind of this interval. + */ + private LIRKind kind; + + /** + * The head of the list of ranges describing this interval. This list is sorted by + * {@linkplain LIRInstruction#id instruction ids}. + */ + private Range first; + + /** + * List of (use-positions, register-priorities) pairs, sorted by use-positions. + */ + private UsePosList usePosList; + + /** + * Iterator used to traverse the ranges of an interval. + */ + private Range current; + + /** + * Link to next interval in a sorted list of intervals that ends with {@link #EndMarker}. + */ + Interval next; + + /** + * The linear-scan state of this interval. + */ + State state; + + private int cachedTo; // cached value: to of last range (-1: not cached) + + /** + * The interval from which this one is derived. 
If this is a {@linkplain #isSplitParent() split + * parent}, it points to itself. + */ + private Interval splitParent; + + /** + * List of all intervals that are split off from this interval. This is only used if this is a + * {@linkplain #isSplitParent() split parent}. + */ + private List splitChildren = Collections.emptyList(); + + /** + * Current split child that has been active or inactive last (always stored in split parents). + */ + private Interval currentSplitChild; + + /** + * Specifies if move is inserted between currentSplitChild and this interval when interval gets + * active the first time. + */ + private boolean insertMoveWhenActivated; + + /** + * For spill move optimization. + */ + private SpillState spillState; + + /** + * Position where this interval is defined (if defined only once). + */ + private int spillDefinitionPos; + + /** + * This interval should be assigned the same location as the hint interval. + */ + private Interval locationHint; + + /** + * The value with which a spilled child interval can be re-materialized. Currently this must be + * a Constant. + */ + private JavaConstant materializedValue; + + /** + * The number of times {@link #addMaterializationValue(JavaConstant)} is called. + */ + private int numMaterializationValuesAdded; + + void assignLocation(AllocatableValue newLocation) { + if (isRegister(newLocation)) { + assert this.location == null : "cannot re-assign location for " + this; + if (newLocation.getLIRKind().equals(LIRKind.Illegal) && !kind.equals(LIRKind.Illegal)) { + this.location = asRegister(newLocation).asValue(kind); + return; + } + } else if (isIllegal(newLocation)) { + assert canMaterialize(); + } else { + assert this.location == null || isRegister(this.location) || (isVirtualStackSlot(this.location) && isStackSlot(newLocation)) : "cannot re-assign location for " + this; + assert isStackSlotValue(newLocation); + assert !newLocation.getLIRKind().equals(LIRKind.Illegal); + assert newLocation.getLIRKind().equals(this.kind); + } + this.location = newLocation; + } + + /** + * Gets the {@linkplain RegisterValue register} or {@linkplain StackSlot spill slot} assigned to + * this interval. + */ + public AllocatableValue location() { + return location; + } + + public LIRKind kind() { + assert !isRegister(operand) : "cannot access type for fixed interval"; + return kind; + } + + void setKind(LIRKind kind) { + assert isRegister(operand) || this.kind().equals(LIRKind.Illegal) || this.kind().equals(kind) : "overwriting existing type"; + this.kind = kind; + } + + public Range first() { + return first; + } + + int from() { + return first.from; + } + + int to() { + if (cachedTo == -1) { + cachedTo = calcTo(); + } + assert cachedTo == calcTo() : "invalid cached value"; + return cachedTo; + } + + int numUsePositions() { + return usePosList.size(); + } + + void setLocationHint(Interval interval) { + locationHint = interval; + } + + boolean isSplitParent() { + return splitParent == this; + } + + boolean isSplitChild() { + return splitParent != this; + } + + /** + * Gets the split parent for this interval. + */ + public Interval splitParent() { + assert splitParent.isSplitParent() : "not a split parent: " + this; + return splitParent; + } + + /** + * Gets the canonical spill slot for this interval. 
+ */ + StackSlotValue spillSlot() { + return splitParent().spillSlot; + } + + void setSpillSlot(StackSlotValue slot) { + assert splitParent().spillSlot == null || (isVirtualStackSlot(splitParent().spillSlot) && isStackSlot(slot)) : "connot overwrite existing spill slot"; + splitParent().spillSlot = slot; + } + + Interval currentSplitChild() { + return splitParent().currentSplitChild; + } + + void makeCurrentSplitChild() { + splitParent().currentSplitChild = this; + } + + boolean insertMoveWhenActivated() { + return insertMoveWhenActivated; + } + + void setInsertMoveWhenActivated(boolean b) { + insertMoveWhenActivated = b; + } + + // for spill optimization + public SpillState spillState() { + return splitParent().spillState; + } + + int spillDefinitionPos() { + return splitParent().spillDefinitionPos; + } + + void setSpillState(SpillState state) { + assert state.ordinal() >= spillState().ordinal() : "state cannot decrease"; + splitParent().spillState = state; + } + + void setSpillDefinitionPos(int pos) { + assert spillState() == SpillState.SpillInDominator || spillDefinitionPos() == -1 : "cannot set the position twice"; + splitParent().spillDefinitionPos = pos; + } + + // returns true if this interval has a shadow copy on the stack that is always correct + boolean alwaysInMemory() { + return (splitParent().spillState == SpillState.SpillInDominator || splitParent().spillState == SpillState.StoreAtDefinition || splitParent().spillState == SpillState.StartInMemory) && + !canMaterialize(); + } + + void removeFirstUsePos() { + usePosList.removeLowestUsePos(); + } + + // test intersection + boolean intersects(Interval i) { + return first.intersects(i.first); + } + + int intersectsAt(Interval i) { + return first.intersectsAt(i.first); + } + + // range iteration + void rewindRange() { + current = first; + } + + void nextRange() { + assert this != EndMarker : "not allowed on sentinel"; + current = current.next; + } + + int currentFrom() { + return current.from; + } + + int currentTo() { + return current.to; + } + + boolean currentAtEnd() { + return current == Range.EndMarker; + } + + boolean currentIntersects(Interval it) { + return current.intersects(it.current); + } + + int currentIntersectsAt(Interval it) { + return current.intersectsAt(it.current); + } + + /** + * Sentinel interval to denote the end of an interval list. + */ + static final Interval EndMarker = new Interval(Value.ILLEGAL, -1); + + Interval(AllocatableValue operand, int operandNumber) { + assert operand != null; + this.operand = operand; + this.operandNumber = operandNumber; + if (isRegister(operand)) { + location = operand; + } else { + assert isIllegal(operand) || isVariable(operand); + } + this.kind = LIRKind.Illegal; + this.first = Range.EndMarker; + this.usePosList = new UsePosList(4); + this.current = Range.EndMarker; + this.next = EndMarker; + this.cachedTo = -1; + this.spillState = SpillState.NoDefinitionFound; + this.spillDefinitionPos = -1; + splitParent = this; + currentSplitChild = this; + } + + /** + * Sets the value which is used for re-materialization. + */ + void addMaterializationValue(JavaConstant value) { + if (numMaterializationValuesAdded == 0) { + materializedValue = value; + } else { + // Interval is defined on multiple places -> no materialization is possible. + materializedValue = null; + } + numMaterializationValuesAdded++; + } + + /** + * Returns true if this interval can be re-materialized when spilled. This means that no + * spill-moves are needed. 
Instead of restore-moves the {@link #materializedValue} is restored. + */ + public boolean canMaterialize() { + return getMaterializedValue() != null; + } + + /** + * Returns a value which can be moved to a register instead of a restore-move from stack. + */ + public JavaConstant getMaterializedValue() { + return splitParent().materializedValue; + } + + int calcTo() { + assert first != Range.EndMarker : "interval has no range"; + + Range r = first; + while (r.next != Range.EndMarker) { + r = r.next; + } + return r.to; + } + + // consistency check of split-children + boolean checkSplitChildren() { + if (!splitChildren.isEmpty()) { + assert isSplitParent() : "only split parents can have children"; + + for (int i = 0; i < splitChildren.size(); i++) { + Interval i1 = splitChildren.get(i); + + assert i1.splitParent() == this : "not a split child of this interval"; + assert i1.kind().equals(kind()) : "must be equal for all split children"; + assert (i1.spillSlot() == null && spillSlot == null) || i1.spillSlot().equals(spillSlot()) : "must be equal for all split children"; + + for (int j = i + 1; j < splitChildren.size(); j++) { + Interval i2 = splitChildren.get(j); + + assert !i1.operand.equals(i2.operand) : "same register number"; + + if (i1.from() < i2.from()) { + assert i1.to() <= i2.from() && i1.to() < i2.to() : "intervals overlapping"; + } else { + assert i2.from() < i1.from() : "intervals start at same opId"; + assert i2.to() <= i1.from() && i2.to() < i1.to() : "intervals overlapping"; + } + } + } + } + + return true; + } + + public Interval locationHint(boolean searchSplitChild) { + if (!searchSplitChild) { + return locationHint; + } + + if (locationHint != null) { + assert locationHint.isSplitParent() : "ony split parents are valid hint registers"; + + if (locationHint.location != null && isRegister(locationHint.location)) { + return locationHint; + } else if (!locationHint.splitChildren.isEmpty()) { + // search the first split child that has a register assigned + int len = locationHint.splitChildren.size(); + for (int i = 0; i < len; i++) { + Interval interval = locationHint.splitChildren.get(i); + if (interval.location != null && isRegister(interval.location)) { + return interval; + } + } + } + } + + // no hint interval found that has a register assigned + return null; + } + + Interval getSplitChildAtOpId(int opId, LIRInstruction.OperandMode mode, LinearScan allocator) { + assert isSplitParent() : "can only be called for split parents"; + assert opId >= 0 : "invalid opId (method cannot be called for spill moves)"; + + if (splitChildren.isEmpty()) { + assert this.covers(opId, mode) : this + " does not cover " + opId; + return this; + } else { + Interval result = null; + int len = splitChildren.size(); + + // in outputMode, the end of the interval (opId == cur.to()) is not valid + int toOffset = (mode == LIRInstruction.OperandMode.DEF ? 
0 : 1); + + int i; + for (i = 0; i < len; i++) { + Interval cur = splitChildren.get(i); + if (cur.from() <= opId && opId < cur.to() + toOffset) { + if (i > 0) { + // exchange current split child to start of list (faster access for next + // call) + Util.atPutGrow(splitChildren, i, splitChildren.get(0), null); + Util.atPutGrow(splitChildren, 0, cur, null); + } + + // interval found + result = cur; + break; + } + } + + assert checkSplitChild(result, opId, allocator, toOffset, mode); + return result; + } + } + + private boolean checkSplitChild(Interval result, int opId, LinearScan allocator, int toOffset, LIRInstruction.OperandMode mode) { + if (result == null) { + // this is an error + StringBuilder msg = new StringBuilder(this.toString()).append(" has no child at ").append(opId); + if (!splitChildren.isEmpty()) { + Interval firstChild = splitChildren.get(0); + Interval lastChild = splitChildren.get(splitChildren.size() - 1); + msg.append(" (first = ").append(firstChild).append(", last = ").append(lastChild).append(")"); + } + throw new GraalInternalError("Linear Scan Error: %s", msg); + } + + if (!splitChildren.isEmpty()) { + for (Interval interval : splitChildren) { + if (interval != result && interval.from() <= opId && opId < interval.to() + toOffset) { + TTY.println(String.format("two valid result intervals found for opId %d: %d and %d", opId, result.operandNumber, interval.operandNumber)); + TTY.println(result.logString(allocator)); + TTY.println(interval.logString(allocator)); + throw new BailoutException("two valid result intervals found"); + } + } + } + assert result.covers(opId, mode) : "opId not covered by interval"; + return true; + } + + // returns the interval that covers the given opId or null if there is none + Interval getIntervalCoveringOpId(int opId) { + assert opId >= 0 : "invalid opId"; + assert opId < to() : "can only look into the past"; + + if (opId >= from()) { + return this; + } + + Interval parent = splitParent(); + Interval result = null; + + assert !parent.splitChildren.isEmpty() : "no split children available"; + int len = parent.splitChildren.size(); + + for (int i = len - 1; i >= 0; i--) { + Interval cur = parent.splitChildren.get(i); + if (cur.from() <= opId && opId < cur.to()) { + assert result == null : "covered by multiple split children " + result + " and " + cur; + result = cur; + } + } + + return result; + } + + // returns the last split child that ends before the given opId + Interval getSplitChildBeforeOpId(int opId) { + assert opId >= 0 : "invalid opId"; + + Interval parent = splitParent(); + Interval result = null; + + assert !parent.splitChildren.isEmpty() : "no split children available"; + int len = parent.splitChildren.size(); + + for (int i = len - 1; i >= 0; i--) { + Interval cur = parent.splitChildren.get(i); + if (cur.to() <= opId && (result == null || result.to() < cur.to())) { + result = cur; + } + } + + assert result != null : "no split child found"; + return result; + } + + // checks if opId is covered by any split child + boolean splitChildCovers(int opId, LIRInstruction.OperandMode mode) { + assert isSplitParent() : "can only be called for split parents"; + assert opId >= 0 : "invalid opId (method can not be called for spill moves)"; + + if (splitChildren.isEmpty()) { + // simple case if interval was not split + return covers(opId, mode); + + } else { + // extended case: check all split children + int len = splitChildren.size(); + for (int i = 0; i < len; i++) { + Interval cur = splitChildren.get(i); + if (cur.covers(opId, mode)) { + 
return true; + } + } + return false; + } + } + + private RegisterPriority adaptPriority(RegisterPriority priority) { + /* + * In case of re-materialized values we require that use-operands are registers, because we + * don't have the value in a stack location. (Note that ShouldHaveRegister means that the + * operand can also be a StackSlot). + */ + if (priority == RegisterPriority.ShouldHaveRegister && canMaterialize()) { + return RegisterPriority.MustHaveRegister; + } + return priority; + } + + // Note: use positions are sorted descending -> first use has highest index + int firstUsage(RegisterPriority minRegisterPriority) { + assert isVariable(operand) : "cannot access use positions for fixed intervals"; + + for (int i = usePosList.size() - 1; i >= 0; --i) { + RegisterPriority registerPriority = adaptPriority(usePosList.registerPriority(i)); + if (registerPriority.greaterEqual(minRegisterPriority)) { + return usePosList.usePos(i); + } + } + return Integer.MAX_VALUE; + } + + int nextUsage(RegisterPriority minRegisterPriority, int from) { + assert isVariable(operand) : "cannot access use positions for fixed intervals"; + + for (int i = usePosList.size() - 1; i >= 0; --i) { + int usePos = usePosList.usePos(i); + if (usePos >= from && adaptPriority(usePosList.registerPriority(i)).greaterEqual(minRegisterPriority)) { + return usePos; + } + } + return Integer.MAX_VALUE; + } + + int nextUsageExact(RegisterPriority exactRegisterPriority, int from) { + assert isVariable(operand) : "cannot access use positions for fixed intervals"; + + for (int i = usePosList.size() - 1; i >= 0; --i) { + int usePos = usePosList.usePos(i); + if (usePos >= from && adaptPriority(usePosList.registerPriority(i)) == exactRegisterPriority) { + return usePos; + } + } + return Integer.MAX_VALUE; + } + + int previousUsage(RegisterPriority minRegisterPriority, int from) { + assert isVariable(operand) : "cannot access use positions for fixed intervals"; + + int prev = 0; + for (int i = usePosList.size() - 1; i >= 0; --i) { + int usePos = usePosList.usePos(i); + if (usePos > from) { + return prev; + } + if (adaptPriority(usePosList.registerPriority(i)).greaterEqual(minRegisterPriority)) { + prev = usePos; + } + } + return prev; + } + + void addUsePos(int pos, RegisterPriority registerPriority) { + assert covers(pos, LIRInstruction.OperandMode.USE) : "use position not covered by live range"; + + // do not add use positions for precolored intervals because they are never used + if (registerPriority != RegisterPriority.None && isVariable(operand)) { + if (DetailedAsserts.getValue()) { + for (int i = 0; i < usePosList.size(); i++) { + assert pos <= usePosList.usePos(i) : "already added a use-position with lower position"; + if (i > 0) { + assert usePosList.usePos(i) < usePosList.usePos(i - 1) : "not sorted descending"; + } + } + } + + // Note: addUse is called in descending order, so list gets sorted + // automatically by just appending new use positions + int len = usePosList.size(); + if (len == 0 || usePosList.usePos(len - 1) > pos) { + usePosList.add(pos, registerPriority); + } else if (usePosList.registerPriority(len - 1).lessThan(registerPriority)) { + assert usePosList.usePos(len - 1) == pos : "list not sorted correctly"; + usePosList.setRegisterPriority(len - 1, registerPriority); + } + } + } + + void addRange(int from, int to) { + assert from < to : "invalid range"; + assert first() == Range.EndMarker || to < first().next.from : "not inserting at begin of interval"; + assert from <= first().to : "not inserting at begin of 
interval"; + + if (first.from <= to) { + assert first != Range.EndMarker; + // join intersecting ranges + first.from = Math.min(from, first().from); + first.to = Math.max(to, first().to); + } else { + // insert new range + first = new Range(from, to, first()); + } + } + + Interval newSplitChild(LinearScan allocator) { + // allocate new interval + Interval parent = splitParent(); + Interval result = allocator.createDerivedInterval(parent); + result.setKind(kind()); + + result.splitParent = parent; + result.setLocationHint(parent); + + // insert new interval in children-list of parent + if (parent.splitChildren.isEmpty()) { + assert isSplitParent() : "list must be initialized at first split"; + + // Create new non-shared list + parent.splitChildren = new ArrayList<>(4); + parent.splitChildren.add(this); + } + parent.splitChildren.add(result); + + return result; + } + + /** + * Splits this interval at a specified position and returns the remainder as a new child + * interval of this interval's {@linkplain #splitParent() parent} interval. + *

+ * When an interval is split, a bi-directional link is established between the original + * parent interval and the children intervals that are split off this interval. + * When a split child is split again, the new created interval is a direct child of the original + * parent. That is, there is no tree of split children stored, just a flat list. All split + * children are spilled to the same {@linkplain #spillSlot spill slot}. + * + * @param splitPos the position at which to split this interval + * @param allocator the register allocator context + * @return the child interval split off from this interval + */ + Interval split(int splitPos, LinearScan allocator) { + assert isVariable(operand) : "cannot split fixed intervals"; + + // allocate new interval + Interval result = newSplitChild(allocator); + + // split the ranges + Range prev = null; + Range cur = first; + while (cur != Range.EndMarker && cur.to <= splitPos) { + prev = cur; + cur = cur.next; + } + assert cur != Range.EndMarker : "split interval after end of last range"; + + if (cur.from < splitPos) { + result.first = new Range(splitPos, cur.to, cur.next); + cur.to = splitPos; + cur.next = Range.EndMarker; + + } else { + assert prev != null : "split before start of first range"; + result.first = cur; + prev.next = Range.EndMarker; + } + result.current = result.first; + cachedTo = -1; // clear cached value + + // split list of use positions + result.usePosList = usePosList.splitAt(splitPos); + + if (DetailedAsserts.getValue()) { + for (int i = 0; i < usePosList.size(); i++) { + assert usePosList.usePos(i) < splitPos; + } + for (int i = 0; i < result.usePosList.size(); i++) { + assert result.usePosList.usePos(i) >= splitPos; + } + } + return result; + } + + /** + * Splits this interval at a specified position and returns the head as a new interval (this + * interval is the tail). 
+ * + * Currently, only the first range can be split, and the new interval must not have split + * positions + */ + Interval splitFromStart(int splitPos, LinearScan allocator) { + assert isVariable(operand) : "cannot split fixed intervals"; + assert splitPos > from() && splitPos < to() : "can only split inside interval"; + assert splitPos > first.from && splitPos <= first.to : "can only split inside first range"; + assert firstUsage(RegisterPriority.None) > splitPos : "can not split when use positions are present"; + + // allocate new interval + Interval result = newSplitChild(allocator); + + // the new interval has only one range (checked by assertion above, + // so the splitting of the ranges is very simple + result.addRange(first.from, splitPos); + + if (splitPos == first.to) { + assert first.next != Range.EndMarker : "must not be at end"; + first = first.next; + } else { + first.from = splitPos; + } + + return result; + } + + // returns true if the opId is inside the interval + boolean covers(int opId, LIRInstruction.OperandMode mode) { + Range cur = first; + + while (cur != Range.EndMarker && cur.to < opId) { + cur = cur.next; + } + if (cur != Range.EndMarker) { + assert cur.to != cur.next.from : "ranges not separated"; + + if (mode == LIRInstruction.OperandMode.DEF) { + return cur.from <= opId && opId < cur.to; + } else { + return cur.from <= opId && opId <= cur.to; + } + } + return false; + } + + // returns true if the interval has any hole between holeFrom and holeTo + // (even if the hole has only the length 1) + boolean hasHoleBetween(int holeFrom, int holeTo) { + assert holeFrom < holeTo : "check"; + assert from() <= holeFrom && holeTo <= to() : "index out of interval"; + + Range cur = first; + while (cur != Range.EndMarker) { + assert cur.to < cur.next.from : "no space between ranges"; + + // hole-range starts before this range -> hole + if (holeFrom < cur.from) { + return true; + + // hole-range completely inside this range -> no hole + } else { + if (holeTo <= cur.to) { + return false; + + // overlapping of hole-range with this range -> hole + } else { + if (holeFrom <= cur.to) { + return true; + } + } + } + + cur = cur.next; + } + + return false; + } + + @Override + public String toString() { + String from = "?"; + String to = "?"; + if (first != null && first != Range.EndMarker) { + from = String.valueOf(from()); + // to() may cache a computed value, modifying the current object, which is a bad idea + // for a printing function. Compute it directly instead. + to = String.valueOf(calcTo()); + } + String locationString = this.location == null ? "" : "@" + this.location; + return operandNumber + ":" + operand + (isRegister(operand) ? "" : locationString) + "[" + from + "," + to + "]"; + } + + /** + * Gets the use position information for this interval. + */ + public UsePosList usePosList() { + return usePosList; + } + + /** + * Gets a single line string for logging the details of this interval to a log stream. 
+ * + * @param allocator the register allocator context + */ + public String logString(LinearScan allocator) { + StringBuilder buf = new StringBuilder(100); + buf.append(operandNumber).append(':').append(operand).append(' '); + if (!isRegister(operand)) { + if (location != null) { + buf.append("location{").append(location).append("} "); + } + } + + buf.append("hints{").append(splitParent.operandNumber); + Interval hint = locationHint(false); + if (hint != null && hint.operandNumber != splitParent.operandNumber) { + buf.append(", ").append(hint.operandNumber); + } + buf.append("} ranges{"); + + // print ranges + Range cur = first; + while (cur != Range.EndMarker) { + if (cur != first) { + buf.append(", "); + } + buf.append(cur); + cur = cur.next; + assert cur != null : "range list not closed with range sentinel"; + } + buf.append("} uses{"); + + // print use positions + int prev = 0; + for (int i = usePosList.size() - 1; i >= 0; --i) { + assert prev < usePosList.usePos(i) : "use positions not sorted"; + if (i != usePosList.size() - 1) { + buf.append(", "); + } + buf.append(usePosList.usePos(i)).append(':').append(usePosList.registerPriority(i)); + prev = usePosList.usePos(i); + } + buf.append("} spill-state{").append(spillState()).append("}"); + if (canMaterialize()) { + buf.append(" (remat:").append(getMaterializedValue().toString()).append(")"); + } + return buf.toString(); + } + + List getSplitChildren() { + return Collections.unmodifiableList(splitChildren); + } +} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/IntervalWalker.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/IntervalWalker.java Fri Feb 06 12:44:50 2015 +0100 @@ -0,0 +1,282 @@ +/* + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.lir.alloc.lsra; + +import com.oracle.graal.debug.*; +import com.oracle.graal.lir.alloc.lsra.Interval.*; + +/** + */ +public class IntervalWalker { + + protected final LinearScan allocator; + + /** + * Sorted list of intervals, not live before the current position. + */ + protected RegisterBindingLists unhandledLists; + + /** + * Sorted list of intervals, live at the current position. + */ + protected RegisterBindingLists activeLists; + + /** + * Sorted list of intervals in a life time hole at the current position. + */ + protected RegisterBindingLists inactiveLists; + + /** + * The current position (intercept point through the intervals). 
+ */ + protected int currentPosition; + + /** + * The binding of the current interval being processed. + */ + protected RegisterBinding currentBinding; + + /** + * Processes the {@code currentInterval} interval in an attempt to allocate a physical register + * to it and thus allow it to be moved to a list of {@linkplain #activeLists active} intervals. + * + * @return {@code true} if a register was allocated to the {@code currentInterval} interval + */ + protected boolean activateCurrent(@SuppressWarnings({"unused"}) Interval currentInterval) { + return true; + } + + void walkBefore(int lirOpId) { + walkTo(lirOpId - 1); + } + + void walk() { + walkTo(Integer.MAX_VALUE); + } + + /** + * Creates a new interval walker. + * + * @param allocator the register allocator context + * @param unhandledFixed the list of unhandled {@linkplain RegisterBinding#Fixed fixed} + * intervals + * @param unhandledAny the list of unhandled {@linkplain RegisterBinding#Any non-fixed} + * intervals + */ + IntervalWalker(LinearScan allocator, Interval unhandledFixed, Interval unhandledAny) { + this.allocator = allocator; + + unhandledLists = new RegisterBindingLists(unhandledFixed, unhandledAny, Interval.EndMarker); + activeLists = new RegisterBindingLists(Interval.EndMarker, Interval.EndMarker, Interval.EndMarker); + inactiveLists = new RegisterBindingLists(Interval.EndMarker, Interval.EndMarker, Interval.EndMarker); + currentPosition = -1; + } + + protected void removeFromList(Interval interval) { + if (interval.state == State.Active) { + activeLists.remove(RegisterBinding.Any, interval); + } else { + assert interval.state == State.Inactive : "invalid state"; + inactiveLists.remove(RegisterBinding.Any, interval); + } + } + + private void walkTo(State state, int from) { + assert state == State.Active || state == State.Inactive : "wrong state"; + for (RegisterBinding binding : RegisterBinding.VALUES) { + Interval prevprev = null; + Interval prev = (state == State.Active) ? 
activeLists.get(binding) : inactiveLists.get(binding); + Interval next = prev; + while (next.currentFrom() <= from) { + Interval cur = next; + next = cur.next; + + boolean rangeHasChanged = false; + while (cur.currentTo() <= from) { + cur.nextRange(); + rangeHasChanged = true; + } + + // also handle move from inactive list to active list + rangeHasChanged = rangeHasChanged || (state == State.Inactive && cur.currentFrom() <= from); + + if (rangeHasChanged) { + // remove cur from list + if (prevprev == null) { + if (state == State.Active) { + activeLists.set(binding, next); + } else { + inactiveLists.set(binding, next); + } + } else { + prevprev.next = next; + } + prev = next; + if (cur.currentAtEnd()) { + // move to handled state (not maintained as a list) + cur.state = State.Handled; + intervalMoved(cur, state, State.Handled); + } else if (cur.currentFrom() <= from) { + // sort into active list + activeLists.addToListSortedByCurrentFromPositions(binding, cur); + cur.state = State.Active; + if (prev == cur) { + assert state == State.Active : "check"; + prevprev = prev; + prev = cur.next; + } + intervalMoved(cur, state, State.Active); + } else { + // sort into inactive list + inactiveLists.addToListSortedByCurrentFromPositions(binding, cur); + cur.state = State.Inactive; + if (prev == cur) { + assert state == State.Inactive : "check"; + prevprev = prev; + prev = cur.next; + } + intervalMoved(cur, state, State.Inactive); + } + } else { + prevprev = prev; + prev = cur.next; + } + } + } + } + + /** + * Get the next interval from {@linkplain #unhandledLists} which starts before or at + * {@code toOpId}. The returned interval is removed and {@link #currentBinding} is set. + * + * @postcondition all intervals in {@linkplain #unhandledLists} start after {@code toOpId}. + * + * @return The next interval or null if there is no {@linkplain #unhandledLists unhandled} + * interval at position {@code toOpId}. + */ + private Interval nextInterval(int toOpId) { + RegisterBinding binding; + Interval any = unhandledLists.any; + Interval fixed = unhandledLists.fixed; + + if (any != Interval.EndMarker) { + // intervals may start at same position . prefer fixed interval + binding = fixed != Interval.EndMarker && fixed.from() <= any.from() ? RegisterBinding.Fixed : RegisterBinding.Any; + + assert binding == RegisterBinding.Fixed && fixed.from() <= any.from() || binding == RegisterBinding.Any && any.from() <= fixed.from() : "wrong interval!!!"; + assert any == Interval.EndMarker || fixed == Interval.EndMarker || any.from() != fixed.from() || binding == RegisterBinding.Fixed : "if fixed and any-Interval start at same position, fixed must be processed first"; + + } else if (fixed != Interval.EndMarker) { + binding = RegisterBinding.Fixed; + } else { + return null; + } + Interval currentInterval = unhandledLists.get(binding); + + if (toOpId < currentInterval.from()) { + return null; + } + + currentBinding = binding; + unhandledLists.set(binding, currentInterval.next); + currentInterval.next = Interval.EndMarker; + currentInterval.rewindRange(); + return currentInterval; + } + + /** + * Walk up to {@code toOpId}. + * + * @postcondition {@link #currentPosition} is set to {@code toOpId}, {@link #activeLists} and + * {@link #inactiveLists} are populated and {@link Interval#state}s are up to + * date. 
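
// Illustrative, standalone sketch (hypothetical names): the four walker states that the lists
// above distinguish, relative to a single position. "Active" intervals cover the position,
// "inactive" intervals have started but sit in a lifetime hole, "handled" intervals are finished,
// and "unhandled" intervals have not started yet.
final class WalkerStateSketch {
    enum State { UNHANDLED, ACTIVE, INACTIVE, HANDLED }

    /** ranges: sorted, disjoint [from, to) pairs belonging to one interval. */
    static State stateAt(int[][] ranges, int position) {
        int start = ranges[0][0];
        int end = ranges[ranges.length - 1][1];
        if (position < start) {
            return State.UNHANDLED;
        }
        if (position >= end) {
            return State.HANDLED;
        }
        for (int[] r : ranges) {
            if (position >= r[0] && position < r[1]) {
                return State.ACTIVE;     // covered by one of the ranges
            }
        }
        return State.INACTIVE;           // between two ranges: a lifetime hole
    }

    public static void main(String[] args) {
        int[][] ranges = {{0, 10}, {20, 30}};
        System.out.println(stateAt(ranges, 5));   // ACTIVE
        System.out.println(stateAt(ranges, 15));  // INACTIVE
        System.out.println(stateAt(ranges, 40));  // HANDLED
    }
}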
+ */ + protected void walkTo(int toOpId) { + assert currentPosition <= toOpId : "can not walk backwards"; + for (Interval currentInterval = nextInterval(toOpId); currentInterval != null; currentInterval = nextInterval(toOpId)) { + int opId = currentInterval.from(); + + // set currentPosition prior to call of walkTo + currentPosition = opId; + + // update unhandled stack intervals + updateUnhandledStackIntervals(opId); + + // call walkTo even if currentPosition == id + walkTo(State.Active, opId); + walkTo(State.Inactive, opId); + + try (Indent indent = Debug.logAndIndent("walk to op %d", opId)) { + currentInterval.state = State.Active; + if (activateCurrent(currentInterval)) { + activeLists.addToListSortedByCurrentFromPositions(currentBinding, currentInterval); + intervalMoved(currentInterval, State.Unhandled, State.Active); + } + } + } + // set currentPosition prior to call of walkTo + currentPosition = toOpId; + + if (currentPosition <= allocator.maxOpId()) { + // update unhandled stack intervals + updateUnhandledStackIntervals(toOpId); + + // call walkTo if still in range + walkTo(State.Active, toOpId); + walkTo(State.Inactive, toOpId); + } + } + + private void intervalMoved(Interval interval, State from, State to) { + // intervalMoved() is called whenever an interval moves from one interval list to another. + // In the implementation of this method it is prohibited to move the interval to any list. + if (Debug.isLogEnabled()) { + Debug.log("interval moved from %s to %s: %s", from, to, interval.logString(allocator)); + } + } + + /** + * Move {@linkplain #unhandledLists unhandled} stack intervals to + * {@linkplain IntervalWalker #activeLists active}. + * + * Note that for {@linkplain RegisterBinding#Fixed fixed} and {@linkplain RegisterBinding#Any + * any} intervals this is done in {@link #nextInterval(int)}. + */ + private void updateUnhandledStackIntervals(int opId) { + Interval currentInterval = unhandledLists.get(RegisterBinding.Stack); + while (currentInterval != Interval.EndMarker && currentInterval.from() <= opId) { + Interval next = currentInterval.next; + if (currentInterval.to() > opId) { + currentInterval.state = State.Active; + activeLists.addToListSortedByCurrentFromPositions(RegisterBinding.Stack, currentInterval); + intervalMoved(currentInterval, State.Unhandled, State.Active); + } else { + currentInterval.state = State.Handled; + intervalMoved(currentInterval, State.Unhandled, State.Handled); + } + currentInterval = next; + } + unhandledLists.set(RegisterBinding.Stack, currentInterval); + } + +} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScan.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScan.java Fri Feb 06 12:44:50 2015 +0100 @@ -0,0 +1,2179 @@ +/* + * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.lir.alloc.lsra; + +import static com.oracle.graal.api.code.CodeUtil.*; +import static com.oracle.graal.api.code.ValueUtil.*; +import static com.oracle.graal.compiler.common.GraalOptions.*; +import static com.oracle.graal.compiler.common.cfg.AbstractControlFlowGraph.*; +import static com.oracle.graal.lir.LIRValueUtil.*; +import static com.oracle.graal.lir.debug.LIRGenerationDebugContext.*; + +import java.util.*; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.compiler.common.*; +import com.oracle.graal.compiler.common.alloc.*; +import com.oracle.graal.compiler.common.cfg.*; +import com.oracle.graal.compiler.common.util.*; +import com.oracle.graal.debug.*; +import com.oracle.graal.debug.Debug.Scope; +import com.oracle.graal.lir.*; +import com.oracle.graal.lir.LIRInstruction.OperandFlag; +import com.oracle.graal.lir.LIRInstruction.OperandMode; +import com.oracle.graal.lir.StandardOp.MoveOp; +import com.oracle.graal.lir.alloc.lsra.Interval.RegisterBinding; +import com.oracle.graal.lir.alloc.lsra.Interval.RegisterPriority; +import com.oracle.graal.lir.alloc.lsra.Interval.SpillState; +import com.oracle.graal.lir.framemap.*; +import com.oracle.graal.lir.gen.*; +import com.oracle.graal.options.*; + +/** + * An implementation of the linear scan register allocator algorithm described in "Optimized Interval Splitting in a Linear Scan Register Allocator" by Christian Wimmer and + * Hanspeter Moessenboeck. + */ +public final class LinearScan { + + final TargetDescription target; + final LIRGenerationResult res; + final LIR ir; + final FrameMapBuilder frameMapBuilder; + final RegisterAttributes[] registerAttributes; + final Register[] registers; + + final boolean callKillsRegisters; + + public static final int DOMINATOR_SPILL_MOVE_ID = -2; + private static final int SPLIT_INTERVALS_CAPACITY_RIGHT_SHIFT = 1; + + public static class Options { + // @formatter:off + @Option(help = "Enable spill position optimization", type = OptionType.Debug) + public static final OptionValue LSRAOptimizeSpillPosition = new OptionValue<>(true); + // @formatter:on + } + + public static class BlockData { + + /** + * Bit map specifying which operands are live upon entry to this block. These are values + * used in this block or any of its successors where such value are not defined in this + * block. The bit index of an operand is its {@linkplain LinearScan#operandNumber(Value) + * operand number}. + */ + public BitSet liveIn; + + /** + * Bit map specifying which operands are live upon exit from this block. These are values + * used in a successor block that are either defined in this block or were live upon entry + * to this block. The bit index of an operand is its + * {@linkplain LinearScan#operandNumber(Value) operand number}. + */ + public BitSet liveOut; + + /** + * Bit map specifying which operands are used (before being defined) in this block. That is, + * these are the values that are live upon entry to the block. The bit index of an operand + * is its {@linkplain LinearScan#operandNumber(Value) operand number}. 
+ */ + public BitSet liveGen; + + /** + * Bit map specifying which operands are defined/overwritten in this block. The bit index of + * an operand is its {@linkplain LinearScan#operandNumber(Value) operand number}. + */ + public BitSet liveKill; + } + + public final BlockMap blockData; + + /** + * List of blocks in linear-scan order. This is only correct as long as the CFG does not change. + */ + final List> sortedBlocks; + + /** + * Map from {@linkplain #operandNumber(Value) operand numbers} to intervals. + */ + Interval[] intervals; + + /** + * The number of valid entries in {@link #intervals}. + */ + int intervalsSize; + + /** + * The index of the first entry in {@link #intervals} for a + * {@linkplain #createDerivedInterval(Interval) derived interval}. + */ + int firstDerivedIntervalIndex = -1; + + /** + * Intervals sorted by {@link Interval#from()}. + */ + Interval[] sortedIntervals; + + /** + * Map from an instruction {@linkplain LIRInstruction#id id} to the instruction. Entries should + * be retrieved with {@link #instructionForId(int)} as the id is not simply an index into this + * array. + */ + LIRInstruction[] opIdToInstructionMap; + + /** + * Map from an instruction {@linkplain LIRInstruction#id id} to the {@linkplain AbstractBlock + * block} containing the instruction. Entries should be retrieved with {@link #blockForId(int)} + * as the id is not simply an index into this array. + */ + AbstractBlock[] opIdToBlockMap; + + /** + * Bit set for each variable that is contained in each loop. + */ + BitMap2D intervalInLoop; + + /** + * The {@linkplain #operandNumber(Value) number} of the first variable operand allocated. + */ + private final int firstVariableNumber; + + public LinearScan(TargetDescription target, LIRGenerationResult res) { + this.target = target; + this.res = res; + this.ir = res.getLIR(); + this.frameMapBuilder = res.getFrameMapBuilder(); + this.sortedBlocks = ir.linearScanOrder(); + this.registerAttributes = frameMapBuilder.getRegisterConfig().getAttributesMap(); + + this.registers = target.arch.getRegisters(); + this.firstVariableNumber = registers.length; + this.blockData = new BlockMap<>(ir.getControlFlowGraph()); + + // If all allocatable registers are caller saved, then no registers are live across a call + // site. The register allocator can save time not trying to find a register at a call site. + this.callKillsRegisters = this.frameMapBuilder.getRegisterConfig().areAllAllocatableRegistersCallerSaved(); + } + + public int getFirstLirInstructionId(AbstractBlock block) { + int result = ir.getLIRforBlock(block).get(0).id(); + assert result >= 0; + return result; + } + + public int getLastLirInstructionId(AbstractBlock block) { + List instructions = ir.getLIRforBlock(block); + int result = instructions.get(instructions.size() - 1).id(); + assert result >= 0; + return result; + } + + public static boolean isVariableOrRegister(Value value) { + return isVariable(value) || isRegister(value); + } + + /** + * Converts an operand (variable or register) to an index in a flat address space covering all + * the {@linkplain Variable variables} and {@linkplain RegisterValue registers} being processed + * by this allocator. + */ + private int operandNumber(Value operand) { + if (isRegister(operand)) { + int number = asRegister(operand).number; + assert number < firstVariableNumber; + return number; + } + assert isVariable(operand) : operand; + return firstVariableNumber + ((Variable) operand).index; + } + + /** + * Gets the number of operands. 
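
// Illustrative, standalone sketch (hypothetical signature): the flat operand numbering described
// by operandNumber above. Register numbers occupy [0, registerCount) and variable indices are
// shifted up by registerCount, so a single array can be indexed by either kind of operand.
final class OperandNumberSketch {
    static int operandNumber(boolean isRegister, int registerOrVariableIndex, int registerCount) {
        return isRegister ? registerOrVariableIndex : registerCount + registerOrVariableIndex;
    }

    public static void main(String[] args) {
        int registerCount = 16;
        System.out.println(operandNumber(true, 3, registerCount));   // register 3  -> 3
        System.out.println(operandNumber(false, 0, registerCount));  // variable 0  -> 16
        System.out.println(operandNumber(false, 7, registerCount));  // variable 7  -> 23
    }
}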
This value will increase by 1 for new variable. + */ + private int operandSize() { + return firstVariableNumber + ir.numVariables(); + } + + /** + * Gets the highest operand number for a register operand. This value will never change. + */ + public int maxRegisterNumber() { + return firstVariableNumber - 1; + } + + static final IntervalPredicate IS_PRECOLORED_INTERVAL = new IntervalPredicate() { + + @Override + public boolean apply(Interval i) { + return isRegister(i.operand); + } + }; + + static final IntervalPredicate IS_VARIABLE_INTERVAL = new IntervalPredicate() { + + @Override + public boolean apply(Interval i) { + return isVariable(i.operand); + } + }; + + static final IntervalPredicate IS_STACK_INTERVAL = new IntervalPredicate() { + + @Override + public boolean apply(Interval i) { + return !isRegister(i.operand); + } + }; + + /** + * Gets an object describing the attributes of a given register according to this register + * configuration. + */ + RegisterAttributes attributes(Register reg) { + return registerAttributes[reg.number]; + } + + void assignSpillSlot(Interval interval) { + // assign the canonical spill slot of the parent (if a part of the interval + // is already spilled) or allocate a new spill slot + if (interval.canMaterialize()) { + interval.assignLocation(Value.ILLEGAL); + } else if (interval.spillSlot() != null) { + interval.assignLocation(interval.spillSlot()); + } else { + VirtualStackSlot slot = frameMapBuilder.allocateSpillSlot(interval.kind()); + interval.setSpillSlot(slot); + interval.assignLocation(slot); + } + } + + /** + * Creates a new interval. + * + * @param operand the operand for the interval + * @return the created interval + */ + Interval createInterval(AllocatableValue operand) { + assert isLegal(operand); + int operandNumber = operandNumber(operand); + Interval interval = new Interval(operand, operandNumber); + assert operandNumber < intervalsSize; + assert intervals[operandNumber] == null; + intervals[operandNumber] = interval; + return interval; + } + + /** + * Creates an interval as a result of splitting or spilling another interval. + * + * @param source an interval being split of spilled + * @return a new interval derived from {@code source} + */ + Interval createDerivedInterval(Interval source) { + if (firstDerivedIntervalIndex == -1) { + firstDerivedIntervalIndex = intervalsSize; + } + if (intervalsSize == intervals.length) { + intervals = Arrays.copyOf(intervals, intervals.length + (intervals.length >> SPLIT_INTERVALS_CAPACITY_RIGHT_SHIFT)); + } + intervalsSize++; + Variable variable = new Variable(source.kind(), ir.nextVariable()); + + Interval interval = createInterval(variable); + assert intervals[intervalsSize - 1] == interval; + return interval; + } + + // access to block list (sorted in linear scan order) + int blockCount() { + return sortedBlocks.size(); + } + + AbstractBlock blockAt(int index) { + return sortedBlocks.get(index); + } + + /** + * Gets the size of the {@link BlockData#liveIn} and {@link BlockData#liveOut} sets for a basic + * block. These sets do not include any operands allocated as a result of creating + * {@linkplain #createDerivedInterval(Interval) derived intervals}. + */ + int liveSetSize() { + return firstDerivedIntervalIndex == -1 ? 
operandSize() : firstDerivedIntervalIndex; + } + + int numLoops() { + return ir.getControlFlowGraph().getLoops().size(); + } + + boolean isIntervalInLoop(int interval, int loop) { + return intervalInLoop.at(interval, loop); + } + + Interval intervalFor(int operandNumber) { + return intervals[operandNumber]; + } + + Interval intervalFor(Value operand) { + int operandNumber = operandNumber(operand); + assert operandNumber < intervalsSize; + return intervals[operandNumber]; + } + + Interval getOrCreateInterval(AllocatableValue operand) { + Interval ret = intervalFor(operand); + if (ret == null) { + return createInterval(operand); + } else { + return ret; + } + } + + /** + * Gets the highest instruction id allocated by this object. + */ + int maxOpId() { + assert opIdToInstructionMap.length > 0 : "no operations"; + return (opIdToInstructionMap.length - 1) << 1; + } + + /** + * Converts an {@linkplain LIRInstruction#id instruction id} to an instruction index. All LIR + * instructions in a method have an index one greater than their linear-scan order predecesor + * with the first instruction having an index of 0. + */ + static int opIdToIndex(int opId) { + return opId >> 1; + } + + /** + * Retrieves the {@link LIRInstruction} based on its {@linkplain LIRInstruction#id id}. + * + * @param opId an instruction {@linkplain LIRInstruction#id id} + * @return the instruction whose {@linkplain LIRInstruction#id} {@code == id} + */ + LIRInstruction instructionForId(int opId) { + assert isEven(opId) : "opId not even"; + LIRInstruction instr = opIdToInstructionMap[opIdToIndex(opId)]; + assert instr.id() == opId; + return instr; + } + + /** + * Gets the block containing a given instruction. + * + * @param opId an instruction {@linkplain LIRInstruction#id id} + * @return the block containing the instruction denoted by {@code opId} + */ + AbstractBlock blockForId(int opId) { + assert opIdToBlockMap.length > 0 && opId >= 0 && opId <= maxOpId() + 1 : "opId out of range"; + return opIdToBlockMap[opIdToIndex(opId)]; + } + + boolean isBlockBegin(int opId) { + return opId == 0 || blockForId(opId) != blockForId(opId - 1); + } + + boolean coversBlockBegin(int opId1, int opId2) { + return blockForId(opId1) != blockForId(opId2); + } + + /** + * Determines if an {@link LIRInstruction} destroys all caller saved registers. + * + * @param opId an instruction {@linkplain LIRInstruction#id id} + * @return {@code true} if the instruction denoted by {@code id} destroys all caller saved + * registers. + */ + boolean hasCall(int opId) { + assert isEven(opId) : "opId not even"; + return instructionForId(opId).destroysCallerSavedRegisters(); + } + + /** + * Eliminates moves from register to stack if the stack slot is known to be correct. 
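
// Illustrative, standalone sketch: the instruction-id arithmetic used by opIdToIndex and maxOpId
// above. Instructions receive even ids 0, 2, 4, ..., leaving odd positions free for points between
// instructions, so the index into the id-to-instruction map is simply opId / 2.
final class OpIdSketch {
    static int indexToOpId(int index) {
        return index << 1;                               // instruction ids are spaced two apart
    }

    static int opIdToIndex(int opId) {
        assert (opId & 1) == 0 : "instruction ids are always even";
        return opId >> 1;
    }

    public static void main(String[] args) {
        System.out.println(indexToOpId(3));  // 6
        System.out.println(opIdToIndex(6));  // 3
    }
}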
+ */ + void changeSpillDefinitionPos(Interval interval, int defPos) { + assert interval.isSplitParent() : "can only be called for split parents"; + + switch (interval.spillState()) { + case NoDefinitionFound: + assert interval.spillDefinitionPos() == -1 : "must no be set before"; + interval.setSpillDefinitionPos(defPos); + interval.setSpillState(SpillState.NoSpillStore); + break; + + case NoSpillStore: + assert defPos <= interval.spillDefinitionPos() : "positions are processed in reverse order when intervals are created"; + if (defPos < interval.spillDefinitionPos() - 2) { + // second definition found, so no spill optimization possible for this interval + interval.setSpillState(SpillState.NoOptimization); + } else { + // two consecutive definitions (because of two-operand LIR form) + assert blockForId(defPos) == blockForId(interval.spillDefinitionPos()) : "block must be equal"; + } + break; + + case NoOptimization: + // nothing to do + break; + + default: + throw new BailoutException("other states not allowed at this time"); + } + } + + // called during register allocation + void changeSpillState(Interval interval, int spillPos) { + switch (interval.spillState()) { + case NoSpillStore: { + int defLoopDepth = blockForId(interval.spillDefinitionPos()).getLoopDepth(); + int spillLoopDepth = blockForId(spillPos).getLoopDepth(); + + if (defLoopDepth < spillLoopDepth) { + // the loop depth of the spilling position is higher then the loop depth + // at the definition of the interval . move write to memory out of loop. + if (Options.LSRAOptimizeSpillPosition.getValue()) { + // find best spill position in dominator the tree + interval.setSpillState(SpillState.SpillInDominator); + } else { + // store at definition of the interval + interval.setSpillState(SpillState.StoreAtDefinition); + } + } else { + // the interval is currently spilled only once, so for now there is no + // reason to store the interval at the definition + interval.setSpillState(SpillState.OneSpillStore); + } + break; + } + + case OneSpillStore: { + if (Options.LSRAOptimizeSpillPosition.getValue()) { + // the interval is spilled more then once + interval.setSpillState(SpillState.SpillInDominator); + } else { + // it is better to store it to + // memory at the definition + interval.setSpillState(SpillState.StoreAtDefinition); + } + break; + } + + case SpillInDominator: + case StoreAtDefinition: + case StartInMemory: + case NoOptimization: + case NoDefinitionFound: + // nothing to do + break; + + default: + throw new BailoutException("other states not allowed at this time"); + } + } + + abstract static class IntervalPredicate { + + abstract boolean apply(Interval i); + } + + private static final IntervalPredicate mustStoreAtDefinition = new IntervalPredicate() { + + @Override + public boolean apply(Interval i) { + return i.isSplitParent() && i.spillState() == SpillState.StoreAtDefinition; + } + }; + + // called once before assignment of register numbers + void eliminateSpillMoves() { + try (Indent indent = Debug.logAndIndent("Eliminating unnecessary spill moves")) { + + // collect all intervals that must be stored after their definition. 
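
// Illustrative, standalone and deliberately simplified sketch of the spill-state bookkeeping that
// changeSpillDefinitionPos and changeSpillState above maintain. The real transitions in this patch
// also depend on loop depths and the LSRAOptimizeSpillPosition option; this only shows the shape.
final class SpillStateSketch {
    enum SpillState { NO_DEFINITION_FOUND, NO_SPILL_STORE, ONE_SPILL_STORE, STORE_AT_DEFINITION, NO_OPTIMIZATION }

    /** Called when a definition of the interval is seen. */
    static SpillState onDefinition(SpillState current, boolean secondDefinition) {
        switch (current) {
            case NO_DEFINITION_FOUND:
                return SpillState.NO_SPILL_STORE;           // first definition found
            case NO_SPILL_STORE:
                return secondDefinition ? SpillState.NO_OPTIMIZATION : current;
            default:
                return current;                             // remaining states stay as they are
        }
    }

    /** Called when the interval is actually spilled during allocation. */
    static SpillState onSpill(SpillState current) {
        switch (current) {
            case NO_SPILL_STORE:
                return SpillState.ONE_SPILL_STORE;          // first spill store seen
            case ONE_SPILL_STORE:
                return SpillState.STORE_AT_DEFINITION;      // spilled again: store once at the definition
            default:
                return current;
        }
    }

    public static void main(String[] args) {
        SpillState s = onDefinition(SpillState.NO_DEFINITION_FOUND, false); // NO_SPILL_STORE
        System.out.println(onSpill(onSpill(s)));                            // STORE_AT_DEFINITION
    }
}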
+ // the list is sorted by Interval.spillDefinitionPos + Interval interval; + interval = createUnhandledLists(mustStoreAtDefinition, null).first; + if (DetailedAsserts.getValue()) { + checkIntervals(interval); + } + + LIRInsertionBuffer insertionBuffer = new LIRInsertionBuffer(); + for (AbstractBlock block : sortedBlocks) { + List instructions = ir.getLIRforBlock(block); + int numInst = instructions.size(); + + // iterate all instructions of the block. skip the first + // because it is always a label + for (int j = 1; j < numInst; j++) { + LIRInstruction op = instructions.get(j); + int opId = op.id(); + + if (opId == -1) { + MoveOp move = (MoveOp) op; + // remove move from register to stack if the stack slot is guaranteed to be + // correct. + // only moves that have been inserted by LinearScan can be removed. + assert isVariable(move.getResult()) : "LinearScan inserts only moves to variables"; + + Interval curInterval = intervalFor(move.getResult()); + + if (!isRegister(curInterval.location()) && curInterval.alwaysInMemory()) { + // move target is a stack slot that is always correct, so eliminate + // instruction + if (Debug.isLogEnabled()) { + Debug.log("eliminating move from interval %d to %d", operandNumber(move.getInput()), operandNumber(move.getResult())); + } + // null-instructions are deleted by assignRegNum + instructions.set(j, null); + } + + } else { + // insert move from register to stack just after + // the beginning of the interval + assert interval == Interval.EndMarker || interval.spillDefinitionPos() >= opId : "invalid order"; + assert interval == Interval.EndMarker || (interval.isSplitParent() && interval.spillState() == SpillState.StoreAtDefinition) : "invalid interval"; + + while (interval != Interval.EndMarker && interval.spillDefinitionPos() == opId) { + if (!interval.canMaterialize()) { + if (!insertionBuffer.initialized()) { + // prepare insertion buffer (appended when all instructions in + // the block are processed) + insertionBuffer.init(instructions); + } + + AllocatableValue fromLocation = interval.location(); + AllocatableValue toLocation = canonicalSpillOpr(interval); + + assert isRegister(fromLocation) : "from operand must be a register but is: " + fromLocation + " toLocation=" + toLocation + " spillState=" + interval.spillState(); + assert isStackSlotValue(toLocation) : "to operand must be a stack slot"; + + insertionBuffer.append(j + 1, ir.getSpillMoveFactory().createMove(toLocation, fromLocation)); + + Debug.log("inserting move after definition of interval %d to stack slot %s at opId %d", interval.operandNumber, interval.spillSlot(), opId); + } + interval = interval.next; + } + } + } // end of instruction iteration + + if (insertionBuffer.initialized()) { + insertionBuffer.finish(); + } + } // end of block iteration + + assert interval == Interval.EndMarker : "missed an interval"; + } + } + + private static void checkIntervals(Interval interval) { + Interval prev = null; + Interval temp = interval; + while (temp != Interval.EndMarker) { + assert temp.spillDefinitionPos() > 0 : "invalid spill definition pos"; + if (prev != null) { + assert temp.from() >= prev.from() : "intervals not sorted"; + assert temp.spillDefinitionPos() >= prev.spillDefinitionPos() : "when intervals are sorted by from : then they must also be sorted by spillDefinitionPos"; + } + + assert temp.spillSlot() != null || temp.canMaterialize() : "interval has no spill slot assigned"; + assert temp.spillDefinitionPos() >= temp.from() : "invalid order"; + assert temp.spillDefinitionPos() <= 
temp.from() + 2 : "only intervals defined once at their start-pos can be optimized"; + + Debug.log("interval %d (from %d to %d) must be stored at %d", temp.operandNumber, temp.from(), temp.to(), temp.spillDefinitionPos()); + + prev = temp; + temp = temp.next; + } + } + + /** + * Numbers all instructions in all blocks. The numbering follows the + * {@linkplain ComputeBlockOrder linear scan order}. + */ + void numberInstructions() { + + intervalsSize = operandSize(); + intervals = new Interval[intervalsSize + (intervalsSize >> SPLIT_INTERVALS_CAPACITY_RIGHT_SHIFT)]; + + ValueConsumer setVariableConsumer = (value, mode, flags) -> { + if (isVariable(value)) { + getOrCreateInterval(asVariable(value)); + } + }; + + // Assign IDs to LIR nodes and build a mapping, lirOps, from ID to LIRInstruction node. + int numInstructions = 0; + for (AbstractBlock block : sortedBlocks) { + numInstructions += ir.getLIRforBlock(block).size(); + } + + // initialize with correct length + opIdToInstructionMap = new LIRInstruction[numInstructions]; + opIdToBlockMap = new AbstractBlock[numInstructions]; + + int opId = 0; + int index = 0; + for (AbstractBlock block : sortedBlocks) { + blockData.put(block, new BlockData()); + + List instructions = ir.getLIRforBlock(block); + + int numInst = instructions.size(); + for (int j = 0; j < numInst; j++) { + LIRInstruction op = instructions.get(j); + op.setId(opId); + + opIdToInstructionMap[index] = op; + opIdToBlockMap[index] = block; + assert instructionForId(opId) == op : "must match"; + + op.visitEachTemp(setVariableConsumer); + op.visitEachOutput(setVariableConsumer); + + index++; + opId += 2; // numbering of lirOps by two + } + } + assert index == numInstructions : "must match"; + assert (index << 1) == opId : "must match: " + (index << 1); + } + + /** + * Computes local live sets (i.e. {@link BlockData#liveGen} and {@link BlockData#liveKill}) + * separately for each block. 
+ */ + void computeLocalLiveSets() { + int liveSize = liveSetSize(); + + intervalInLoop = new BitMap2D(operandSize(), numLoops()); + + // iterate all blocks + for (final AbstractBlock block : sortedBlocks) { + try (Indent indent = Debug.logAndIndent("compute local live sets for block %d", block.getId())) { + + final BitSet liveGen = new BitSet(liveSize); + final BitSet liveKill = new BitSet(liveSize); + + List instructions = ir.getLIRforBlock(block); + int numInst = instructions.size(); + + ValueConsumer useConsumer = (operand, mode, flags) -> { + if (isVariable(operand)) { + int operandNum = operandNumber(operand); + if (!liveKill.get(operandNum)) { + liveGen.set(operandNum); + Debug.log("liveGen for operand %d", operandNum); + } + if (block.getLoop() != null) { + intervalInLoop.setBit(operandNum, block.getLoop().getIndex()); + } + } + + if (DetailedAsserts.getValue()) { + verifyInput(block, liveKill, operand); + } + }; + ValueConsumer stateConsumer = (operand, mode, flags) -> { + if (isVariableOrRegister(operand)) { + int operandNum = operandNumber(operand); + if (!liveKill.get(operandNum)) { + liveGen.set(operandNum); + Debug.log("liveGen in state for operand %d", operandNum); + } + } + }; + ValueConsumer defConsumer = (operand, mode, flags) -> { + if (isVariable(operand)) { + int varNum = operandNumber(operand); + liveKill.set(varNum); + Debug.log("liveKill for operand %d", varNum); + if (block.getLoop() != null) { + intervalInLoop.setBit(varNum, block.getLoop().getIndex()); + } + } + + if (DetailedAsserts.getValue()) { + // fixed intervals are never live at block boundaries, so + // they need not be processed in live sets + // process them only in debug mode so that this can be checked + verifyTemp(liveKill, operand); + } + }; + + // iterate all instructions of the block + for (int j = 0; j < numInst; j++) { + final LIRInstruction op = instructions.get(j); + + try (Indent indent2 = Debug.logAndIndent("handle op %d", op.id())) { + op.visitEachInput(useConsumer); + op.visitEachAlive(useConsumer); + // Add uses of live locals from interpreter's point of view for proper debug + // information generation + op.visitEachState(stateConsumer); + op.visitEachTemp(defConsumer); + op.visitEachOutput(defConsumer); + } + } // end of instruction iteration + + BlockData blockSets = blockData.get(block); + blockSets.liveGen = liveGen; + blockSets.liveKill = liveKill; + blockSets.liveIn = new BitSet(liveSize); + blockSets.liveOut = new BitSet(liveSize); + + Debug.log("liveGen B%d %s", block.getId(), blockSets.liveGen); + Debug.log("liveKill B%d %s", block.getId(), blockSets.liveKill); + + } + } // end of block iteration + } + + private void verifyTemp(BitSet liveKill, Value operand) { + // fixed intervals are never live at block boundaries, so + // they need not be processed in live sets + // process them only in debug mode so that this can be checked + if (isRegister(operand)) { + if (isProcessed(operand)) { + liveKill.set(operandNumber(operand)); + } + } + } + + private void verifyInput(AbstractBlock block, BitSet liveKill, Value operand) { + // fixed intervals are never live at block boundaries, so + // they need not be processed in live sets. + // this is checked by these assertions to be sure about it. + // the entry block may have incoming + // values in registers, which is ok. 
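
// Illustrative, standalone sketch (hypothetical block/instruction model): the per-block
// liveGen/liveKill sets computed above and the backward fixpoint that computeGlobalLiveSets
// performs next: liveOut(B) = union of liveIn(succ), liveIn(B) = liveGen(B) | (liveOut(B) & ~liveKill(B)).
import java.util.BitSet;
import java.util.List;

final class LivenessSketch {
    record Instr(int[] uses, int[] defs) { }
    record Block(List<Instr> instrs, int[] successors) { }

    static void solve(Block[] blocks, BitSet[] liveIn, BitSet[] liveOut, int numOperands) {
        BitSet[] gen = new BitSet[blocks.length];
        BitSet[] kill = new BitSet[blocks.length];
        for (int b = 0; b < blocks.length; b++) {
            gen[b] = new BitSet(numOperands);
            kill[b] = new BitSet(numOperands);
            liveIn[b] = new BitSet(numOperands);
            liveOut[b] = new BitSet(numOperands);
            for (Instr i : blocks[b].instrs()) {
                for (int u : i.uses()) {
                    if (!kill[b].get(u)) {
                        gen[b].set(u);           // used before any definition in this block
                    }
                }
                for (int d : i.defs()) {
                    kill[b].set(d);              // defined (overwritten) in this block
                }
            }
        }
        boolean changed;
        do {                                      // iterate backwards until a fixpoint is reached
            changed = false;
            for (int b = blocks.length - 1; b >= 0; b--) {
                BitSet out = new BitSet(numOperands);
                for (int s : blocks[b].successors()) {
                    out.or(liveIn[s]);
                }
                BitSet in = (BitSet) out.clone();
                in.andNot(kill[b]);
                in.or(gen[b]);
                if (!out.equals(liveOut[b]) || !in.equals(liveIn[b])) {
                    liveOut[b] = out;
                    liveIn[b] = in;
                    changed = true;
                }
            }
        } while (changed);
    }
}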
+ if (isRegister(operand) && block != ir.getControlFlowGraph().getStartBlock()) { + if (isProcessed(operand)) { + assert liveKill.get(operandNumber(operand)) : "using fixed register that is not defined in this block"; + } + } + } + + /** + * Performs a backward dataflow analysis to compute global live sets (i.e. + * {@link BlockData#liveIn} and {@link BlockData#liveOut}) for each block. + */ + void computeGlobalLiveSets() { + try (Indent indent = Debug.logAndIndent("compute global live sets")) { + int numBlocks = blockCount(); + boolean changeOccurred; + boolean changeOccurredInBlock; + int iterationCount = 0; + BitSet liveOut = new BitSet(liveSetSize()); // scratch set for calculations + + // Perform a backward dataflow analysis to compute liveOut and liveIn for each block. + // The loop is executed until a fixpoint is reached (no changes in an iteration) + do { + changeOccurred = false; + + try (Indent indent2 = Debug.logAndIndent("new iteration %d", iterationCount)) { + + // iterate all blocks in reverse order + for (int i = numBlocks - 1; i >= 0; i--) { + AbstractBlock block = blockAt(i); + BlockData blockSets = blockData.get(block); + + changeOccurredInBlock = false; + + // liveOut(block) is the union of liveIn(sux), for successors sux of block + int n = block.getSuccessorCount(); + if (n > 0) { + liveOut.clear(); + // block has successors + if (n > 0) { + for (AbstractBlock successor : block.getSuccessors()) { + liveOut.or(blockData.get(successor).liveIn); + } + } + + if (!blockSets.liveOut.equals(liveOut)) { + // A change occurred. Swap the old and new live out + // sets to avoid copying. + BitSet temp = blockSets.liveOut; + blockSets.liveOut = liveOut; + liveOut = temp; + + changeOccurred = true; + changeOccurredInBlock = true; + } + } + + if (iterationCount == 0 || changeOccurredInBlock) { + // liveIn(block) is the union of liveGen(block) with (liveOut(block) & + // !liveKill(block)) + // note: liveIn has to be computed only in first iteration + // or if liveOut has changed! + BitSet liveIn = blockSets.liveIn; + liveIn.clear(); + liveIn.or(blockSets.liveOut); + liveIn.andNot(blockSets.liveKill); + liveIn.or(blockSets.liveGen); + + Debug.log("block %d: livein = %s, liveout = %s", block.getId(), liveIn, blockSets.liveOut); + } + } + iterationCount++; + + if (changeOccurred && iterationCount > 50) { + throw new BailoutException("too many iterations in computeGlobalLiveSets"); + } + } + } while (changeOccurred); + + if (DetailedAsserts.getValue()) { + verifyLiveness(); + } + + // check that the liveIn set of the first block is empty + AbstractBlock startBlock = ir.getControlFlowGraph().getStartBlock(); + if (blockData.get(startBlock).liveIn.cardinality() != 0) { + if (DetailedAsserts.getValue()) { + reportFailure(numBlocks); + } + // bailout if this occurs in product mode. 
+ throw new GraalInternalError("liveIn set of first block must be empty: " + blockData.get(startBlock).liveIn); + } + } + } + + private void reportFailure(int numBlocks) { + try (Scope s = Debug.forceLog()) { + try (Indent indent = Debug.logAndIndent("report failure")) { + + BitSet startBlockLiveIn = blockData.get(ir.getControlFlowGraph().getStartBlock()).liveIn; + try (Indent indent2 = Debug.logAndIndent("Error: liveIn set of first block must be empty (when this fails, variables are used before they are defined):")) { + for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) { + Interval interval = intervalFor(operandNum); + if (interval != null) { + Value operand = interval.operand; + Debug.log("var %d; operand=%s; node=%s", operandNum, operand, getSourceForOperandFromDebugContext(operand)); + } else { + Debug.log("var %d; missing operand", operandNum); + } + } + } + + // print some additional information to simplify debugging + for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) { + Interval interval = intervalFor(operandNum); + Value operand = null; + Object valueForOperandFromDebugContext = null; + if (interval != null) { + operand = interval.operand; + valueForOperandFromDebugContext = getSourceForOperandFromDebugContext(operand); + } + try (Indent indent2 = Debug.logAndIndent("---- Detailed information for var %d; operand=%s; node=%s ----", operandNum, operand, valueForOperandFromDebugContext)) { + + Deque> definedIn = new ArrayDeque<>(); + HashSet> usedIn = new HashSet<>(); + for (AbstractBlock block : sortedBlocks) { + if (blockData.get(block).liveGen.get(operandNum)) { + usedIn.add(block); + try (Indent indent3 = Debug.logAndIndent("used in block B%d", block.getId())) { + for (LIRInstruction ins : ir.getLIRforBlock(block)) { + try (Indent indent4 = Debug.logAndIndent("%d: %s", ins.id(), ins)) { + ins.forEachState((liveStateOperand, mode, flags) -> { + Debug.log("operand=%s", liveStateOperand); + return liveStateOperand; + }); + } + } + } + } + if (blockData.get(block).liveKill.get(operandNum)) { + definedIn.add(block); + try (Indent indent3 = Debug.logAndIndent("defined in block B%d", block.getId())) { + for (LIRInstruction ins : ir.getLIRforBlock(block)) { + Debug.log("%d: %s", ins.id(), ins); + } + } + } + } + + int[] hitCount = new int[numBlocks]; + + while (!definedIn.isEmpty()) { + AbstractBlock block = definedIn.removeFirst(); + usedIn.remove(block); + for (AbstractBlock successor : block.getSuccessors()) { + if (successor.isLoopHeader()) { + if (!block.isLoopEnd()) { + definedIn.add(successor); + } + } else { + if (++hitCount[successor.getId()] == successor.getPredecessorCount()) { + definedIn.add(successor); + } + } + } + } + try (Indent indent3 = Debug.logAndIndent("**** offending usages are in: ")) { + for (AbstractBlock block : usedIn) { + Debug.log("B%d", block.getId()); + } + } + } + } + } + } catch (Throwable e) { + throw Debug.handle(e); + } + } + + private void verifyLiveness() { + // check that fixed intervals are not live at block boundaries + // (live set must be empty at fixed intervals) + for (AbstractBlock block : sortedBlocks) { + for (int j = 0; j <= maxRegisterNumber(); j++) { + assert !blockData.get(block).liveIn.get(j) : "liveIn set of fixed register must be empty"; + assert !blockData.get(block).liveOut.get(j) : "liveOut set of fixed register must be empty"; + assert !blockData.get(block).liveGen.get(j) : 
"liveGen set of fixed register must be empty"; + } + } + } + + void addUse(AllocatableValue operand, int from, int to, RegisterPriority registerPriority, LIRKind kind) { + if (!isProcessed(operand)) { + return; + } + + Interval interval = getOrCreateInterval(operand); + if (!kind.equals(LIRKind.Illegal)) { + interval.setKind(kind); + } + + interval.addRange(from, to); + + // Register use position at even instruction id. + interval.addUsePos(to & ~1, registerPriority); + + Debug.log("add use: %s, from %d to %d (%s)", interval, from, to, registerPriority.name()); + } + + void addTemp(AllocatableValue operand, int tempPos, RegisterPriority registerPriority, LIRKind kind) { + if (!isProcessed(operand)) { + return; + } + + Interval interval = getOrCreateInterval(operand); + if (!kind.equals(LIRKind.Illegal)) { + interval.setKind(kind); + } + + interval.addRange(tempPos, tempPos + 1); + interval.addUsePos(tempPos, registerPriority); + interval.addMaterializationValue(null); + + Debug.log("add temp: %s tempPos %d (%s)", interval, tempPos, RegisterPriority.MustHaveRegister.name()); + } + + boolean isProcessed(Value operand) { + return !isRegister(operand) || attributes(asRegister(operand)).isAllocatable(); + } + + void addDef(AllocatableValue operand, LIRInstruction op, RegisterPriority registerPriority, LIRKind kind) { + if (!isProcessed(operand)) { + return; + } + int defPos = op.id(); + + Interval interval = getOrCreateInterval(operand); + if (!kind.equals(LIRKind.Illegal)) { + interval.setKind(kind); + } + + Range r = interval.first(); + if (r.from <= defPos) { + // Update the starting point (when a range is first created for a use, its + // start is the beginning of the current block until a def is encountered.) + r.from = defPos; + interval.addUsePos(defPos, registerPriority); + + } else { + // Dead value - make vacuous interval + // also add register priority for dead intervals + interval.addRange(defPos, defPos + 1); + interval.addUsePos(defPos, registerPriority); + Debug.log("Warning: def of operand %s at %d occurs without use", operand, defPos); + } + + changeSpillDefinitionPos(interval, defPos); + if (registerPriority == RegisterPriority.None && interval.spillState().ordinal() <= SpillState.StartInMemory.ordinal()) { + // detection of method-parameters and roundfp-results + interval.setSpillState(SpillState.StartInMemory); + } + interval.addMaterializationValue(LinearScan.getMaterializedValue(op, operand, interval)); + + Debug.log("add def: %s defPos %d (%s)", interval, defPos, registerPriority.name()); + } + + /** + * Determines the register priority for an instruction's output/result operand. + */ + static RegisterPriority registerPriorityOfOutputOperand(LIRInstruction op) { + if (op instanceof MoveOp) { + MoveOp move = (MoveOp) op; + if (optimizeMethodArgument(move.getInput())) { + return RegisterPriority.None; + } + } + + // all other operands require a register + return RegisterPriority.MustHaveRegister; + } + + /** + * Determines the priority which with an instruction's input operand will be allocated a + * register. 
+ */ + static RegisterPriority registerPriorityOfInputOperand(EnumSet flags) { + if (flags.contains(OperandFlag.STACK)) { + return RegisterPriority.ShouldHaveRegister; + } + // all other operands require a register + return RegisterPriority.MustHaveRegister; + } + + private static boolean optimizeMethodArgument(Value value) { + /* + * Object method arguments that are passed on the stack are currently not optimized because + * this requires that the runtime visits method arguments during stack walking. + */ + return isStackSlot(value) && asStackSlot(value).isInCallerFrame() && value.getKind() != Kind.Object; + } + + /** + * Optimizes moves related to incoming stack based arguments. The interval for the destination + * of such moves is assigned the stack slot (which is in the caller's frame) as its spill slot. + */ + void handleMethodArguments(LIRInstruction op) { + if (op instanceof MoveOp) { + MoveOp move = (MoveOp) op; + if (optimizeMethodArgument(move.getInput())) { + StackSlot slot = asStackSlot(move.getInput()); + if (DetailedAsserts.getValue()) { + assert op.id() > 0 : "invalid id"; + assert blockForId(op.id()).getPredecessorCount() == 0 : "move from stack must be in first block"; + assert isVariable(move.getResult()) : "result of move must be a variable"; + + Debug.log("found move from stack slot %s to %s", slot, move.getResult()); + } + + Interval interval = intervalFor(move.getResult()); + interval.setSpillSlot(slot); + interval.assignLocation(slot); + } + } + } + + void addRegisterHint(final LIRInstruction op, final Value targetValue, OperandMode mode, EnumSet flags, final boolean hintAtDef) { + if (flags.contains(OperandFlag.HINT) && isVariableOrRegister(targetValue)) { + + op.forEachRegisterHint(targetValue, mode, (registerHint, valueMode, valueFlags) -> { + if (isVariableOrRegister(registerHint)) { + Interval from = getOrCreateInterval((AllocatableValue) registerHint); + Interval to = getOrCreateInterval((AllocatableValue) targetValue); + + /* hints always point from def to use */ + if (hintAtDef) { + to.setLocationHint(from); + } else { + from.setLocationHint(to); + } + Debug.log("operation at opId %d: added hint from interval %d to %d", op.id(), from.operandNumber, to.operandNumber); + + return registerHint; + } + return null; + }); + } + } + + void buildIntervals() { + + try (Indent indent = Debug.logAndIndent("build intervals")) { + InstructionValueConsumer outputConsumer = (op, operand, mode, flags) -> { + if (isVariableOrRegister(operand)) { + addDef((AllocatableValue) operand, op, registerPriorityOfOutputOperand(op), operand.getLIRKind()); + addRegisterHint(op, operand, mode, flags, true); + } + }; + + InstructionValueConsumer tempConsumer = (op, operand, mode, flags) -> { + if (isVariableOrRegister(operand)) { + addTemp((AllocatableValue) operand, op.id(), RegisterPriority.MustHaveRegister, operand.getLIRKind()); + addRegisterHint(op, operand, mode, flags, false); + } + }; + + InstructionValueConsumer aliveConsumer = (op, operand, mode, flags) -> { + if (isVariableOrRegister(operand)) { + RegisterPriority p = registerPriorityOfInputOperand(flags); + int opId = op.id(); + int blockFrom = getFirstLirInstructionId((blockForId(opId))); + addUse((AllocatableValue) operand, blockFrom, opId + 1, p, operand.getLIRKind()); + addRegisterHint(op, operand, mode, flags, false); + } + }; + + InstructionValueConsumer inputConsumer = (op, operand, mode, flags) -> { + if (isVariableOrRegister(operand)) { + int opId = op.id(); + int blockFrom = 
getFirstLirInstructionId((blockForId(opId))); + RegisterPriority p = registerPriorityOfInputOperand(flags); + addUse((AllocatableValue) operand, blockFrom, opId, p, operand.getLIRKind()); + addRegisterHint(op, operand, mode, flags, false); + } + }; + + InstructionValueConsumer stateProc = (op, operand, mode, flags) -> { + if (isVariableOrRegister(operand)) { + int opId = op.id(); + int blockFrom = getFirstLirInstructionId((blockForId(opId))); + addUse((AllocatableValue) operand, blockFrom, opId + 1, RegisterPriority.None, operand.getLIRKind()); + } + }; + + // create a list with all caller-save registers (cpu, fpu, xmm) + Register[] callerSaveRegs = frameMapBuilder.getRegisterConfig().getCallerSaveRegisters(); + + // iterate all blocks in reverse order + for (int i = blockCount() - 1; i >= 0; i--) { + + AbstractBlock block = blockAt(i); + try (Indent indent2 = Debug.logAndIndent("handle block %d", block.getId())) { + + List instructions = ir.getLIRforBlock(block); + final int blockFrom = getFirstLirInstructionId(block); + int blockTo = getLastLirInstructionId(block); + + assert blockFrom == instructions.get(0).id(); + assert blockTo == instructions.get(instructions.size() - 1).id(); + + // Update intervals for operands live at the end of this block; + BitSet live = blockData.get(block).liveOut; + for (int operandNum = live.nextSetBit(0); operandNum >= 0; operandNum = live.nextSetBit(operandNum + 1)) { + assert live.get(operandNum) : "should not stop here otherwise"; + AllocatableValue operand = intervalFor(operandNum).operand; + Debug.log("live in %d: %s", operandNum, operand); + + addUse(operand, blockFrom, blockTo + 2, RegisterPriority.None, LIRKind.Illegal); + + // add special use positions for loop-end blocks when the + // interval is used anywhere inside this loop. It's possible + // that the block was part of a non-natural loop, so it might + // have an invalid loop index. + if (block.isLoopEnd() && block.getLoop() != null && isIntervalInLoop(operandNum, block.getLoop().getIndex())) { + intervalFor(operandNum).addUsePos(blockTo + 1, RegisterPriority.LiveAtLoopEnd); + } + } + + // iterate all instructions of the block in reverse order. + // definitions of intervals are processed before uses + for (int j = instructions.size() - 1; j >= 0; j--) { + final LIRInstruction op = instructions.get(j); + final int opId = op.id(); + + try (Indent indent3 = Debug.logAndIndent("handle inst %d: %s", opId, op)) { + + // add a temp range for each register if operation destroys + // caller-save registers + if (op.destroysCallerSavedRegisters()) { + for (Register r : callerSaveRegs) { + if (attributes(r).isAllocatable()) { + addTemp(r.asValue(), opId, RegisterPriority.None, LIRKind.Illegal); + } + } + Debug.log("operation destroys all caller-save registers"); + } + + op.visitEachOutput(outputConsumer); + op.visitEachTemp(tempConsumer); + op.visitEachAlive(aliveConsumer); + op.visitEachInput(inputConsumer); + + // Add uses of live locals from interpreter's point of view for proper + // debug information generation + // Treat these operands as temp values (if the live range is extended + // to a call site, the value would be in a register at + // the call otherwise) + op.visitEachState(stateProc); + + // special steps for some instructions (especially moves) + handleMethodArguments(op); + + } + + } // end of instruction iteration + } + } // end of block iteration + + // add the range [0, 1] to all fixed intervals. 
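
// Illustrative, standalone and simplified sketch (hypothetical names): why addDef above trims the
// range start while buildIntervals walks each block backwards. A use conservatively extends the
// live range back to the block start; when the definition is finally reached, the first range's
// start is tightened to the definition position (or a vacuous range is created for a dead value).
import java.util.ArrayDeque;
import java.util.Deque;

final class BackwardRangeSketch {
    /** Half-open [from, to) range; the first element of the deque is the most recently added one. */
    static final class Range {
        int from;
        int to;
        Range(int from, int to) { this.from = from; this.to = to; }
        @Override public String toString() { return "[" + from + "," + to + ")"; }
    }

    static void addUse(Deque<Range> ranges, int blockFrom, int usePos) {
        // a use keeps the value live from (at least) the block start up to the use
        ranges.addFirst(new Range(blockFrom, usePos));
    }

    static void addDef(Deque<Range> ranges, int defPos) {
        Range first = ranges.peekFirst();
        if (first != null && first.from <= defPos) {
            first.from = defPos;                            // tighten the pessimistic block-start estimate
        } else {
            ranges.addFirst(new Range(defPos, defPos + 1)); // dead value: vacuous one-operation range
        }
    }

    public static void main(String[] args) {
        Deque<Range> ranges = new ArrayDeque<>();
        addUse(ranges, 0, 12);   // block starts at 0, value used at 12 -> [0,12)
        addDef(ranges, 4);       // definition at 4 -> range tightened to [4,12)
        System.out.println(ranges);
    }
}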
+ // the register allocator need not handle unhandled fixed intervals + for (Interval interval : intervals) { + if (interval != null && isRegister(interval.operand)) { + interval.addRange(0, 1); + } + } + } + } + + // * Phase 5: actual register allocation + + private static boolean isSorted(Interval[] intervals) { + int from = -1; + for (Interval interval : intervals) { + assert interval != null; + assert from <= interval.from(); + from = interval.from(); + } + return true; + } + + static Interval addToList(Interval first, Interval prev, Interval interval) { + Interval newFirst = first; + if (prev != null) { + prev.next = interval; + } else { + newFirst = interval; + } + return newFirst; + } + + Interval.Pair createUnhandledLists(IntervalPredicate isList1, IntervalPredicate isList2) { + assert isSorted(sortedIntervals) : "interval list is not sorted"; + + Interval list1 = Interval.EndMarker; + Interval list2 = Interval.EndMarker; + + Interval list1Prev = null; + Interval list2Prev = null; + Interval v; + + int n = sortedIntervals.length; + for (int i = 0; i < n; i++) { + v = sortedIntervals[i]; + if (v == null) { + continue; + } + + if (isList1.apply(v)) { + list1 = addToList(list1, list1Prev, v); + list1Prev = v; + } else if (isList2 == null || isList2.apply(v)) { + list2 = addToList(list2, list2Prev, v); + list2Prev = v; + } + } + + if (list1Prev != null) { + list1Prev.next = Interval.EndMarker; + } + if (list2Prev != null) { + list2Prev.next = Interval.EndMarker; + } + + assert list1Prev == null || list1Prev.next == Interval.EndMarker : "linear list ends not with sentinel"; + assert list2Prev == null || list2Prev.next == Interval.EndMarker : "linear list ends not with sentinel"; + + return new Interval.Pair(list1, list2); + } + + void sortIntervalsBeforeAllocation() { + int sortedLen = 0; + for (Interval interval : intervals) { + if (interval != null) { + sortedLen++; + } + } + + Interval[] sortedList = new Interval[sortedLen]; + int sortedIdx = 0; + int sortedFromMax = -1; + + // special sorting algorithm: the original interval-list is almost sorted, + // only some intervals are swapped. 
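
// Illustrative, standalone sketch: the "almost sorted" trick used by sortIntervalsBeforeAllocation.
// Elements are appended while the order holds; only the few out-of-place elements fall back to a
// short insertion-sort shift, which is cheap when the input is nearly sorted already.
final class NearlySortedSketch {
    static int[] sortNearlySorted(int[] keys) {
        int[] sorted = new int[keys.length];
        int n = 0;
        int maxSoFar = Integer.MIN_VALUE;
        for (int key : keys) {
            if (maxSoFar <= key) {
                sorted[n++] = key;                 // common case: already in order
                maxSoFar = key;
            } else {
                int j = n - 1;
                while (j >= 0 && key < sorted[j]) {
                    sorted[j + 1] = sorted[j];     // shift the few larger elements up
                    j--;
                }
                sorted[j + 1] = key;
                n++;
            }
        }
        return sorted;
    }

    public static void main(String[] args) {
        System.out.println(java.util.Arrays.toString(sortNearlySorted(new int[]{1, 3, 2, 4, 8, 6, 9})));
    }
}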
So this is much faster than a complete QuickSort + for (Interval interval : intervals) { + if (interval != null) { + int from = interval.from(); + + if (sortedFromMax <= from) { + sortedList[sortedIdx++] = interval; + sortedFromMax = interval.from(); + } else { + // the assumption that the intervals are already sorted failed, + // so this interval must be sorted in manually + int j; + for (j = sortedIdx - 1; j >= 0 && from < sortedList[j].from(); j--) { + sortedList[j + 1] = sortedList[j]; + } + sortedList[j + 1] = interval; + sortedIdx++; + } + } + } + sortedIntervals = sortedList; + } + + void sortIntervalsAfterAllocation() { + if (firstDerivedIntervalIndex == -1) { + // no intervals have been added during allocation, so sorted list is already up to date + return; + } + + Interval[] oldList = sortedIntervals; + Interval[] newList = Arrays.copyOfRange(intervals, firstDerivedIntervalIndex, intervalsSize); + int oldLen = oldList.length; + int newLen = newList.length; + + // conventional sort-algorithm for new intervals + Arrays.sort(newList, (Interval a, Interval b) -> a.from() - b.from()); + + // merge old and new list (both already sorted) into one combined list + Interval[] combinedList = new Interval[oldLen + newLen]; + int oldIdx = 0; + int newIdx = 0; + + while (oldIdx + newIdx < combinedList.length) { + if (newIdx >= newLen || (oldIdx < oldLen && oldList[oldIdx].from() <= newList[newIdx].from())) { + combinedList[oldIdx + newIdx] = oldList[oldIdx]; + oldIdx++; + } else { + combinedList[oldIdx + newIdx] = newList[newIdx]; + newIdx++; + } + } + + sortedIntervals = combinedList; + } + + public void allocateRegisters() { + try (Indent indent = Debug.logAndIndent("allocate registers")) { + Interval precoloredIntervals; + Interval notPrecoloredIntervals; + + Interval.Pair result = createUnhandledLists(IS_PRECOLORED_INTERVAL, IS_VARIABLE_INTERVAL); + precoloredIntervals = result.first; + notPrecoloredIntervals = result.second; + + // allocate cpu registers + LinearScanWalker lsw; + if (OptimizingLinearScanWalker.Options.LSRAOptimization.getValue()) { + lsw = new OptimizingLinearScanWalker(this, precoloredIntervals, notPrecoloredIntervals); + } else { + lsw = new LinearScanWalker(this, precoloredIntervals, notPrecoloredIntervals); + } + lsw.walk(); + lsw.finishAllocation(); + } + } + + // * Phase 6: resolve data flow + // (insert moves at edges between blocks if intervals have been split) + + // wrapper for Interval.splitChildAtOpId that performs a bailout in product mode + // instead of returning null + Interval splitChildAtOpId(Interval interval, int opId, LIRInstruction.OperandMode mode) { + Interval result = interval.getSplitChildAtOpId(opId, mode, this); + + if (result != null) { + Debug.log("Split child at pos %d of interval %s is %s", opId, interval, result); + return result; + } + + throw new BailoutException("LinearScan: interval is null"); + } + + Interval intervalAtBlockBegin(AbstractBlock block, int operandNumber) { + return splitChildAtOpId(intervalFor(operandNumber), getFirstLirInstructionId(block), LIRInstruction.OperandMode.DEF); + } + + Interval intervalAtBlockEnd(AbstractBlock block, int operandNumber) { + return splitChildAtOpId(intervalFor(operandNumber), getLastLirInstructionId(block) + 1, LIRInstruction.OperandMode.DEF); + } + + void resolveCollectMappings(AbstractBlock fromBlock, AbstractBlock toBlock, MoveResolver moveResolver) { + assert moveResolver.checkEmpty(); + + int numOperands = operandSize(); + BitSet liveAtEdge = blockData.get(toBlock).liveIn; + + // visit 
all variables for which the liveAtEdge bit is set + for (int operandNum = liveAtEdge.nextSetBit(0); operandNum >= 0; operandNum = liveAtEdge.nextSetBit(operandNum + 1)) { + assert operandNum < numOperands : "live information set for not exisiting interval"; + assert blockData.get(fromBlock).liveOut.get(operandNum) && blockData.get(toBlock).liveIn.get(operandNum) : "interval not live at this edge"; + + Interval fromInterval = intervalAtBlockEnd(fromBlock, operandNum); + Interval toInterval = intervalAtBlockBegin(toBlock, operandNum); + + if (fromInterval != toInterval && !fromInterval.location().equals(toInterval.location())) { + // need to insert move instruction + moveResolver.addMapping(fromInterval, toInterval); + } + } + } + + void resolveFindInsertPos(AbstractBlock fromBlock, AbstractBlock toBlock, MoveResolver moveResolver) { + if (fromBlock.getSuccessorCount() <= 1) { + Debug.log("inserting moves at end of fromBlock B%d", fromBlock.getId()); + + List instructions = ir.getLIRforBlock(fromBlock); + LIRInstruction instr = instructions.get(instructions.size() - 1); + if (instr instanceof StandardOp.JumpOp) { + // insert moves before branch + moveResolver.setInsertPosition(instructions, instructions.size() - 1); + } else { + moveResolver.setInsertPosition(instructions, instructions.size()); + } + + } else { + Debug.log("inserting moves at beginning of toBlock B%d", toBlock.getId()); + + if (DetailedAsserts.getValue()) { + assert ir.getLIRforBlock(fromBlock).get(0) instanceof StandardOp.LabelOp : "block does not start with a label"; + + // because the number of predecessor edges matches the number of + // successor edges, blocks which are reached by switch statements + // may have be more than one predecessor but it will be guaranteed + // that all predecessors will be the same. + for (AbstractBlock predecessor : toBlock.getPredecessors()) { + assert fromBlock == predecessor : "all critical edges must be broken"; + } + } + + moveResolver.setInsertPosition(ir.getLIRforBlock(toBlock), 1); + } + } + + /** + * Inserts necessary moves (spilling or reloading) at edges between blocks for intervals that + * have been split. 
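The resolveCollectMappings/resolveFindInsertPos pair above captures the core of this phase: for every variable that is live across a control-flow edge, the location assigned at the end of the predecessor is compared with the location expected at the beginning of the successor, and a move is recorded whenever the two differ. A minimal, self-contained sketch of that comparison, using illustrative names rather than the Interval/MoveResolver API of this patch:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Illustrative sketch only: record a move for every value whose location differs
    // between the end of a predecessor block and the start of a successor block.
    final class EdgeResolutionSketch {

        static List<String> collectMappings(List<String> liveAcrossEdge, Map<String, String> locationAtPredEnd, Map<String, String> locationAtSuccBegin) {
            List<String> moves = new ArrayList<>();
            for (String value : liveAcrossEdge) {
                String from = locationAtPredEnd.get(value);
                String to = locationAtSuccBegin.get(value);
                if (from != null && to != null && !from.equals(to)) {
                    moves.add(value + ": " + from + " -> " + to); // this edge needs a resolving move
                }
            }
            return moves;
        }

        public static void main(String[] args) {
            Map<String, String> atPredEnd = new HashMap<>();
            atPredEnd.put("v1", "rax");
            atPredEnd.put("v2", "stack[8]");
            Map<String, String> atSuccBegin = new HashMap<>();
            atSuccBegin.put("v1", "rax");
            atSuccBegin.put("v2", "rbx");
            // only v2 changed location across the edge, so only v2 gets a move
            System.out.println(collectMappings(Arrays.asList("v1", "v2"), atPredEnd, atSuccBegin));
        }
    }

The real code additionally has to pick a legal insertion position, which is what resolveFindInsertPos above decides: before the terminating jump of the predecessor when it has a single successor, otherwise right after the label of the successor.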
+ */ + void resolveDataFlow() { + try (Indent indent = Debug.logAndIndent("resolve data flow")) { + + int numBlocks = blockCount(); + MoveResolver moveResolver = new MoveResolver(this); + BitSet blockCompleted = new BitSet(numBlocks); + BitSet alreadyResolved = new BitSet(numBlocks); + + for (AbstractBlock block : sortedBlocks) { + + // check if block has only one predecessor and only one successor + if (block.getPredecessorCount() == 1 && block.getSuccessorCount() == 1) { + List instructions = ir.getLIRforBlock(block); + assert instructions.get(0) instanceof StandardOp.LabelOp : "block must start with label"; + assert instructions.get(instructions.size() - 1) instanceof StandardOp.JumpOp : "block with successor must end with unconditional jump"; + + // check if block is empty (only label and branch) + if (instructions.size() == 2) { + AbstractBlock pred = block.getPredecessors().iterator().next(); + AbstractBlock sux = block.getSuccessors().iterator().next(); + + // prevent optimization of two consecutive blocks + if (!blockCompleted.get(pred.getLinearScanNumber()) && !blockCompleted.get(sux.getLinearScanNumber())) { + Debug.log(" optimizing empty block B%d (pred: B%d, sux: B%d)", block.getId(), pred.getId(), sux.getId()); + + blockCompleted.set(block.getLinearScanNumber()); + + // directly resolve between pred and sux (without looking + // at the empty block + // between) + resolveCollectMappings(pred, sux, moveResolver); + if (moveResolver.hasMappings()) { + moveResolver.setInsertPosition(instructions, 1); + moveResolver.resolveAndAppendMoves(); + } + } + } + } + } + + for (AbstractBlock fromBlock : sortedBlocks) { + if (!blockCompleted.get(fromBlock.getLinearScanNumber())) { + alreadyResolved.clear(); + alreadyResolved.or(blockCompleted); + + for (AbstractBlock toBlock : fromBlock.getSuccessors()) { + + // check for duplicate edges between the same blocks (can happen with switch + // blocks) + if (!alreadyResolved.get(toBlock.getLinearScanNumber())) { + Debug.log("processing edge between B%d and B%d", fromBlock.getId(), toBlock.getId()); + + alreadyResolved.set(toBlock.getLinearScanNumber()); + + // collect all intervals that have been split between + // fromBlock and toBlock + resolveCollectMappings(fromBlock, toBlock, moveResolver); + if (moveResolver.hasMappings()) { + resolveFindInsertPos(fromBlock, toBlock, moveResolver); + moveResolver.resolveAndAppendMoves(); + } + } + } + } + } + } + } + + // * Phase 7: assign register numbers back to LIR + // (includes computation of debug information and oop maps) + + static StackSlotValue canonicalSpillOpr(Interval interval) { + assert interval.spillSlot() != null : "canonical spill slot not set"; + return interval.spillSlot(); + } + + /** + * Assigns the allocated location for an LIR instruction operand back into the instruction. + * + * @param operand an LIR instruction operand + * @param opId the id of the LIR instruction using {@code operand} + * @param mode the usage mode for {@code operand} by the instruction + * @return the location assigned for the operand + */ + private Value colorLirOperand(Variable operand, int opId, OperandMode mode) { + Interval interval = intervalFor(operand); + assert interval != null : "interval must exist"; + + if (opId != -1) { + if (DetailedAsserts.getValue()) { + AbstractBlock block = blockForId(opId); + if (block.getSuccessorCount() <= 1 && opId == getLastLirInstructionId(block)) { + // check if spill moves could have been appended at the end of this block, but + // before the branch instruction. 
So the split child information for this branch + // would + // be incorrect. + LIRInstruction instr = ir.getLIRforBlock(block).get(ir.getLIRforBlock(block).size() - 1); + if (instr instanceof StandardOp.JumpOp) { + if (blockData.get(block).liveOut.get(operandNumber(operand))) { + assert false : "can't get split child for the last branch of a block because the information would be incorrect (moves are inserted before the branch in resolveDataFlow)"; + } + } + } + } + + // operands are not changed when an interval is split during allocation, + // so search the right interval here + interval = splitChildAtOpId(interval, opId, mode); + } + + if (isIllegal(interval.location()) && interval.canMaterialize()) { + assert mode != OperandMode.DEF; + return interval.getMaterializedValue(); + } + return interval.location(); + } + + private boolean isMaterialized(AllocatableValue operand, int opId, OperandMode mode) { + Interval interval = intervalFor(operand); + assert interval != null : "interval must exist"; + + if (opId != -1) { + // operands are not changed when an interval is split during allocation, + // so search the right interval here + interval = splitChildAtOpId(interval, opId, mode); + } + + return isIllegal(interval.location()) && interval.canMaterialize(); + } + + protected IntervalWalker initIntervalWalker(IntervalPredicate predicate) { + // setup lists of potential oops for walking + Interval oopIntervals; + Interval nonOopIntervals; + + oopIntervals = createUnhandledLists(predicate, null).first; + + // intervals that have no oops inside need not to be processed. + // to ensure a walking until the last instruction id, add a dummy interval + // with a high operation id + nonOopIntervals = new Interval(Value.ILLEGAL, -1); + nonOopIntervals.addRange(Integer.MAX_VALUE - 2, Integer.MAX_VALUE - 1); + + return new IntervalWalker(this, oopIntervals, nonOopIntervals); + } + + private boolean isCallerSave(Value operand) { + return attributes(asRegister(operand)).isCallerSave(); + } + + /** + * @param op + * @param operand + * @param valueMode + * @param flags + * @see InstructionValueProcedure#doValue(LIRInstruction, Value, OperandMode, EnumSet) + */ + private Value debugInfoProcedure(LIRInstruction op, Value operand, OperandMode valueMode, EnumSet flags) { + if (isVirtualStackSlot(operand)) { + return operand; + } + int tempOpId = op.id(); + OperandMode mode = OperandMode.USE; + AbstractBlock block = blockForId(tempOpId); + if (block.getSuccessorCount() == 1 && tempOpId == getLastLirInstructionId(block)) { + // generating debug information for the last instruction of a block. + // if this instruction is a branch, spill moves are inserted before this branch + // and so the wrong operand would be returned (spill moves at block boundaries + // are not + // considered in the live ranges of intervals) + // Solution: use the first opId of the branch target block instead. 
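colorLirOperand above, and the debug-info path that follows, hinge on one lookup: operands are not renamed when an interval is split during allocation, so at each use the allocator picks the split child whose range covers the instruction id and takes its assigned location. A rough sketch of that lookup, with assumed names and a plain half-open range per child rather than the patch's Interval API:

    import java.util.Arrays;
    import java.util.List;

    // Illustrative sketch: find the split child whose [from, to) range covers opId.
    final class SplitChildLookupSketch {

        static final class Child {
            final int from;
            final int to; // exclusive
            final String location;

            Child(int from, int to, String location) {
                this.from = from;
                this.to = to;
                this.location = location;
            }
        }

        /** Returns the location valid at opId, or null if no child covers it. */
        static String locationAt(List<Child> splitChildren, int opId) {
            for (Child c : splitChildren) {
                if (c.from <= opId && opId < c.to) {
                    return c.location;
                }
            }
            return null; // the real allocator bails out here instead of returning null
        }

        public static void main(String[] args) {
            List<Child> children = Arrays.asList(
                            new Child(0, 20, "rax"),        // first part lives in a register
                            new Child(20, 60, "stack[16]")); // spilled remainder
            System.out.println(locationAt(children, 10)); // rax
            System.out.println(locationAt(children, 40)); // stack[16]
        }
    }

The real lookup, Interval.getSplitChildAtOpId, also takes the operand mode into account, and the splitChildAtOpId wrapper above turns a failed lookup into a bailout rather than returning null.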
+ final LIRInstruction instr = ir.getLIRforBlock(block).get(ir.getLIRforBlock(block).size() - 1); + if (instr instanceof StandardOp.JumpOp) { + if (blockData.get(block).liveOut.get(operandNumber(operand))) { + tempOpId = getFirstLirInstructionId(block.getSuccessors().iterator().next()); + mode = OperandMode.DEF; + } + } + } + + // Get current location of operand + // The operand must be live because debug information is considered when building + // the intervals + // if the interval is not live, colorLirOperand will cause an assert on failure + Value result = colorLirOperand((Variable) operand, tempOpId, mode); + assert !hasCall(tempOpId) || isStackSlotValue(result) || isConstant(result) || !isCallerSave(result) : "cannot have caller-save register operands at calls"; + return result; + } + + private void computeDebugInfo(final LIRInstruction op, LIRFrameState info) { + info.forEachState(op, this::debugInfoProcedure); + } + + private void assignLocations(List instructions) { + int numInst = instructions.size(); + boolean hasDead = false; + + InstructionValueProcedure assignProc = (op, operand, mode, flags) -> isVariable(operand) ? colorLirOperand((Variable) operand, op.id(), mode) : operand; + for (int j = 0; j < numInst; j++) { + final LIRInstruction op = instructions.get(j); + if (op == null) { // this can happen when spill-moves are removed in eliminateSpillMoves + hasDead = true; + continue; + } + + // remove useless moves + MoveOp move = null; + if (op instanceof MoveOp) { + move = (MoveOp) op; + AllocatableValue result = move.getResult(); + if (isVariable(result) && isMaterialized(result, op.id(), OperandMode.DEF)) { + /* + * This happens if a materializable interval is originally not spilled but then + * kicked out in LinearScanWalker.splitForSpilling(). When kicking out such an + * interval this move operation was already generated. + */ + instructions.set(j, null); + hasDead = true; + continue; + } + } + + op.forEachInput(assignProc); + op.forEachAlive(assignProc); + op.forEachTemp(assignProc); + op.forEachOutput(assignProc); + + // compute reference map and debug information + op.forEachState((inst, state) -> computeDebugInfo(inst, state)); + + // remove useless moves + if (move != null) { + if (move.getInput().equals(move.getResult())) { + instructions.set(j, null); + hasDead = true; + } + } + } + + if (hasDead) { + // Remove null values from the list. + instructions.removeAll(Collections.singleton(null)); + } + } + + private void assignLocations() { + try (Indent indent = Debug.logAndIndent("assign locations")) { + for (AbstractBlock block : sortedBlocks) { + try (Indent indent2 = Debug.logAndIndent("assign locations in block B%d", block.getId())) { + assignLocations(ir.getLIRforBlock(block)); + } + } + } + } + + public static void allocate(TargetDescription target, LIRGenerationResult res) { + new LinearScan(target, res).allocate(); + } + + private void allocate() { + + /* + * This is the point to enable debug logging for the whole register allocation. 
+ */ + try (Indent indent = Debug.logAndIndent("LinearScan allocate")) { + + try (Scope s = Debug.scope("LifetimeAnalysis")) { + numberInstructions(); + printLir("Before register allocation", true); + computeLocalLiveSets(); + computeGlobalLiveSets(); + buildIntervals(); + sortIntervalsBeforeAllocation(); + } catch (Throwable e) { + throw Debug.handle(e); + } + + try (Scope s = Debug.scope("RegisterAllocation")) { + printIntervals("Before register allocation"); + allocateRegisters(); + } catch (Throwable e) { + throw Debug.handle(e); + } + + if (Options.LSRAOptimizeSpillPosition.getValue()) { + try (Scope s = Debug.scope("OptimizeSpillPosition")) { + optimizeSpillPosition(); + } catch (Throwable e) { + throw Debug.handle(e); + } + } + + try (Scope s = Debug.scope("ResolveDataFlow")) { + resolveDataFlow(); + } catch (Throwable e) { + throw Debug.handle(e); + } + + try (Scope s = Debug.scope("DebugInfo")) { + printIntervals("After register allocation"); + printLir("After register allocation", true); + + sortIntervalsAfterAllocation(); + + if (DetailedAsserts.getValue()) { + verify(); + } + + try (Scope s1 = Debug.scope("EliminateSpillMove")) { + eliminateSpillMoves(); + } catch (Throwable e) { + throw Debug.handle(e); + } + printLir("After spill move elimination", true); + + try (Scope s1 = Debug.scope("AssignLocations")) { + assignLocations(); + } catch (Throwable e) { + throw Debug.handle(e); + } + + if (DetailedAsserts.getValue()) { + verifyIntervals(); + } + } catch (Throwable e) { + throw Debug.handle(e); + } + + printLir("After register number assignment", true); + } + } + + private DebugMetric betterSpillPos = Debug.metric("BetterSpillPosition"); + private DebugMetric betterSpillPosWithLowerProbability = Debug.metric("BetterSpillPositionWithLowerProbability"); + + private void optimizeSpillPosition() { + LIRInsertionBuffer[] insertionBuffers = new LIRInsertionBuffer[ir.linearScanOrder().size()]; + for (Interval interval : intervals) { + if (interval != null && interval.isSplitParent() && interval.spillState() == SpillState.SpillInDominator) { + AbstractBlock defBlock = blockForId(interval.spillDefinitionPos()); + AbstractBlock spillBlock = null; + Interval firstSpillChild = null; + try (Indent indent = Debug.logAndIndent("interval %s (%s)", interval, defBlock)) { + for (Interval splitChild : interval.getSplitChildren()) { + if (isStackSlotValue(splitChild.location())) { + if (firstSpillChild == null || splitChild.from() < firstSpillChild.from()) { + firstSpillChild = splitChild; + } else { + assert firstSpillChild.from() < splitChild.from(); + } + // iterate all blocks where the interval has use positions + for (AbstractBlock splitBlock : blocksForInterval(splitChild)) { + if (dominates(defBlock, splitBlock)) { + Debug.log("Split interval %s, block %s", splitChild, splitBlock); + if (spillBlock == null) { + spillBlock = splitBlock; + } else { + spillBlock = commonDominator(spillBlock, splitBlock); + assert spillBlock != null; + } + } + } + } + } + if (spillBlock == null) { + // no spill interval + interval.setSpillState(SpillState.StoreAtDefinition); + } else { + // move out of loops + if (defBlock.getLoopDepth() < spillBlock.getLoopDepth()) { + spillBlock = moveSpillOutOfLoop(defBlock, spillBlock); + } + + /* + * If the spill block is the begin of the first split child (aka the value + * is on the stack) spill in the dominator. 
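optimizeSpillPosition above narrows spillBlock with commonDominator(spillBlock, splitBlock) until one block dominates every stack use of the interval. As a rough illustration of what such a helper computes (the block structure and names here are assumptions, not the compiler's AbstractBlock/CFG utilities used by this patch):

    import java.util.HashSet;
    import java.util.Set;

    // Illustrative sketch: nearest common dominator of two blocks, found by walking
    // the immediate-dominator links up to the entry block.
    final class CommonDominatorSketch {

        static final class Block {
            final String name;
            final Block dominator; // immediate dominator, null for the entry block

            Block(String name, Block dominator) {
                this.name = name;
                this.dominator = dominator;
            }
        }

        static Block commonDominator(Block a, Block b) {
            Set<Block> onPathToRoot = new HashSet<>();
            for (Block cur = a; cur != null; cur = cur.dominator) {
                onPathToRoot.add(cur);
            }
            for (Block cur = b; cur != null; cur = cur.dominator) {
                if (onPathToRoot.contains(cur)) {
                    return cur; // first block that dominates both a and b
                }
            }
            return null; // unreachable in a CFG with a single entry
        }

        public static void main(String[] args) {
            Block entry = new Block("entry", null);
            Block left = new Block("left", entry);
            Block right = new Block("right", entry);
            Block merge = new Block("merge", entry);
            System.out.println(commonDominator(left, right).name);  // entry
            System.out.println(commonDominator(merge, merge).name); // merge
        }
    }

Once that block is known, the code that follows still hoists the position out of loops and falls back to storing at the definition when no strictly better position exists.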
+ */ + assert firstSpillChild != null; + if (!defBlock.equals(spillBlock) && spillBlock.equals(blockForId(firstSpillChild.from()))) { + AbstractBlock dom = spillBlock.getDominator(); + Debug.log("Spill block (%s) is the beginning of a spill child -> use dominator (%s)", spillBlock, dom); + spillBlock = dom; + } + + if (!defBlock.equals(spillBlock)) { + assert dominates(defBlock, spillBlock); + betterSpillPos.increment(); + Debug.log("Better spill position found (Block %s)", spillBlock); + + if (defBlock.probability() <= spillBlock.probability()) { + // better spill block has the same probability -> do nothing + interval.setSpillState(SpillState.StoreAtDefinition); + } else { + LIRInsertionBuffer insertionBuffer = insertionBuffers[spillBlock.getId()]; + if (insertionBuffer == null) { + insertionBuffer = new LIRInsertionBuffer(); + insertionBuffers[spillBlock.getId()] = insertionBuffer; + insertionBuffer.init(ir.getLIRforBlock(spillBlock)); + } + int spillOpId = getFirstLirInstructionId(spillBlock); + // insert spill move + AllocatableValue fromLocation = interval.getSplitChildAtOpId(spillOpId, OperandMode.DEF, this).location(); + AllocatableValue toLocation = canonicalSpillOpr(interval); + LIRInstruction move = ir.getSpillMoveFactory().createMove(toLocation, fromLocation); + move.setId(DOMINATOR_SPILL_MOVE_ID); + /* + * We can use the insertion buffer directly because we always insert + * at position 1. + */ + insertionBuffer.append(1, move); + + betterSpillPosWithLowerProbability.increment(); + interval.setSpillDefinitionPos(spillOpId); + } + } else { + // definition is the best choice + interval.setSpillState(SpillState.StoreAtDefinition); + } + } + } + } + } + for (LIRInsertionBuffer insertionBuffer : insertionBuffers) { + if (insertionBuffer != null) { + assert insertionBuffer.initialized() : "Insertion buffer is nonnull but not initialized!"; + insertionBuffer.finish(); + } + } + } + + /** + * Iterate over all {@link AbstractBlock blocks} of an interval. 
+ */ + private class IntervalBlockIterator implements Iterator> { + + Range range; + AbstractBlock block; + + public IntervalBlockIterator(Interval interval) { + range = interval.first(); + block = blockForId(range.from); + } + + public AbstractBlock next() { + AbstractBlock currentBlock = block; + int nextBlockIndex = block.getLinearScanNumber() + 1; + if (nextBlockIndex < sortedBlocks.size()) { + block = sortedBlocks.get(nextBlockIndex); + if (range.to <= getFirstLirInstructionId(block)) { + range = range.next; + if (range == Range.EndMarker) { + block = null; + } else { + block = blockForId(range.from); + } + } + } else { + block = null; + } + return currentBlock; + } + + public boolean hasNext() { + return block != null; + } + } + + private Iterable> blocksForInterval(Interval interval) { + return new Iterable>() { + public Iterator> iterator() { + return new IntervalBlockIterator(interval); + } + }; + } + + private static AbstractBlock moveSpillOutOfLoop(AbstractBlock defBlock, AbstractBlock spillBlock) { + int defLoopDepth = defBlock.getLoopDepth(); + for (AbstractBlock block = spillBlock.getDominator(); !defBlock.equals(block); block = block.getDominator()) { + assert block != null : "spill block not dominated by definition block?"; + if (block.getLoopDepth() <= defLoopDepth) { + assert block.getLoopDepth() == defLoopDepth : "Cannot spill an interval outside of the loop where it is defined!"; + return block; + } + } + return defBlock; + } + + void printIntervals(String label) { + if (Debug.isLogEnabled()) { + try (Indent indent = Debug.logAndIndent("intervals %s", label)) { + for (Interval interval : intervals) { + if (interval != null) { + Debug.log("%s", interval.logString(this)); + } + } + + try (Indent indent2 = Debug.logAndIndent("Basic Blocks")) { + for (int i = 0; i < blockCount(); i++) { + AbstractBlock block = blockAt(i); + Debug.log("B%d [%d, %d, %s] ", block.getId(), getFirstLirInstructionId(block), getLastLirInstructionId(block), block.getLoop()); + } + } + } + } + Debug.dump(Arrays.copyOf(intervals, intervalsSize), label); + } + + void printLir(String label, @SuppressWarnings("unused") boolean hirValid) { + Debug.dump(ir, label); + } + + boolean verify() { + // (check that all intervals have a correct register and that no registers are overwritten) + verifyIntervals(); + + verifyRegisters(); + + Debug.log("no errors found"); + + return true; + } + + private void verifyRegisters() { + // Enable this logging to get output for the verification process. 
+ try (Indent indent = Debug.logAndIndent("verifying register allocation")) { + RegisterVerifier verifier = new RegisterVerifier(this); + verifier.verify(blockAt(0)); + } + } + + void verifyIntervals() { + try (Indent indent = Debug.logAndIndent("verifying intervals")) { + int len = intervalsSize; + + for (int i = 0; i < len; i++) { + Interval i1 = intervals[i]; + if (i1 == null) { + continue; + } + + i1.checkSplitChildren(); + + if (i1.operandNumber != i) { + Debug.log("Interval %d is on position %d in list", i1.operandNumber, i); + Debug.log(i1.logString(this)); + throw new GraalInternalError(""); + } + + if (isVariable(i1.operand) && i1.kind().equals(LIRKind.Illegal)) { + Debug.log("Interval %d has no type assigned", i1.operandNumber); + Debug.log(i1.logString(this)); + throw new GraalInternalError(""); + } + + if (i1.location() == null) { + Debug.log("Interval %d has no register assigned", i1.operandNumber); + Debug.log(i1.logString(this)); + throw new GraalInternalError(""); + } + + if (i1.first() == Range.EndMarker) { + Debug.log("Interval %d has no Range", i1.operandNumber); + Debug.log(i1.logString(this)); + throw new GraalInternalError(""); + } + + for (Range r = i1.first(); r != Range.EndMarker; r = r.next) { + if (r.from >= r.to) { + Debug.log("Interval %d has zero length range", i1.operandNumber); + Debug.log(i1.logString(this)); + throw new GraalInternalError(""); + } + } + + for (int j = i + 1; j < len; j++) { + Interval i2 = intervals[j]; + if (i2 == null) { + continue; + } + + // special intervals that are created in MoveResolver + // . ignore them because the range information has no meaning there + if (i1.from() == 1 && i1.to() == 2) { + continue; + } + if (i2.from() == 1 && i2.to() == 2) { + continue; + } + Value l1 = i1.location(); + Value l2 = i2.location(); + if (i1.intersects(i2) && !isIllegal(l1) && (l1.equals(l2))) { + if (DetailedAsserts.getValue()) { + Debug.log("Intervals %d and %d overlap and have the same register assigned", i1.operandNumber, i2.operandNumber); + Debug.log(i1.logString(this)); + Debug.log(i2.logString(this)); + } + throw new BailoutException(""); + } + } + } + } + } + + class CheckConsumer implements ValueConsumer { + + boolean ok; + Interval curInterval; + + @Override + public void visitValue(Value operand, OperandMode mode, EnumSet flags) { + if (isRegister(operand)) { + if (intervalFor(operand) == curInterval) { + ok = true; + } + } + } + } + + void verifyNoOopsInFixedIntervals() { + try (Indent indent = Debug.logAndIndent("verifying that no oops are in fixed intervals *")) { + CheckConsumer checkConsumer = new CheckConsumer(); + + Interval fixedIntervals; + Interval otherIntervals; + fixedIntervals = createUnhandledLists(IS_PRECOLORED_INTERVAL, null).first; + // to ensure a walking until the last instruction id, add a dummy interval + // with a high operation id + otherIntervals = new Interval(Value.ILLEGAL, -1); + otherIntervals.addRange(Integer.MAX_VALUE - 2, Integer.MAX_VALUE - 1); + IntervalWalker iw = new IntervalWalker(this, fixedIntervals, otherIntervals); + + for (AbstractBlock block : sortedBlocks) { + List instructions = ir.getLIRforBlock(block); + + for (int j = 0; j < instructions.size(); j++) { + LIRInstruction op = instructions.get(j); + + if (op.hasState()) { + iw.walkBefore(op.id()); + boolean checkLive = true; + + // Make sure none of the fixed registers is live across an + // oopmap since we can't handle that correctly. 
+ if (checkLive) { + for (Interval interval = iw.activeLists.get(RegisterBinding.Fixed); interval != Interval.EndMarker; interval = interval.next) { + if (interval.currentTo() > op.id() + 1) { + // This interval is live out of this op so make sure + // that this interval represents some value that's + // referenced by this op either as an input or output. + checkConsumer.curInterval = interval; + checkConsumer.ok = false; + + op.visitEachInput(checkConsumer); + op.visitEachAlive(checkConsumer); + op.visitEachTemp(checkConsumer); + op.visitEachOutput(checkConsumer); + + assert checkConsumer.ok : "fixed intervals should never be live across an oopmap point"; + } + } + } + } + } + } + } + } + + /** + * Returns a value for a interval definition, which can be used for re-materialization. + * + * @param op An instruction which defines a value + * @param operand The destination operand of the instruction + * @param interval The interval for this defined value. + * @return Returns the value which is moved to the instruction and which can be reused at all + * reload-locations in case the interval of this instruction is spilled. Currently this + * can only be a {@link JavaConstant}. + */ + public static JavaConstant getMaterializedValue(LIRInstruction op, Value operand, Interval interval) { + if (op instanceof MoveOp) { + MoveOp move = (MoveOp) op; + if (move.getInput() instanceof JavaConstant) { + /* + * Check if the interval has any uses which would accept an stack location (priority + * == ShouldHaveRegister). Rematerialization of such intervals can result in a + * degradation, because rematerialization always inserts a constant load, even if + * the value is not needed in a register. + */ + Interval.UsePosList usePosList = interval.usePosList(); + int numUsePos = usePosList.size(); + for (int useIdx = 0; useIdx < numUsePos; useIdx++) { + Interval.RegisterPriority priority = usePosList.registerPriority(useIdx); + if (priority == Interval.RegisterPriority.ShouldHaveRegister) { + return null; + } + } + return (JavaConstant) move.getInput(); + } + } + return null; + } +} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScanWalker.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScanWalker.java Fri Feb 06 12:44:50 2015 +0100 @@ -0,0 +1,900 @@ +/* + * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.graal.lir.alloc.lsra; + +import static com.oracle.graal.api.code.CodeUtil.*; +import static com.oracle.graal.api.code.ValueUtil.*; +import static com.oracle.graal.lir.LIRValueUtil.*; + +import java.util.*; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.compiler.common.cfg.*; +import com.oracle.graal.compiler.common.util.*; +import com.oracle.graal.debug.*; +import com.oracle.graal.lir.*; +import com.oracle.graal.lir.StandardOp.MoveOp; +import com.oracle.graal.lir.alloc.lsra.Interval.*; + +/** + */ +class LinearScanWalker extends IntervalWalker { + + protected Register[] availableRegs; + + protected final int[] usePos; + protected final int[] blockPos; + + protected List[] spillIntervals; + + private MoveResolver moveResolver; // for ordering spill moves + + /** + * Only 10% of the lists in {@link #spillIntervals} are actually used. But when they are used, + * they can grow quite long. The maximum length observed was 45 (all numbers taken from a + * bootstrap run of Graal). Therefore, we initialize {@link #spillIntervals} with this marker + * value, and allocate a "real" list only on demand in {@link #setUsePos}. + */ + private static final List EMPTY_LIST = new ArrayList<>(0); + + // accessors mapped to same functions in class LinearScan + int blockCount() { + return allocator.blockCount(); + } + + AbstractBlock blockAt(int idx) { + return allocator.blockAt(idx); + } + + AbstractBlock blockOfOpWithId(int opId) { + return allocator.blockForId(opId); + } + + LinearScanWalker(LinearScan allocator, Interval unhandledFixedFirst, Interval unhandledAnyFirst) { + super(allocator, unhandledFixedFirst, unhandledAnyFirst); + + moveResolver = new MoveResolver(allocator); + spillIntervals = Util.uncheckedCast(new List[allocator.registers.length]); + for (int i = 0; i < allocator.registers.length; i++) { + spillIntervals[i] = EMPTY_LIST; + } + usePos = new int[allocator.registers.length]; + blockPos = new int[allocator.registers.length]; + } + + void initUseLists(boolean onlyProcessUsePos) { + for (Register register : availableRegs) { + int i = register.number; + usePos[i] = Integer.MAX_VALUE; + + if (!onlyProcessUsePos) { + blockPos[i] = Integer.MAX_VALUE; + spillIntervals[i].clear(); + } + } + } + + void excludeFromUse(Interval i) { + Value location = i.location(); + int i1 = asRegister(location).number; + if (i1 >= availableRegs[0].number && i1 <= availableRegs[availableRegs.length - 1].number) { + usePos[i1] = 0; + } + } + + void setUsePos(Interval interval, int usePos, boolean onlyProcessUsePos) { + if (usePos != -1) { + assert usePos != 0 : "must use excludeFromUse to set usePos to 0"; + int i = asRegister(interval.location()).number; + if (i >= availableRegs[0].number && i <= availableRegs[availableRegs.length - 1].number) { + if (this.usePos[i] > usePos) { + this.usePos[i] = usePos; + } + if (!onlyProcessUsePos) { + List list = spillIntervals[i]; + if (list == EMPTY_LIST) { + list = new ArrayList<>(2); + spillIntervals[i] = list; + } + list.add(interval); + } + } + } + } + + void setBlockPos(Interval i, int blockPos) { + if (blockPos != -1) { + int reg = asRegister(i.location()).number; + if (reg >= availableRegs[0].number && reg <= availableRegs[availableRegs.length - 1].number) { + if (this.blockPos[reg] > blockPos) { + this.blockPos[reg] = blockPos; + } + if (usePos[reg] > blockPos) { + usePos[reg] = blockPos; + } + } + } + } + + void freeExcludeActiveFixed() { + Interval interval = 
activeLists.get(RegisterBinding.Fixed); + while (interval != Interval.EndMarker) { + assert isRegister(interval.location()) : "active interval must have a register assigned"; + excludeFromUse(interval); + interval = interval.next; + } + } + + void freeExcludeActiveAny() { + Interval interval = activeLists.get(RegisterBinding.Any); + while (interval != Interval.EndMarker) { + assert isRegister(interval.location()) : "active interval must have a register assigned"; + excludeFromUse(interval); + interval = interval.next; + } + } + + void freeCollectInactiveFixed(Interval current) { + Interval interval = inactiveLists.get(RegisterBinding.Fixed); + while (interval != Interval.EndMarker) { + if (current.to() <= interval.currentFrom()) { + assert interval.currentIntersectsAt(current) == -1 : "must not intersect"; + setUsePos(interval, interval.currentFrom(), true); + } else { + setUsePos(interval, interval.currentIntersectsAt(current), true); + } + interval = interval.next; + } + } + + void freeCollectInactiveAny(Interval current) { + Interval interval = inactiveLists.get(RegisterBinding.Any); + while (interval != Interval.EndMarker) { + setUsePos(interval, interval.currentIntersectsAt(current), true); + interval = interval.next; + } + } + + void freeCollectUnhandled(RegisterBinding kind, Interval current) { + Interval interval = unhandledLists.get(kind); + while (interval != Interval.EndMarker) { + setUsePos(interval, interval.intersectsAt(current), true); + if (kind == RegisterBinding.Fixed && current.to() <= interval.from()) { + setUsePos(interval, interval.from(), true); + } + interval = interval.next; + } + } + + void spillExcludeActiveFixed() { + Interval interval = activeLists.get(RegisterBinding.Fixed); + while (interval != Interval.EndMarker) { + excludeFromUse(interval); + interval = interval.next; + } + } + + void spillBlockUnhandledFixed(Interval current) { + Interval interval = unhandledLists.get(RegisterBinding.Fixed); + while (interval != Interval.EndMarker) { + setBlockPos(interval, interval.intersectsAt(current)); + interval = interval.next; + } + } + + void spillBlockInactiveFixed(Interval current) { + Interval interval = inactiveLists.get(RegisterBinding.Fixed); + while (interval != Interval.EndMarker) { + if (current.to() > interval.currentFrom()) { + setBlockPos(interval, interval.currentIntersectsAt(current)); + } else { + assert interval.currentIntersectsAt(current) == -1 : "invalid optimization: intervals intersect"; + } + + interval = interval.next; + } + } + + void spillCollectActiveAny() { + Interval interval = activeLists.get(RegisterBinding.Any); + while (interval != Interval.EndMarker) { + setUsePos(interval, Math.min(interval.nextUsage(RegisterPriority.LiveAtLoopEnd, currentPosition), interval.to()), false); + interval = interval.next; + } + } + + void spillCollectInactiveAny(Interval current) { + Interval interval = inactiveLists.get(RegisterBinding.Any); + while (interval != Interval.EndMarker) { + if (interval.currentIntersects(current)) { + setUsePos(interval, Math.min(interval.nextUsage(RegisterPriority.LiveAtLoopEnd, currentPosition), interval.to()), false); + } + interval = interval.next; + } + } + + void insertMove(int operandId, Interval srcIt, Interval dstIt) { + // output all moves here. 
When source and target are equal, the move is + // optimized away later in assignRegNums + + int opId = (operandId + 1) & ~1; + AbstractBlock opBlock = allocator.blockForId(opId); + assert opId > 0 && allocator.blockForId(opId - 2) == opBlock : "cannot insert move at block boundary"; + + // calculate index of instruction inside instruction list of current block + // the minimal index (for a block with no spill moves) can be calculated because the + // numbering of instructions is known. + // When the block already contains spill moves, the index must be increased until the + // correct index is reached. + List instructions = allocator.ir.getLIRforBlock(opBlock); + int index = (opId - instructions.get(0).id()) >> 1; + assert instructions.get(index).id() <= opId : "error in calculation"; + + while (instructions.get(index).id() != opId) { + index++; + assert 0 <= index && index < instructions.size() : "index out of bounds"; + } + assert 1 <= index && index < instructions.size() : "index out of bounds"; + assert instructions.get(index).id() == opId : "error in calculation"; + + // insert new instruction before instruction at position index + moveResolver.moveInsertPosition(instructions, index); + moveResolver.addMapping(srcIt, dstIt); + } + + int findOptimalSplitPos(AbstractBlock minBlock, AbstractBlock maxBlock, int maxSplitPos) { + int fromBlockNr = minBlock.getLinearScanNumber(); + int toBlockNr = maxBlock.getLinearScanNumber(); + + assert 0 <= fromBlockNr && fromBlockNr < blockCount() : "out of range"; + assert 0 <= toBlockNr && toBlockNr < blockCount() : "out of range"; + assert fromBlockNr < toBlockNr : "must cross block boundary"; + + // Try to split at end of maxBlock. If this would be after + // maxSplitPos, then use the begin of maxBlock + int optimalSplitPos = allocator.getLastLirInstructionId(maxBlock) + 2; + if (optimalSplitPos > maxSplitPos) { + optimalSplitPos = allocator.getFirstLirInstructionId(maxBlock); + } + + int minLoopDepth = maxBlock.getLoopDepth(); + for (int i = toBlockNr - 1; i >= fromBlockNr; i--) { + AbstractBlock cur = blockAt(i); + + if (cur.getLoopDepth() < minLoopDepth) { + // block with lower loop-depth found . split at the end of this block + minLoopDepth = cur.getLoopDepth(); + optimalSplitPos = allocator.getLastLirInstructionId(cur) + 2; + } + } + assert optimalSplitPos > allocator.maxOpId() || allocator.isBlockBegin(optimalSplitPos) : "algorithm must move split pos to block boundary"; + + return optimalSplitPos; + } + + int findOptimalSplitPos(Interval interval, int minSplitPos, int maxSplitPos, boolean doLoopOptimization) { + int optimalSplitPos = -1; + if (minSplitPos == maxSplitPos) { + // trivial case, no optimization of split position possible + Debug.log("min-pos and max-pos are equal, no optimization possible"); + optimalSplitPos = minSplitPos; + + } else { + assert minSplitPos < maxSplitPos : "must be true then"; + assert minSplitPos > 0 : "cannot access minSplitPos - 1 otherwise"; + + // reason for using minSplitPos - 1: when the minimal split pos is exactly at the + // beginning of a block, then minSplitPos is also a possible split position. 
+ // Use the block before as minBlock, because then minBlock.lastLirInstructionId() + 2 == + // minSplitPos + AbstractBlock minBlock = allocator.blockForId(minSplitPos - 1); + + // reason for using maxSplitPos - 1: otherwise there would be an assert on failure + // when an interval ends at the end of the last block of the method + // (in this case, maxSplitPos == allocator().maxLirOpId() + 2, and there is no + // block at this opId) + AbstractBlock maxBlock = allocator.blockForId(maxSplitPos - 1); + + assert minBlock.getLinearScanNumber() <= maxBlock.getLinearScanNumber() : "invalid order"; + if (minBlock == maxBlock) { + // split position cannot be moved to block boundary : so split as late as possible + Debug.log("cannot move split pos to block boundary because minPos and maxPos are in same block"); + optimalSplitPos = maxSplitPos; + + } else { + if (interval.hasHoleBetween(maxSplitPos - 1, maxSplitPos) && !allocator.isBlockBegin(maxSplitPos)) { + // Do not move split position if the interval has a hole before maxSplitPos. + // Intervals resulting from Phi-Functions have more than one definition (marked + // as mustHaveRegister) with a hole before each definition. When the register is + // needed + // for the second definition : an earlier reloading is unnecessary. + Debug.log("interval has hole just before maxSplitPos, so splitting at maxSplitPos"); + optimalSplitPos = maxSplitPos; + + } else { + // seach optimal block boundary between minSplitPos and maxSplitPos + Debug.log("moving split pos to optimal block boundary between block B%d and B%d", minBlock.getId(), maxBlock.getId()); + + if (doLoopOptimization) { + // Loop optimization: if a loop-end marker is found between min- and + // max-position : + // then split before this loop + int loopEndPos = interval.nextUsageExact(RegisterPriority.LiveAtLoopEnd, allocator.getLastLirInstructionId(minBlock) + 2); + Debug.log("loop optimization: loop end found at pos %d", loopEndPos); + + assert loopEndPos > minSplitPos : "invalid order"; + if (loopEndPos < maxSplitPos) { + // loop-end marker found between min- and max-position + // if it is not the end marker for the same loop as the min-position : + // then move + // the max-position to this loop block. 
+ // Desired result: uses tagged as shouldHaveRegister inside a loop cause + // a reloading + // of the interval (normally, only mustHaveRegister causes a reloading) + AbstractBlock loopBlock = allocator.blockForId(loopEndPos); + + Debug.log("interval is used in loop that ends in block B%d, so trying to move maxBlock back from B%d to B%d", loopBlock.getId(), maxBlock.getId(), loopBlock.getId()); + assert loopBlock != minBlock : "loopBlock and minBlock must be different because block boundary is needed between"; + + optimalSplitPos = findOptimalSplitPos(minBlock, loopBlock, allocator.getLastLirInstructionId(loopBlock) + 2); + if (optimalSplitPos == allocator.getLastLirInstructionId(loopBlock) + 2) { + optimalSplitPos = -1; + Debug.log("loop optimization not necessary"); + } else { + Debug.log("loop optimization successful"); + } + } + } + + if (optimalSplitPos == -1) { + // not calculated by loop optimization + optimalSplitPos = findOptimalSplitPos(minBlock, maxBlock, maxSplitPos); + } + } + } + } + Debug.log("optimal split position: %d", optimalSplitPos); + + return optimalSplitPos; + } + + // split an interval at the optimal position between minSplitPos and + // maxSplitPos in two parts: + // 1) the left part has already a location assigned + // 2) the right part is sorted into to the unhandled-list + void splitBeforeUsage(Interval interval, int minSplitPos, int maxSplitPos) { + + try (Indent indent = Debug.logAndIndent("splitting interval %s between %d and %d", interval, minSplitPos, maxSplitPos)) { + + assert interval.from() < minSplitPos : "cannot split at start of interval"; + assert currentPosition < minSplitPos : "cannot split before current position"; + assert minSplitPos <= maxSplitPos : "invalid order"; + assert maxSplitPos <= interval.to() : "cannot split after end of interval"; + + int optimalSplitPos = findOptimalSplitPos(interval, minSplitPos, maxSplitPos, true); + + assert minSplitPos <= optimalSplitPos && optimalSplitPos <= maxSplitPos : "out of range"; + assert optimalSplitPos <= interval.to() : "cannot split after end of interval"; + assert optimalSplitPos > interval.from() : "cannot split at start of interval"; + + if (optimalSplitPos == interval.to() && interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos) == Integer.MAX_VALUE) { + // the split position would be just before the end of the interval + // . 
no split at all necessary + Debug.log("no split necessary because optimal split position is at end of interval"); + return; + } + + // must calculate this before the actual split is performed and before split position is + // moved to odd opId + boolean moveNecessary = !allocator.isBlockBegin(optimalSplitPos) && !interval.hasHoleBetween(optimalSplitPos - 1, optimalSplitPos); + + if (!allocator.isBlockBegin(optimalSplitPos)) { + // move position before actual instruction (odd opId) + optimalSplitPos = (optimalSplitPos - 1) | 1; + } + + Debug.log("splitting at position %d", optimalSplitPos); + + assert allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 1) : "split pos must be odd when not on block boundary"; + assert !allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 0) : "split pos must be even on block boundary"; + + Interval splitPart = interval.split(optimalSplitPos, allocator); + + splitPart.setInsertMoveWhenActivated(moveNecessary); + + assert splitPart.from() >= currentPosition : "cannot append new interval before current walk position"; + unhandledLists.addToListSortedByStartAndUsePositions(RegisterBinding.Any, splitPart); + + if (Debug.isLogEnabled()) { + Debug.log("left interval %s: %s", moveNecessary ? " " : "", interval.logString(allocator)); + Debug.log("right interval %s: %s", moveNecessary ? "(move)" : "", splitPart.logString(allocator)); + } + } + } + + // split an interval at the optimal position between minSplitPos and + // maxSplitPos in two parts: + // 1) the left part has already a location assigned + // 2) the right part is always on the stack and therefore ignored in further processing + + void splitForSpilling(Interval interval) { + // calculate allowed range of splitting position + int maxSplitPos = currentPosition; + int minSplitPos = Math.max(interval.previousUsage(RegisterPriority.ShouldHaveRegister, maxSplitPos) + 1, interval.from()); + + try (Indent indent = Debug.logAndIndent("splitting and spilling interval %s between %d and %d", interval, minSplitPos, maxSplitPos)) { + + assert interval.state == State.Active : "why spill interval that is not active?"; + assert interval.from() <= minSplitPos : "cannot split before start of interval"; + assert minSplitPos <= maxSplitPos : "invalid order"; + assert maxSplitPos < interval.to() : "cannot split at end end of interval"; + assert currentPosition < interval.to() : "interval must not end before current position"; + + if (minSplitPos == interval.from()) { + // the whole interval is never used, so spill it entirely to memory + + try (Indent indent2 = Debug.logAndIndent("spilling entire interval because split pos is at beginning of interval (use positions: %d)", interval.usePosList().size())) { + + assert interval.firstUsage(RegisterPriority.ShouldHaveRegister) > currentPosition : "interval must not have use position before currentPosition"; + + allocator.assignSpillSlot(interval); + handleSpillSlot(interval); + allocator.changeSpillState(interval, minSplitPos); + + // Also kick parent intervals out of register to memory when they have no use + // position. This avoids short interval in register surrounded by intervals in + // memory . 
avoid useless moves from memory to register and back + Interval parent = interval; + while (parent != null && parent.isSplitChild()) { + parent = parent.getSplitChildBeforeOpId(parent.from()); + + if (isRegister(parent.location())) { + if (parent.firstUsage(RegisterPriority.ShouldHaveRegister) == Integer.MAX_VALUE) { + // parent is never used, so kick it out of its assigned register + Debug.log("kicking out interval %d out of its register because it is never used", parent.operandNumber); + allocator.assignSpillSlot(parent); + handleSpillSlot(parent); + } else { + // do not go further back because the register is actually used by + // the interval + parent = null; + } + } + } + } + + } else { + // search optimal split pos, split interval and spill only the right hand part + int optimalSplitPos = findOptimalSplitPos(interval, minSplitPos, maxSplitPos, false); + + assert minSplitPos <= optimalSplitPos && optimalSplitPos <= maxSplitPos : "out of range"; + assert optimalSplitPos < interval.to() : "cannot split at end of interval"; + assert optimalSplitPos >= interval.from() : "cannot split before start of interval"; + + if (!allocator.isBlockBegin(optimalSplitPos)) { + // move position before actual instruction (odd opId) + optimalSplitPos = (optimalSplitPos - 1) | 1; + } + + try (Indent indent2 = Debug.logAndIndent("splitting at position %d", optimalSplitPos)) { + assert allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 1) : "split pos must be odd when not on block boundary"; + assert !allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 0) : "split pos must be even on block boundary"; + + Interval spilledPart = interval.split(optimalSplitPos, allocator); + allocator.assignSpillSlot(spilledPart); + handleSpillSlot(spilledPart); + allocator.changeSpillState(spilledPart, optimalSplitPos); + + if (!allocator.isBlockBegin(optimalSplitPos)) { + Debug.log("inserting move from interval %d to %d", interval.operandNumber, spilledPart.operandNumber); + insertMove(optimalSplitPos, interval, spilledPart); + } + + // the currentSplitChild is needed later when moves are inserted for reloading + assert spilledPart.currentSplitChild() == interval : "overwriting wrong currentSplitChild"; + spilledPart.makeCurrentSplitChild(); + + if (Debug.isLogEnabled()) { + Debug.log("left interval: %s", interval.logString(allocator)); + Debug.log("spilled interval : %s", spilledPart.logString(allocator)); + } + } + } + } + } + + /** + * This is called for every interval that is assigned to a stack slot. + */ + protected void handleSpillSlot(Interval interval) { + assert interval.location() != null && (interval.canMaterialize() || isStackSlotValue(interval.location())) : "interval not assigned to a stack slot " + interval; + // Do nothing. Stack slots are not processed in this implementation. 
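splitForSpilling and splitBeforeUsage above both end up in Interval.split(optimalSplitPos, allocator): the left part keeps its current location while the right part is either re-queued on the unhandled list or spilled. A minimal sketch of the underlying range arithmetic, with illustrative names only (the real Interval also has to move use positions and maintain the split-parent/split-child links):

    // Illustrative sketch: splitting a live range [from, to) at pos produces a
    // left part [from, pos) that keeps its location and a right part [pos, to).
    final class IntervalSplitSketch {

        static final class Range {
            int from;
            int to; // exclusive

            Range(int from, int to) {
                this.from = from;
                this.to = to;
            }

            Range splitAt(int pos) {
                if (pos <= from || pos >= to) {
                    throw new IllegalArgumentException("split position must lie strictly inside the range");
                }
                Range right = new Range(pos, to);
                this.to = pos; // left part is truncated in place
                return right;
            }

            @Override
            public String toString() {
                return "[" + from + ", " + to + ")";
            }
        }

        public static void main(String[] args) {
            Range left = new Range(4, 40);
            // as in the code above, a split not on a block boundary is moved to an odd
            // position, i.e. before the actual instruction: (18 - 1) | 1 == 17
            int optimalSplitPos = (18 - 1) | 1;
            Range right = left.splitAt(optimalSplitPos);
            System.out.println(left + " " + right); // [4, 17) [17, 40)
        }
    }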
+ } + + void splitStackInterval(Interval interval) { + int minSplitPos = currentPosition + 1; + int maxSplitPos = Math.min(interval.firstUsage(RegisterPriority.ShouldHaveRegister), interval.to()); + + splitBeforeUsage(interval, minSplitPos, maxSplitPos); + } + + void splitWhenPartialRegisterAvailable(Interval interval, int registerAvailableUntil) { + int minSplitPos = Math.max(interval.previousUsage(RegisterPriority.ShouldHaveRegister, registerAvailableUntil), interval.from() + 1); + splitBeforeUsage(interval, minSplitPos, registerAvailableUntil); + } + + void splitAndSpillInterval(Interval interval) { + assert interval.state == State.Active || interval.state == State.Inactive : "other states not allowed"; + + int currentPos = currentPosition; + if (interval.state == State.Inactive) { + // the interval is currently inactive, so no spill slot is needed for now. + // when the split part is activated, the interval has a new chance to get a register, + // so in the best case no stack slot is necessary + assert interval.hasHoleBetween(currentPos - 1, currentPos + 1) : "interval can not be inactive otherwise"; + splitBeforeUsage(interval, currentPos + 1, currentPos + 1); + + } else { + // search the position where the interval must have a register and split + // at the optimal position before. + // The new created part is added to the unhandled list and will get a register + // when it is activated + int minSplitPos = currentPos + 1; + int maxSplitPos = Math.min(interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos), interval.to()); + + splitBeforeUsage(interval, minSplitPos, maxSplitPos); + + assert interval.nextUsage(RegisterPriority.MustHaveRegister, currentPos) == Integer.MAX_VALUE : "the remaining part is spilled to stack and therefore has no register"; + splitForSpilling(interval); + } + } + + boolean allocFreeRegister(Interval interval) { + try (Indent indent = Debug.logAndIndent("trying to find free register for %s", interval)) { + + initUseLists(true); + freeExcludeActiveFixed(); + freeExcludeActiveAny(); + freeCollectInactiveFixed(interval); + freeCollectInactiveAny(interval); + // freeCollectUnhandled(fixedKind, cur); + assert unhandledLists.get(RegisterBinding.Fixed) == Interval.EndMarker : "must not have unhandled fixed intervals because all fixed intervals have a use at position 0"; + + // usePos contains the start of the next interval that has this register assigned + // (either as a fixed register or a normal allocated register in the past) + // only intervals overlapping with cur are processed, non-overlapping invervals can be + // ignored safely + if (Debug.isLogEnabled()) { + // Enable this logging to see all register states + try (Indent indent2 = Debug.logAndIndent("state of registers:")) { + for (Register register : availableRegs) { + int i = register.number; + Debug.log("reg %d: usePos: %d", register.number, usePos[i]); + } + } + } + + Register hint = null; + Interval locationHint = interval.locationHint(true); + if (locationHint != null && locationHint.location() != null && isRegister(locationHint.location())) { + hint = asRegister(locationHint.location()); + Debug.log("hint register %d from interval %s", hint.number, locationHint); + } + assert interval.location() == null : "register already assigned to interval"; + + // the register must be free at least until this position + int regNeededUntil = interval.from() + 1; + int intervalTo = interval.to(); + + boolean needSplit = false; + int splitPos = -1; + + Register reg = null; + Register minFullReg = null; + 
Register maxPartialReg = null; + + for (int i = 0; i < availableRegs.length; ++i) { + Register availableReg = availableRegs[i]; + int number = availableReg.number; + if (usePos[number] >= intervalTo) { + // this register is free for the full interval + if (minFullReg == null || availableReg.equals(hint) || (usePos[number] < usePos[minFullReg.number] && !minFullReg.equals(hint))) { + minFullReg = availableReg; + } + } else if (usePos[number] > regNeededUntil) { + // this register is at least free until regNeededUntil + if (maxPartialReg == null || availableReg.equals(hint) || (usePos[number] > usePos[maxPartialReg.number] && !maxPartialReg.equals(hint))) { + maxPartialReg = availableReg; + } + } + } + + if (minFullReg != null) { + reg = minFullReg; + } else if (maxPartialReg != null) { + needSplit = true; + reg = maxPartialReg; + } else { + return false; + } + + splitPos = usePos[reg.number]; + interval.assignLocation(reg.asValue(interval.kind())); + Debug.log("selected register %d", reg.number); + + assert splitPos > 0 : "invalid splitPos"; + if (needSplit) { + // register not available for full interval, so split it + splitWhenPartialRegisterAvailable(interval, splitPos); + } + // only return true if interval is completely assigned + return true; + } + } + + void splitAndSpillIntersectingIntervals(Register reg) { + assert reg != null : "no register assigned"; + + for (int i = 0; i < spillIntervals[reg.number].size(); i++) { + Interval interval = spillIntervals[reg.number].get(i); + removeFromList(interval); + splitAndSpillInterval(interval); + } + } + + // Split an Interval and spill it to memory so that cur can be placed in a register + void allocLockedRegister(Interval interval) { + try (Indent indent = Debug.logAndIndent("alloc locked register: need to split and spill to get register for %s", interval)) { + + // collect current usage of registers + initUseLists(false); + spillExcludeActiveFixed(); + // spillBlockUnhandledFixed(cur); + assert unhandledLists.get(RegisterBinding.Fixed) == Interval.EndMarker : "must not have unhandled fixed intervals because all fixed intervals have a use at position 0"; + spillBlockInactiveFixed(interval); + spillCollectActiveAny(); + spillCollectInactiveAny(interval); + + if (Debug.isLogEnabled()) { + try (Indent indent2 = Debug.logAndIndent("state of registers:")) { + for (Register reg : availableRegs) { + int i = reg.number; + try (Indent indent3 = Debug.logAndIndent("reg %d: usePos: %d, blockPos: %d, intervals: ", i, usePos[i], blockPos[i])) { + for (int j = 0; j < spillIntervals[i].size(); j++) { + Debug.log("%d ", spillIntervals[i].get(j).operandNumber); + } + } + } + } + } + + // the register must be free at least until this position + int firstUsage = interval.firstUsage(RegisterPriority.MustHaveRegister); + int regNeededUntil = Math.min(firstUsage, interval.from() + 1); + int intervalTo = interval.to(); + assert regNeededUntil > 0 && regNeededUntil < Integer.MAX_VALUE : "interval has no use"; + + Register reg = null; + Register ignore = interval.location() != null && isRegister(interval.location()) ? asRegister(interval.location()) : null; + for (Register availableReg : availableRegs) { + int number = availableReg.number; + if (availableReg.equals(ignore)) { + // this register must be ignored + } else if (usePos[number] > regNeededUntil) { + if (reg == null || (usePos[number] > usePos[reg.number])) { + reg = availableReg; + } + } + } + + int regUsePos = (reg == null ? 
0 : usePos[reg.number]); + if (regUsePos <= firstUsage) { + Debug.log("able to spill current interval. firstUsage(register): %d, usePos: %d", firstUsage, regUsePos); + + if (firstUsage <= interval.from() + 1) { + assert false : "cannot spill interval that is used in first instruction (possible reason: no register found) firstUsage=" + firstUsage + ", interval.from()=" + interval.from(); + // assign a reasonable register and do a bailout in product mode to avoid errors + allocator.assignSpillSlot(interval); + throw new BailoutException("LinearScan: no register found"); + } + + splitAndSpillInterval(interval); + return; + } + + boolean needSplit = blockPos[reg.number] <= intervalTo; + + int splitPos = blockPos[reg.number]; + + Debug.log("decided to use register %d", reg.number); + assert splitPos > 0 : "invalid splitPos"; + assert needSplit || splitPos > interval.from() : "splitting interval at from"; + + interval.assignLocation(reg.asValue(interval.kind())); + if (needSplit) { + // register not available for full interval : so split it + splitWhenPartialRegisterAvailable(interval, splitPos); + } + + // perform splitting and spilling for all affected intervals + splitAndSpillIntersectingIntervals(reg); + } + } + + boolean noAllocationPossible(Interval interval) { + if (allocator.callKillsRegisters) { + // fast calculation of intervals that can never get a register because the + // the next instruction is a call that blocks all registers + // Note: this only works if a call kills all registers + + // check if this interval is the result of a split operation + // (an interval got a register until this position) + int pos = interval.from(); + if (isOdd(pos)) { + // the current instruction is a call that blocks all registers + if (pos < allocator.maxOpId() && allocator.hasCall(pos + 1) && interval.to() > pos + 1) { + Debug.log("free register cannot be available because all registers blocked by following call"); + + // safety check that there is really no register available + assert !allocFreeRegister(interval) : "found a register for this interval"; + return true; + } + } + } + return false; + } + + void initVarsForAlloc(Interval interval) { + availableRegs = allocator.frameMapBuilder.getRegisterConfig().getAllocatableRegisters(interval.kind().getPlatformKind()); + } + + static boolean isMove(LIRInstruction op, Interval from, Interval to) { + if (op instanceof MoveOp) { + MoveOp move = (MoveOp) op; + if (isVariable(move.getInput()) && isVariable(move.getResult())) { + return move.getInput() != null && move.getInput().equals(from.operand) && move.getResult() != null && move.getResult().equals(to.operand); + } + } + return false; + } + + // optimization (especially for phi functions of nested loops): + // assign same spill slot to non-intersecting intervals + void combineSpilledIntervals(Interval interval) { + if (interval.isSplitChild()) { + // optimization is only suitable for split parents + return; + } + + Interval registerHint = interval.locationHint(false); + if (registerHint == null) { + // cur is not the target of a move : otherwise registerHint would be set + return; + } + assert registerHint.isSplitParent() : "register hint must be split parent"; + + if (interval.spillState() != SpillState.NoOptimization || registerHint.spillState() != SpillState.NoOptimization) { + // combining the stack slots for intervals where spill move optimization is applied + // is not benefitial and would cause problems + return; + } + + int beginPos = interval.from(); + int endPos = interval.to(); + if (endPos 
> allocator.maxOpId() || isOdd(beginPos) || isOdd(endPos)) { + // safety check that lirOpWithId is allowed + return; + } + + if (!isMove(allocator.instructionForId(beginPos), registerHint, interval) || !isMove(allocator.instructionForId(endPos), interval, registerHint)) { + // cur and registerHint are not connected with two moves + return; + } + + Interval beginHint = registerHint.getSplitChildAtOpId(beginPos, LIRInstruction.OperandMode.USE, allocator); + Interval endHint = registerHint.getSplitChildAtOpId(endPos, LIRInstruction.OperandMode.DEF, allocator); + if (beginHint == endHint || beginHint.to() != beginPos || endHint.from() != endPos) { + // registerHint must be split : otherwise the rewriting of use positions does not work + return; + } + + assert beginHint.location() != null : "must have register assigned"; + assert endHint.location() == null : "must not have register assigned"; + assert interval.firstUsage(RegisterPriority.MustHaveRegister) == beginPos : "must have use position at begin of interval because of move"; + assert endHint.firstUsage(RegisterPriority.MustHaveRegister) == endPos : "must have use position at begin of interval because of move"; + + if (isRegister(beginHint.location())) { + // registerHint is not spilled at beginPos : so it would not be beneficial to + // immediately spill cur + return; + } + assert registerHint.spillSlot() != null : "must be set when part of interval was spilled"; + + // modify intervals such that cur gets the same stack slot as registerHint + // delete use positions to prevent the intervals from getting a register at the beginning + interval.setSpillSlot(registerHint.spillSlot()); + interval.removeFirstUsePos(); + endHint.removeFirstUsePos(); + } + + // allocate a physical register or memory location to an interval + @Override + protected boolean activateCurrent(Interval interval) { + boolean result = true; + + try (Indent indent = Debug.logAndIndent("activating interval %s, splitParent: %d", interval, interval.splitParent().operandNumber)) { + + final Value operand = interval.operand; + if (interval.location() != null && isStackSlotValue(interval.location())) { + // activating an interval that has a stack slot assigned -> split it at first use + // position + // used for method parameters + Debug.log("interval has spill slot assigned (method parameter) -> split it before first use"); + splitStackInterval(interval); + result = false; + + } else { + if (interval.location() == null) { + // interval has no register assigned -> normal allocation + // (this is the normal case for most intervals) + Debug.log("normal allocation of register"); + + // assign same spill slot to non-intersecting intervals + combineSpilledIntervals(interval); + + initVarsForAlloc(interval); + if (noAllocationPossible(interval) || !allocFreeRegister(interval)) { + // no empty register available.
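+ // (either the following instruction is a call that blocks all registers, or no register is free beyond the start of this interval)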
+ // split and spill another interval so that this interval gets a register + allocLockedRegister(interval); + } + + // spilled intervals need not be move to active-list + if (!isRegister(interval.location())) { + result = false; + } + } + } + + // load spilled values that become active from stack slot to register + if (interval.insertMoveWhenActivated()) { + assert interval.isSplitChild(); + assert interval.currentSplitChild() != null; + assert !interval.currentSplitChild().operand.equals(operand) : "cannot insert move between same interval"; + Debug.log("Inserting move from interval %d to %d because insertMoveWhenActivated is set", interval.currentSplitChild().operandNumber, interval.operandNumber); + + insertMove(interval.from(), interval.currentSplitChild(), interval); + } + interval.makeCurrentSplitChild(); + + } + + return result; // true = interval is moved to active list + } + + public void finishAllocation() { + // must be called when all intervals are allocated + moveResolver.resolveAndAppendMoves(); + } +} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LocationMarker.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LocationMarker.java Fri Feb 06 12:44:50 2015 +0100 @@ -0,0 +1,221 @@ +/* + * Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.lir.alloc.lsra; + +import static com.oracle.graal.api.code.ValueUtil.*; + +import java.util.*; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.compiler.common.cfg.*; +import com.oracle.graal.debug.*; +import com.oracle.graal.lir.*; +import com.oracle.graal.lir.LIRInstruction.OperandFlag; +import com.oracle.graal.lir.LIRInstruction.OperandMode; +import com.oracle.graal.lir.framemap.*; +import com.oracle.graal.options.*; + +public final class LocationMarker { + + public static class Options { + // @formatter:off + @Option(help = "Use decoupled pass for location marking (instead of using LSRA marking)", type = OptionType.Debug) + public static final OptionValue UseLocationMarker = new OptionValue<>(true); + // @formatter:on + } + + /** + * Mark all live references for a frame state. The frame state use this information to build the + * OOP maps. 
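+ * The marking is done by a backward data-flow analysis over the LIR: blocks are processed on a
+ * worklist until the live-in and live-out reference maps reach a fixed point, and each frame
+ * state is updated with the reference map that is live at its instruction.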
+ */ + public static void markLocations(LIR lir, FrameMap frameMap) { + new LocationMarker(lir, frameMap).build(); + } + + private final LIR lir; + private final FrameMap frameMap; + private final RegisterAttributes[] registerAttributes; + private final BlockMap liveInMap; + private final BlockMap liveOutMap; + + private LocationMarker(LIR lir, FrameMap frameMap) { + this.lir = lir; + this.frameMap = frameMap; + this.registerAttributes = frameMap.getRegisterConfig().getAttributesMap(); + liveInMap = new BlockMap<>(lir.getControlFlowGraph()); + liveOutMap = new BlockMap<>(lir.getControlFlowGraph()); + } + + private void build() { + Deque> worklist = new ArrayDeque<>(); + for (int i = lir.getControlFlowGraph().getBlocks().size() - 1; i >= 0; i--) { + worklist.add(lir.getControlFlowGraph().getBlocks().get(i)); + } + for (AbstractBlock block : lir.getControlFlowGraph().getBlocks()) { + liveInMap.put(block, frameMap.initReferenceMap(true)); + } + while (!worklist.isEmpty()) { + AbstractBlock block = worklist.poll(); + processBlock(block, worklist); + } + // finish states + for (AbstractBlock block : lir.getControlFlowGraph().getBlocks()) { + List instructions = lir.getLIRforBlock(block); + for (int i = instructions.size() - 1; i >= 0; i--) { + LIRInstruction inst = instructions.get(i); + inst.forEachState((op, info) -> info.finish(op, frameMap)); + } + + } + } + + /** + * Merge outSet with in-set of successors. + */ + private boolean updateOutBlock(AbstractBlock block) { + ReferenceMap union = frameMap.initReferenceMap(true); + block.getSuccessors().forEach(succ -> union.updateUnion(liveInMap.get(succ))); + ReferenceMap outSet = liveOutMap.get(block); + // check if changed + if (outSet == null || !union.equals(outSet)) { + liveOutMap.put(block, union); + return true; + } + return false; + } + + private void processBlock(AbstractBlock block, Deque> worklist) { + if (updateOutBlock(block)) { + try (Indent indent = Debug.logAndIndent("handle block %s", block)) { + BlockClosure closure = new BlockClosure(liveOutMap.get(block).clone()); + List instructions = lir.getLIRforBlock(block); + for (int i = instructions.size() - 1; i >= 0; i--) { + LIRInstruction inst = instructions.get(i); + closure.processInstructionBottomUp(inst); + } + liveInMap.put(block, closure.getCurrentSet()); + worklist.addAll(block.getPredecessors()); + } + } + } + + private static final EnumSet REGISTER_FLAG_SET = EnumSet.of(OperandFlag.REG); + private static final LIRKind REFERENCE_KIND = LIRKind.reference(Kind.Object); + + private void forEachDestroyedCallerSavedRegister(LIRInstruction op, ValueConsumer consumer) { + if (op.destroysCallerSavedRegisters()) { + for (Register reg : frameMap.getRegisterConfig().getCallerSaveRegisters()) { + consumer.visitValue(reg.asValue(REFERENCE_KIND), OperandMode.TEMP, REGISTER_FLAG_SET); + } + } + } + + private final class BlockClosure { + private final ReferenceMap currentSet; + + private BlockClosure(ReferenceMap set) { + currentSet = set; + } + + private ReferenceMap getCurrentSet() { + return currentSet; + } + + /** + * Process all values of an instruction bottom-up, i.e. definitions before usages. Values + * that start or end at the current operation are not included. 
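+ * Temp and output operands (and caller-saved registers destroyed by the instruction) are
+ * removed from the live set first; alive and state operands are then added, the reference maps
+ * of the frame states are captured from the current set, and finally the input operands are added.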
+ */ + private void processInstructionBottomUp(LIRInstruction op) { + try (Indent indent = Debug.logAndIndent("handle op %d, %s", op.id(), op)) { + // kills + op.visitEachTemp(this::defConsumer); + op.visitEachOutput(this::defConsumer); + forEachDestroyedCallerSavedRegister(op, this::defConsumer); + + // gen - values that are considered alive for this state + op.visitEachAlive(this::useConsumer); + op.visitEachState(this::useConsumer); + // mark locations + op.forEachState((inst, info) -> markLocation(inst, info, this.getCurrentSet())); + // gen + op.visitEachInput(this::useConsumer); + } + } + + /** + * @see InstructionValueConsumer + * @param operand + * @param mode + * @param flags + */ + private void useConsumer(Value operand, OperandMode mode, EnumSet flags) { + LIRKind kind = operand.getLIRKind(); + if (shouldProcessValue(operand) && !kind.isValue() && !kind.isDerivedReference()) { + // no need to insert values and derived reference + Debug.log("set operand: %s", operand); + frameMap.setReference(operand, currentSet); + } + } + + /** + * @see InstructionValueConsumer + * @param operand + * @param mode + * @param flags + */ + private void defConsumer(Value operand, OperandMode mode, EnumSet flags) { + if (shouldProcessValue(operand)) { + Debug.log("clear operand: %s", operand); + frameMap.clearReference(operand, currentSet); + } else { + assert isIllegal(operand) || operand.getPlatformKind() != Kind.Illegal || mode == OperandMode.TEMP : String.format("Illegal PlatformKind is only allowed for TEMP mode: %s, %s", + operand, mode); + } + } + + protected boolean shouldProcessValue(Value operand) { + return (isRegister(operand) && attributes(asRegister(operand)).isAllocatable() || isStackSlot(operand)) && operand.getPlatformKind() != Kind.Illegal; + } + } + + /** + * This method does the actual marking. + */ + private void markLocation(LIRInstruction op, LIRFrameState info, ReferenceMap refMap) { + if (!info.hasDebugInfo()) { + info.initDebugInfo(frameMap, !op.destroysCallerSavedRegisters() || !frameMap.getRegisterConfig().areAllAllocatableRegistersCallerSaved()); + } + info.updateUnion(refMap); + } + + /** + * Gets an object describing the attributes of a given register according to this register + * configuration. + * + * @see LinearScan#attributes + */ + private RegisterAttributes attributes(Register reg) { + return registerAttributes[reg.number]; + } +} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/MoveResolver.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/MoveResolver.java Fri Feb 06 12:44:50 2015 +0100 @@ -0,0 +1,364 @@ +/* + * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.lir.alloc.lsra; + +import static com.oracle.graal.api.code.ValueUtil.*; + +import java.util.*; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.debug.*; +import com.oracle.graal.lir.*; + +/** + */ +final class MoveResolver { + + private final LinearScan allocator; + + private int insertIdx; + private LIRInsertionBuffer insertionBuffer; // buffer where moves are inserted + + private final List mappingFrom; + private final List mappingFromOpr; + private final List mappingTo; + private boolean multipleReadsAllowed; + private final int[] registerBlocked; + + private int registerBlocked(int reg) { + return registerBlocked[reg]; + } + + private void setRegisterBlocked(int reg, int direction) { + assert direction == 1 || direction == -1 : "out of bounds"; + registerBlocked[reg] += direction; + } + + void setMultipleReadsAllowed() { + multipleReadsAllowed = true; + } + + boolean hasMappings() { + return mappingFrom.size() > 0; + } + + MoveResolver(LinearScan allocator) { + + this.allocator = allocator; + this.multipleReadsAllowed = false; + this.mappingFrom = new ArrayList<>(8); + this.mappingFromOpr = new ArrayList<>(8); + this.mappingTo = new ArrayList<>(8); + this.insertIdx = -1; + this.insertionBuffer = new LIRInsertionBuffer(); + this.registerBlocked = new int[allocator.registers.length]; + assert checkEmpty(); + } + + boolean checkEmpty() { + assert mappingFrom.size() == 0 && mappingFromOpr.size() == 0 && mappingTo.size() == 0 : "list must be empty before and after processing"; + for (int i = 0; i < allocator.registers.length; i++) { + assert registerBlocked(i) == 0 : "register map must be empty before and after processing"; + } + assert !multipleReadsAllowed : "must have default value"; + return true; + } + + private boolean verifyBeforeResolve() { + assert mappingFrom.size() == mappingFromOpr.size() : "length must be equal"; + assert mappingFrom.size() == mappingTo.size() : "length must be equal"; + assert insertIdx != -1 : "insert position not set"; + + int i; + int j; + if (!multipleReadsAllowed) { + for (i = 0; i < mappingFrom.size(); i++) { + for (j = i + 1; j < mappingFrom.size(); j++) { + assert mappingFrom.get(i) == null || mappingFrom.get(i) != mappingFrom.get(j) : "cannot read from same interval twice"; + } + } + } + + for (i = 0; i < mappingTo.size(); i++) { + for (j = i + 1; j < mappingTo.size(); j++) { + assert mappingTo.get(i) != mappingTo.get(j) : "cannot write to same interval twice"; + } + } + + HashSet usedRegs = new HashSet<>(); + if (!multipleReadsAllowed) { + for (i = 0; i < mappingFrom.size(); i++) { + Interval interval = mappingFrom.get(i); + if (interval != null && !isIllegal(interval.location())) { + boolean unique = usedRegs.add(interval.location()); + assert unique : "cannot read from same register twice"; + } + } + } + + usedRegs.clear(); + for (i = 0; i < mappingTo.size(); i++) { + Interval interval = mappingTo.get(i); + if (isIllegal(interval.location())) { + // After insertion the location may become illegal, so don't check it since multiple + // intervals might be illegal. 
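+ // (such intervals are excluded from the write-conflict check below)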
+ continue; + } + boolean unique = usedRegs.add(interval.location()); + assert unique : "cannot write to same register twice"; + } + + usedRegs.clear(); + for (i = 0; i < mappingFrom.size(); i++) { + Interval interval = mappingFrom.get(i); + if (interval != null && !isRegister(interval.location())) { + usedRegs.add(interval.location()); + } + } + for (i = 0; i < mappingTo.size(); i++) { + Interval interval = mappingTo.get(i); + assert !usedRegs.contains(interval.location()) || interval.location().equals(mappingFrom.get(i).location()) : "stack slots used in mappingFrom must be disjoint to mappingTo"; + } + + return true; + } + + // mark assignedReg and assignedRegHi of the interval as blocked + private void blockRegisters(Interval interval) { + Value location = interval.location(); + if (isRegister(location)) { + int reg = asRegister(location).number; + assert multipleReadsAllowed || registerBlocked(reg) == 0 : "register already marked as used"; + setRegisterBlocked(reg, 1); + } + } + + // mark assignedReg and assignedRegHi of the interval as unblocked + private void unblockRegisters(Interval interval) { + Value location = interval.location(); + if (isRegister(location)) { + int reg = asRegister(location).number; + assert registerBlocked(reg) > 0 : "register already marked as unused"; + setRegisterBlocked(reg, -1); + } + } + + /** + * Checks if the {@linkplain Interval#location() location} of {@code to} is not blocked or is + * only blocked by {@code from}. + */ + private boolean safeToProcessMove(Interval from, Interval to) { + Value fromReg = from != null ? from.location() : null; + + Value reg = to.location(); + if (isRegister(reg)) { + if (registerBlocked(asRegister(reg).number) > 1 || (registerBlocked(asRegister(reg).number) == 1 && !reg.equals(fromReg))) { + return false; + } + } + + return true; + } + + private void createInsertionBuffer(List list) { + assert !insertionBuffer.initialized() : "overwriting existing buffer"; + insertionBuffer.init(list); + } + + private void appendInsertionBuffer() { + if (insertionBuffer.initialized()) { + insertionBuffer.finish(); + } + assert !insertionBuffer.initialized() : "must be uninitialized now"; + + insertIdx = -1; + } + + private void insertMove(Interval fromInterval, Interval toInterval) { + assert !fromInterval.operand.equals(toInterval.operand) : "from and to interval equal: " + fromInterval; + assert fromInterval.kind().equals(toInterval.kind()) : "move between different types"; + assert insertIdx != -1 : "must setup insert position first"; + + AllocatableValue fromOpr = fromInterval.operand; + AllocatableValue toOpr = toInterval.operand; + + insertionBuffer.append(insertIdx, allocator.ir.getSpillMoveFactory().createMove(toOpr, fromOpr)); + + Debug.log("insert move from %s to %s at %d", fromInterval, toInterval, insertIdx); + } + + private void insertMove(Value fromOpr, Interval toInterval) { + assert fromOpr.getLIRKind().equals(toInterval.kind()) : "move between different types"; + assert insertIdx != -1 : "must setup insert position first"; + + AllocatableValue toOpr = toInterval.operand; + insertionBuffer.append(insertIdx, allocator.ir.getSpillMoveFactory().createMove(toOpr, fromOpr)); + + Debug.log("insert move from value %s to %s at %d", fromOpr, toInterval, insertIdx); + } + + private void resolveMappings() { + assert verifyBeforeResolve(); + + // Block all registers that are used as input operands of a move. + // When a register is blocked, no move to this register is emitted. + // This is necessary for detecting cycles in moves. 
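+ // A register is unblocked again once the move reading it has been emitted. If a full pass over
+ // the mappings emits no move at all, the remaining mappings form a cycle; in that case one of
+ // the source intervals is spilled to its stack slot to break the cycle (e.g. the cycle
+ // r1 -> r2, r2 -> r1 is resolved as r1 -> stack, r2 -> r1, stack -> r2).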
+ int i; + for (i = mappingFrom.size() - 1; i >= 0; i--) { + Interval fromInterval = mappingFrom.get(i); + if (fromInterval != null) { + blockRegisters(fromInterval); + } + } + + int spillCandidate = -1; + while (mappingFrom.size() > 0) { + boolean processedInterval = false; + + for (i = mappingFrom.size() - 1; i >= 0; i--) { + Interval fromInterval = mappingFrom.get(i); + Interval toInterval = mappingTo.get(i); + + if (safeToProcessMove(fromInterval, toInterval)) { + // this interval can be processed because target is free + if (fromInterval != null) { + insertMove(fromInterval, toInterval); + unblockRegisters(fromInterval); + } else { + insertMove(mappingFromOpr.get(i), toInterval); + } + mappingFrom.remove(i); + mappingFromOpr.remove(i); + mappingTo.remove(i); + + processedInterval = true; + } else if (fromInterval != null && isRegister(fromInterval.location())) { + // this interval cannot be processed now because target is not free + // it starts in a register, so it is a possible candidate for spilling + spillCandidate = i; + } + } + + if (!processedInterval) { + // no move could be processed because there is a cycle in the move list + // (e.g. r1 . r2, r2 . r1), so one interval must be spilled to memory + assert spillCandidate != -1 : "no interval in register for spilling found"; + + // create a new spill interval and assign a stack slot to it + Interval fromInterval = mappingFrom.get(spillCandidate); + Interval spillInterval = allocator.createDerivedInterval(fromInterval); + spillInterval.setKind(fromInterval.kind()); + + // add a dummy range because real position is difficult to calculate + // Note: this range is a special case when the integrity of the allocation is + // checked + spillInterval.addRange(1, 2); + + // do not allocate a new spill slot for temporary interval, but + // use spill slot assigned to fromInterval. Otherwise moves from + // one stack slot to another can happen (not allowed by LIRAssembler + StackSlotValue spillSlot = fromInterval.spillSlot(); + if (spillSlot == null) { + spillSlot = allocator.frameMapBuilder.allocateSpillSlot(spillInterval.kind()); + fromInterval.setSpillSlot(spillSlot); + } + spillInterval.assignLocation(spillSlot); + + Debug.log("created new Interval for spilling: %s", spillInterval); + + // insert a move from register to stack and update the mapping + insertMove(fromInterval, spillInterval); + mappingFrom.set(spillCandidate, spillInterval); + unblockRegisters(fromInterval); + } + } + + // reset to default value + multipleReadsAllowed = false; + + // check that all intervals have been processed + assert checkEmpty(); + } + + void setInsertPosition(List insertList, int insertIdx) { + assert this.insertIdx == -1 : "use moveInsertPosition instead of setInsertPosition when data already set"; + + createInsertionBuffer(insertList); + this.insertIdx = insertIdx; + } + + void moveInsertPosition(List newInsertList, int newInsertIdx) { + if (insertionBuffer.lirList() != null && (insertionBuffer.lirList() != newInsertList || this.insertIdx != newInsertIdx)) { + // insert position changed . resolve current mappings + resolveMappings(); + } + + if (insertionBuffer.lirList() != newInsertList) { + // block changed . 
append insertionBuffer because it is + // bound to a specific block and create a new insertionBuffer + appendInsertionBuffer(); + createInsertionBuffer(newInsertList); + } + + this.insertIdx = newInsertIdx; + } + + void addMapping(Interval fromInterval, Interval toInterval) { + + if (isIllegal(toInterval.location()) && toInterval.canMaterialize()) { + Debug.log("no store to rematerializable interval %s needed", toInterval); + return; + } + if (isIllegal(fromInterval.location()) && fromInterval.canMaterialize()) { + // Instead of a reload, re-materialize the value + Value rematValue = fromInterval.getMaterializedValue(); + addMapping(rematValue, toInterval); + return; + } + Debug.log("add move mapping from %s to %s", fromInterval, toInterval); + + assert !fromInterval.operand.equals(toInterval.operand) : "from and to interval equal: " + fromInterval; + assert fromInterval.kind().equals(toInterval.kind()); + mappingFrom.add(fromInterval); + mappingFromOpr.add(Value.ILLEGAL); + mappingTo.add(toInterval); + } + + void addMapping(Value fromOpr, Interval toInterval) { + Debug.log("add move mapping from %s to %s", fromOpr, toInterval); + + assert isConstant(fromOpr) : "only for constants"; + + mappingFrom.add(null); + mappingFromOpr.add(fromOpr); + mappingTo.add(toInterval); + } + + void resolveAndAppendMoves() { + if (hasMappings()) { + resolveMappings(); + } + appendInsertionBuffer(); + } +} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/OptimizingLinearScanWalker.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/OptimizingLinearScanWalker.java Fri Feb 06 12:44:50 2015 +0100 @@ -0,0 +1,249 @@ +/* + * Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.graal.lir.alloc.lsra; + +import static com.oracle.graal.api.code.ValueUtil.*; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.compiler.common.cfg.*; +import com.oracle.graal.debug.*; +import com.oracle.graal.debug.Debug.Scope; +import com.oracle.graal.lir.alloc.lsra.Interval.*; +import com.oracle.graal.options.*; + +public class OptimizingLinearScanWalker extends LinearScanWalker { + + public static class Options { + // @formatter:off + @Option(help = "Enable LSRA optimization", type = OptionType.Debug) + public static final OptionValue LSRAOptimization = new OptionValue<>(true); + @Option(help = "LSRA optimization: Only split but do not reassign", type = OptionType.Debug) + public static final OptionValue LSRAOptSplitOnly = new OptionValue<>(false); + // @formatter:on + } + + OptimizingLinearScanWalker(LinearScan allocator, Interval unhandledFixedFirst, Interval unhandledAnyFirst) { + super(allocator, unhandledFixedFirst, unhandledAnyFirst); + } + + @Override + protected void handleSpillSlot(Interval interval) { + assert interval.location() != null : "interval not assigned " + interval; + if (interval.canMaterialize()) { + assert !isStackSlotValue(interval.location()) : "interval can materialize but assigned to a stack slot " + interval; + return; + } + assert isStackSlotValue(interval.location()) : "interval not assigned to a stack slot " + interval; + try (Scope s1 = Debug.scope("LSRAOptimization")) { + Debug.log("adding stack to unhandled list %s", interval); + unhandledLists.addToListSortedByStartAndUsePositions(RegisterBinding.Stack, interval); + } + } + + @SuppressWarnings("unused") + private static void printRegisterBindingList(RegisterBindingLists list, RegisterBinding binding) { + for (Interval interval = list.get(binding); interval != Interval.EndMarker; interval = interval.next) { + Debug.log("%s", interval); + } + } + + @Override + void walk() { + try (Scope s = Debug.scope("OptimizingLinearScanWalker")) { + for (AbstractBlock block : allocator.sortedBlocks) { + optimizeBlock(block); + } + } + super.walk(); + } + + private void optimizeBlock(AbstractBlock block) { + if (block.getPredecessorCount() == 1) { + int nextBlock = allocator.getFirstLirInstructionId(block); + try (Scope s1 = Debug.scope("LSRAOptimization")) { + Debug.log("next block: %s (%d)", block, nextBlock); + } + try (Indent indent0 = Debug.indent()) { + walkTo(nextBlock); + + try (Scope s1 = Debug.scope("LSRAOptimization")) { + boolean changed = true; + // we need to do this because the active lists might change + loop: while (changed) { + changed = false; + try (Indent indent1 = Debug.logAndIndent("Active intervals: (block %s [%d])", block, nextBlock)) { + for (Interval active = activeLists.get(RegisterBinding.Any); active != Interval.EndMarker; active = active.next) { + Debug.log("active (any): %s", active); + if (optimize(nextBlock, block, active, RegisterBinding.Any)) { + changed = true; + break loop; + } + } + for (Interval active = activeLists.get(RegisterBinding.Stack); active != Interval.EndMarker; active = active.next) { + Debug.log("active (stack): %s", active); + if (optimize(nextBlock, block, active, RegisterBinding.Stack)) { + changed = true; + break loop; + } + } + } + } + } + } + } + } + + private boolean optimize(int currentPos, AbstractBlock currentBlock, Interval currentInterval, RegisterBinding binding) { + // BEGIN initialize and sanity checks + assert currentBlock != null : "block must not be null"; + assert 
currentInterval != null : "interval must not be null"; + + assert currentBlock.getPredecessorCount() == 1 : "more than one predecessors -> optimization not possible"; + + if (!currentInterval.isSplitChild()) { + // interval is not a split child -> no need for optimization + return false; + } + + if (currentInterval.from() == currentPos) { + // the interval starts at the current position so no need for splitting + return false; + } + + // get current location + AllocatableValue currentLocation = currentInterval.location(); + assert currentLocation != null : "active intervals must have a location assigned!"; + + // get predecessor stuff + AbstractBlock predecessorBlock = currentBlock.getPredecessors().get(0); + int predEndId = allocator.getLastLirInstructionId(predecessorBlock); + Interval predecessorInterval = currentInterval.getIntervalCoveringOpId(predEndId); + assert predecessorInterval != null : "variable not live at the end of the only predecessor! " + predecessorBlock + " -> " + currentBlock + " interval: " + currentInterval; + AllocatableValue predecessorLocation = predecessorInterval.location(); + assert predecessorLocation != null : "handled intervals must have a location assigned!"; + + // END initialize and sanity checks + + if (currentLocation.equals(predecessorLocation)) { + // locations are already equal -> nothing to optimize + return false; + } + + if (!isStackSlotValue(predecessorLocation) && !isRegister(predecessorLocation)) { + assert predecessorInterval.canMaterialize(); + // value is materialized -> no need for optimization + return false; + } + + assert isStackSlotValue(currentLocation) || isRegister(currentLocation) : "current location not a register or stack slot " + currentLocation; + + try (Indent indent = Debug.logAndIndent("location differs: %s vs. 
%s", predecessorLocation, currentLocation)) { + // split current interval at current position + Debug.log("splitting at position %d", currentPos); + + assert allocator.isBlockBegin(currentPos) && ((currentPos & 1) == 0) : "split pos must be even when on block boundary"; + + Interval splitPart = currentInterval.split(currentPos, allocator); + activeLists.remove(binding, currentInterval); + + assert splitPart.from() >= currentPosition : "cannot append new interval before current walk position"; + + // the currentSplitChild is needed later when moves are inserted for reloading + assert splitPart.currentSplitChild() == currentInterval : "overwriting wrong currentSplitChild"; + splitPart.makeCurrentSplitChild(); + + if (Debug.isLogEnabled()) { + Debug.log("left interval : %s", currentInterval.logString(allocator)); + Debug.log("right interval : %s", splitPart.logString(allocator)); + } + + if (Options.LSRAOptSplitOnly.getValue()) { + // just add the split interval to the unhandled list + unhandledLists.addToListSortedByStartAndUsePositions(RegisterBinding.Any, splitPart); + } else { + if (isRegister(predecessorLocation)) { + splitRegisterInterval(splitPart, asRegister(predecessorLocation)); + } else { + assert isStackSlotValue(predecessorLocation); + Debug.log("assigning interval %s to %s", splitPart, predecessorLocation); + splitPart.assignLocation(predecessorLocation); + // activate interval + activeLists.addToListSortedByCurrentFromPositions(RegisterBinding.Stack, splitPart); + splitPart.state = State.Active; + + splitStackInterval(splitPart); + } + } + } + return true; + } + + private void splitRegisterInterval(Interval interval, Register reg) { + // collect current usage of registers + initVarsForAlloc(interval); + initUseLists(false); + spillExcludeActiveFixed(); + // spillBlockUnhandledFixed(cur); + assert unhandledLists.get(RegisterBinding.Fixed) == Interval.EndMarker : "must not have unhandled fixed intervals because all fixed intervals have a use at position 0"; + spillBlockInactiveFixed(interval); + spillCollectActiveAny(); + spillCollectInactiveAny(interval); + + if (Debug.isLogEnabled()) { + try (Indent indent2 = Debug.logAndIndent("state of registers:")) { + for (Register register : availableRegs) { + int i = register.number; + try (Indent indent3 = Debug.logAndIndent("reg %d: usePos: %d, blockPos: %d, intervals: ", i, usePos[i], blockPos[i])) { + for (int j = 0; j < spillIntervals[i].size(); j++) { + Debug.log("%d ", spillIntervals[i].get(j).operandNumber); + } + } + } + } + } + + // the register must be free at least until this position + boolean needSplit = blockPos[reg.number] <= interval.to(); + + int splitPos = blockPos[reg.number]; + + assert splitPos > 0 : "invalid splitPos"; + assert needSplit || splitPos > interval.from() : "splitting interval at from"; + + Debug.log("assigning interval %s to %s", interval, reg); + interval.assignLocation(reg.asValue(interval.kind())); + if (needSplit) { + // register not available for full interval : so split it + splitWhenPartialRegisterAvailable(interval, splitPos); + } + + // perform splitting and spilling for all affected intervals + splitAndSpillIntersectingIntervals(reg); + + // activate interval + activeLists.addToListSortedByCurrentFromPositions(RegisterBinding.Any, interval); + interval.state = State.Active; + + } +} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/Range.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ 
b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/Range.java Fri Feb 06 12:44:50 2015 +0100 @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.lir.alloc.lsra; + +/** + * Represents a range of integers from a start (inclusive) to an end (exclusive. + */ +public final class Range { + + public static final Range EndMarker = new Range(Integer.MAX_VALUE, Integer.MAX_VALUE, null); + + /** + * The start of the range, inclusive. + */ + public int from; + + /** + * The end of the range, exclusive. + */ + public int to; + + /** + * A link to allow the range to be put into a singly linked list. + */ + public Range next; + + boolean intersects(Range r) { + return intersectsAt(r) != -1; + } + + /** + * Creates a new range. + * + * @param from the start of the range, inclusive + * @param to the end of the range, exclusive + * @param next link to the next range in a linked list + */ + Range(int from, int to, Range next) { + this.from = from; + this.to = to; + this.next = next; + } + + int intersectsAt(Range other) { + Range r1 = this; + Range r2 = other; + + assert r2 != null : "null ranges not allowed"; + assert r1 != EndMarker && r2 != EndMarker : "empty ranges not allowed"; + + do { + if (r1.from < r2.from) { + if (r1.to <= r2.from) { + r1 = r1.next; + if (r1 == EndMarker) { + return -1; + } + } else { + return r2.from; + } + } else { + if (r2.from < r1.from) { + if (r2.to <= r1.from) { + r2 = r2.next; + if (r2 == EndMarker) { + return -1; + } + } else { + return r1.from; + } + } else { // r1.from() == r2.from() + if (r1.from == r1.to) { + r1 = r1.next; + if (r1 == EndMarker) { + return -1; + } + } else { + if (r2.from == r2.to) { + r2 = r2.next; + if (r2 == EndMarker) { + return -1; + } + } else { + return r1.from; + } + } + } + } + } while (true); + } + + @Override + public String toString() { + return "[" + from + ", " + to + "]"; + } +} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/RegisterVerifier.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/RegisterVerifier.java Fri Feb 06 12:44:50 2015 +0100 @@ -0,0 +1,240 @@ +/* + * Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.lir.alloc.lsra; + +import static com.oracle.graal.api.code.ValueUtil.*; + +import java.util.*; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.compiler.common.*; +import com.oracle.graal.compiler.common.cfg.*; +import com.oracle.graal.compiler.common.util.*; +import com.oracle.graal.debug.*; +import com.oracle.graal.lir.*; +import com.oracle.graal.lir.LIRInstruction.OperandFlag; +import com.oracle.graal.lir.LIRInstruction.OperandMode; + +/** + */ +final class RegisterVerifier { + + LinearScan allocator; + List> workList; // all blocks that must be processed + ArrayMap savedStates; // saved information of previous check + + // simplified access to methods of LinearScan + Interval intervalAt(Value operand) { + return allocator.intervalFor(operand); + } + + // currently, only registers are processed + int stateSize() { + return allocator.maxRegisterNumber() + 1; + } + + // accessors + Interval[] stateForBlock(AbstractBlock block) { + return savedStates.get(block.getId()); + } + + void setStateForBlock(AbstractBlock block, Interval[] savedState) { + savedStates.put(block.getId(), savedState); + } + + void addToWorkList(AbstractBlock block) { + if (!workList.contains(block)) { + workList.add(block); + } + } + + RegisterVerifier(LinearScan allocator) { + this.allocator = allocator; + workList = new ArrayList<>(16); + this.savedStates = new ArrayMap<>(); + + } + + void verify(AbstractBlock start) { + // setup input registers (method arguments) for first block + Interval[] inputState = new Interval[stateSize()]; + setStateForBlock(start, inputState); + addToWorkList(start); + + // main loop for verification + do { + AbstractBlock block = workList.get(0); + workList.remove(0); + + processBlock(block); + } while (!workList.isEmpty()); + } + + private void processBlock(AbstractBlock block) { + try (Indent indent = Debug.logAndIndent("processBlock B%d", block.getId())) { + // must copy state because it is modified + Interval[] inputState = copy(stateForBlock(block)); + + try (Indent indent2 = Debug.logAndIndent("Input-State of intervals:")) { + for (int i = 0; i < stateSize(); i++) { + if (inputState[i] != null) { + Debug.log(" %4d", inputState[i].operandNumber); + } else { + Debug.log(" __"); + } + } + } + + // process all operations of the block + processOperations(allocator.ir.getLIRforBlock(block), inputState); + + // iterate all successors + for (AbstractBlock succ : block.getSuccessors()) { + processSuccessor(succ, inputState); + } + } + } + + private void processSuccessor(AbstractBlock block, Interval[] 
inputState) { + Interval[] savedState = stateForBlock(block); + + if (savedState != null) { + // this block was already processed before. + // check if new inputState is consistent with savedState + + boolean savedStateCorrect = true; + for (int i = 0; i < stateSize(); i++) { + if (inputState[i] != savedState[i]) { + // current inputState and previous savedState assume a different + // interval in this register . assume that this register is invalid + if (savedState[i] != null) { + // invalidate old calculation only if it assumed that + // register was valid. when the register was already invalid, + // then the old calculation was correct. + savedStateCorrect = false; + savedState[i] = null; + + Debug.log("processSuccessor B%d: invalidating slot %d", block.getId(), i); + } + } + } + + if (savedStateCorrect) { + // already processed block with correct inputState + Debug.log("processSuccessor B%d: previous visit already correct", block.getId()); + } else { + // must re-visit this block + Debug.log("processSuccessor B%d: must re-visit because input state changed", block.getId()); + addToWorkList(block); + } + + } else { + // block was not processed before, so set initial inputState + Debug.log("processSuccessor B%d: initial visit", block.getId()); + + setStateForBlock(block, copy(inputState)); + addToWorkList(block); + } + } + + static Interval[] copy(Interval[] inputState) { + return inputState.clone(); + } + + static void statePut(Interval[] inputState, Value location, Interval interval) { + if (location != null && isRegister(location)) { + Register reg = asRegister(location); + int regNum = reg.number; + if (interval != null) { + Debug.log("%s = %s", reg, interval.operand); + } else if (inputState[regNum] != null) { + Debug.log("%s = null", reg); + } + + inputState[regNum] = interval; + } + } + + static boolean checkState(Interval[] inputState, Value reg, Interval interval) { + if (reg != null && isRegister(reg)) { + if (inputState[asRegister(reg).number] != interval) { + throw new GraalInternalError("!! 
Error in register allocation: register %s does not contain interval %s but interval %s", reg, interval.operand, inputState[asRegister(reg).number]); + } + } + return true; + } + + void processOperations(List ops, final Interval[] inputState) { + InstructionValueConsumer useConsumer = new InstructionValueConsumer() { + + @Override + public void visitValue(LIRInstruction op, Value operand, OperandMode mode, EnumSet flags) { + // we skip spill moves inserted by the spill position optimization + if (LinearScan.isVariableOrRegister(operand) && allocator.isProcessed(operand) && op.id() != LinearScan.DOMINATOR_SPILL_MOVE_ID) { + Interval interval = intervalAt(operand); + if (op.id() != -1) { + interval = interval.getSplitChildAtOpId(op.id(), mode, allocator); + } + + assert checkState(inputState, interval.location(), interval.splitParent()); + } + } + }; + + InstructionValueConsumer defConsumer = (op, operand, mode, flags) -> { + if (LinearScan.isVariableOrRegister(operand) && allocator.isProcessed(operand)) { + Interval interval = intervalAt(operand); + if (op.id() != -1) { + interval = interval.getSplitChildAtOpId(op.id(), mode, allocator); + } + + statePut(inputState, interval.location(), interval.splitParent()); + } + }; + + // visit all instructions of the block + for (int i = 0; i < ops.size(); i++) { + final LIRInstruction op = ops.get(i); + + if (Debug.isLogEnabled()) { + Debug.log("%s", op.toStringWithIdPrefix()); + } + + // check if input operands are correct + op.visitEachInput(useConsumer); + // invalidate all caller save registers at calls + if (op.destroysCallerSavedRegisters()) { + for (Register r : allocator.frameMapBuilder.getRegisterConfig().getCallerSaveRegisters()) { + statePut(inputState, r.asValue(), null); + } + } + op.visitEachAlive(useConsumer); + // set temp operands (some operations use temp operands also as output operands, so + // can't set them null) + op.visitEachTemp(defConsumer); + // set output operands + op.visitEachOutput(defConsumer); + } + } +} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/debug/LIRGenerationDebugContext.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/debug/LIRGenerationDebugContext.java Fri Feb 06 12:44:50 2015 +0100 @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.graal.lir.debug; + +import com.oracle.graal.api.meta.*; +import com.oracle.graal.debug.*; +import com.oracle.graal.lir.*; + +/** + * Provides information about {@link LIR} generation for debugging purposes. + */ +public interface LIRGenerationDebugContext { + + /** + * Gets an object that represents the source of an {@link LIR} {@link Value operand} in a higher + * representation. + */ + Object getSourceForOperand(Value value); + + static LIRGenerationDebugContext getFromDebugContext() { + if (Debug.isEnabled()) { + LIRGenerationDebugContext lirGen = Debug.contextLookup(LIRGenerationDebugContext.class); + assert lirGen != null; + return lirGen; + } + return null; + } + + static Object getSourceForOperandFromDebugContext(Value value) { + LIRGenerationDebugContext gen = getFromDebugContext(); + if (gen != null) { + return gen.getSourceForOperand(value); + } + return null; + } + +} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.phases/src/com/oracle/graal/phases/util/ArrayMap.java --- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/util/ArrayMap.java Fri Feb 06 12:17:20 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,118 +0,0 @@ -/* - * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.phases.util; - -/** - * The {@code ArrayMap} class implements an efficient one-level map which is implemented as an - * array. Note that because of the one-level array inside, this data structure performs best when - * the range of integer keys is small and densely used. Note that the implementation can handle - * arbitrary intervals, including negative numbers, up to intervals of size 2^31 - 1. - */ -public class ArrayMap { - - private static final int INITIAL_SIZE = 5; // how big the initial array should be - private static final int EXTRA = 2; // how far on the left or right of a new element to grow - - Object[] map; - int low; - - /** - * Constructs a new {@code ArrayMap} with no initial assumptions. - */ - public ArrayMap() { - } - - /** - * Constructs a new {@code ArrayMap} that initially covers the specified interval. Note that - * this map will automatically expand if necessary later. - * - * @param low the low index, inclusive - * @param high the high index, exclusive - */ - public ArrayMap(int low, int high) { - this.low = low; - this.map = new Object[high - low + 1]; - } - - /** - * Puts a new value in the map at the specified index. 
- * - * @param i the index at which to store the value - * @param value the value to store at the specified index - */ - public void put(int i, T value) { - int index = i - low; - if (map == null) { - // no map yet - map = new Object[INITIAL_SIZE]; - low = index - 2; - map[INITIAL_SIZE / 2] = value; - } else if (index < 0) { - // grow backwards - growBackward(i, value); - } else if (index >= map.length) { - // grow forwards - growForward(i, value); - } else { - // no growth necessary - map[index] = value; - } - } - - /** - * Gets the value at the specified index in the map. - * - * @param i the index - * @return the value at the specified index; {@code null} if there is no value at the specified - * index, or if the index is out of the currently stored range - */ - public T get(int i) { - int index = i - low; - if (map == null || index < 0 || index >= map.length) { - return null; - } - Class type = null; - return Util.uncheckedCast(type, map[index]); - } - - public int length() { - return map.length; - } - - private void growBackward(int i, T value) { - int nlow = i - EXTRA; - Object[] nmap = new Object[low - nlow + map.length]; - System.arraycopy(map, 0, nmap, low - nlow, map.length); - map = nmap; - low = nlow; - map[i - low] = value; - } - - private void growForward(int i, T value) { - int nlen = i - low + 1 + EXTRA; - Object[] nmap = new Object[nlen]; - System.arraycopy(map, 0, nmap, 0, map.length); - map = nmap; - map[i - low] = value; - } -} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.phases/src/com/oracle/graal/phases/util/ArraySet.java --- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/util/ArraySet.java Fri Feb 06 12:17:20 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.phases.util; - -import java.util.*; - -/** - * Mimic a set implementation with an ArrayList. Beneficial for small sets (compared to - * {@link HashSet}). 
- */ -public class ArraySet extends ArrayList implements Set { - private static final long serialVersionUID = 4476957522387436654L; - - public ArraySet() { - super(); - } - - public ArraySet(int i) { - super(i); - } - - public ArraySet(Collection c) { - super(c); - } - - @Override - public boolean add(E e) { - // avoid duplicated entries - if (contains(e)) { - return false; - } - return super.add(e); - } -} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.phases/src/com/oracle/graal/phases/util/BitMap2D.java --- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/util/BitMap2D.java Fri Feb 06 12:17:20 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,99 +0,0 @@ -/* - * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.phases.util; - -import java.util.*; - -/** - * This class implements a two-dimensional bitmap. 
- */ -public final class BitMap2D { - - private BitSet map; - private final int bitsPerSlot; - - private int bitIndex(int slotIndex, int bitWithinSlotIndex) { - return slotIndex * bitsPerSlot + bitWithinSlotIndex; - } - - private boolean verifyBitWithinSlotIndex(int index) { - assert index < bitsPerSlot : "index " + index + " is out of bounds " + bitsPerSlot; - return true; - } - - public BitMap2D(int sizeInSlots, int bitsPerSlot) { - map = new BitSet(sizeInSlots * bitsPerSlot); - this.bitsPerSlot = bitsPerSlot; - } - - public int sizeInBits() { - return map.size(); - } - - // Returns number of full slots that have been allocated - public int sizeInSlots() { - return map.size() / bitsPerSlot; - } - - public boolean isValidIndex(int slotIndex, int bitWithinSlotIndex) { - assert verifyBitWithinSlotIndex(bitWithinSlotIndex); - return (bitIndex(slotIndex, bitWithinSlotIndex) < sizeInBits()); - } - - public boolean at(int slotIndex, int bitWithinSlotIndex) { - assert verifyBitWithinSlotIndex(bitWithinSlotIndex); - return map.get(bitIndex(slotIndex, bitWithinSlotIndex)); - } - - public void setBit(int slotIndex, int bitWithinSlotIndex) { - assert verifyBitWithinSlotIndex(bitWithinSlotIndex); - map.set(bitIndex(slotIndex, bitWithinSlotIndex)); - } - - public void clearBit(int slotIndex, int bitWithinSlotIndex) { - assert verifyBitWithinSlotIndex(bitWithinSlotIndex); - map.clear(bitIndex(slotIndex, bitWithinSlotIndex)); - } - - public void atPutGrow(int slotIndex, int bitWithinSlotIndex, boolean value) { - int size = sizeInSlots(); - if (size <= slotIndex) { - while (size <= slotIndex) { - size *= 2; - } - BitSet newBitMap = new BitSet(size * bitsPerSlot); - newBitMap.or(map); - map = newBitMap; - } - - if (value) { - setBit(slotIndex, bitWithinSlotIndex); - } else { - clearBit(slotIndex, bitWithinSlotIndex); - } - } - - public void clear() { - map.clear(); - } -} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.phases/src/com/oracle/graal/phases/util/IntList.java --- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/util/IntList.java Fri Feb 06 12:17:20 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,163 +0,0 @@ -/* - * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.phases.util; - -import java.util.*; - -/** - * An expandable and indexable list of {@code int}s. - * - * This class avoids the boxing/unboxing incurred by {@code ArrayList}. 
- */ -public final class IntList { - - private int[] array; - private int size; - - /** - * Creates an int list with a specified initial capacity. - * - * @param initialCapacity - */ - public IntList(int initialCapacity) { - array = new int[initialCapacity]; - } - - /** - * Creates an int list with a specified initial array. - * - * @param array the initial array used for the list (no copy is made) - * @param initialSize the initial {@linkplain #size() size} of the list (must be less than or - * equal to {@code array.length} - */ - public IntList(int[] array, int initialSize) { - assert initialSize <= array.length; - this.array = array; - this.size = initialSize; - } - - /** - * Makes a new int list by copying a range from a given int list. - * - * @param other the list from which a range of values is to be copied into the new list - * @param startIndex the index in {@code other} at which to start copying - * @param length the number of values to copy from {@code other} - * @return a new int list whose {@linkplain #size() size} and capacity is {@code length} - */ - public static IntList copy(IntList other, int startIndex, int length) { - return copy(other, startIndex, length, length); - } - - /** - * Makes a new int list by copying a range from a given int list. - * - * @param other the list from which a range of values is to be copied into the new list - * @param startIndex the index in {@code other} at which to start copying - * @param length the number of values to copy from {@code other} - * @param initialCapacity the initial capacity of the new int list (must be greater or equal to - * {@code length}) - * @return a new int list whose {@linkplain #size() size} is {@code length} - */ - public static IntList copy(IntList other, int startIndex, int length, int initialCapacity) { - assert initialCapacity >= length : "initialCapacity < length"; - int[] array = new int[initialCapacity]; - System.arraycopy(other.array, startIndex, array, 0, length); - return new IntList(array, length); - } - - public int size() { - return size; - } - - /** - * Appends a value to the end of this list, increasing its {@linkplain #size() size} by 1. - * - * @param value the value to append - */ - public void add(int value) { - if (size == array.length) { - int newSize = (size * 3) / 2 + 1; - array = Arrays.copyOf(array, newSize); - } - array[size++] = value; - } - - /** - * Gets the value in this list at a given index. - * - * @param index the index of the element to return - * @throws IndexOutOfBoundsException if {@code index < 0 || index >= size()} - */ - public int get(int index) { - if (index >= size) { - throw new IndexOutOfBoundsException("Index: " + index + ", Size: " + size); - } - return array[index]; - } - - /** - * Sets the size of this list to 0. - */ - public void clear() { - size = 0; - } - - /** - * Sets a value at a given index in this list. - * - * @param index the index of the element to update - * @param value the new value of the element - * @throws IndexOutOfBoundsException if {@code index < 0 || index >= size()} - */ - public void set(int index, int value) { - if (index >= size) { - throw new IndexOutOfBoundsException("Index: " + index + ", Size: " + size); - } - array[index] = value; - } - - /** - * Adjusts the {@linkplain #size() size} of this int list. - * - * If {@code newSize < size()}, the size is changed to {@code newSize}. If - * {@code newSize > size()}, sufficient 0 elements are {@linkplain #add(int) added} until - * {@code size() == newSize}. 
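IntList keeps its elements in a bare int[] plus a size field, so appends avoid the Integer boxing an ArrayList<Integer> would incur; add grows the array by roughly half (newSize = size * 3 / 2 + 1), and get/set check only the upper bound against size. One caveat worth flagging for reviewers: setSize, whose body follows below, enlarges the backing array when growing but does not appear to update the size field, so the documented grow-until-size()-equals-newSize behavior seems not to hold. A usage sketch (illustrative only, not part of this changeset):

    public class IntListExample {
        public static void main(String[] args) {
            IntList ids = new IntList(2);          // capacity 2, size 0
            ids.add(7);
            ids.add(11);
            ids.add(13);                           // grows capacity to 2 * 3 / 2 + 1 = 4
            System.out.println(ids.get(1));        // 11
            ids.set(0, 42);
            System.out.println(ids);               // [42, 11, 13]
            IntList firstTwo = IntList.copy(ids, 0, 2);
            System.out.println(firstTwo.size());   // 2
            ids.clear();                           // size drops to 0, capacity is kept
        }
    }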
- * - * @param newSize the new size of this int list - */ - public void setSize(int newSize) { - if (newSize < size) { - size = newSize; - } else if (newSize > size) { - array = Arrays.copyOf(array, newSize); - } - } - - @Override - public String toString() { - if (array.length == size) { - return Arrays.toString(array); - } - return Arrays.toString(Arrays.copyOf(array, size)); - } -} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.phases/src/com/oracle/graal/phases/util/Util.java --- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/util/Util.java Fri Feb 06 12:17:20 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,337 +0,0 @@ -/* - * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.phases.util; - -import java.util.*; - -import com.oracle.graal.api.code.*; -import com.oracle.graal.debug.*; - -/** - * The {@code Util} class contains a motley collection of utility methods used throughout the - * compiler. - */ -public class Util { - - public static final int PRINTING_LINE_WIDTH = 40; - public static final char SECTION_CHARACTER = '*'; - public static final char SUB_SECTION_CHARACTER = '='; - public static final char SEPERATOR_CHARACTER = '-'; - - public static boolean replaceInList(T a, T b, List list) { - final int max = list.size(); - for (int i = 0; i < max; i++) { - if (list.get(i) == a) { - list.set(i, b); - return true; - } - } - return false; - } - - /** - * Statically cast an object to an arbitrary Object type. Dynamically checked. - */ - @SuppressWarnings("unchecked") - public static T uncheckedCast(@SuppressWarnings("unused") Class type, Object object) { - return (T) object; - } - - /** - * Statically cast an object to an arbitrary Object type. Dynamically checked. - */ - @SuppressWarnings("unchecked") - public static T uncheckedCast(Object object) { - return (T) object; - } - - /** - * Utility method to combine a base hash with the identity hash of one or more objects. - * - * @param hash the base hash - * @param x the object to add to the hash - * @return the combined hash - */ - public static int hash1(int hash, Object x) { - // always set at least one bit in case the hash wraps to zero - return 0x10000000 | (hash + 7 * System.identityHashCode(x)); - } - - /** - * Utility method to combine a base hash with the identity hash of one or more objects. 
- * - * @param hash the base hash - * @param x the first object to add to the hash - * @param y the second object to add to the hash - * @return the combined hash - */ - public static int hash2(int hash, Object x, Object y) { - // always set at least one bit in case the hash wraps to zero - return 0x20000000 | (hash + 7 * System.identityHashCode(x) + 11 * System.identityHashCode(y)); - } - - /** - * Utility method to combine a base hash with the identity hash of one or more objects. - * - * @param hash the base hash - * @param x the first object to add to the hash - * @param y the second object to add to the hash - * @param z the third object to add to the hash - * @return the combined hash - */ - public static int hash3(int hash, Object x, Object y, Object z) { - // always set at least one bit in case the hash wraps to zero - return 0x30000000 | (hash + 7 * System.identityHashCode(x) + 11 * System.identityHashCode(y) + 13 * System.identityHashCode(z)); - } - - /** - * Utility method to combine a base hash with the identity hash of one or more objects. - * - * @param hash the base hash - * @param x the first object to add to the hash - * @param y the second object to add to the hash - * @param z the third object to add to the hash - * @param w the fourth object to add to the hash - * @return the combined hash - */ - public static int hash4(int hash, Object x, Object y, Object z, Object w) { - // always set at least one bit in case the hash wraps to zero - return 0x40000000 | (hash + 7 * System.identityHashCode(x) + 11 * System.identityHashCode(y) + 13 * System.identityHashCode(z) + 17 * System.identityHashCode(w)); - } - - static { - assert CodeUtil.log2(2) == 1; - assert CodeUtil.log2(4) == 2; - assert CodeUtil.log2(8) == 3; - assert CodeUtil.log2(16) == 4; - assert CodeUtil.log2(32) == 5; - assert CodeUtil.log2(0x40000000) == 30; - - assert CodeUtil.log2(2L) == 1; - assert CodeUtil.log2(4L) == 2; - assert CodeUtil.log2(8L) == 3; - assert CodeUtil.log2(16L) == 4; - assert CodeUtil.log2(32L) == 5; - assert CodeUtil.log2(0x4000000000000000L) == 62; - - assert !CodeUtil.isPowerOf2(3); - assert !CodeUtil.isPowerOf2(5); - assert !CodeUtil.isPowerOf2(7); - assert !CodeUtil.isPowerOf2(-1); - - assert CodeUtil.isPowerOf2(2); - assert CodeUtil.isPowerOf2(4); - assert CodeUtil.isPowerOf2(8); - assert CodeUtil.isPowerOf2(16); - assert CodeUtil.isPowerOf2(32); - assert CodeUtil.isPowerOf2(64); - } - - /** - * Sets the element at a given position of a list and ensures that this position exists. If the - * list is current shorter than the position, intermediate positions are filled with a given - * value. - * - * @param list the list to put the element into - * @param pos the position at which to insert the element - * @param x the element that should be inserted - * @param filler the filler element that is used for the intermediate positions in case the list - * is shorter than pos - */ - public static void atPutGrow(List list, int pos, T x, T filler) { - if (list.size() < pos + 1) { - while (list.size() < pos + 1) { - list.add(filler); - } - assert list.size() == pos + 1; - } - - assert list.size() >= pos + 1; - list.set(pos, x); - } - - public static void breakpoint() { - // do nothing. 
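The hash1 .. hash4 helpers above fold the identity hash codes of up to four objects into a base hash using small prime multipliers, and OR in a high marker bit (0x10000000 .. 0x40000000) so the combined hash can never collapse to zero; atPutGrow pads a list with a filler element until the requested position exists and then overwrites it. A usage sketch (illustrative only, not part of this changeset):

    import java.util.ArrayList;
    import java.util.List;

    public class UtilHashExample {
        public static void main(String[] args) {
            Object a = new Object(), b = new Object();
            int h = Util.hash2(17, a, b);
            System.out.println(h != 0);            // always true: bit 0x20000000 is forced on

            List<String> columns = new ArrayList<>();
            Util.atPutGrow(columns, 3, "x", "");   // pads indices 0..2 with "" and sets index 3
            System.out.println(columns);           // [, , , x]
        }
    }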
- } - - public static void guarantee(boolean b, String string) { - if (!b) { - throw new BailoutException(string); - } - } - - public static void warning(String string) { - TTY.println("WARNING: " + string); - } - - public static int safeToInt(long l) { - assert (int) l == l; - return (int) l; - } - - public static int roundUp(int number, int mod) { - return ((number + mod - 1) / mod) * mod; - } - - public static void printSection(String name, char sectionCharacter) { - - String header = " " + name + " "; - int remainingCharacters = PRINTING_LINE_WIDTH - header.length(); - int leftPart = remainingCharacters / 2; - int rightPart = remainingCharacters - leftPart; - for (int i = 0; i < leftPart; i++) { - TTY.print(sectionCharacter); - } - - TTY.print(header); - - for (int i = 0; i < rightPart; i++) { - TTY.print(sectionCharacter); - } - - TTY.println(); - } - - /** - * Prints entries in a byte array as space separated hex values to {@link TTY}. - * - * @param address an address at which the bytes are located. This is used to print an address - * prefix per line of output. - * @param array the array containing all the bytes to print - * @param bytesPerLine the number of values to print per line of output - */ - public static void printBytes(long address, byte[] array, int bytesPerLine) { - printBytes(address, array, 0, array.length, bytesPerLine); - } - - /** - * Prints entries in a byte array as space separated hex values to {@link TTY}. - * - * @param address an address at which the bytes are located. This is used to print an address - * prefix per line of output. - * @param array the array containing the bytes to print - * @param offset the offset in {@code array} of the values to print - * @param length the number of values from {@code array} print - * @param bytesPerLine the number of values to print per line of output - */ - public static void printBytes(long address, byte[] array, int offset, int length, int bytesPerLine) { - assert bytesPerLine > 0; - boolean newLine = true; - for (int i = 0; i < length; i++) { - if (newLine) { - TTY.print("%08x: ", address + i); - newLine = false; - } - TTY.print("%02x ", array[i]); - if (i % bytesPerLine == bytesPerLine - 1) { - TTY.println(); - newLine = true; - } - } - - if (length % bytesPerLine != bytesPerLine) { - TTY.println(); - } - } - - public static boolean isShiftCount(int x) { - return 0 <= x && x < 32; - } - - /** - * Determines if a given {@code int} value is the range of unsigned byte values. - */ - public static boolean isUByte(int x) { - return (x & 0xff) == x; - } - - /** - * Determines if a given {@code int} value is the range of signed byte values. - */ - public static boolean isByte(int x) { - return (byte) x == x; - } - - /** - * Determines if a given {@code long} value is the range of unsigned byte values. - */ - public static boolean isUByte(long x) { - return (x & 0xffL) == x; - } - - /** - * Determines if a given {@code long} value is the range of signed byte values. - */ - public static boolean isByte(long l) { - return (byte) l == l; - } - - /** - * Determines if a given {@code long} value is the range of unsigned int values. - */ - public static boolean isUInt(long x) { - return (x & 0xffffffffL) == x; - } - - /** - * Determines if a given {@code long} value is the range of signed int values. - */ - public static boolean isInt(long l) { - return (int) l == l; - } - - /** - * Determines if a given {@code int} value is the range of signed short values. 
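The is*/isU* predicates above test whether a value fits a narrower signed range (narrowing cast and compare) or unsigned range (mask and compare), and roundUp rounds an integer up to the next multiple of mod. One detail a reviewer may want to note: in printBytes, the trailing-newline guard length % bytesPerLine != bytesPerLine appears to be always true, since a remainder is strictly smaller than its divisor; presumably != 0 was intended. A usage sketch of the predicates (illustrative only, not part of this changeset):

    public class RangeCheckExample {
        public static void main(String[] args) {
            System.out.println(Util.isUByte(255));     // true: fits an unsigned byte
            System.out.println(Util.isUByte(256));     // false
            System.out.println(Util.isByte(128));      // false: out of signed byte range
            System.out.println(Util.isInt(1L << 31));  // false: out of signed int range
            System.out.println(Util.roundUp(13, 8));   // 16
            System.out.println(Util.safeToInt(42L));   // 42 (asserts that the value fits)
        }
    }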
- */ - public static boolean isShort(int x) { - return (short) x == x; - } - - public static boolean is32bit(long x) { - return -0x80000000L <= x && x < 0x80000000L; - } - - public static short safeToShort(int v) { - assert isShort(v); - return (short) v; - } - - /** - * Creates an array of integers of length "size", in which each number from 0 to (size - 1) - * occurs exactly once. The integers are sorted using the given comparator. This can be used to - * create a sorting for arrays that cannot be modified directly. - * - * @param size The size of the range to be sorted. - * @param comparator A comparator that is used to compare indexes. - * @return An array of integers that contains each number from 0 to (size - 1) exactly once, - * sorted using the comparator. - */ - public static Integer[] createSortedPermutation(int size, Comparator comparator) { - Integer[] indexes = new Integer[size]; - for (int i = 0; i < size; i++) { - indexes[i] = i; - } - Arrays.sort(indexes, comparator); - return indexes; - } -} diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.printer/src/com/oracle/graal/printer/CFGPrinter.java --- a/graal/com.oracle.graal.printer/src/com/oracle/graal/printer/CFGPrinter.java Fri Feb 06 12:17:20 2015 +0100 +++ b/graal/com.oracle.graal.printer/src/com/oracle/graal/printer/CFGPrinter.java Fri Feb 06 12:44:50 2015 +0100 @@ -29,14 +29,14 @@ import com.oracle.graal.api.code.*; import com.oracle.graal.api.meta.*; -import com.oracle.graal.compiler.alloc.*; -import com.oracle.graal.compiler.alloc.Interval.UsePosList; import com.oracle.graal.compiler.common.cfg.*; import com.oracle.graal.compiler.gen.*; import com.oracle.graal.graph.*; import com.oracle.graal.java.*; import com.oracle.graal.java.BciBlockMapping.BciBlock; import com.oracle.graal.lir.*; +import com.oracle.graal.lir.alloc.lsra.*; +import com.oracle.graal.lir.alloc.lsra.Interval.*; import com.oracle.graal.lir.stackslotalloc.*; import com.oracle.graal.nodeinfo.*; import com.oracle.graal.nodes.*; diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.printer/src/com/oracle/graal/printer/CFGPrinterObserver.java --- a/graal/com.oracle.graal.printer/src/com/oracle/graal/printer/CFGPrinterObserver.java Fri Feb 06 12:17:20 2015 +0100 +++ b/graal/com.oracle.graal.printer/src/com/oracle/graal/printer/CFGPrinterObserver.java Fri Feb 06 12:44:50 2015 +0100 @@ -28,13 +28,13 @@ import com.oracle.graal.api.code.*; import com.oracle.graal.api.meta.*; -import com.oracle.graal.compiler.alloc.*; import com.oracle.graal.compiler.common.*; import com.oracle.graal.compiler.gen.*; import com.oracle.graal.debug.*; import com.oracle.graal.graph.*; import com.oracle.graal.java.*; import com.oracle.graal.lir.*; +import com.oracle.graal.lir.alloc.lsra.*; import com.oracle.graal.lir.stackslotalloc.*; import com.oracle.graal.nodes.*; import com.oracle.graal.nodes.cfg.*; diff -r 32c7a5a88523 -r d599eeab1b53 graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/PartialEscapeClosure.java --- a/graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/PartialEscapeClosure.java Fri Feb 06 12:17:20 2015 +0100 +++ b/graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/PartialEscapeClosure.java Fri Feb 06 12:44:50 2015 +0100 @@ -28,6 +28,7 @@ import com.oracle.graal.api.meta.*; import com.oracle.graal.compiler.common.*; import com.oracle.graal.compiler.common.type.*; +import com.oracle.graal.compiler.common.util.*; import com.oracle.graal.debug.*; import com.oracle.graal.graph.*; import 
com.oracle.graal.nodes.*; @@ -38,7 +39,6 @@ import com.oracle.graal.nodes.spi.Virtualizable.EscapeState; import com.oracle.graal.nodes.virtual.*; import com.oracle.graal.phases.schedule.*; -import com.oracle.graal.phases.util.*; import com.oracle.graal.virtual.nodes.*; public abstract class PartialEscapeClosure> extends EffectsClosure { diff -r 32c7a5a88523 -r d599eeab1b53 mx/suite.py --- a/mx/suite.py Fri Feb 06 12:17:20 2015 +0100 +++ b/mx/suite.py Fri Feb 06 12:44:50 2015 +0100 @@ -490,7 +490,6 @@ "sourceDirs" : ["src"], "dependencies" : [ "com.oracle.graal.nodeinfo", - "com.oracle.graal.debug", "com.oracle.graal.compiler.common", "com.oracle.graal.api.collections", "com.oracle.graal.api.runtime", @@ -539,7 +538,6 @@ "dependencies" : [ "com.oracle.graal.compiler.common", "com.oracle.graal.asm", - "com.oracle.graal.debug", ], "checkstyle" : "com.oracle.graal.graph", "javaCompliance" : "1.8", @@ -579,15 +577,6 @@ "workingSets" : "Graal,LIR,SPARC", }, - "com.oracle.graal.alloc" : { - "subDir" : "graal", - "sourceDirs" : ["src"], - "dependencies" : ["com.oracle.graal.compiler.common"], - "checkstyle" : "com.oracle.graal.graph", - "javaCompliance" : "1.8", - "workingSets" : "Graal", - }, - "com.oracle.graal.word" : { "subDir" : "graal", "sourceDirs" : ["src"], @@ -740,7 +729,6 @@ "dependencies" : [ "com.oracle.graal.virtual", "com.oracle.graal.loop", - "com.oracle.graal.alloc", ], "checkstyle" : "com.oracle.graal.graph", "javaCompliance" : "1.8", @@ -829,6 +817,7 @@ "dependencies" : [ "com.oracle.graal.api.code", "com.oracle.graal.options", + "com.oracle.graal.debug", ], "checkstyle" : "com.oracle.graal.graph", "javaCompliance" : "1.8",