# HG changeset patch # User Stefan Anzinger # Date 1423273620 -3600 # Node ID 98967b613c886a3e4aff6e936af27a3cc7a030f9 # Parent 3059a52d9614a95e051e965c89459d8cf6abf077# Parent 8cd798884d606c08a3a7d875c5232b997cc6c96c Merge diff -r 3059a52d9614 -r 98967b613c88 CHANGELOG.md --- a/CHANGELOG.md Sat Feb 07 02:34:43 2015 +0100 +++ b/CHANGELOG.md Sat Feb 07 02:47:00 2015 +0100 @@ -12,6 +12,10 @@ ### Truffle * Added Node#deepCopy as primary method to copy ASTs. * Disable inlining across Truffle boundary by default. New option TruffleInlineAcrossTruffleBoundary default false. +* Node.replace(Node) now guards against non-assignable replacement, and Node.isReplacementSafe(Node) checks in advance. +* Instrumentation: AST "probing" is now safe and implemented by Node.probe(); language implementors need only implement Node.isInstrumentable() and Node.createWrapperNode(). +* Instrumentation: A new framework defines a category of simple "instrumentation tools" that can be created, configured, and installed, after which they autonomously collect execution data of some kind. +* Instrumentation: A new example "instrumentation tool" is a language-agnostic collector of code coverage information (CoverageTracker); there are two other examples. ### Truffle-DSL * All methods enclosed in a @TypeSystem must now be static. diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.alloc/overview.html --- a/graal/com.oracle.graal.alloc/overview.html Sat Feb 07 02:34:43 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,36 +0,0 @@ - - - - - - - - -Documentation for the com.oracle.graal.alloc project. - - - diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.alloc/src/com/oracle/graal/alloc/ComputeBlockOrder.java --- a/graal/com.oracle.graal.alloc/src/com/oracle/graal/alloc/ComputeBlockOrder.java Sat Feb 07 02:34:43 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,265 +0,0 @@ -/* - * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -package com.oracle.graal.alloc; - -import java.util.*; - -import com.oracle.graal.compiler.common.cfg.*; - -/** - * Computes an ordering of the block that can be used by the linear scan register allocator and the - * machine code generator. The machine code generation order will start with the first block and - * produce a straight sequence always following the most likely successor. Then it will continue - * with the most likely path that was left out during this process. The process iteratively - * continues until all blocks are scheduled. 
Additionally, it is guaranteed that all blocks of a - * loop are scheduled before any block following the loop is scheduled. - * - * The machine code generator order includes reordering of loop headers such that the backward jump - * is a conditional jump if there is only one loop end block. Additionally, the target of loop - * backward jumps are always marked as aligned. Aligning the target of conditional jumps does not - * bring a measurable benefit and is therefore avoided to keep the code size small. - * - * The linear scan register allocator order has an additional mechanism that prevents merge nodes - * from being scheduled if there is at least one highly likely predecessor still unscheduled. This - * increases the probability that the merge node and the corresponding predecessor are more closely - * together in the schedule thus decreasing the probability for inserted phi moves. Also, the - * algorithm sets the linear scan order number of the block that corresponds to its index in the - * linear scan order. - */ -public final class ComputeBlockOrder { - - /** - * The initial capacities of the worklists used for iteratively finding the block order. - */ - private static final int INITIAL_WORKLIST_CAPACITY = 10; - - /** - * Divisor used for degrading the probability of the current path versus unscheduled paths at a - * merge node when calculating the linear scan order. A high value means that predecessors of - * merge nodes are more likely to be scheduled before the merge node. - */ - private static final int PENALTY_VERSUS_UNSCHEDULED = 10; - - /** - * Computes the block order used for the linear scan register allocator. - * - * @return sorted list of blocks - */ - public static > List computeLinearScanOrder(int blockCount, T startBlock) { - List order = new ArrayList<>(); - BitSet visitedBlocks = new BitSet(blockCount); - PriorityQueue worklist = initializeWorklist(startBlock, visitedBlocks); - computeLinearScanOrder(order, worklist, visitedBlocks); - assert checkOrder(order, blockCount); - return order; - } - - /** - * Computes the block order used for code emission. - * - * @return sorted list of blocks - */ - public static > List computeCodeEmittingOrder(int blockCount, T startBlock) { - List order = new ArrayList<>(); - BitSet visitedBlocks = new BitSet(blockCount); - PriorityQueue worklist = initializeWorklist(startBlock, visitedBlocks); - computeCodeEmittingOrder(order, worklist, visitedBlocks); - assert checkOrder(order, blockCount); - return order; - } - - /** - * Iteratively adds paths to the code emission block order. - */ - private static > void computeCodeEmittingOrder(List order, PriorityQueue worklist, BitSet visitedBlocks) { - while (!worklist.isEmpty()) { - T nextImportantPath = worklist.poll(); - addPathToCodeEmittingOrder(nextImportantPath, order, worklist, visitedBlocks); - } - } - - /** - * Iteratively adds paths to the linear scan block order. - */ - private static > void computeLinearScanOrder(List order, PriorityQueue worklist, BitSet visitedBlocks) { - while (!worklist.isEmpty()) { - T nextImportantPath = worklist.poll(); - addPathToLinearScanOrder(nextImportantPath, order, worklist, visitedBlocks); - } - } - - /** - * Initializes the priority queue used for the work list of blocks and adds the start block. 
- */ - private static > PriorityQueue initializeWorklist(T startBlock, BitSet visitedBlocks) { - PriorityQueue result = new PriorityQueue<>(INITIAL_WORKLIST_CAPACITY, new BlockOrderComparator<>()); - result.add(startBlock); - visitedBlocks.set(startBlock.getId()); - return result; - } - - /** - * Add a linear path to the linear scan order greedily following the most likely successor. - */ - private static > void addPathToLinearScanOrder(T block, List order, PriorityQueue worklist, BitSet visitedBlocks) { - block.setLinearScanNumber(order.size()); - order.add(block); - T mostLikelySuccessor = findAndMarkMostLikelySuccessor(block, visitedBlocks); - enqueueSuccessors(block, worklist, visitedBlocks); - if (mostLikelySuccessor != null) { - if (!mostLikelySuccessor.isLoopHeader() && mostLikelySuccessor.getPredecessorCount() > 1) { - // We are at a merge. Check probabilities of predecessors that are not yet - // scheduled. - double unscheduledSum = 0.0; - for (T pred : mostLikelySuccessor.getPredecessors()) { - if (pred.getLinearScanNumber() == -1) { - unscheduledSum += pred.probability(); - } - } - - if (unscheduledSum > block.probability() / PENALTY_VERSUS_UNSCHEDULED) { - // Add this merge only after at least one additional predecessor gets scheduled. - visitedBlocks.clear(mostLikelySuccessor.getId()); - return; - } - } - addPathToLinearScanOrder(mostLikelySuccessor, order, worklist, visitedBlocks); - } - } - - /** - * Add a linear path to the code emission order greedily following the most likely successor. - */ - private static > void addPathToCodeEmittingOrder(T initialBlock, List order, PriorityQueue worklist, BitSet visitedBlocks) { - T block = initialBlock; - while (block != null) { - // Skip loop headers if there is only a single loop end block to - // make the backward jump be a conditional jump. - if (!skipLoopHeader(block)) { - - // Align unskipped loop headers as they are the target of the backward jump. - if (block.isLoopHeader()) { - block.setAlign(true); - } - addBlock(block, order); - } - - Loop loop = block.getLoop(); - if (block.isLoopEnd() && skipLoopHeader(loop.getHeader())) { - - // This is the only loop end of a skipped loop header. - // Add the header immediately afterwards. - addBlock(loop.getHeader(), order); - - // Make sure the loop successors of the loop header are aligned - // as they are the target - // of the backward jump. - for (T successor : loop.getHeader().getSuccessors()) { - if (successor.getLoopDepth() == block.getLoopDepth()) { - successor.setAlign(true); - } - } - } - - T mostLikelySuccessor = findAndMarkMostLikelySuccessor(block, visitedBlocks); - enqueueSuccessors(block, worklist, visitedBlocks); - block = mostLikelySuccessor; - } - } - - /** - * Adds a block to the ordering. - */ - private static > void addBlock(T header, List order) { - assert !order.contains(header) : "Cannot insert block twice"; - order.add(header); - } - - /** - * Find the highest likely unvisited successor block of a given block. 
- */ - private static > T findAndMarkMostLikelySuccessor(T block, BitSet visitedBlocks) { - T result = null; - for (T successor : block.getSuccessors()) { - assert successor.probability() >= 0.0 : "Probabilities must be positive"; - if (!visitedBlocks.get(successor.getId()) && successor.getLoopDepth() >= block.getLoopDepth() && (result == null || successor.probability() >= result.probability())) { - result = successor; - } - } - if (result != null) { - visitedBlocks.set(result.getId()); - } - return result; - } - - /** - * Add successor blocks into the given work list if they are not already marked as visited. - */ - private static > void enqueueSuccessors(T block, PriorityQueue worklist, BitSet visitedBlocks) { - for (T successor : block.getSuccessors()) { - if (!visitedBlocks.get(successor.getId())) { - visitedBlocks.set(successor.getId()); - worklist.add(successor); - } - } - } - - /** - * Skip the loop header block if the loop consists of more than one block and it has only a - * single loop end block. - */ - private static > boolean skipLoopHeader(AbstractBlock block) { - return (block.isLoopHeader() && !block.isLoopEnd() && block.getLoop().numBackedges() == 1); - } - - /** - * Checks that the ordering contains the expected number of blocks. - */ - private static boolean checkOrder(List> order, int expectedBlockCount) { - assert order.size() == expectedBlockCount : String.format("Number of blocks in ordering (%d) does not match expected block count (%d)", order.size(), expectedBlockCount); - return true; - } - - /** - * Comparator for sorting blocks based on loop depth and probability. - */ - private static class BlockOrderComparator> implements Comparator { - - @Override - public int compare(T a, T b) { - // Loop blocks before any loop exit block. - int diff = b.getLoopDepth() - a.getLoopDepth(); - if (diff != 0) { - return diff; - } - - // Blocks with high probability before blocks with low probability. 
- if (a.probability() > b.probability()) { - return -1; - } else { - return 1; - } - } - } -} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.baseline/src/com/oracle/graal/baseline/BaselineBytecodeParser.java --- a/graal/com.oracle.graal.baseline/src/com/oracle/graal/baseline/BaselineBytecodeParser.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.baseline/src/com/oracle/graal/baseline/BaselineBytecodeParser.java Sat Feb 07 02:47:00 2015 +0100 @@ -26,11 +26,10 @@ import java.util.*; -import com.oracle.graal.alloc.*; import com.oracle.graal.api.code.*; import com.oracle.graal.api.meta.*; -import com.oracle.graal.compiler.alloc.*; import com.oracle.graal.compiler.common.*; +import com.oracle.graal.compiler.common.alloc.*; import com.oracle.graal.compiler.common.calc.*; import com.oracle.graal.compiler.common.cfg.*; import com.oracle.graal.compiler.gen.*; @@ -42,6 +41,7 @@ import com.oracle.graal.java.BciBlockMapping.LocalLiveness; import com.oracle.graal.lir.*; import com.oracle.graal.lir.StandardOp.BlockEndOp; +import com.oracle.graal.lir.alloc.lsra.*; import com.oracle.graal.lir.framemap.*; import com.oracle.graal.lir.gen.*; import com.oracle.graal.lir.stackslotalloc.*; @@ -95,7 +95,7 @@ BciBlockMapping blockMap; try (Scope ds = Debug.scope("BciBlockMapping")) { // compute the block map, setup exception handlers and get the entrypoint(s) - blockMap = BciBlockMapping.create(method, graphBuilderConfig.doLivenessAnalysis()); + blockMap = BciBlockMapping.create(method, graphBuilderConfig.doLivenessAnalysis(), false); } catch (Throwable e) { throw Debug.handle(e); } @@ -112,7 +112,7 @@ frameState.clearNonLiveLocals(blockMap.startBlock, liveness, true); currentBlock = blockMap.startBlock; - blockMap.startBlock.entryState = frameState; + blockMap.startBlock.setEntryState(0, frameState); if (blockMap.startBlock.isLoopHeader) { throw GraalInternalError.unimplemented("Handle start block as loop header"); } @@ -624,15 +624,15 @@ */ moveConstantsToVariables(); } - block.entryState = frameState.copy(); - block.entryState.clearNonLiveLocals(block, liveness, true); + block.setEntryState(0, frameState.copy()); + block.getEntryState(0).clearNonLiveLocals(block, liveness, true); Debug.log("createTarget %s: first visit", block); return; } // We already saw this block before, so we have to merge states. 
- if (!((BaselineFrameStateBuilder) block.entryState).isCompatibleWith(frameState)) { + if (!((BaselineFrameStateBuilder) block.getEntryState(0)).isCompatibleWith(frameState)) { throw new BailoutException("stacks do not match; bytecodes would not verify"); } @@ -640,7 +640,7 @@ assert currentBlock == null || currentBlock.getId() >= block.getId() : "must be backward branch"; if (currentBlock != null && currentBlock.numNormalSuccessors() == 1) { // this is the only successor of the current block so we can adjust - adaptFramestate((BaselineFrameStateBuilder) block.entryState); + adaptFramestate((BaselineFrameStateBuilder) block.getEntryState(0)); return; } GraalInternalError.unimplemented("Loops not yet supported"); @@ -654,7 +654,7 @@ */ if (currentBlock != null && currentBlock.numNormalSuccessors() == 1) { // this is the only successor of the current block so we can adjust - adaptFramestate((BaselineFrameStateBuilder) block.entryState); + adaptFramestate((BaselineFrameStateBuilder) block.getEntryState(0)); return; } GraalInternalError.unimplemented("second block visit not yet implemented"); @@ -715,7 +715,7 @@ } protected void processBlock(BciBlock block) { - frameState = (BaselineFrameStateBuilder) block.entryState; + frameState = (BaselineFrameStateBuilder) block.getEntryState(0); setCurrentFrameState(frameState); currentBlock = block; iterateBytecodesForBlock(block); @@ -745,7 +745,7 @@ * We need to preserve the frame state builder of the loop header so that we can merge * values for phi functions, so make a copy of it. */ - block.entryState = frameState.copy(); + block.setEntryState(0, frameState.copy()); } int endBCI = stream.endBCI(); diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java --- a/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java Sat Feb 07 02:47:00 2015 +0100 @@ -38,6 +38,7 @@ import com.oracle.graal.compiler.common.*; import com.oracle.graal.compiler.common.calc.*; import com.oracle.graal.compiler.common.spi.*; +import com.oracle.graal.compiler.common.util.*; import com.oracle.graal.lir.*; import com.oracle.graal.lir.StandardOp.JumpOp; import com.oracle.graal.lir.amd64.*; @@ -998,7 +999,7 @@ } @Override - public Value emitBitCount(Value value) { + public Variable emitBitCount(Value value) { Variable result = newVariable(LIRKind.derive(value).changeType(Kind.Int)); if (value.getKind().getStackKind() == Kind.Int) { append(new AMD64BitManipulationOp(IPOPCNT, result, asAllocatable(value))); @@ -1009,14 +1010,14 @@ } @Override - public Value emitBitScanForward(Value value) { + public Variable emitBitScanForward(Value value) { Variable result = newVariable(LIRKind.derive(value).changeType(Kind.Int)); append(new AMD64BitManipulationOp(BSF, result, asAllocatable(value))); return result; } @Override - public Value emitBitScanReverse(Value value) { + public Variable emitBitScanReverse(Value value) { Variable result = newVariable(LIRKind.derive(value).changeType(Kind.Int)); if (value.getKind().getStackKind() == Kind.Int) { append(new AMD64BitManipulationOp(IBSR, result, asAllocatable(value))); @@ -1098,14 +1099,14 @@ } @Override - public Value emitByteSwap(Value input) { + public Variable emitByteSwap(Value input) { Variable result = newVariable(LIRKind.derive(input)); append(new AMD64ByteSwapOp(result, input)); return 
result; } @Override - public Value emitArrayEquals(Kind kind, Value array1, Value array2, Value length) { + public Variable emitArrayEquals(Kind kind, Value array1, Value array2, Value length) { Variable result = newVariable(LIRKind.value(Kind.Int)); append(new AMD64ArrayEqualsOp(this, kind, result, array1, array2, asAllocatable(length))); return result; diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/GraalOptions.java --- a/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/GraalOptions.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/GraalOptions.java Sat Feb 07 02:47:00 2015 +0100 @@ -343,9 +343,23 @@ public static final OptionValue ImplicitStableValues = new OptionValue<>(true); + @Option(help = "Max number of loop explosions per method.", type = OptionType.Debug) + public static final OptionValue MaximumLoopExplosionCount = new OptionValue<>(10000); + /** * Counts the various paths taken through snippets. */ @Option(help = "", type = OptionType.Debug) public static final OptionValue SnippetCounters = new OptionValue<>(false); + + @Option(help = "Enable expensive assertions", type = OptionType.Debug) + public static final OptionValue DetailedAsserts = new StableOptionValue() { + @Override + protected Boolean initialValue() { + boolean enabled = false; + // turn detailed assertions on when the general assertions are on (misusing the assert keyword for this) + assert (enabled = true) == true; + return enabled; + } + }; } diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/alloc/ComputeBlockOrder.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/alloc/ComputeBlockOrder.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,265 @@ +/* + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package com.oracle.graal.compiler.common.alloc; + +import java.util.*; + +import com.oracle.graal.compiler.common.cfg.*; + +/** + * Computes an ordering of the block that can be used by the linear scan register allocator and the + * machine code generator. The machine code generation order will start with the first block and + * produce a straight sequence always following the most likely successor. Then it will continue + * with the most likely path that was left out during this process. 
The process iteratively + * continues until all blocks are scheduled. Additionally, it is guaranteed that all blocks of a + * loop are scheduled before any block following the loop is scheduled. + * + * The machine code generator order includes reordering of loop headers such that the backward jump + * is a conditional jump if there is only one loop end block. Additionally, the target of loop + * backward jumps are always marked as aligned. Aligning the target of conditional jumps does not + * bring a measurable benefit and is therefore avoided to keep the code size small. + * + * The linear scan register allocator order has an additional mechanism that prevents merge nodes + * from being scheduled if there is at least one highly likely predecessor still unscheduled. This + * increases the probability that the merge node and the corresponding predecessor are more closely + * together in the schedule thus decreasing the probability for inserted phi moves. Also, the + * algorithm sets the linear scan order number of the block that corresponds to its index in the + * linear scan order. + */ +public final class ComputeBlockOrder { + + /** + * The initial capacities of the worklists used for iteratively finding the block order. + */ + private static final int INITIAL_WORKLIST_CAPACITY = 10; + + /** + * Divisor used for degrading the probability of the current path versus unscheduled paths at a + * merge node when calculating the linear scan order. A high value means that predecessors of + * merge nodes are more likely to be scheduled before the merge node. + */ + private static final int PENALTY_VERSUS_UNSCHEDULED = 10; + + /** + * Computes the block order used for the linear scan register allocator. + * + * @return sorted list of blocks + */ + public static > List computeLinearScanOrder(int blockCount, T startBlock) { + List order = new ArrayList<>(); + BitSet visitedBlocks = new BitSet(blockCount); + PriorityQueue worklist = initializeWorklist(startBlock, visitedBlocks); + computeLinearScanOrder(order, worklist, visitedBlocks); + assert checkOrder(order, blockCount); + return order; + } + + /** + * Computes the block order used for code emission. + * + * @return sorted list of blocks + */ + public static > List computeCodeEmittingOrder(int blockCount, T startBlock) { + List order = new ArrayList<>(); + BitSet visitedBlocks = new BitSet(blockCount); + PriorityQueue worklist = initializeWorklist(startBlock, visitedBlocks); + computeCodeEmittingOrder(order, worklist, visitedBlocks); + assert checkOrder(order, blockCount); + return order; + } + + /** + * Iteratively adds paths to the code emission block order. + */ + private static > void computeCodeEmittingOrder(List order, PriorityQueue worklist, BitSet visitedBlocks) { + while (!worklist.isEmpty()) { + T nextImportantPath = worklist.poll(); + addPathToCodeEmittingOrder(nextImportantPath, order, worklist, visitedBlocks); + } + } + + /** + * Iteratively adds paths to the linear scan block order. + */ + private static > void computeLinearScanOrder(List order, PriorityQueue worklist, BitSet visitedBlocks) { + while (!worklist.isEmpty()) { + T nextImportantPath = worklist.poll(); + addPathToLinearScanOrder(nextImportantPath, order, worklist, visitedBlocks); + } + } + + /** + * Initializes the priority queue used for the work list of blocks and adds the start block. 
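// A minimal, hypothetical usage sketch (not part of this changeset) of the two public
// entry points declared above. "MyBlock" and "cfg" are assumed placeholder names for a
// concrete AbstractBlock implementation and its control-flow graph.
int blockCount = cfg.getBlocks().size();   // all reachable blocks of one method
MyBlock startBlock = cfg.getStartBlock();
List<MyBlock> linearScanOrder = ComputeBlockOrder.computeLinearScanOrder(blockCount, startBlock);
List<MyBlock> codeEmittingOrder = ComputeBlockOrder.computeCodeEmittingOrder(blockCount, startBlock);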
+ */ + private static > PriorityQueue initializeWorklist(T startBlock, BitSet visitedBlocks) { + PriorityQueue result = new PriorityQueue<>(INITIAL_WORKLIST_CAPACITY, new BlockOrderComparator<>()); + result.add(startBlock); + visitedBlocks.set(startBlock.getId()); + return result; + } + + /** + * Add a linear path to the linear scan order greedily following the most likely successor. + */ + private static > void addPathToLinearScanOrder(T block, List order, PriorityQueue worklist, BitSet visitedBlocks) { + block.setLinearScanNumber(order.size()); + order.add(block); + T mostLikelySuccessor = findAndMarkMostLikelySuccessor(block, visitedBlocks); + enqueueSuccessors(block, worklist, visitedBlocks); + if (mostLikelySuccessor != null) { + if (!mostLikelySuccessor.isLoopHeader() && mostLikelySuccessor.getPredecessorCount() > 1) { + // We are at a merge. Check probabilities of predecessors that are not yet + // scheduled. + double unscheduledSum = 0.0; + for (T pred : mostLikelySuccessor.getPredecessors()) { + if (pred.getLinearScanNumber() == -1) { + unscheduledSum += pred.probability(); + } + } + + if (unscheduledSum > block.probability() / PENALTY_VERSUS_UNSCHEDULED) { + // Add this merge only after at least one additional predecessor gets scheduled. + visitedBlocks.clear(mostLikelySuccessor.getId()); + return; + } + } + addPathToLinearScanOrder(mostLikelySuccessor, order, worklist, visitedBlocks); + } + } + + /** + * Add a linear path to the code emission order greedily following the most likely successor. + */ + private static > void addPathToCodeEmittingOrder(T initialBlock, List order, PriorityQueue worklist, BitSet visitedBlocks) { + T block = initialBlock; + while (block != null) { + // Skip loop headers if there is only a single loop end block to + // make the backward jump be a conditional jump. + if (!skipLoopHeader(block)) { + + // Align unskipped loop headers as they are the target of the backward jump. + if (block.isLoopHeader()) { + block.setAlign(true); + } + addBlock(block, order); + } + + Loop loop = block.getLoop(); + if (block.isLoopEnd() && skipLoopHeader(loop.getHeader())) { + + // This is the only loop end of a skipped loop header. + // Add the header immediately afterwards. + addBlock(loop.getHeader(), order); + + // Make sure the loop successors of the loop header are aligned + // as they are the target + // of the backward jump. + for (T successor : loop.getHeader().getSuccessors()) { + if (successor.getLoopDepth() == block.getLoopDepth()) { + successor.setAlign(true); + } + } + } + + T mostLikelySuccessor = findAndMarkMostLikelySuccessor(block, visitedBlocks); + enqueueSuccessors(block, worklist, visitedBlocks); + block = mostLikelySuccessor; + } + } + + /** + * Adds a block to the ordering. + */ + private static > void addBlock(T header, List order) { + assert !order.contains(header) : "Cannot insert block twice"; + order.add(header); + } + + /** + * Find the highest likely unvisited successor block of a given block. 
+ */ + private static > T findAndMarkMostLikelySuccessor(T block, BitSet visitedBlocks) { + T result = null; + for (T successor : block.getSuccessors()) { + assert successor.probability() >= 0.0 : "Probabilities must be positive"; + if (!visitedBlocks.get(successor.getId()) && successor.getLoopDepth() >= block.getLoopDepth() && (result == null || successor.probability() >= result.probability())) { + result = successor; + } + } + if (result != null) { + visitedBlocks.set(result.getId()); + } + return result; + } + + /** + * Add successor blocks into the given work list if they are not already marked as visited. + */ + private static > void enqueueSuccessors(T block, PriorityQueue worklist, BitSet visitedBlocks) { + for (T successor : block.getSuccessors()) { + if (!visitedBlocks.get(successor.getId())) { + visitedBlocks.set(successor.getId()); + worklist.add(successor); + } + } + } + + /** + * Skip the loop header block if the loop consists of more than one block and it has only a + * single loop end block. + */ + private static > boolean skipLoopHeader(AbstractBlock block) { + return (block.isLoopHeader() && !block.isLoopEnd() && block.getLoop().numBackedges() == 1); + } + + /** + * Checks that the ordering contains the expected number of blocks. + */ + private static boolean checkOrder(List> order, int expectedBlockCount) { + assert order.size() == expectedBlockCount : String.format("Number of blocks in ordering (%d) does not match expected block count (%d)", order.size(), expectedBlockCount); + return true; + } + + /** + * Comparator for sorting blocks based on loop depth and probability. + */ + private static class BlockOrderComparator> implements Comparator { + + @Override + public int compare(T a, T b) { + // Loop blocks before any loop exit block. + int diff = b.getLoopDepth() - a.getLoopDepth(); + if (diff != 0) { + return diff; + } + + // Blocks with high probability before blocks with low probability. + if (a.probability() > b.probability()) { + return -1; + } else { + return 1; + } + } + } +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/ArrayMap.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/ArrayMap.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.graal.compiler.common.util; + +/** + * The {@code ArrayMap} class implements an efficient one-level map which is implemented as an + * array. Note that because of the one-level array inside, this data structure performs best when + * the range of integer keys is small and densely used. Note that the implementation can handle + * arbitrary intervals, including negative numbers, up to intervals of size 2^31 - 1. + */ +public class ArrayMap { + + private static final int INITIAL_SIZE = 5; // how big the initial array should be + private static final int EXTRA = 2; // how far on the left or right of a new element to grow + + Object[] map; + int low; + + /** + * Constructs a new {@code ArrayMap} with no initial assumptions. + */ + public ArrayMap() { + } + + /** + * Constructs a new {@code ArrayMap} that initially covers the specified interval. Note that + * this map will automatically expand if necessary later. + * + * @param low the low index, inclusive + * @param high the high index, exclusive + */ + public ArrayMap(int low, int high) { + this.low = low; + this.map = new Object[high - low + 1]; + } + + /** + * Puts a new value in the map at the specified index. + * + * @param i the index at which to store the value + * @param value the value to store at the specified index + */ + public void put(int i, T value) { + int index = i - low; + if (map == null) { + // no map yet + map = new Object[INITIAL_SIZE]; + low = index - 2; + map[INITIAL_SIZE / 2] = value; + } else if (index < 0) { + // grow backwards + growBackward(i, value); + } else if (index >= map.length) { + // grow forwards + growForward(i, value); + } else { + // no growth necessary + map[index] = value; + } + } + + /** + * Gets the value at the specified index in the map. + * + * @param i the index + * @return the value at the specified index; {@code null} if there is no value at the specified + * index, or if the index is out of the currently stored range + */ + public T get(int i) { + int index = i - low; + if (map == null || index < 0 || index >= map.length) { + return null; + } + Class type = null; + return Util.uncheckedCast(type, map[index]); + } + + public int length() { + return map.length; + } + + private void growBackward(int i, T value) { + int nlow = i - EXTRA; + Object[] nmap = new Object[low - nlow + map.length]; + System.arraycopy(map, 0, nmap, low - nlow, map.length); + map = nmap; + low = nlow; + map[i - low] = value; + } + + private void growForward(int i, T value) { + int nlen = i - low + 1 + EXTRA; + Object[] nmap = new Object[nlen]; + System.arraycopy(map, 0, nmap, 0, map.length); + map = nmap; + map[i - low] = value; + } +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/ArraySet.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/ArraySet.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
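// A hypothetical usage sketch for the ArrayMap class introduced above (not part of this
// changeset); the keys and values are made-up examples over a small, dense integer range.
ArrayMap<String> names = new ArrayMap<>(-2, 8);   // initially covers indexes -2..8
names.put(-2, "entry");
names.put(3, "loop header");
String atThree = names.get(3);     // "loop header"
String outside = names.get(100);   // null: outside the currently stored range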
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.compiler.common.util; + +import java.util.*; + +/** + * Mimic a set implementation with an ArrayList. Beneficial for small sets (compared to + * {@link HashSet}). + */ +public class ArraySet extends ArrayList implements Set { + private static final long serialVersionUID = 4476957522387436654L; + + public ArraySet() { + super(); + } + + public ArraySet(int i) { + super(i); + } + + public ArraySet(Collection c) { + super(c); + } + + @Override + public boolean add(E e) { + // avoid duplicated entries + if (contains(e)) { + return false; + } + return super.add(e); + } +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/BitMap2D.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/BitMap2D.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.compiler.common.util; + +import java.util.*; + +/** + * This class implements a two-dimensional bitmap. 
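// A hypothetical usage sketch for BitMap2D (not part of this changeset): one slot per
// basic block, one bit per local variable, as in a liveness-style table. The slot and
// bit counts are made-up example values.
BitMap2D liveIn = new BitMap2D(8, 16);   // 8 slots, 16 bits per slot
liveIn.setBit(3, 5);                     // mark bit 5 in slot 3
boolean set = liveIn.at(3, 5);           // true
liveIn.atPutGrow(20, 5, true);           // grows the map beyond the initial 8 slots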
+ */ +public final class BitMap2D { + + private BitSet map; + private final int bitsPerSlot; + + private int bitIndex(int slotIndex, int bitWithinSlotIndex) { + return slotIndex * bitsPerSlot + bitWithinSlotIndex; + } + + private boolean verifyBitWithinSlotIndex(int index) { + assert index < bitsPerSlot : "index " + index + " is out of bounds " + bitsPerSlot; + return true; + } + + public BitMap2D(int sizeInSlots, int bitsPerSlot) { + map = new BitSet(sizeInSlots * bitsPerSlot); + this.bitsPerSlot = bitsPerSlot; + } + + public int sizeInBits() { + return map.size(); + } + + // Returns number of full slots that have been allocated + public int sizeInSlots() { + return map.size() / bitsPerSlot; + } + + public boolean isValidIndex(int slotIndex, int bitWithinSlotIndex) { + assert verifyBitWithinSlotIndex(bitWithinSlotIndex); + return (bitIndex(slotIndex, bitWithinSlotIndex) < sizeInBits()); + } + + public boolean at(int slotIndex, int bitWithinSlotIndex) { + assert verifyBitWithinSlotIndex(bitWithinSlotIndex); + return map.get(bitIndex(slotIndex, bitWithinSlotIndex)); + } + + public void setBit(int slotIndex, int bitWithinSlotIndex) { + assert verifyBitWithinSlotIndex(bitWithinSlotIndex); + map.set(bitIndex(slotIndex, bitWithinSlotIndex)); + } + + public void clearBit(int slotIndex, int bitWithinSlotIndex) { + assert verifyBitWithinSlotIndex(bitWithinSlotIndex); + map.clear(bitIndex(slotIndex, bitWithinSlotIndex)); + } + + public void atPutGrow(int slotIndex, int bitWithinSlotIndex, boolean value) { + int size = sizeInSlots(); + if (size <= slotIndex) { + while (size <= slotIndex) { + size *= 2; + } + BitSet newBitMap = new BitSet(size * bitsPerSlot); + newBitMap.or(map); + map = newBitMap; + } + + if (value) { + setBit(slotIndex, bitWithinSlotIndex); + } else { + clearBit(slotIndex, bitWithinSlotIndex); + } + } + + public void clear() { + map.clear(); + } +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/IntList.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/IntList.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.compiler.common.util; + +import java.util.*; + +/** + * An expandable and indexable list of {@code int}s. + * + * This class avoids the boxing/unboxing incurred by {@code ArrayList}. 
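// A small, hypothetical usage sketch for IntList (not part of this changeset), exercising
// the add/get/set/setSize API defined below; the values are arbitrary.
IntList ids = new IntList(4);   // initial capacity of 4
ids.add(7);
ids.add(11);
ids.set(1, 13);                 // list is now [7, 13]
int first = ids.get(0);         // 7
ids.setSize(1);                 // truncates the list to [7]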
+ */ +public final class IntList { + + private int[] array; + private int size; + + /** + * Creates an int list with a specified initial capacity. + * + * @param initialCapacity + */ + public IntList(int initialCapacity) { + array = new int[initialCapacity]; + } + + /** + * Creates an int list with a specified initial array. + * + * @param array the initial array used for the list (no copy is made) + * @param initialSize the initial {@linkplain #size() size} of the list (must be less than or + * equal to {@code array.length} + */ + public IntList(int[] array, int initialSize) { + assert initialSize <= array.length; + this.array = array; + this.size = initialSize; + } + + /** + * Makes a new int list by copying a range from a given int list. + * + * @param other the list from which a range of values is to be copied into the new list + * @param startIndex the index in {@code other} at which to start copying + * @param length the number of values to copy from {@code other} + * @return a new int list whose {@linkplain #size() size} and capacity is {@code length} + */ + public static IntList copy(IntList other, int startIndex, int length) { + return copy(other, startIndex, length, length); + } + + /** + * Makes a new int list by copying a range from a given int list. + * + * @param other the list from which a range of values is to be copied into the new list + * @param startIndex the index in {@code other} at which to start copying + * @param length the number of values to copy from {@code other} + * @param initialCapacity the initial capacity of the new int list (must be greater or equal to + * {@code length}) + * @return a new int list whose {@linkplain #size() size} is {@code length} + */ + public static IntList copy(IntList other, int startIndex, int length, int initialCapacity) { + assert initialCapacity >= length : "initialCapacity < length"; + int[] array = new int[initialCapacity]; + System.arraycopy(other.array, startIndex, array, 0, length); + return new IntList(array, length); + } + + public int size() { + return size; + } + + /** + * Appends a value to the end of this list, increasing its {@linkplain #size() size} by 1. + * + * @param value the value to append + */ + public void add(int value) { + if (size == array.length) { + int newSize = (size * 3) / 2 + 1; + array = Arrays.copyOf(array, newSize); + } + array[size++] = value; + } + + /** + * Gets the value in this list at a given index. + * + * @param index the index of the element to return + * @throws IndexOutOfBoundsException if {@code index < 0 || index >= size()} + */ + public int get(int index) { + if (index >= size) { + throw new IndexOutOfBoundsException("Index: " + index + ", Size: " + size); + } + return array[index]; + } + + /** + * Sets the size of this list to 0. + */ + public void clear() { + size = 0; + } + + /** + * Sets a value at a given index in this list. + * + * @param index the index of the element to update + * @param value the new value of the element + * @throws IndexOutOfBoundsException if {@code index < 0 || index >= size()} + */ + public void set(int index, int value) { + if (index >= size) { + throw new IndexOutOfBoundsException("Index: " + index + ", Size: " + size); + } + array[index] = value; + } + + /** + * Adjusts the {@linkplain #size() size} of this int list. + * + * If {@code newSize < size()}, the size is changed to {@code newSize}. If + * {@code newSize > size()}, sufficient 0 elements are {@linkplain #add(int) added} until + * {@code size() == newSize}. 
+ * + * @param newSize the new size of this int list + */ + public void setSize(int newSize) { + if (newSize < size) { + size = newSize; + } else if (newSize > size) { + array = Arrays.copyOf(array, newSize); + } + } + + @Override + public String toString() { + if (array.length == size) { + return Arrays.toString(array); + } + return Arrays.toString(Arrays.copyOf(array, size)); + } +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/Util.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/Util.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,337 @@ +/* + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.compiler.common.util; + +import java.util.*; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.debug.*; + +/** + * The {@code Util} class contains a motley collection of utility methods used throughout the + * compiler. + */ +public class Util { + + public static final int PRINTING_LINE_WIDTH = 40; + public static final char SECTION_CHARACTER = '*'; + public static final char SUB_SECTION_CHARACTER = '='; + public static final char SEPERATOR_CHARACTER = '-'; + + public static boolean replaceInList(T a, T b, List list) { + final int max = list.size(); + for (int i = 0; i < max; i++) { + if (list.get(i) == a) { + list.set(i, b); + return true; + } + } + return false; + } + + /** + * Statically cast an object to an arbitrary Object type. Dynamically checked. + */ + @SuppressWarnings("unchecked") + public static T uncheckedCast(@SuppressWarnings("unused") Class type, Object object) { + return (T) object; + } + + /** + * Statically cast an object to an arbitrary Object type. Dynamically checked. + */ + @SuppressWarnings("unchecked") + public static T uncheckedCast(Object object) { + return (T) object; + } + + /** + * Utility method to combine a base hash with the identity hash of one or more objects. + * + * @param hash the base hash + * @param x the object to add to the hash + * @return the combined hash + */ + public static int hash1(int hash, Object x) { + // always set at least one bit in case the hash wraps to zero + return 0x10000000 | (hash + 7 * System.identityHashCode(x)); + } + + /** + * Utility method to combine a base hash with the identity hash of one or more objects. 
+ * + * @param hash the base hash + * @param x the first object to add to the hash + * @param y the second object to add to the hash + * @return the combined hash + */ + public static int hash2(int hash, Object x, Object y) { + // always set at least one bit in case the hash wraps to zero + return 0x20000000 | (hash + 7 * System.identityHashCode(x) + 11 * System.identityHashCode(y)); + } + + /** + * Utility method to combine a base hash with the identity hash of one or more objects. + * + * @param hash the base hash + * @param x the first object to add to the hash + * @param y the second object to add to the hash + * @param z the third object to add to the hash + * @return the combined hash + */ + public static int hash3(int hash, Object x, Object y, Object z) { + // always set at least one bit in case the hash wraps to zero + return 0x30000000 | (hash + 7 * System.identityHashCode(x) + 11 * System.identityHashCode(y) + 13 * System.identityHashCode(z)); + } + + /** + * Utility method to combine a base hash with the identity hash of one or more objects. + * + * @param hash the base hash + * @param x the first object to add to the hash + * @param y the second object to add to the hash + * @param z the third object to add to the hash + * @param w the fourth object to add to the hash + * @return the combined hash + */ + public static int hash4(int hash, Object x, Object y, Object z, Object w) { + // always set at least one bit in case the hash wraps to zero + return 0x40000000 | (hash + 7 * System.identityHashCode(x) + 11 * System.identityHashCode(y) + 13 * System.identityHashCode(z) + 17 * System.identityHashCode(w)); + } + + static { + assert CodeUtil.log2(2) == 1; + assert CodeUtil.log2(4) == 2; + assert CodeUtil.log2(8) == 3; + assert CodeUtil.log2(16) == 4; + assert CodeUtil.log2(32) == 5; + assert CodeUtil.log2(0x40000000) == 30; + + assert CodeUtil.log2(2L) == 1; + assert CodeUtil.log2(4L) == 2; + assert CodeUtil.log2(8L) == 3; + assert CodeUtil.log2(16L) == 4; + assert CodeUtil.log2(32L) == 5; + assert CodeUtil.log2(0x4000000000000000L) == 62; + + assert !CodeUtil.isPowerOf2(3); + assert !CodeUtil.isPowerOf2(5); + assert !CodeUtil.isPowerOf2(7); + assert !CodeUtil.isPowerOf2(-1); + + assert CodeUtil.isPowerOf2(2); + assert CodeUtil.isPowerOf2(4); + assert CodeUtil.isPowerOf2(8); + assert CodeUtil.isPowerOf2(16); + assert CodeUtil.isPowerOf2(32); + assert CodeUtil.isPowerOf2(64); + } + + /** + * Sets the element at a given position of a list and ensures that this position exists. If the + * list is current shorter than the position, intermediate positions are filled with a given + * value. + * + * @param list the list to put the element into + * @param pos the position at which to insert the element + * @param x the element that should be inserted + * @param filler the filler element that is used for the intermediate positions in case the list + * is shorter than pos + */ + public static void atPutGrow(List list, int pos, T x, T filler) { + if (list.size() < pos + 1) { + while (list.size() < pos + 1) { + list.add(filler); + } + assert list.size() == pos + 1; + } + + assert list.size() >= pos + 1; + list.set(pos, x); + } + + public static void breakpoint() { + // do nothing. 
+ } + + public static void guarantee(boolean b, String string) { + if (!b) { + throw new BailoutException(string); + } + } + + public static void warning(String string) { + TTY.println("WARNING: " + string); + } + + public static int safeToInt(long l) { + assert (int) l == l; + return (int) l; + } + + public static int roundUp(int number, int mod) { + return ((number + mod - 1) / mod) * mod; + } + + public static void printSection(String name, char sectionCharacter) { + + String header = " " + name + " "; + int remainingCharacters = PRINTING_LINE_WIDTH - header.length(); + int leftPart = remainingCharacters / 2; + int rightPart = remainingCharacters - leftPart; + for (int i = 0; i < leftPart; i++) { + TTY.print(sectionCharacter); + } + + TTY.print(header); + + for (int i = 0; i < rightPart; i++) { + TTY.print(sectionCharacter); + } + + TTY.println(); + } + + /** + * Prints entries in a byte array as space separated hex values to {@link TTY}. + * + * @param address an address at which the bytes are located. This is used to print an address + * prefix per line of output. + * @param array the array containing all the bytes to print + * @param bytesPerLine the number of values to print per line of output + */ + public static void printBytes(long address, byte[] array, int bytesPerLine) { + printBytes(address, array, 0, array.length, bytesPerLine); + } + + /** + * Prints entries in a byte array as space separated hex values to {@link TTY}. + * + * @param address an address at which the bytes are located. This is used to print an address + * prefix per line of output. + * @param array the array containing the bytes to print + * @param offset the offset in {@code array} of the values to print + * @param length the number of values from {@code array} print + * @param bytesPerLine the number of values to print per line of output + */ + public static void printBytes(long address, byte[] array, int offset, int length, int bytesPerLine) { + assert bytesPerLine > 0; + boolean newLine = true; + for (int i = 0; i < length; i++) { + if (newLine) { + TTY.print("%08x: ", address + i); + newLine = false; + } + TTY.print("%02x ", array[i]); + if (i % bytesPerLine == bytesPerLine - 1) { + TTY.println(); + newLine = true; + } + } + + if (length % bytesPerLine != bytesPerLine) { + TTY.println(); + } + } + + public static boolean isShiftCount(int x) { + return 0 <= x && x < 32; + } + + /** + * Determines if a given {@code int} value is the range of unsigned byte values. + */ + public static boolean isUByte(int x) { + return (x & 0xff) == x; + } + + /** + * Determines if a given {@code int} value is the range of signed byte values. + */ + public static boolean isByte(int x) { + return (byte) x == x; + } + + /** + * Determines if a given {@code long} value is the range of unsigned byte values. + */ + public static boolean isUByte(long x) { + return (x & 0xffL) == x; + } + + /** + * Determines if a given {@code long} value is the range of signed byte values. + */ + public static boolean isByte(long l) { + return (byte) l == l; + } + + /** + * Determines if a given {@code long} value is the range of unsigned int values. + */ + public static boolean isUInt(long x) { + return (x & 0xffffffffL) == x; + } + + /** + * Determines if a given {@code long} value is the range of signed int values. + */ + public static boolean isInt(long l) { + return (int) l == l; + } + + /** + * Determines if a given {@code int} value is the range of signed short values. 
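// Hypothetical checks (not part of this changeset) using the range predicates in this
// class, e.g. when testing whether a constant fits a particular instruction encoding.
boolean fitsByte  = Util.isByte(100);        // true: (byte) 100 == 100
boolean fitsUByte = Util.isUByte(200);       // true: 200 & 0xff == 200
boolean fitsShort = Util.isShort(70000);     // false: does not fit in 16 signed bits
boolean fits32    = Util.is32bit(1L << 40);  // false: outside the signed 32-bit range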
+ */ + public static boolean isShort(int x) { + return (short) x == x; + } + + public static boolean is32bit(long x) { + return -0x80000000L <= x && x < 0x80000000L; + } + + public static short safeToShort(int v) { + assert isShort(v); + return (short) v; + } + + /** + * Creates an array of integers of length "size", in which each number from 0 to (size - 1) + * occurs exactly once. The integers are sorted using the given comparator. This can be used to + * create a sorting for arrays that cannot be modified directly. + * + * @param size The size of the range to be sorted. + * @param comparator A comparator that is used to compare indexes. + * @return An array of integers that contains each number from 0 to (size - 1) exactly once, + * sorted using the comparator. + */ + public static Integer[] createSortedPermutation(int size, Comparator comparator) { + Integer[] indexes = new Integer[size]; + for (int i = 0; i < size; i++) { + indexes[i] = i; + } + Arrays.sort(indexes, comparator); + return indexes; + } +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler.sparc/src/com/oracle/graal/compiler/sparc/SPARCLIRGenerator.java --- a/graal/com.oracle.graal.compiler.sparc/src/com/oracle/graal/compiler/sparc/SPARCLIRGenerator.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.compiler.sparc/src/com/oracle/graal/compiler/sparc/SPARCLIRGenerator.java Sat Feb 07 02:47:00 2015 +0100 @@ -189,7 +189,7 @@ } @Override - public Value emitAddress(StackSlotValue address) { + public Variable emitAddress(StackSlotValue address) { Variable result = newVariable(LIRKind.value(target().wordKind)); append(new StackLoadAddressOp(result, address)); return result; @@ -278,11 +278,9 @@ JavaConstant c = asConstant(value); if (c.isNull() || SPARCAssembler.isSimm11(c)) { return value; - } else { - return load(c); } } - return emitMove(value); + return load(value); } @Override @@ -417,12 +415,13 @@ protected void emitTableSwitch(int lowKey, LabelRef defaultTarget, LabelRef[] targets, Value key) { // Making a copy of the switch value is necessary because jump table destroys the input // value - Variable tmp = emitMove(key); + Variable tmp = newVariable(key.getLIRKind()); + emitMove(tmp, key); append(new TableSwitchOp(lowKey, defaultTarget, targets, tmp, newVariable(LIRKind.value(target().wordKind)))); } @Override - public Value emitBitCount(Value operand) { + public Variable emitBitCount(Value operand) { Variable result = newVariable(LIRKind.derive(operand).changeType(Kind.Int)); if (operand.getKind().getStackKind() == Kind.Int) { append(new SPARCBitManipulationOp(IPOPCNT, result, asAllocatable(operand), this)); @@ -433,14 +432,14 @@ } @Override - public Value emitBitScanForward(Value operand) { + public Variable emitBitScanForward(Value operand) { Variable result = newVariable(LIRKind.derive(operand).changeType(Kind.Int)); append(new SPARCBitManipulationOp(BSF, result, asAllocatable(operand), this)); return result; } @Override - public Value emitBitScanReverse(Value operand) { + public Variable emitBitScanReverse(Value operand) { Variable result = newVariable(LIRKind.derive(operand).changeType(Kind.Int)); if (operand.getKind().getStackKind() == Kind.Int) { append(new SPARCBitManipulationOp(IBSR, result, asAllocatable(operand), this)); @@ -493,14 +492,14 @@ } @Override - public Value emitByteSwap(Value input) { + public Variable emitByteSwap(Value input) { Variable result = newVariable(LIRKind.derive(input)); append(new SPARCByteSwapOp(this, result, input)); return result; } @Override - 
public Value emitArrayEquals(Kind kind, Value array1, Value array2, Value length) { + public Variable emitArrayEquals(Kind kind, Value array1, Value array2, Value length) { Variable result = newVariable(LIRKind.value(Kind.Int)); append(new SPARCArrayEqualsOp(this, kind, result, load(array1), load(array2), asAllocatable(length))); return result; diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/FinalizableSubclassTest.java --- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/FinalizableSubclassTest.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/FinalizableSubclassTest.java Sat Feb 07 02:47:00 2015 +0100 @@ -67,7 +67,7 @@ StructuredGraph graph = new StructuredGraph(javaMethod); GraphBuilderConfiguration conf = GraphBuilderConfiguration.getSnippetDefault(); - new GraphBuilderPhase.Instance(getMetaAccess(), getProviders().getStampProvider(), assumptions, conf, OptimisticOptimizations.ALL).apply(graph); + new GraphBuilderPhase.Instance(getMetaAccess(), getProviders().getStampProvider(), assumptions, getProviders().getConstantReflection(), conf, OptimisticOptimizations.ALL).apply(graph); HighTierContext context = new HighTierContext(getProviders(), assumptions, null, getDefaultGraphBuilderSuite(), OptimisticOptimizations.ALL); new InliningPhase(new CanonicalizerPhase(true)).apply(graph, context); new CanonicalizerPhase(true).apply(graph, context); diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/GraalCompilerTest.java --- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/GraalCompilerTest.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/GraalCompilerTest.java Sat Feb 07 02:47:00 2015 +0100 @@ -298,6 +298,8 @@ NodeMap canonicalId = graph.createNodeMap(); int nextId = 0; + List constantsLines = new ArrayList<>(); + StringBuilder result = new StringBuilder(); for (Block block : schedule.getCFG().getBlocks()) { result.append("Block " + block + " "); @@ -312,20 +314,36 @@ for (Node node : schedule.getBlockToNodesMap().get(block)) { if (node.isAlive()) { if (!excludeVirtual || !(node instanceof VirtualObjectNode || node instanceof ProxyNode)) { - int id; - if (canonicalId.get(node) != null) { - id = canonicalId.get(node); + if (node instanceof ConstantNode) { + String name = checkConstants ? node.toString(Verbosity.Name) : node.getClass().getSimpleName(); + String str = name + (excludeVirtual ? "\n" : " (" + node.getUsageCount() + ")\n"); + constantsLines.add(str); } else { - id = nextId++; - canonicalId.set(node, id); + int id; + if (canonicalId.get(node) != null) { + id = canonicalId.get(node); + } else { + id = nextId++; + canonicalId.set(node, id); + } + String name = node.getClass().getSimpleName(); + String str = " " + id + "|" + name + (excludeVirtual ? "\n" : " (" + node.getUsageCount() + ")\n"); + result.append(str); } - String name = node instanceof ConstantNode && checkConstants ? node.toString(Verbosity.Name) : node.getClass().getSimpleName(); - result.append(" " + id + "|" + name + (excludeVirtual ? 
"\n" : " (" + node.getUsageCount() + ")\n")); } } } } - return result.toString(); + + StringBuilder constantsLinesResult = new StringBuilder(); + constantsLinesResult.append(constantsLines.size() + " constants:\n"); + Collections.sort(constantsLines); + for (String s : constantsLines) { + constantsLinesResult.append(s); + constantsLinesResult.append("\n"); + } + + return constantsLines.toString() + result.toString(); } protected Backend getBackend() { @@ -839,8 +857,9 @@ protected PhaseSuite getCustomGraphBuilderSuite(GraphBuilderConfiguration gbConf) { PhaseSuite suite = getDefaultGraphBuilderSuite().copy(); ListIterator> iterator = suite.findPhase(GraphBuilderPhase.class); + GraphBuilderPhase graphBuilderPhase = (GraphBuilderPhase) iterator.previous(); iterator.remove(); - iterator.add(new GraphBuilderPhase(gbConf)); + iterator.add(new GraphBuilderPhase(gbConf, graphBuilderPhase.getGraphBuilderPlugins())); return suite; } diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ea/EATestBase.java --- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ea/EATestBase.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/ea/EATestBase.java Sat Feb 07 02:47:00 2015 +0100 @@ -152,7 +152,8 @@ graph = new StructuredGraph(method); try (Scope s = Debug.scope(getClass(), graph, method, getCodeCache())) { Assumptions assumptions = new Assumptions(false); - new GraphBuilderPhase.Instance(getMetaAccess(), getProviders().getStampProvider(), assumptions, GraphBuilderConfiguration.getEagerDefault(), OptimisticOptimizations.ALL).apply(graph); + new GraphBuilderPhase.Instance(getMetaAccess(), getProviders().getStampProvider(), assumptions, getProviders().getConstantReflection(), GraphBuilderConfiguration.getEagerDefault(), + OptimisticOptimizations.ALL).apply(graph); context = new HighTierContext(getProviders(), assumptions, null, getDefaultGraphBuilderSuite(), OptimisticOptimizations.ALL); new InliningPhase(new CanonicalizerPhase(true)).apply(graph, context); new DeadCodeEliminationPhase().apply(graph); diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/tutorial/StaticAnalysis.java --- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/tutorial/StaticAnalysis.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/tutorial/StaticAnalysis.java Sat Feb 07 02:47:00 2015 +0100 @@ -238,7 +238,7 @@ OptimisticOptimizations optimisticOpts = OptimisticOptimizations.NONE; Assumptions assumptions = new Assumptions(false); - GraphBuilderPhase.Instance graphBuilder = new GraphBuilderPhase.Instance(metaAccess, stampProvider, assumptions, graphBuilderConfig, optimisticOpts); + GraphBuilderPhase.Instance graphBuilder = new GraphBuilderPhase.Instance(metaAccess, stampProvider, assumptions, null, graphBuilderConfig, optimisticOpts); graphBuilder.apply(graph); } catch (Throwable ex) { Debug.handle(ex); diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/GraalCompiler.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/GraalCompiler.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/GraalCompiler.java Sat Feb 07 02:47:00 2015 +0100 @@ -29,19 +29,19 @@ import java.util.*; -import com.oracle.graal.alloc.*; import 
com.oracle.graal.api.code.*; import com.oracle.graal.api.code.CompilationResult.ConstantReference; import com.oracle.graal.api.code.CompilationResult.DataPatch; import com.oracle.graal.api.meta.*; import com.oracle.graal.api.meta.ProfilingInfo.TriState; -import com.oracle.graal.compiler.alloc.*; +import com.oracle.graal.compiler.common.alloc.*; import com.oracle.graal.compiler.common.cfg.*; import com.oracle.graal.compiler.target.*; import com.oracle.graal.debug.*; import com.oracle.graal.debug.Debug.Scope; import com.oracle.graal.debug.internal.*; import com.oracle.graal.lir.*; +import com.oracle.graal.lir.alloc.lsra.*; import com.oracle.graal.lir.asm.*; import com.oracle.graal.lir.constopt.*; import com.oracle.graal.lir.framemap.*; diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/GraalDebugConfig.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/GraalDebugConfig.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/GraalDebugConfig.java Sat Feb 07 02:47:00 2015 +0100 @@ -65,16 +65,6 @@ public static final OptionValue SuppressZeroDebugValues = new OptionValue<>(false); @Option(help = "Send Graal IR to dump handlers on error", type = OptionType.Debug) public static final OptionValue DumpOnError = new OptionValue<>(false); - @Option(help = "Enable expensive assertions", type = OptionType.Debug) - public static final OptionValue DetailedAsserts = new StableOptionValue() { - @Override - protected Boolean initialValue() { - boolean enabled = false; - // turn detailed assertions on when the general assertions are on (misusing the assert keyword for this) - assert (enabled = true) == true; - return enabled; - } - }; @Option(help = "Enable more verbose log output when available", type = OptionType.Debug) public static final OptionValue LogVerbose = new OptionValue<>(false); // @formatter:on diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/Interval.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/Interval.java Sat Feb 07 02:34:43 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1303 +0,0 @@ -/* - * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
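// Illustrative sketch (not part of this changeset): the removed DetailedAsserts option above
// relied on the idiom of giving an assert a side effect, which only runs when the JVM is started
// with -ea, so it can be used to probe whether assertions are enabled. The class name below is
// hypothetical.
public final class AssertionProbe {

    static boolean assertionsEnabled() {
        boolean enabled = false;
        // The assignment inside the assert executes only when assertions are enabled.
        assert (enabled = true) == true;
        return enabled;
    }

    public static void main(String[] args) {
        // Prints "true" when run with "java -ea AssertionProbe", "false" otherwise.
        System.out.println(assertionsEnabled());
    }
}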
- */ -package com.oracle.graal.compiler.alloc; - -import static com.oracle.graal.api.code.ValueUtil.*; -import static com.oracle.graal.compiler.GraalDebugConfig.*; -import static com.oracle.graal.lir.LIRValueUtil.*; - -import java.util.*; - -import com.oracle.graal.api.code.*; -import com.oracle.graal.api.meta.*; -import com.oracle.graal.compiler.common.*; -import com.oracle.graal.debug.*; -import com.oracle.graal.lir.*; -import com.oracle.graal.phases.util.*; - -/** - * Represents an interval in the {@linkplain LinearScan linear scan register allocator}. - */ -public final class Interval { - - /** - * A pair of intervals. - */ - static final class Pair { - - public final Interval first; - public final Interval second; - - public Pair(Interval first, Interval second) { - this.first = first; - this.second = second; - } - } - - /** - * A set of interval lists, one per {@linkplain RegisterBinding binding} type. - */ - static final class RegisterBindingLists { - - /** - * List of intervals whose binding is currently {@link RegisterBinding#Fixed}. - */ - public Interval fixed; - - /** - * List of intervals whose binding is currently {@link RegisterBinding#Any}. - */ - public Interval any; - - /** - * List of intervals whose binding is currently {@link RegisterBinding#Stack}. - */ - public Interval stack; - - public RegisterBindingLists(Interval fixed, Interval any, Interval stack) { - this.fixed = fixed; - this.any = any; - this.stack = stack; - } - - /** - * Gets the list for a specified binding. - * - * @param binding specifies the list to be returned - * @return the list of intervals whose binding is {@code binding} - */ - public Interval get(RegisterBinding binding) { - switch (binding) { - case Any: - return any; - case Fixed: - return fixed; - case Stack: - return stack; - } - throw GraalInternalError.shouldNotReachHere(); - } - - /** - * Sets the list for a specified binding. - * - * @param binding specifies the list to be replaced - * @param list a list of intervals whose binding is {@code binding} - */ - public void set(RegisterBinding binding, Interval list) { - assert list != null; - switch (binding) { - case Any: - any = list; - break; - case Fixed: - fixed = list; - break; - case Stack: - stack = list; - break; - } - } - - /** - * Adds an interval to a list sorted by {@linkplain Interval#currentFrom() current from} - * positions. - * - * @param binding specifies the list to be updated - * @param interval the interval to add - */ - public void addToListSortedByCurrentFromPositions(RegisterBinding binding, Interval interval) { - Interval list = get(binding); - Interval prev = null; - Interval cur = list; - while (cur.currentFrom() < interval.currentFrom()) { - prev = cur; - cur = cur.next; - } - Interval result = list; - if (prev == null) { - // add to head of list - result = interval; - } else { - // add before 'cur' - prev.next = interval; - } - interval.next = cur; - set(binding, result); - } - - /** - * Adds an interval to a list sorted by {@linkplain Interval#from() start} positions and - * {@linkplain Interval#firstUsage(RegisterPriority) first usage} positions. 
- * - * @param binding specifies the list to be updated - * @param interval the interval to add - */ - public void addToListSortedByStartAndUsePositions(RegisterBinding binding, Interval interval) { - Interval list = get(binding); - Interval prev = null; - Interval cur = list; - while (cur.from() < interval.from() || (cur.from() == interval.from() && cur.firstUsage(RegisterPriority.None) < interval.firstUsage(RegisterPriority.None))) { - prev = cur; - cur = cur.next; - } - if (prev == null) { - list = interval; - } else { - prev.next = interval; - } - interval.next = cur; - set(binding, list); - } - - /** - * Removes an interval from a list. - * - * @param binding specifies the list to be updated - * @param i the interval to remove - */ - public void remove(RegisterBinding binding, Interval i) { - Interval list = get(binding); - Interval prev = null; - Interval cur = list; - while (cur != i) { - assert cur != null && cur != Interval.EndMarker : "interval has not been found in list: " + i; - prev = cur; - cur = cur.next; - } - if (prev == null) { - set(binding, cur.next); - } else { - prev.next = cur.next; - } - } - } - - /** - * Constants denoting the register usage priority for an interval. The constants are declared in - * increasing order of priority are are used to optimize spilling when multiple overlapping - * intervals compete for limited registers. - */ - public enum RegisterPriority { - /** - * No special reason for an interval to be allocated a register. - */ - None, - - /** - * Priority level for intervals live at the end of a loop. - */ - LiveAtLoopEnd, - - /** - * Priority level for intervals that should be allocated to a register. - */ - ShouldHaveRegister, - - /** - * Priority level for intervals that must be allocated to a register. - */ - MustHaveRegister; - - public static final RegisterPriority[] VALUES = values(); - - /** - * Determines if this priority is higher than or equal to a given priority. - */ - public boolean greaterEqual(RegisterPriority other) { - return ordinal() >= other.ordinal(); - } - - /** - * Determines if this priority is lower than a given priority. - */ - public boolean lessThan(RegisterPriority other) { - return ordinal() < other.ordinal(); - } - } - - /** - * Constants denoting whether an interval is bound to a specific register. This models platform - * dependencies on register usage for certain instructions. - */ - enum RegisterBinding { - /** - * Interval is bound to a specific register as required by the platform. - */ - Fixed, - - /** - * Interval has no specific register requirements. - */ - Any, - - /** - * Interval is bound to a stack slot. - */ - Stack; - - public static final RegisterBinding[] VALUES = values(); - } - - /** - * Constants denoting the linear-scan states an interval may be in with respect to the - * {@linkplain Interval#from() start} {@code position} of the interval being processed. - */ - enum State { - /** - * An interval that starts after {@code position}. - */ - Unhandled, - - /** - * An interval that {@linkplain Interval#covers covers} {@code position} and has an assigned - * register. - */ - Active, - - /** - * An interval that starts before and ends after {@code position} but does not - * {@linkplain Interval#covers cover} it due to a lifetime hole. - */ - Inactive, - - /** - * An interval that ends before {@code position} or is spilled to memory. - */ - Handled; - } - - /** - * Constants used in optimization of spilling of an interval. 
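// Illustrative sketch (not part of this changeset): the insertion pattern used by
// RegisterBindingLists above, reduced to its essentials: a singly linked list kept sorted by a
// key and terminated by a sentinel whose key is Integer.MAX_VALUE, so the scan never runs off
// the end. The Node class and field names are hypothetical stand-ins for Interval and
// currentFrom().
public final class SortedSentinelList {

    static final class Node {
        final int key;   // stands in for Interval.currentFrom()
        Node next;

        Node(int key) {
            this.key = key;
        }
    }

    // Shared end marker; every list ends here, mirroring Interval.EndMarker.
    static final Node END = new Node(Integer.MAX_VALUE);

    // Inserts node into the list headed by head, keeping keys ascending; returns the new head.
    static Node insertSorted(Node head, Node node) {
        Node prev = null;
        Node cur = head;
        while (cur.key < node.key) {
            prev = cur;
            cur = cur.next;
        }
        node.next = cur;
        if (prev == null) {
            return node;   // the new node becomes the head
        }
        prev.next = node;
        return head;
    }

    public static void main(String[] args) {
        Node head = END;
        head = insertSorted(head, new Node(20));
        head = insertSorted(head, new Node(5));
        head = insertSorted(head, new Node(12));
        for (Node n = head; n != END; n = n.next) {
            System.out.print(n.key + " ");   // prints: 5 12 20
        }
        System.out.println();
    }
}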
- */ - enum SpillState { - /** - * Starting state of calculation: no definition found yet. - */ - NoDefinitionFound, - - /** - * One definition has already been found. Two consecutive definitions are treated as one - * (e.g. a consecutive move and add because of two-operand LIR form). The position of this - * definition is given by {@link Interval#spillDefinitionPos()}. - */ - NoSpillStore, - - /** - * One spill move has already been inserted. - */ - OneSpillStore, - - /** - * The interval is spilled multiple times or is spilled in a loop. Place the store somewhere - * on the dominator path between the definition and the usages. - */ - SpillInDominator, - - /** - * The interval should be stored immediately after its definition to prevent multiple - * redundant stores. - */ - StoreAtDefinition, - - /** - * The interval starts in memory (e.g. method parameter), so a store is never necessary. - */ - StartInMemory, - - /** - * The interval has more than one definition (e.g. resulting from phi moves), so stores to - * memory are not optimized. - */ - NoOptimization - } - - /** - * List of use positions. Each entry in the list records the use position and register priority - * associated with the use position. The entries in the list are in descending order of use - * position. - * - */ - public static final class UsePosList { - - private IntList list; - - /** - * Creates a use list. - * - * @param initialCapacity the initial capacity of the list in terms of entries - */ - public UsePosList(int initialCapacity) { - list = new IntList(initialCapacity * 2); - } - - private UsePosList(IntList list) { - this.list = list; - } - - /** - * Splits this list around a given position. All entries in this list with a use position - * greater or equal than {@code splitPos} are removed from this list and added to the - * returned list. - * - * @param splitPos the position for the split - * @return a use position list containing all entries removed from this list that have a use - * position greater or equal than {@code splitPos} - */ - public UsePosList splitAt(int splitPos) { - int i = size() - 1; - int len = 0; - while (i >= 0 && usePos(i) < splitPos) { - --i; - len += 2; - } - int listSplitIndex = (i + 1) * 2; - IntList childList = list; - list = IntList.copy(this.list, listSplitIndex, len); - childList.setSize(listSplitIndex); - UsePosList child = new UsePosList(childList); - return child; - } - - /** - * Gets the use position at a specified index in this list. - * - * @param index the index of the entry for which the use position is returned - * @return the use position of entry {@code index} in this list - */ - public int usePos(int index) { - return list.get(index << 1); - } - - /** - * Gets the register priority for the use position at a specified index in this list. 
- * - * @param index the index of the entry for which the register priority is returned - * @return the register priority of entry {@code index} in this list - */ - public RegisterPriority registerPriority(int index) { - return RegisterPriority.VALUES[list.get((index << 1) + 1)]; - } - - public void add(int usePos, RegisterPriority registerPriority) { - assert list.size() == 0 || usePos(size() - 1) > usePos; - list.add(usePos); - list.add(registerPriority.ordinal()); - } - - public int size() { - return list.size() >> 1; - } - - public void removeLowestUsePos() { - list.setSize(list.size() - 2); - } - - public void setRegisterPriority(int index, RegisterPriority registerPriority) { - list.set(index * 2, registerPriority.ordinal()); - } - - @Override - public String toString() { - StringBuilder buf = new StringBuilder("["); - for (int i = size() - 1; i >= 0; --i) { - if (buf.length() != 1) { - buf.append(", "); - } - RegisterPriority prio = registerPriority(i); - buf.append(usePos(i)).append(" -> ").append(prio.ordinal()).append(':').append(prio); - } - return buf.append("]").toString(); - } - } - - /** - * The {@linkplain RegisterValue register} or {@linkplain Variable variable} for this interval - * prior to register allocation. - */ - public final AllocatableValue operand; - - /** - * The operand number for this interval's {@linkplain #operand operand}. - */ - public final int operandNumber; - - /** - * The {@linkplain RegisterValue register} or {@linkplain StackSlot spill slot} assigned to this - * interval. In case of a spilled interval which is re-materialized this is - * {@link Value#ILLEGAL}. - */ - private AllocatableValue location; - - /** - * The stack slot to which all splits of this interval are spilled if necessary. - */ - private StackSlotValue spillSlot; - - /** - * The kind of this interval. - */ - private LIRKind kind; - - /** - * The head of the list of ranges describing this interval. This list is sorted by - * {@linkplain LIRInstruction#id instruction ids}. - */ - private Range first; - - /** - * List of (use-positions, register-priorities) pairs, sorted by use-positions. - */ - private UsePosList usePosList; - - /** - * Iterator used to traverse the ranges of an interval. - */ - private Range current; - - /** - * Link to next interval in a sorted list of intervals that ends with {@link #EndMarker}. - */ - Interval next; - - /** - * The linear-scan state of this interval. - */ - State state; - - private int cachedTo; // cached value: to of last range (-1: not cached) - - /** - * The interval from which this one is derived. If this is a {@linkplain #isSplitParent() split - * parent}, it points to itself. - */ - private Interval splitParent; - - /** - * List of all intervals that are split off from this interval. This is only used if this is a - * {@linkplain #isSplitParent() split parent}. - */ - private List splitChildren = Collections.emptyList(); - - /** - * Current split child that has been active or inactive last (always stored in split parents). - */ - private Interval currentSplitChild; - - /** - * Specifies if move is inserted between currentSplitChild and this interval when interval gets - * active the first time. - */ - private boolean insertMoveWhenActivated; - - /** - * For spill move optimization. - */ - private SpillState spillState; - - /** - * Position where this interval is defined (if defined only once). - */ - private int spillDefinitionPos; - - /** - * This interval should be assigned the same location as the hint interval. 
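import java.util.ArrayList;
import java.util.List;

// Illustrative sketch (not part of this changeset): the flat pair encoding used by UsePosList
// above. Each entry occupies two consecutive ints, the use position followed by the
// register-priority ordinal, and entries are appended in descending order of position. The class
// name and the use of ArrayList<Integer> (instead of the IntList in the patch) are
// simplifications for the sketch.
public final class UsePosListSketch {

    private final List<Integer> list = new ArrayList<>();

    // Appends an entry; callers must add positions in descending order.
    void add(int usePos, int priorityOrdinal) {
        assert size() == 0 || usePos(size() - 1) > usePos : "positions must be added in descending order";
        list.add(usePos);
        list.add(priorityOrdinal);
    }

    int usePos(int index) {
        return list.get(index << 1);
    }

    int priority(int index) {
        return list.get((index << 1) + 1);
    }

    int size() {
        return list.size() >> 1;
    }

    public static void main(String[] args) {
        UsePosListSketch l = new UsePosListSketch();
        l.add(40, 3);   // e.g. MustHaveRegister at position 40
        l.add(24, 2);   // ShouldHaveRegister at position 24
        l.add(8, 0);    // None at position 8
        for (int i = l.size() - 1; i >= 0; i--) {
            // Iterating from the highest index yields ascending use positions: 8, 24, 40.
            System.out.println(l.usePos(i) + " -> priority ordinal " + l.priority(i));
        }
    }
}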
- */ - private Interval locationHint; - - /** - * The value with which a spilled child interval can be re-materialized. Currently this must be - * a Constant. - */ - private JavaConstant materializedValue; - - /** - * The number of times {@link #addMaterializationValue(JavaConstant)} is called. - */ - private int numMaterializationValuesAdded; - - void assignLocation(AllocatableValue newLocation) { - if (isRegister(newLocation)) { - assert this.location == null : "cannot re-assign location for " + this; - if (newLocation.getLIRKind().equals(LIRKind.Illegal) && !kind.equals(LIRKind.Illegal)) { - this.location = asRegister(newLocation).asValue(kind); - return; - } - } else if (isIllegal(newLocation)) { - assert canMaterialize(); - } else { - assert this.location == null || isRegister(this.location) || (isVirtualStackSlot(this.location) && isStackSlot(newLocation)) : "cannot re-assign location for " + this; - assert isStackSlotValue(newLocation); - assert !newLocation.getLIRKind().equals(LIRKind.Illegal); - assert newLocation.getLIRKind().equals(this.kind); - } - this.location = newLocation; - } - - /** - * Gets the {@linkplain RegisterValue register} or {@linkplain StackSlot spill slot} assigned to - * this interval. - */ - public AllocatableValue location() { - return location; - } - - public LIRKind kind() { - assert !isRegister(operand) : "cannot access type for fixed interval"; - return kind; - } - - void setKind(LIRKind kind) { - assert isRegister(operand) || this.kind().equals(LIRKind.Illegal) || this.kind().equals(kind) : "overwriting existing type"; - this.kind = kind; - } - - public Range first() { - return first; - } - - int from() { - return first.from; - } - - int to() { - if (cachedTo == -1) { - cachedTo = calcTo(); - } - assert cachedTo == calcTo() : "invalid cached value"; - return cachedTo; - } - - int numUsePositions() { - return usePosList.size(); - } - - void setLocationHint(Interval interval) { - locationHint = interval; - } - - boolean isSplitParent() { - return splitParent == this; - } - - boolean isSplitChild() { - return splitParent != this; - } - - /** - * Gets the split parent for this interval. - */ - public Interval splitParent() { - assert splitParent.isSplitParent() : "not a split parent: " + this; - return splitParent; - } - - /** - * Gets the canonical spill slot for this interval. 
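// Illustrative sketch (not part of this changeset): the delegation pattern used above. State
// that must be shared by all split children of an interval (spill slot, spill state, spill
// definition position) lives only on the split parent, and every accessor first walks to
// splitParent(). Names are hypothetical and the shared state is reduced to a single field.
public final class SplitParentSketch {

    private final SplitParentSketch splitParent;   // points to itself for a split parent
    private Integer spillSlot;                     // canonical state, only meaningful on the parent

    SplitParentSketch() {
        this.splitParent = this;                   // a freshly created interval is its own parent
    }

    SplitParentSketch(SplitParentSketch parent) {
        this.splitParent = parent;                 // a split child shares the parent's state
    }

    void setSpillSlot(int slot) {
        splitParent.spillSlot = slot;              // always write through to the canonical copy
    }

    Integer spillSlot() {
        return splitParent.spillSlot;              // always read the canonical copy
    }

    public static void main(String[] args) {
        SplitParentSketch parent = new SplitParentSketch();
        SplitParentSketch child = new SplitParentSketch(parent);
        child.setSpillSlot(5);
        System.out.println(parent.spillSlot());    // 5: the child updated the shared slot
        System.out.println(child.spillSlot());     // 5
    }
}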
- */ - StackSlotValue spillSlot() { - return splitParent().spillSlot; - } - - void setSpillSlot(StackSlotValue slot) { - assert splitParent().spillSlot == null || (isVirtualStackSlot(splitParent().spillSlot) && isStackSlot(slot)) : "connot overwrite existing spill slot"; - splitParent().spillSlot = slot; - } - - Interval currentSplitChild() { - return splitParent().currentSplitChild; - } - - void makeCurrentSplitChild() { - splitParent().currentSplitChild = this; - } - - boolean insertMoveWhenActivated() { - return insertMoveWhenActivated; - } - - void setInsertMoveWhenActivated(boolean b) { - insertMoveWhenActivated = b; - } - - // for spill optimization - public SpillState spillState() { - return splitParent().spillState; - } - - int spillDefinitionPos() { - return splitParent().spillDefinitionPos; - } - - void setSpillState(SpillState state) { - assert state.ordinal() >= spillState().ordinal() : "state cannot decrease"; - splitParent().spillState = state; - } - - void setSpillDefinitionPos(int pos) { - assert spillState() == SpillState.SpillInDominator || spillDefinitionPos() == -1 : "cannot set the position twice"; - splitParent().spillDefinitionPos = pos; - } - - // returns true if this interval has a shadow copy on the stack that is always correct - boolean alwaysInMemory() { - return (splitParent().spillState == SpillState.SpillInDominator || splitParent().spillState == SpillState.StoreAtDefinition || splitParent().spillState == SpillState.StartInMemory) && - !canMaterialize(); - } - - void removeFirstUsePos() { - usePosList.removeLowestUsePos(); - } - - // test intersection - boolean intersects(Interval i) { - return first.intersects(i.first); - } - - int intersectsAt(Interval i) { - return first.intersectsAt(i.first); - } - - // range iteration - void rewindRange() { - current = first; - } - - void nextRange() { - assert this != EndMarker : "not allowed on sentinel"; - current = current.next; - } - - int currentFrom() { - return current.from; - } - - int currentTo() { - return current.to; - } - - boolean currentAtEnd() { - return current == Range.EndMarker; - } - - boolean currentIntersects(Interval it) { - return current.intersects(it.current); - } - - int currentIntersectsAt(Interval it) { - return current.intersectsAt(it.current); - } - - /** - * Sentinel interval to denote the end of an interval list. - */ - static final Interval EndMarker = new Interval(Value.ILLEGAL, -1); - - Interval(AllocatableValue operand, int operandNumber) { - assert operand != null; - this.operand = operand; - this.operandNumber = operandNumber; - if (isRegister(operand)) { - location = operand; - } else { - assert isIllegal(operand) || isVariable(operand); - } - this.kind = LIRKind.Illegal; - this.first = Range.EndMarker; - this.usePosList = new UsePosList(4); - this.current = Range.EndMarker; - this.next = EndMarker; - this.cachedTo = -1; - this.spillState = SpillState.NoDefinitionFound; - this.spillDefinitionPos = -1; - splitParent = this; - currentSplitChild = this; - } - - /** - * Sets the value which is used for re-materialization. - */ - void addMaterializationValue(JavaConstant value) { - if (numMaterializationValuesAdded == 0) { - materializedValue = value; - } else { - // Interval is defined on multiple places -> no materialization is possible. - materializedValue = null; - } - numMaterializationValuesAdded++; - } - - /** - * Returns true if this interval can be re-materialized when spilled. This means that no - * spill-moves are needed. 
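// Illustrative sketch (not part of this changeset): the single-definition tracking behind
// addMaterializationValue/canMaterialize above. The first recorded constant is kept, and any
// further definition clears it, because an interval defined more than once cannot simply be
// re-materialized instead of being reloaded from its spill slot. Names are hypothetical and the
// constant is modeled as a plain long.
public final class RematerializationSketch {

    private Long materializedValue;   // null means "cannot re-materialize"
    private int definitionsSeen;

    void recordDefinition(long constant) {
        if (definitionsSeen == 0) {
            materializedValue = constant;
        } else {
            // Defined in more than one place -> re-materialization is no longer possible.
            materializedValue = null;
        }
        definitionsSeen++;
    }

    boolean canMaterialize() {
        return materializedValue != null;
    }

    public static void main(String[] args) {
        RematerializationSketch once = new RematerializationSketch();
        once.recordDefinition(42);
        System.out.println(once.canMaterialize());   // true: a single constant definition

        RematerializationSketch twice = new RematerializationSketch();
        twice.recordDefinition(42);
        twice.recordDefinition(7);
        System.out.println(twice.canMaterialize());  // false: multiple definitions
    }
}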
Instead of restore-moves the {@link #materializedValue} is restored. - */ - public boolean canMaterialize() { - return getMaterializedValue() != null; - } - - /** - * Returns a value which can be moved to a register instead of a restore-move from stack. - */ - public JavaConstant getMaterializedValue() { - return splitParent().materializedValue; - } - - int calcTo() { - assert first != Range.EndMarker : "interval has no range"; - - Range r = first; - while (r.next != Range.EndMarker) { - r = r.next; - } - return r.to; - } - - // consistency check of split-children - boolean checkSplitChildren() { - if (!splitChildren.isEmpty()) { - assert isSplitParent() : "only split parents can have children"; - - for (int i = 0; i < splitChildren.size(); i++) { - Interval i1 = splitChildren.get(i); - - assert i1.splitParent() == this : "not a split child of this interval"; - assert i1.kind().equals(kind()) : "must be equal for all split children"; - assert (i1.spillSlot() == null && spillSlot == null) || i1.spillSlot().equals(spillSlot()) : "must be equal for all split children"; - - for (int j = i + 1; j < splitChildren.size(); j++) { - Interval i2 = splitChildren.get(j); - - assert !i1.operand.equals(i2.operand) : "same register number"; - - if (i1.from() < i2.from()) { - assert i1.to() <= i2.from() && i1.to() < i2.to() : "intervals overlapping"; - } else { - assert i2.from() < i1.from() : "intervals start at same opId"; - assert i2.to() <= i1.from() && i2.to() < i1.to() : "intervals overlapping"; - } - } - } - } - - return true; - } - - public Interval locationHint(boolean searchSplitChild) { - if (!searchSplitChild) { - return locationHint; - } - - if (locationHint != null) { - assert locationHint.isSplitParent() : "ony split parents are valid hint registers"; - - if (locationHint.location != null && isRegister(locationHint.location)) { - return locationHint; - } else if (!locationHint.splitChildren.isEmpty()) { - // search the first split child that has a register assigned - int len = locationHint.splitChildren.size(); - for (int i = 0; i < len; i++) { - Interval interval = locationHint.splitChildren.get(i); - if (interval.location != null && isRegister(interval.location)) { - return interval; - } - } - } - } - - // no hint interval found that has a register assigned - return null; - } - - Interval getSplitChildAtOpId(int opId, LIRInstruction.OperandMode mode, LinearScan allocator) { - assert isSplitParent() : "can only be called for split parents"; - assert opId >= 0 : "invalid opId (method cannot be called for spill moves)"; - - if (splitChildren.isEmpty()) { - assert this.covers(opId, mode) : this + " does not cover " + opId; - return this; - } else { - Interval result = null; - int len = splitChildren.size(); - - // in outputMode, the end of the interval (opId == cur.to()) is not valid - int toOffset = (mode == LIRInstruction.OperandMode.DEF ? 
0 : 1); - - int i; - for (i = 0; i < len; i++) { - Interval cur = splitChildren.get(i); - if (cur.from() <= opId && opId < cur.to() + toOffset) { - if (i > 0) { - // exchange current split child to start of list (faster access for next - // call) - Util.atPutGrow(splitChildren, i, splitChildren.get(0), null); - Util.atPutGrow(splitChildren, 0, cur, null); - } - - // interval found - result = cur; - break; - } - } - - assert checkSplitChild(result, opId, allocator, toOffset, mode); - return result; - } - } - - private boolean checkSplitChild(Interval result, int opId, LinearScan allocator, int toOffset, LIRInstruction.OperandMode mode) { - if (result == null) { - // this is an error - StringBuilder msg = new StringBuilder(this.toString()).append(" has no child at ").append(opId); - if (!splitChildren.isEmpty()) { - Interval firstChild = splitChildren.get(0); - Interval lastChild = splitChildren.get(splitChildren.size() - 1); - msg.append(" (first = ").append(firstChild).append(", last = ").append(lastChild).append(")"); - } - throw new GraalInternalError("Linear Scan Error: %s", msg); - } - - if (!splitChildren.isEmpty()) { - for (Interval interval : splitChildren) { - if (interval != result && interval.from() <= opId && opId < interval.to() + toOffset) { - TTY.println(String.format("two valid result intervals found for opId %d: %d and %d", opId, result.operandNumber, interval.operandNumber)); - TTY.println(result.logString(allocator)); - TTY.println(interval.logString(allocator)); - throw new BailoutException("two valid result intervals found"); - } - } - } - assert result.covers(opId, mode) : "opId not covered by interval"; - return true; - } - - // returns the interval that covers the given opId or null if there is none - Interval getIntervalCoveringOpId(int opId) { - assert opId >= 0 : "invalid opId"; - assert opId < to() : "can only look into the past"; - - if (opId >= from()) { - return this; - } - - Interval parent = splitParent(); - Interval result = null; - - assert !parent.splitChildren.isEmpty() : "no split children available"; - int len = parent.splitChildren.size(); - - for (int i = len - 1; i >= 0; i--) { - Interval cur = parent.splitChildren.get(i); - if (cur.from() <= opId && opId < cur.to()) { - assert result == null : "covered by multiple split children " + result + " and " + cur; - result = cur; - } - } - - return result; - } - - // returns the last split child that ends before the given opId - Interval getSplitChildBeforeOpId(int opId) { - assert opId >= 0 : "invalid opId"; - - Interval parent = splitParent(); - Interval result = null; - - assert !parent.splitChildren.isEmpty() : "no split children available"; - int len = parent.splitChildren.size(); - - for (int i = len - 1; i >= 0; i--) { - Interval cur = parent.splitChildren.get(i); - if (cur.to() <= opId && (result == null || result.to() < cur.to())) { - result = cur; - } - } - - assert result != null : "no split child found"; - return result; - } - - // checks if opId is covered by any split child - boolean splitChildCovers(int opId, LIRInstruction.OperandMode mode) { - assert isSplitParent() : "can only be called for split parents"; - assert opId >= 0 : "invalid opId (method can not be called for spill moves)"; - - if (splitChildren.isEmpty()) { - // simple case if interval was not split - return covers(opId, mode); - - } else { - // extended case: check all split children - int len = splitChildren.size(); - for (int i = 0; i < len; i++) { - Interval cur = splitChildren.get(i); - if (cur.covers(opId, mode)) { - 
return true; - } - } - return false; - } - } - - private RegisterPriority adaptPriority(RegisterPriority priority) { - /* - * In case of re-materialized values we require that use-operands are registers, because we - * don't have the value in a stack location. (Note that ShouldHaveRegister means that the - * operand can also be a StackSlot). - */ - if (priority == RegisterPriority.ShouldHaveRegister && canMaterialize()) { - return RegisterPriority.MustHaveRegister; - } - return priority; - } - - // Note: use positions are sorted descending . first use has highest index - int firstUsage(RegisterPriority minRegisterPriority) { - assert isVariable(operand) : "cannot access use positions for fixed intervals"; - - for (int i = usePosList.size() - 1; i >= 0; --i) { - RegisterPriority registerPriority = adaptPriority(usePosList.registerPriority(i)); - if (registerPriority.greaterEqual(minRegisterPriority)) { - return usePosList.usePos(i); - } - } - return Integer.MAX_VALUE; - } - - int nextUsage(RegisterPriority minRegisterPriority, int from) { - assert isVariable(operand) : "cannot access use positions for fixed intervals"; - - for (int i = usePosList.size() - 1; i >= 0; --i) { - int usePos = usePosList.usePos(i); - if (usePos >= from && adaptPriority(usePosList.registerPriority(i)).greaterEqual(minRegisterPriority)) { - return usePos; - } - } - return Integer.MAX_VALUE; - } - - int nextUsageExact(RegisterPriority exactRegisterPriority, int from) { - assert isVariable(operand) : "cannot access use positions for fixed intervals"; - - for (int i = usePosList.size() - 1; i >= 0; --i) { - int usePos = usePosList.usePos(i); - if (usePos >= from && adaptPriority(usePosList.registerPriority(i)) == exactRegisterPriority) { - return usePos; - } - } - return Integer.MAX_VALUE; - } - - int previousUsage(RegisterPriority minRegisterPriority, int from) { - assert isVariable(operand) : "cannot access use positions for fixed intervals"; - - int prev = 0; - for (int i = usePosList.size() - 1; i >= 0; --i) { - int usePos = usePosList.usePos(i); - if (usePos > from) { - return prev; - } - if (adaptPriority(usePosList.registerPriority(i)).greaterEqual(minRegisterPriority)) { - prev = usePos; - } - } - return prev; - } - - void addUsePos(int pos, RegisterPriority registerPriority) { - assert covers(pos, LIRInstruction.OperandMode.USE) : "use position not covered by live range"; - - // do not add use positions for precolored intervals because they are never used - if (registerPriority != RegisterPriority.None && isVariable(operand)) { - if (DetailedAsserts.getValue()) { - for (int i = 0; i < usePosList.size(); i++) { - assert pos <= usePosList.usePos(i) : "already added a use-position with lower position"; - if (i > 0) { - assert usePosList.usePos(i) < usePosList.usePos(i - 1) : "not sorted descending"; - } - } - } - - // Note: addUse is called in descending order, so list gets sorted - // automatically by just appending new use positions - int len = usePosList.size(); - if (len == 0 || usePosList.usePos(len - 1) > pos) { - usePosList.add(pos, registerPriority); - } else if (usePosList.registerPriority(len - 1).lessThan(registerPriority)) { - assert usePosList.usePos(len - 1) == pos : "list not sorted correctly"; - usePosList.setRegisterPriority(len - 1, registerPriority); - } - } - } - - void addRange(int from, int to) { - assert from < to : "invalid range"; - assert first() == Range.EndMarker || to < first().next.from : "not inserting at begin of interval"; - assert from <= first().to : "not inserting at begin of 
interval"; - - if (first.from <= to) { - assert first != Range.EndMarker; - // join intersecting ranges - first.from = Math.min(from, first().from); - first.to = Math.max(to, first().to); - } else { - // insert new range - first = new Range(from, to, first()); - } - } - - Interval newSplitChild(LinearScan allocator) { - // allocate new interval - Interval parent = splitParent(); - Interval result = allocator.createDerivedInterval(parent); - result.setKind(kind()); - - result.splitParent = parent; - result.setLocationHint(parent); - - // insert new interval in children-list of parent - if (parent.splitChildren.isEmpty()) { - assert isSplitParent() : "list must be initialized at first split"; - - // Create new non-shared list - parent.splitChildren = new ArrayList<>(4); - parent.splitChildren.add(this); - } - parent.splitChildren.add(result); - - return result; - } - - /** - * Splits this interval at a specified position and returns the remainder as a new child - * interval of this interval's {@linkplain #splitParent() parent} interval. - *
- * When an interval is split, a bi-directional link is established between the original - * parent interval and the children intervals that are split off this interval. - * When a split child is split again, the new created interval is a direct child of the original - * parent. That is, there is no tree of split children stored, just a flat list. All split - * children are spilled to the same {@linkplain #spillSlot spill slot}. - * - * @param splitPos the position at which to split this interval - * @param allocator the register allocator context - * @return the child interval split off from this interval - */ - Interval split(int splitPos, LinearScan allocator) { - assert isVariable(operand) : "cannot split fixed intervals"; - - // allocate new interval - Interval result = newSplitChild(allocator); - - // split the ranges - Range prev = null; - Range cur = first; - while (cur != Range.EndMarker && cur.to <= splitPos) { - prev = cur; - cur = cur.next; - } - assert cur != Range.EndMarker : "split interval after end of last range"; - - if (cur.from < splitPos) { - result.first = new Range(splitPos, cur.to, cur.next); - cur.to = splitPos; - cur.next = Range.EndMarker; - - } else { - assert prev != null : "split before start of first range"; - result.first = cur; - prev.next = Range.EndMarker; - } - result.current = result.first; - cachedTo = -1; // clear cached value - - // split list of use positions - result.usePosList = usePosList.splitAt(splitPos); - - if (DetailedAsserts.getValue()) { - for (int i = 0; i < usePosList.size(); i++) { - assert usePosList.usePos(i) < splitPos; - } - for (int i = 0; i < result.usePosList.size(); i++) { - assert result.usePosList.usePos(i) >= splitPos; - } - } - return result; - } - - /** - * Splits this interval at a specified position and returns the head as a new interval (this - * interval is the tail). 
- * - * Currently, only the first range can be split, and the new interval must not have split - * positions - */ - Interval splitFromStart(int splitPos, LinearScan allocator) { - assert isVariable(operand) : "cannot split fixed intervals"; - assert splitPos > from() && splitPos < to() : "can only split inside interval"; - assert splitPos > first.from && splitPos <= first.to : "can only split inside first range"; - assert firstUsage(RegisterPriority.None) > splitPos : "can not split when use positions are present"; - - // allocate new interval - Interval result = newSplitChild(allocator); - - // the new interval has only one range (checked by assertion above, - // so the splitting of the ranges is very simple - result.addRange(first.from, splitPos); - - if (splitPos == first.to) { - assert first.next != Range.EndMarker : "must not be at end"; - first = first.next; - } else { - first.from = splitPos; - } - - return result; - } - - // returns true if the opId is inside the interval - boolean covers(int opId, LIRInstruction.OperandMode mode) { - Range cur = first; - - while (cur != Range.EndMarker && cur.to < opId) { - cur = cur.next; - } - if (cur != Range.EndMarker) { - assert cur.to != cur.next.from : "ranges not separated"; - - if (mode == LIRInstruction.OperandMode.DEF) { - return cur.from <= opId && opId < cur.to; - } else { - return cur.from <= opId && opId <= cur.to; - } - } - return false; - } - - // returns true if the interval has any hole between holeFrom and holeTo - // (even if the hole has only the length 1) - boolean hasHoleBetween(int holeFrom, int holeTo) { - assert holeFrom < holeTo : "check"; - assert from() <= holeFrom && holeTo <= to() : "index out of interval"; - - Range cur = first; - while (cur != Range.EndMarker) { - assert cur.to < cur.next.from : "no space between ranges"; - - // hole-range starts before this range . hole - if (holeFrom < cur.from) { - return true; - - // hole-range completely inside this range . no hole - } else { - if (holeTo <= cur.to) { - return false; - - // overlapping of hole-range with this range . hole - } else { - if (holeFrom <= cur.to) { - return true; - } - } - } - - cur = cur.next; - } - - return false; - } - - @Override - public String toString() { - String from = "?"; - String to = "?"; - if (first != null && first != Range.EndMarker) { - from = String.valueOf(from()); - // to() may cache a computed value, modifying the current object, which is a bad idea - // for a printing function. Compute it directly instead. - to = String.valueOf(calcTo()); - } - String locationString = this.location == null ? "" : "@" + this.location; - return operandNumber + ":" + operand + (isRegister(operand) ? "" : locationString) + "[" + from + "," + to + "]"; - } - - /** - * Gets the use position information for this interval. - */ - public UsePosList usePosList() { - return usePosList; - } - - /** - * Gets a single line string for logging the details of this interval to a log stream. 
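// Illustrative sketch (not part of this changeset): the range walk performed by
// Interval.covers(...) above. The ranges of an interval are sorted and non-overlapping, so a
// linear scan can stop at the first range that ends at or after the queried operation id. The
// Range class here is a hypothetical stand-in for the one in the patch.
public final class CoversSketch {

    static final class Range {
        final int from;    // inclusive
        final int to;      // exclusive for definitions, inclusive for uses
        final Range next;  // null terminates the list in this sketch

        Range(int from, int to, Range next) {
            this.from = from;
            this.to = to;
            this.next = next;
        }
    }

    // isDef mirrors OperandMode.DEF: a definition at the very end of a range is not covered.
    static boolean covers(Range first, int opId, boolean isDef) {
        Range cur = first;
        while (cur != null && cur.to < opId) {
            cur = cur.next;
        }
        if (cur == null) {
            return false;
        }
        return isDef ? (cur.from <= opId && opId < cur.to) : (cur.from <= opId && opId <= cur.to);
    }

    public static void main(String[] args) {
        // Two ranges with a lifetime hole between 10 and 20.
        Range ranges = new Range(4, 10, new Range(20, 30, null));
        System.out.println(covers(ranges, 8, false));   // true: inside the first range
        System.out.println(covers(ranges, 14, false));  // false: falls into the hole
        System.out.println(covers(ranges, 10, true));   // false: a DEF at the range end is excluded
        System.out.println(covers(ranges, 10, false));  // true: a USE at the range end is included
    }
}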
- * - * @param allocator the register allocator context - */ - public String logString(LinearScan allocator) { - StringBuilder buf = new StringBuilder(100); - buf.append(operandNumber).append(':').append(operand).append(' '); - if (!isRegister(operand)) { - if (location != null) { - buf.append("location{").append(location).append("} "); - } - } - - buf.append("hints{").append(splitParent.operandNumber); - Interval hint = locationHint(false); - if (hint != null && hint.operandNumber != splitParent.operandNumber) { - buf.append(", ").append(hint.operandNumber); - } - buf.append("} ranges{"); - - // print ranges - Range cur = first; - while (cur != Range.EndMarker) { - if (cur != first) { - buf.append(", "); - } - buf.append(cur); - cur = cur.next; - assert cur != null : "range list not closed with range sentinel"; - } - buf.append("} uses{"); - - // print use positions - int prev = 0; - for (int i = usePosList.size() - 1; i >= 0; --i) { - assert prev < usePosList.usePos(i) : "use positions not sorted"; - if (i != usePosList.size() - 1) { - buf.append(", "); - } - buf.append(usePosList.usePos(i)).append(':').append(usePosList.registerPriority(i)); - prev = usePosList.usePos(i); - } - buf.append("} spill-state{").append(spillState()).append("}"); - if (canMaterialize()) { - buf.append(" (remat:").append(getMaterializedValue().toString()).append(")"); - } - return buf.toString(); - } - - List getSplitChildren() { - return Collections.unmodifiableList(splitChildren); - } -} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/IntervalWalker.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/IntervalWalker.java Sat Feb 07 02:34:43 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,284 +0,0 @@ -/* - * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.compiler.alloc; - -import com.oracle.graal.compiler.alloc.Interval.RegisterBinding; -import com.oracle.graal.compiler.alloc.Interval.RegisterBindingLists; -import com.oracle.graal.compiler.alloc.Interval.State; -import com.oracle.graal.debug.*; - -/** - */ -public class IntervalWalker { - - protected final LinearScan allocator; - - /** - * Sorted list of intervals, not live before the current position. - */ - protected RegisterBindingLists unhandledLists; - - /** - * Sorted list of intervals, live at the current position. 
- */ - protected RegisterBindingLists activeLists; - - /** - * Sorted list of intervals in a life time hole at the current position. - */ - protected RegisterBindingLists inactiveLists; - - /** - * The current position (intercept point through the intervals). - */ - protected int currentPosition; - - /** - * The binding of the current interval being processed. - */ - protected RegisterBinding currentBinding; - - /** - * Processes the {@code currentInterval} interval in an attempt to allocate a physical register - * to it and thus allow it to be moved to a list of {@linkplain #activeLists active} intervals. - * - * @return {@code true} if a register was allocated to the {@code currentInterval} interval - */ - protected boolean activateCurrent(@SuppressWarnings({"unused"}) Interval currentInterval) { - return true; - } - - void walkBefore(int lirOpId) { - walkTo(lirOpId - 1); - } - - void walk() { - walkTo(Integer.MAX_VALUE); - } - - /** - * Creates a new interval walker. - * - * @param allocator the register allocator context - * @param unhandledFixed the list of unhandled {@linkplain RegisterBinding#Fixed fixed} - * intervals - * @param unhandledAny the list of unhandled {@linkplain RegisterBinding#Any non-fixed} - * intervals - */ - IntervalWalker(LinearScan allocator, Interval unhandledFixed, Interval unhandledAny) { - this.allocator = allocator; - - unhandledLists = new RegisterBindingLists(unhandledFixed, unhandledAny, Interval.EndMarker); - activeLists = new RegisterBindingLists(Interval.EndMarker, Interval.EndMarker, Interval.EndMarker); - inactiveLists = new RegisterBindingLists(Interval.EndMarker, Interval.EndMarker, Interval.EndMarker); - currentPosition = -1; - } - - protected void removeFromList(Interval interval) { - if (interval.state == State.Active) { - activeLists.remove(RegisterBinding.Any, interval); - } else { - assert interval.state == State.Inactive : "invalid state"; - inactiveLists.remove(RegisterBinding.Any, interval); - } - } - - private void walkTo(State state, int from) { - assert state == State.Active || state == State.Inactive : "wrong state"; - for (RegisterBinding binding : RegisterBinding.VALUES) { - Interval prevprev = null; - Interval prev = (state == State.Active) ? 
activeLists.get(binding) : inactiveLists.get(binding); - Interval next = prev; - while (next.currentFrom() <= from) { - Interval cur = next; - next = cur.next; - - boolean rangeHasChanged = false; - while (cur.currentTo() <= from) { - cur.nextRange(); - rangeHasChanged = true; - } - - // also handle move from inactive list to active list - rangeHasChanged = rangeHasChanged || (state == State.Inactive && cur.currentFrom() <= from); - - if (rangeHasChanged) { - // remove cur from list - if (prevprev == null) { - if (state == State.Active) { - activeLists.set(binding, next); - } else { - inactiveLists.set(binding, next); - } - } else { - prevprev.next = next; - } - prev = next; - if (cur.currentAtEnd()) { - // move to handled state (not maintained as a list) - cur.state = State.Handled; - intervalMoved(cur, state, State.Handled); - } else if (cur.currentFrom() <= from) { - // sort into active list - activeLists.addToListSortedByCurrentFromPositions(binding, cur); - cur.state = State.Active; - if (prev == cur) { - assert state == State.Active : "check"; - prevprev = prev; - prev = cur.next; - } - intervalMoved(cur, state, State.Active); - } else { - // sort into inactive list - inactiveLists.addToListSortedByCurrentFromPositions(binding, cur); - cur.state = State.Inactive; - if (prev == cur) { - assert state == State.Inactive : "check"; - prevprev = prev; - prev = cur.next; - } - intervalMoved(cur, state, State.Inactive); - } - } else { - prevprev = prev; - prev = cur.next; - } - } - } - } - - /** - * Get the next interval from {@linkplain #unhandledLists} which starts before or at - * {@code toOpId}. The returned interval is removed and {@link #currentBinding} is set. - * - * @postcondition all intervals in {@linkplain #unhandledLists} start after {@code toOpId}. - * - * @return The next interval or null if there is no {@linkplain #unhandledLists unhandled} - * interval at position {@code toOpId}. - */ - private Interval nextInterval(int toOpId) { - RegisterBinding binding; - Interval any = unhandledLists.any; - Interval fixed = unhandledLists.fixed; - - if (any != Interval.EndMarker) { - // intervals may start at same position . prefer fixed interval - binding = fixed != Interval.EndMarker && fixed.from() <= any.from() ? RegisterBinding.Fixed : RegisterBinding.Any; - - assert binding == RegisterBinding.Fixed && fixed.from() <= any.from() || binding == RegisterBinding.Any && any.from() <= fixed.from() : "wrong interval!!!"; - assert any == Interval.EndMarker || fixed == Interval.EndMarker || any.from() != fixed.from() || binding == RegisterBinding.Fixed : "if fixed and any-Interval start at same position, fixed must be processed first"; - - } else if (fixed != Interval.EndMarker) { - binding = RegisterBinding.Fixed; - } else { - return null; - } - Interval currentInterval = unhandledLists.get(binding); - - if (toOpId < currentInterval.from()) { - return null; - } - - currentBinding = binding; - unhandledLists.set(binding, currentInterval.next); - currentInterval.next = Interval.EndMarker; - currentInterval.rewindRange(); - return currentInterval; - } - - /** - * Walk up to {@code toOpId}. - * - * @postcondition {@link #currentPosition} is set to {@code toOpId}, {@link #activeLists} and - * {@link #inactiveLists} are populated and {@link Interval#state}s are up to - * date. 
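// Illustrative sketch (not part of this changeset): the selection rule in
// IntervalWalker.nextInterval(...) above. The walker looks at the heads of the unhandled "fixed"
// and "any" lists and, when both start at the same position, processes the fixed interval first.
// The names below are hypothetical; Integer.MAX_VALUE models the end-of-list sentinel.
public final class NextIntervalSketch {

    enum Binding {
        Fixed,
        Any
    }

    // Returns which list to take the next interval from, or null if both lists are exhausted.
    static Binding pickNext(int fixedFrom, int anyFrom) {
        boolean fixedEmpty = fixedFrom == Integer.MAX_VALUE;
        boolean anyEmpty = anyFrom == Integer.MAX_VALUE;
        if (anyEmpty && fixedEmpty) {
            return null;
        }
        if (anyEmpty) {
            return Binding.Fixed;
        }
        // Ties go to the fixed interval so platform-required registers are handled first.
        return (!fixedEmpty && fixedFrom <= anyFrom) ? Binding.Fixed : Binding.Any;
    }

    public static void main(String[] args) {
        System.out.println(pickNext(4, 8));                                  // Fixed: starts earlier
        System.out.println(pickNext(8, 8));                                  // Fixed: tie goes to Fixed
        System.out.println(pickNext(Integer.MAX_VALUE, 8));                  // Any: no fixed interval left
        System.out.println(pickNext(Integer.MAX_VALUE, Integer.MAX_VALUE));  // null: both exhausted
    }
}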
- */ - protected void walkTo(int toOpId) { - assert currentPosition <= toOpId : "can not walk backwards"; - for (Interval currentInterval = nextInterval(toOpId); currentInterval != null; currentInterval = nextInterval(toOpId)) { - int opId = currentInterval.from(); - - // set currentPosition prior to call of walkTo - currentPosition = opId; - - // update unhandled stack intervals - updateUnhandledStackIntervals(opId); - - // call walkTo even if currentPosition == id - walkTo(State.Active, opId); - walkTo(State.Inactive, opId); - - try (Indent indent = Debug.logAndIndent("walk to op %d", opId)) { - currentInterval.state = State.Active; - if (activateCurrent(currentInterval)) { - activeLists.addToListSortedByCurrentFromPositions(currentBinding, currentInterval); - intervalMoved(currentInterval, State.Unhandled, State.Active); - } - } - } - // set currentPosition prior to call of walkTo - currentPosition = toOpId; - - if (currentPosition <= allocator.maxOpId()) { - // update unhandled stack intervals - updateUnhandledStackIntervals(toOpId); - - // call walkTo if still in range - walkTo(State.Active, toOpId); - walkTo(State.Inactive, toOpId); - } - } - - private void intervalMoved(Interval interval, State from, State to) { - // intervalMoved() is called whenever an interval moves from one interval list to another. - // In the implementation of this method it is prohibited to move the interval to any list. - if (Debug.isLogEnabled()) { - Debug.log("interval moved from %s to %s: %s", from, to, interval.logString(allocator)); - } - } - - /** - * Move {@linkplain #unhandledLists unhandled} stack intervals to - * {@linkplain IntervalWalker #activeLists active}. - * - * Note that for {@linkplain RegisterBinding#Fixed fixed} and {@linkplain RegisterBinding#Any - * any} intervals this is done in {@link #nextInterval(int)}. - */ - private void updateUnhandledStackIntervals(int opId) { - Interval currentInterval = unhandledLists.get(RegisterBinding.Stack); - while (currentInterval != Interval.EndMarker && currentInterval.from() <= opId) { - Interval next = currentInterval.next; - if (currentInterval.to() > opId) { - currentInterval.state = State.Active; - activeLists.addToListSortedByCurrentFromPositions(RegisterBinding.Stack, currentInterval); - intervalMoved(currentInterval, State.Unhandled, State.Active); - } else { - currentInterval.state = State.Handled; - intervalMoved(currentInterval, State.Unhandled, State.Handled); - } - currentInterval = next; - } - unhandledLists.set(RegisterBinding.Stack, currentInterval); - } - -} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScan.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScan.java Sat Feb 07 02:34:43 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,2197 +0,0 @@ -/* - * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.compiler.alloc; - -import static com.oracle.graal.api.code.CodeUtil.*; -import static com.oracle.graal.api.code.ValueUtil.*; -import static com.oracle.graal.compiler.GraalDebugConfig.*; -import static com.oracle.graal.compiler.common.cfg.AbstractControlFlowGraph.*; -import static com.oracle.graal.lir.LIRValueUtil.*; - -import java.util.*; - -import com.oracle.graal.alloc.*; -import com.oracle.graal.api.code.*; -import com.oracle.graal.api.meta.*; -import com.oracle.graal.compiler.alloc.Interval.RegisterBinding; -import com.oracle.graal.compiler.alloc.Interval.RegisterPriority; -import com.oracle.graal.compiler.alloc.Interval.SpillState; -import com.oracle.graal.compiler.common.*; -import com.oracle.graal.compiler.common.cfg.*; -import com.oracle.graal.compiler.gen.*; -import com.oracle.graal.debug.*; -import com.oracle.graal.debug.Debug.Scope; -import com.oracle.graal.lir.*; -import com.oracle.graal.lir.LIRInstruction.OperandFlag; -import com.oracle.graal.lir.LIRInstruction.OperandMode; -import com.oracle.graal.lir.StandardOp.MoveOp; -import com.oracle.graal.lir.framemap.*; -import com.oracle.graal.lir.gen.*; -import com.oracle.graal.nodes.*; -import com.oracle.graal.options.*; -import com.oracle.graal.phases.util.*; - -/** - * An implementation of the linear scan register allocator algorithm described in "Optimized Interval Splitting in a Linear Scan Register Allocator" by Christian Wimmer and - * Hanspeter Moessenboeck. - */ -public final class LinearScan { - - final TargetDescription target; - final LIRGenerationResult res; - final LIR ir; - final FrameMapBuilder frameMapBuilder; - final RegisterAttributes[] registerAttributes; - final Register[] registers; - - final boolean callKillsRegisters; - - public static final int DOMINATOR_SPILL_MOVE_ID = -2; - private static final int SPLIT_INTERVALS_CAPACITY_RIGHT_SHIFT = 1; - - public static class Options { - // @formatter:off - @Option(help = "Enable spill position optimization", type = OptionType.Debug) - public static final OptionValue LSRAOptimizeSpillPosition = new OptionValue<>(true); - // @formatter:on - } - - public static class BlockData { - - /** - * Bit map specifying which operands are live upon entry to this block. These are values - * used in this block or any of its successors where such value are not defined in this - * block. The bit index of an operand is its {@linkplain LinearScan#operandNumber(Value) - * operand number}. - */ - public BitSet liveIn; - - /** - * Bit map specifying which operands are live upon exit from this block. These are values - * used in a successor block that are either defined in this block or were live upon entry - * to this block. The bit index of an operand is its - * {@linkplain LinearScan#operandNumber(Value) operand number}. - */ - public BitSet liveOut; - - /** - * Bit map specifying which operands are used (before being defined) in this block. That is, - * these are the values that are live upon entry to the block. The bit index of an operand - * is its {@linkplain LinearScan#operandNumber(Value) operand number}. 
- */ - public BitSet liveGen; - - /** - * Bit map specifying which operands are defined/overwritten in this block. The bit index of - * an operand is its {@linkplain LinearScan#operandNumber(Value) operand number}. - */ - public BitSet liveKill; - } - - public final BlockMap blockData; - - /** - * List of blocks in linear-scan order. This is only correct as long as the CFG does not change. - */ - final List> sortedBlocks; - - /** - * Map from {@linkplain #operandNumber(Value) operand numbers} to intervals. - */ - Interval[] intervals; - - /** - * The number of valid entries in {@link #intervals}. - */ - int intervalsSize; - - /** - * The index of the first entry in {@link #intervals} for a - * {@linkplain #createDerivedInterval(Interval) derived interval}. - */ - int firstDerivedIntervalIndex = -1; - - /** - * Intervals sorted by {@link Interval#from()}. - */ - Interval[] sortedIntervals; - - /** - * Map from an instruction {@linkplain LIRInstruction#id id} to the instruction. Entries should - * be retrieved with {@link #instructionForId(int)} as the id is not simply an index into this - * array. - */ - LIRInstruction[] opIdToInstructionMap; - - /** - * Map from an instruction {@linkplain LIRInstruction#id id} to the {@linkplain AbstractBlock - * block} containing the instruction. Entries should be retrieved with {@link #blockForId(int)} - * as the id is not simply an index into this array. - */ - AbstractBlock[] opIdToBlockMap; - - /** - * Bit set for each variable that is contained in each loop. - */ - BitMap2D intervalInLoop; - - /** - * The {@linkplain #operandNumber(Value) number} of the first variable operand allocated. - */ - private final int firstVariableNumber; - - public LinearScan(TargetDescription target, LIRGenerationResult res) { - this.target = target; - this.res = res; - this.ir = res.getLIR(); - this.frameMapBuilder = res.getFrameMapBuilder(); - this.sortedBlocks = ir.linearScanOrder(); - this.registerAttributes = frameMapBuilder.getRegisterConfig().getAttributesMap(); - - this.registers = target.arch.getRegisters(); - this.firstVariableNumber = registers.length; - this.blockData = new BlockMap<>(ir.getControlFlowGraph()); - - // If all allocatable registers are caller saved, then no registers are live across a call - // site. The register allocator can save time not trying to find a register at a call site. - this.callKillsRegisters = this.frameMapBuilder.getRegisterConfig().areAllAllocatableRegistersCallerSaved(); - } - - public int getFirstLirInstructionId(AbstractBlock block) { - int result = ir.getLIRforBlock(block).get(0).id(); - assert result >= 0; - return result; - } - - public int getLastLirInstructionId(AbstractBlock block) { - List instructions = ir.getLIRforBlock(block); - int result = instructions.get(instructions.size() - 1).id(); - assert result >= 0; - return result; - } - - public static boolean isVariableOrRegister(Value value) { - return isVariable(value) || isRegister(value); - } - - /** - * Converts an operand (variable or register) to an index in a flat address space covering all - * the {@linkplain Variable variables} and {@linkplain RegisterValue registers} being processed - * by this allocator. - */ - private int operandNumber(Value operand) { - if (isRegister(operand)) { - int number = asRegister(operand).number; - assert number < firstVariableNumber; - return number; - } - assert isVariable(operand) : operand; - return firstVariableNumber + ((Variable) operand).index; - } - - /** - * Gets the number of operands. 
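As an illustration of the flat operand numbering described above, here is a hypothetical, stand-alone sketch (not code from this patch): registers keep their own numbers and variables are shifted up by the register count, which plays the role of firstVariableNumber.

    // Hypothetical sketch: map registers and variables into one flat index space.
    final class OperandNumberingSketch {
        private final int registerCount;             // plays the role of firstVariableNumber

        OperandNumberingSketch(int registerCount) {
            this.registerCount = registerCount;
        }

        int numberForRegister(int registerNumber) {
            assert registerNumber < registerCount;
            return registerNumber;                   // registers keep their own number
        }

        int numberForVariable(int variableIndex) {
            return registerCount + variableIndex;    // variables start after the registers
        }

        public static void main(String[] args) {
            OperandNumberingSketch numbering = new OperandNumberingSketch(16);
            System.out.println(numbering.numberForRegister(3));   // prints 3
            System.out.println(numbering.numberForVariable(0));   // prints 16
        }
    }
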
This value will increase by 1 for new variable. - */ - private int operandSize() { - return firstVariableNumber + ir.numVariables(); - } - - /** - * Gets the highest operand number for a register operand. This value will never change. - */ - public int maxRegisterNumber() { - return firstVariableNumber - 1; - } - - static final IntervalPredicate IS_PRECOLORED_INTERVAL = new IntervalPredicate() { - - @Override - public boolean apply(Interval i) { - return isRegister(i.operand); - } - }; - - static final IntervalPredicate IS_VARIABLE_INTERVAL = new IntervalPredicate() { - - @Override - public boolean apply(Interval i) { - return isVariable(i.operand); - } - }; - - static final IntervalPredicate IS_STACK_INTERVAL = new IntervalPredicate() { - - @Override - public boolean apply(Interval i) { - return !isRegister(i.operand); - } - }; - - /** - * Gets an object describing the attributes of a given register according to this register - * configuration. - */ - RegisterAttributes attributes(Register reg) { - return registerAttributes[reg.number]; - } - - void assignSpillSlot(Interval interval) { - // assign the canonical spill slot of the parent (if a part of the interval - // is already spilled) or allocate a new spill slot - if (interval.canMaterialize()) { - interval.assignLocation(Value.ILLEGAL); - } else if (interval.spillSlot() != null) { - interval.assignLocation(interval.spillSlot()); - } else { - VirtualStackSlot slot = frameMapBuilder.allocateSpillSlot(interval.kind()); - interval.setSpillSlot(slot); - interval.assignLocation(slot); - } - } - - /** - * Creates a new interval. - * - * @param operand the operand for the interval - * @return the created interval - */ - Interval createInterval(AllocatableValue operand) { - assert isLegal(operand); - int operandNumber = operandNumber(operand); - Interval interval = new Interval(operand, operandNumber); - assert operandNumber < intervalsSize; - assert intervals[operandNumber] == null; - intervals[operandNumber] = interval; - return interval; - } - - /** - * Creates an interval as a result of splitting or spilling another interval. - * - * @param source an interval being split of spilled - * @return a new interval derived from {@code source} - */ - Interval createDerivedInterval(Interval source) { - if (firstDerivedIntervalIndex == -1) { - firstDerivedIntervalIndex = intervalsSize; - } - if (intervalsSize == intervals.length) { - intervals = Arrays.copyOf(intervals, intervals.length + (intervals.length >> SPLIT_INTERVALS_CAPACITY_RIGHT_SHIFT)); - } - intervalsSize++; - Variable variable = new Variable(source.kind(), ir.nextVariable()); - - Interval interval = createInterval(variable); - assert intervals[intervalsSize - 1] == interval; - return interval; - } - - // access to block list (sorted in linear scan order) - int blockCount() { - return sortedBlocks.size(); - } - - AbstractBlock blockAt(int index) { - return sortedBlocks.get(index); - } - - /** - * Gets the size of the {@link BlockData#liveIn} and {@link BlockData#liveOut} sets for a basic - * block. These sets do not include any operands allocated as a result of creating - * {@linkplain #createDerivedInterval(Interval) derived intervals}. - */ - int liveSetSize() { - return firstDerivedIntervalIndex == -1 ? 
operandSize() : firstDerivedIntervalIndex; - } - - int numLoops() { - return ir.getControlFlowGraph().getLoops().size(); - } - - boolean isIntervalInLoop(int interval, int loop) { - return intervalInLoop.at(interval, loop); - } - - Interval intervalFor(int operandNumber) { - return intervals[operandNumber]; - } - - Interval intervalFor(Value operand) { - int operandNumber = operandNumber(operand); - assert operandNumber < intervalsSize; - return intervals[operandNumber]; - } - - Interval getOrCreateInterval(AllocatableValue operand) { - Interval ret = intervalFor(operand); - if (ret == null) { - return createInterval(operand); - } else { - return ret; - } - } - - /** - * Gets the highest instruction id allocated by this object. - */ - int maxOpId() { - assert opIdToInstructionMap.length > 0 : "no operations"; - return (opIdToInstructionMap.length - 1) << 1; - } - - /** - * Converts an {@linkplain LIRInstruction#id instruction id} to an instruction index. All LIR - * instructions in a method have an index one greater than their linear-scan order predecesor - * with the first instruction having an index of 0. - */ - static int opIdToIndex(int opId) { - return opId >> 1; - } - - /** - * Retrieves the {@link LIRInstruction} based on its {@linkplain LIRInstruction#id id}. - * - * @param opId an instruction {@linkplain LIRInstruction#id id} - * @return the instruction whose {@linkplain LIRInstruction#id} {@code == id} - */ - LIRInstruction instructionForId(int opId) { - assert isEven(opId) : "opId not even"; - LIRInstruction instr = opIdToInstructionMap[opIdToIndex(opId)]; - assert instr.id() == opId; - return instr; - } - - /** - * Gets the block containing a given instruction. - * - * @param opId an instruction {@linkplain LIRInstruction#id id} - * @return the block containing the instruction denoted by {@code opId} - */ - AbstractBlock blockForId(int opId) { - assert opIdToBlockMap.length > 0 && opId >= 0 && opId <= maxOpId() + 1 : "opId out of range"; - return opIdToBlockMap[opIdToIndex(opId)]; - } - - boolean isBlockBegin(int opId) { - return opId == 0 || blockForId(opId) != blockForId(opId - 1); - } - - boolean coversBlockBegin(int opId1, int opId2) { - return blockForId(opId1) != blockForId(opId2); - } - - /** - * Determines if an {@link LIRInstruction} destroys all caller saved registers. - * - * @param opId an instruction {@linkplain LIRInstruction#id id} - * @return {@code true} if the instruction denoted by {@code id} destroys all caller saved - * registers. - */ - boolean hasCall(int opId) { - assert isEven(opId) : "opId not even"; - return instructionForId(opId).destroysCallerSavedRegisters(); - } - - /** - * Eliminates moves from register to stack if the stack slot is known to be correct. 
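The even instruction-id scheme behind maxOpId(), opIdToIndex() and instructionForId() can be shown with a small, self-contained sketch (hypothetical class and data, not from this patch): ids are handed out in steps of two, so the table index is simply id >> 1.

    // Hypothetical sketch of the even opId numbering and the id -> index mapping.
    public final class OpIdMappingSketch {
        public static void main(String[] args) {
            String[] instructions = {"label", "move", "add", "jump"};
            int[] opIds = new int[instructions.length];
            for (int index = 0; index < instructions.length; index++) {
                opIds[index] = index * 2;                     // ids are handed out in steps of two
            }
            int maxOpId = (instructions.length - 1) << 1;     // highest id, as in maxOpId()
            int someOpId = opIds[2];
            System.out.println("maxOpId = " + maxOpId);
            System.out.println("op " + someOpId + " is " + instructions[someOpId >> 1]);  // opIdToIndex
        }
    }
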
- */ - void changeSpillDefinitionPos(Interval interval, int defPos) { - assert interval.isSplitParent() : "can only be called for split parents"; - - switch (interval.spillState()) { - case NoDefinitionFound: - assert interval.spillDefinitionPos() == -1 : "must no be set before"; - interval.setSpillDefinitionPos(defPos); - interval.setSpillState(SpillState.NoSpillStore); - break; - - case NoSpillStore: - assert defPos <= interval.spillDefinitionPos() : "positions are processed in reverse order when intervals are created"; - if (defPos < interval.spillDefinitionPos() - 2) { - // second definition found, so no spill optimization possible for this interval - interval.setSpillState(SpillState.NoOptimization); - } else { - // two consecutive definitions (because of two-operand LIR form) - assert blockForId(defPos) == blockForId(interval.spillDefinitionPos()) : "block must be equal"; - } - break; - - case NoOptimization: - // nothing to do - break; - - default: - throw new BailoutException("other states not allowed at this time"); - } - } - - // called during register allocation - void changeSpillState(Interval interval, int spillPos) { - switch (interval.spillState()) { - case NoSpillStore: { - int defLoopDepth = blockForId(interval.spillDefinitionPos()).getLoopDepth(); - int spillLoopDepth = blockForId(spillPos).getLoopDepth(); - - if (defLoopDepth < spillLoopDepth) { - // the loop depth of the spilling position is higher then the loop depth - // at the definition of the interval . move write to memory out of loop. - if (Options.LSRAOptimizeSpillPosition.getValue()) { - // find best spill position in dominator the tree - interval.setSpillState(SpillState.SpillInDominator); - } else { - // store at definition of the interval - interval.setSpillState(SpillState.StoreAtDefinition); - } - } else { - // the interval is currently spilled only once, so for now there is no - // reason to store the interval at the definition - interval.setSpillState(SpillState.OneSpillStore); - } - break; - } - - case OneSpillStore: { - if (Options.LSRAOptimizeSpillPosition.getValue()) { - // the interval is spilled more then once - interval.setSpillState(SpillState.SpillInDominator); - } else { - // it is better to store it to - // memory at the definition - interval.setSpillState(SpillState.StoreAtDefinition); - } - break; - } - - case SpillInDominator: - case StoreAtDefinition: - case StartInMemory: - case NoOptimization: - case NoDefinitionFound: - // nothing to do - break; - - default: - throw new BailoutException("other states not allowed at this time"); - } - } - - abstract static class IntervalPredicate { - - abstract boolean apply(Interval i); - } - - private static final IntervalPredicate mustStoreAtDefinition = new IntervalPredicate() { - - @Override - public boolean apply(Interval i) { - return i.isSplitParent() && i.spillState() == SpillState.StoreAtDefinition; - } - }; - - // called once before assignment of register numbers - void eliminateSpillMoves() { - try (Indent indent = Debug.logAndIndent("Eliminating unnecessary spill moves")) { - - // collect all intervals that must be stored after their definition. 
- // the list is sorted by Interval.spillDefinitionPos - Interval interval; - interval = createUnhandledLists(mustStoreAtDefinition, null).first; - if (DetailedAsserts.getValue()) { - checkIntervals(interval); - } - - LIRInsertionBuffer insertionBuffer = new LIRInsertionBuffer(); - for (AbstractBlock block : sortedBlocks) { - List instructions = ir.getLIRforBlock(block); - int numInst = instructions.size(); - - // iterate all instructions of the block. skip the first - // because it is always a label - for (int j = 1; j < numInst; j++) { - LIRInstruction op = instructions.get(j); - int opId = op.id(); - - if (opId == -1) { - MoveOp move = (MoveOp) op; - // remove move from register to stack if the stack slot is guaranteed to be - // correct. - // only moves that have been inserted by LinearScan can be removed. - assert isVariable(move.getResult()) : "LinearScan inserts only moves to variables"; - - Interval curInterval = intervalFor(move.getResult()); - - if (!isRegister(curInterval.location()) && curInterval.alwaysInMemory()) { - // move target is a stack slot that is always correct, so eliminate - // instruction - if (Debug.isLogEnabled()) { - Debug.log("eliminating move from interval %d to %d", operandNumber(move.getInput()), operandNumber(move.getResult())); - } - // null-instructions are deleted by assignRegNum - instructions.set(j, null); - } - - } else { - // insert move from register to stack just after - // the beginning of the interval - assert interval == Interval.EndMarker || interval.spillDefinitionPos() >= opId : "invalid order"; - assert interval == Interval.EndMarker || (interval.isSplitParent() && interval.spillState() == SpillState.StoreAtDefinition) : "invalid interval"; - - while (interval != Interval.EndMarker && interval.spillDefinitionPos() == opId) { - if (!interval.canMaterialize()) { - if (!insertionBuffer.initialized()) { - // prepare insertion buffer (appended when all instructions in - // the block are processed) - insertionBuffer.init(instructions); - } - - AllocatableValue fromLocation = interval.location(); - AllocatableValue toLocation = canonicalSpillOpr(interval); - - assert isRegister(fromLocation) : "from operand must be a register but is: " + fromLocation + " toLocation=" + toLocation + " spillState=" + interval.spillState(); - assert isStackSlotValue(toLocation) : "to operand must be a stack slot"; - - insertionBuffer.append(j + 1, ir.getSpillMoveFactory().createMove(toLocation, fromLocation)); - - Debug.log("inserting move after definition of interval %d to stack slot %s at opId %d", interval.operandNumber, interval.spillSlot(), opId); - } - interval = interval.next; - } - } - } // end of instruction iteration - - if (insertionBuffer.initialized()) { - insertionBuffer.finish(); - } - } // end of block iteration - - assert interval == Interval.EndMarker : "missed an interval"; - } - } - - private static void checkIntervals(Interval interval) { - Interval prev = null; - Interval temp = interval; - while (temp != Interval.EndMarker) { - assert temp.spillDefinitionPos() > 0 : "invalid spill definition pos"; - if (prev != null) { - assert temp.from() >= prev.from() : "intervals not sorted"; - assert temp.spillDefinitionPos() >= prev.spillDefinitionPos() : "when intervals are sorted by from : then they must also be sorted by spillDefinitionPos"; - } - - assert temp.spillSlot() != null || temp.canMaterialize() : "interval has no spill slot assigned"; - assert temp.spillDefinitionPos() >= temp.from() : "invalid order"; - assert temp.spillDefinitionPos() <= 
temp.from() + 2 : "only intervals defined once at their start-pos can be optimized"; - - Debug.log("interval %d (from %d to %d) must be stored at %d", temp.operandNumber, temp.from(), temp.to(), temp.spillDefinitionPos()); - - prev = temp; - temp = temp.next; - } - } - - /** - * Numbers all instructions in all blocks. The numbering follows the - * {@linkplain ComputeBlockOrder linear scan order}. - */ - void numberInstructions() { - - intervalsSize = operandSize(); - intervals = new Interval[intervalsSize + (intervalsSize >> SPLIT_INTERVALS_CAPACITY_RIGHT_SHIFT)]; - - ValueConsumer setVariableConsumer = (value, mode, flags) -> { - if (isVariable(value)) { - getOrCreateInterval(asVariable(value)); - } - }; - - // Assign IDs to LIR nodes and build a mapping, lirOps, from ID to LIRInstruction node. - int numInstructions = 0; - for (AbstractBlock block : sortedBlocks) { - numInstructions += ir.getLIRforBlock(block).size(); - } - - // initialize with correct length - opIdToInstructionMap = new LIRInstruction[numInstructions]; - opIdToBlockMap = new AbstractBlock[numInstructions]; - - int opId = 0; - int index = 0; - for (AbstractBlock block : sortedBlocks) { - blockData.put(block, new BlockData()); - - List instructions = ir.getLIRforBlock(block); - - int numInst = instructions.size(); - for (int j = 0; j < numInst; j++) { - LIRInstruction op = instructions.get(j); - op.setId(opId); - - opIdToInstructionMap[index] = op; - opIdToBlockMap[index] = block; - assert instructionForId(opId) == op : "must match"; - - op.visitEachTemp(setVariableConsumer); - op.visitEachOutput(setVariableConsumer); - - index++; - opId += 2; // numbering of lirOps by two - } - } - assert index == numInstructions : "must match"; - assert (index << 1) == opId : "must match: " + (index << 1); - } - - /** - * Computes local live sets (i.e. {@link BlockData#liveGen} and {@link BlockData#liveKill}) - * separately for each block. 
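The gen/kill computation that computeLocalLiveSets performs per block can be summarised by a hypothetical sketch in which plain integer operand numbers stand in for values: an operand used before it is defined in the block enters liveGen, every definition enters liveKill.

    import java.util.BitSet;
    import java.util.List;

    // Hypothetical sketch of per-block gen/kill sets over plain operand numbers.
    final class LocalLiveSetSketch {
        final BitSet liveGen = new BitSet();
        final BitSet liveKill = new BitSet();

        /** inputs.get(i) / outputs.get(i) hold the operand numbers read / written by instruction i. */
        void compute(List<int[]> inputs, List<int[]> outputs) {
            for (int i = 0; i < inputs.size(); i++) {
                for (int use : inputs.get(i)) {
                    if (!liveKill.get(use)) {
                        liveGen.set(use);        // used before any definition in this block
                    }
                }
                for (int def : outputs.get(i)) {
                    liveKill.set(def);           // defined (or overwritten) in this block
                }
            }
        }
    }
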
- */ - void computeLocalLiveSets() { - int liveSize = liveSetSize(); - - intervalInLoop = new BitMap2D(operandSize(), numLoops()); - - // iterate all blocks - for (final AbstractBlock block : sortedBlocks) { - try (Indent indent = Debug.logAndIndent("compute local live sets for block %d", block.getId())) { - - final BitSet liveGen = new BitSet(liveSize); - final BitSet liveKill = new BitSet(liveSize); - - List instructions = ir.getLIRforBlock(block); - int numInst = instructions.size(); - - ValueConsumer useConsumer = (operand, mode, flags) -> { - if (isVariable(operand)) { - int operandNum = operandNumber(operand); - if (!liveKill.get(operandNum)) { - liveGen.set(operandNum); - Debug.log("liveGen for operand %d", operandNum); - } - if (block.getLoop() != null) { - intervalInLoop.setBit(operandNum, block.getLoop().getIndex()); - } - } - - if (DetailedAsserts.getValue()) { - verifyInput(block, liveKill, operand); - } - }; - ValueConsumer stateConsumer = (operand, mode, flags) -> { - if (isVariableOrRegister(operand)) { - int operandNum = operandNumber(operand); - if (!liveKill.get(operandNum)) { - liveGen.set(operandNum); - Debug.log("liveGen in state for operand %d", operandNum); - } - } - }; - ValueConsumer defConsumer = (operand, mode, flags) -> { - if (isVariable(operand)) { - int varNum = operandNumber(operand); - liveKill.set(varNum); - Debug.log("liveKill for operand %d", varNum); - if (block.getLoop() != null) { - intervalInLoop.setBit(varNum, block.getLoop().getIndex()); - } - } - - if (DetailedAsserts.getValue()) { - // fixed intervals are never live at block boundaries, so - // they need not be processed in live sets - // process them only in debug mode so that this can be checked - verifyTemp(liveKill, operand); - } - }; - - // iterate all instructions of the block - for (int j = 0; j < numInst; j++) { - final LIRInstruction op = instructions.get(j); - - try (Indent indent2 = Debug.logAndIndent("handle op %d", op.id())) { - op.visitEachInput(useConsumer); - op.visitEachAlive(useConsumer); - // Add uses of live locals from interpreter's point of view for proper debug - // information generation - op.visitEachState(stateConsumer); - op.visitEachTemp(defConsumer); - op.visitEachOutput(defConsumer); - } - } // end of instruction iteration - - BlockData blockSets = blockData.get(block); - blockSets.liveGen = liveGen; - blockSets.liveKill = liveKill; - blockSets.liveIn = new BitSet(liveSize); - blockSets.liveOut = new BitSet(liveSize); - - Debug.log("liveGen B%d %s", block.getId(), blockSets.liveGen); - Debug.log("liveKill B%d %s", block.getId(), blockSets.liveKill); - - } - } // end of block iteration - } - - private void verifyTemp(BitSet liveKill, Value operand) { - // fixed intervals are never live at block boundaries, so - // they need not be processed in live sets - // process them only in debug mode so that this can be checked - if (isRegister(operand)) { - if (isProcessed(operand)) { - liveKill.set(operandNumber(operand)); - } - } - } - - private void verifyInput(AbstractBlock block, BitSet liveKill, Value operand) { - // fixed intervals are never live at block boundaries, so - // they need not be processed in live sets. - // this is checked by these assertions to be sure about it. - // the entry block may have incoming - // values in registers, which is ok. 
- if (isRegister(operand) && block != ir.getControlFlowGraph().getStartBlock()) { - if (isProcessed(operand)) { - assert liveKill.get(operandNumber(operand)) : "using fixed register that is not defined in this block"; - } - } - } - - /** - * Performs a backward dataflow analysis to compute global live sets (i.e. - * {@link BlockData#liveIn} and {@link BlockData#liveOut}) for each block. - */ - void computeGlobalLiveSets() { - try (Indent indent = Debug.logAndIndent("compute global live sets")) { - int numBlocks = blockCount(); - boolean changeOccurred; - boolean changeOccurredInBlock; - int iterationCount = 0; - BitSet liveOut = new BitSet(liveSetSize()); // scratch set for calculations - - // Perform a backward dataflow analysis to compute liveOut and liveIn for each block. - // The loop is executed until a fixpoint is reached (no changes in an iteration) - do { - changeOccurred = false; - - try (Indent indent2 = Debug.logAndIndent("new iteration %d", iterationCount)) { - - // iterate all blocks in reverse order - for (int i = numBlocks - 1; i >= 0; i--) { - AbstractBlock block = blockAt(i); - BlockData blockSets = blockData.get(block); - - changeOccurredInBlock = false; - - // liveOut(block) is the union of liveIn(sux), for successors sux of block - int n = block.getSuccessorCount(); - if (n > 0) { - liveOut.clear(); - // block has successors - if (n > 0) { - for (AbstractBlock successor : block.getSuccessors()) { - liveOut.or(blockData.get(successor).liveIn); - } - } - - if (!blockSets.liveOut.equals(liveOut)) { - // A change occurred. Swap the old and new live out - // sets to avoid copying. - BitSet temp = blockSets.liveOut; - blockSets.liveOut = liveOut; - liveOut = temp; - - changeOccurred = true; - changeOccurredInBlock = true; - } - } - - if (iterationCount == 0 || changeOccurredInBlock) { - // liveIn(block) is the union of liveGen(block) with (liveOut(block) & - // !liveKill(block)) - // note: liveIn has to be computed only in first iteration - // or if liveOut has changed! - BitSet liveIn = blockSets.liveIn; - liveIn.clear(); - liveIn.or(blockSets.liveOut); - liveIn.andNot(blockSets.liveKill); - liveIn.or(blockSets.liveGen); - - Debug.log("block %d: livein = %s, liveout = %s", block.getId(), liveIn, blockSets.liveOut); - } - } - iterationCount++; - - if (changeOccurred && iterationCount > 50) { - throw new BailoutException("too many iterations in computeGlobalLiveSets"); - } - } - } while (changeOccurred); - - if (DetailedAsserts.getValue()) { - verifyLiveness(); - } - - // check that the liveIn set of the first block is empty - AbstractBlock startBlock = ir.getControlFlowGraph().getStartBlock(); - if (blockData.get(startBlock).liveIn.cardinality() != 0) { - if (DetailedAsserts.getValue()) { - reportFailure(numBlocks); - } - // bailout if this occurs in product mode. 
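The fixpoint iteration above boils down to two equations: liveOut(B) is the union of liveIn(S) over the successors S, and liveIn(B) = liveGen(B) | (liveOut(B) & ~liveKill(B)). A self-contained illustrative sketch, with blocks identified by index and successors given as an adjacency list (hypothetical helper, not from this patch):

    import java.util.BitSet;

    // Hypothetical sketch of the backward dataflow equations:
    //   liveOut(B) = union of liveIn(S) over successors S
    //   liveIn(B)  = liveGen(B) | (liveOut(B) & ~liveKill(B))
    final class GlobalLiveSetSketch {
        static void solve(int[][] successors, BitSet[] liveGen, BitSet[] liveKill, BitSet[] liveIn, BitSet[] liveOut) {
            boolean changed;
            do {
                changed = false;
                for (int b = successors.length - 1; b >= 0; b--) {   // reverse order speeds up convergence
                    BitSet out = new BitSet();
                    for (int s : successors[b]) {
                        out.or(liveIn[s]);
                    }
                    BitSet in = (BitSet) out.clone();
                    in.andNot(liveKill[b]);
                    in.or(liveGen[b]);
                    if (!in.equals(liveIn[b]) || !out.equals(liveOut[b])) {
                        liveIn[b] = in;
                        liveOut[b] = out;
                        changed = true;
                    }
                }
            } while (changed);
        }
    }

Visiting the blocks in reverse order is only a convergence heuristic; the fixpoint itself does not depend on the iteration order.
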
- throw new GraalInternalError("liveIn set of first block must be empty: " + blockData.get(startBlock).liveIn); - } - } - } - - private static NodeLIRBuilder getNodeLIRGeneratorFromDebugContext() { - if (Debug.isEnabled()) { - NodeLIRBuilder lirGen = Debug.contextLookup(NodeLIRBuilder.class); - assert lirGen != null; - return lirGen; - } - return null; - } - - private static ValueNode getValueForOperandFromDebugContext(Value value) { - NodeLIRBuilder gen = getNodeLIRGeneratorFromDebugContext(); - if (gen != null) { - return gen.valueForOperand(value); - } - return null; - } - - private void reportFailure(int numBlocks) { - try (Scope s = Debug.forceLog()) { - try (Indent indent = Debug.logAndIndent("report failure")) { - - BitSet startBlockLiveIn = blockData.get(ir.getControlFlowGraph().getStartBlock()).liveIn; - try (Indent indent2 = Debug.logAndIndent("Error: liveIn set of first block must be empty (when this fails, variables are used before they are defined):")) { - for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) { - Interval interval = intervalFor(operandNum); - if (interval != null) { - Value operand = interval.operand; - Debug.log("var %d; operand=%s; node=%s", operandNum, operand, getValueForOperandFromDebugContext(operand)); - } else { - Debug.log("var %d; missing operand", operandNum); - } - } - } - - // print some additional information to simplify debugging - for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) { - Interval interval = intervalFor(operandNum); - Value operand = null; - ValueNode valueForOperandFromDebugContext = null; - if (interval != null) { - operand = interval.operand; - valueForOperandFromDebugContext = getValueForOperandFromDebugContext(operand); - } - try (Indent indent2 = Debug.logAndIndent("---- Detailed information for var %d; operand=%s; node=%s ----", operandNum, operand, valueForOperandFromDebugContext)) { - - Deque> definedIn = new ArrayDeque<>(); - HashSet> usedIn = new HashSet<>(); - for (AbstractBlock block : sortedBlocks) { - if (blockData.get(block).liveGen.get(operandNum)) { - usedIn.add(block); - try (Indent indent3 = Debug.logAndIndent("used in block B%d", block.getId())) { - for (LIRInstruction ins : ir.getLIRforBlock(block)) { - try (Indent indent4 = Debug.logAndIndent("%d: %s", ins.id(), ins)) { - ins.forEachState((liveStateOperand, mode, flags) -> { - Debug.log("operand=%s", liveStateOperand); - return liveStateOperand; - }); - } - } - } - } - if (blockData.get(block).liveKill.get(operandNum)) { - definedIn.add(block); - try (Indent indent3 = Debug.logAndIndent("defined in block B%d", block.getId())) { - for (LIRInstruction ins : ir.getLIRforBlock(block)) { - Debug.log("%d: %s", ins.id(), ins); - } - } - } - } - - int[] hitCount = new int[numBlocks]; - - while (!definedIn.isEmpty()) { - AbstractBlock block = definedIn.removeFirst(); - usedIn.remove(block); - for (AbstractBlock successor : block.getSuccessors()) { - if (successor.isLoopHeader()) { - if (!block.isLoopEnd()) { - definedIn.add(successor); - } - } else { - if (++hitCount[successor.getId()] == successor.getPredecessorCount()) { - definedIn.add(successor); - } - } - } - } - try (Indent indent3 = Debug.logAndIndent("**** offending usages are in: ")) { - for (AbstractBlock block : usedIn) { - Debug.log("B%d", block.getId()); - } - } - } - } - } - } catch (Throwable e) { - throw Debug.handle(e); - } - } - - private void 
verifyLiveness() { - // check that fixed intervals are not live at block boundaries - // (live set must be empty at fixed intervals) - for (AbstractBlock block : sortedBlocks) { - for (int j = 0; j <= maxRegisterNumber(); j++) { - assert !blockData.get(block).liveIn.get(j) : "liveIn set of fixed register must be empty"; - assert !blockData.get(block).liveOut.get(j) : "liveOut set of fixed register must be empty"; - assert !blockData.get(block).liveGen.get(j) : "liveGen set of fixed register must be empty"; - } - } - } - - void addUse(AllocatableValue operand, int from, int to, RegisterPriority registerPriority, LIRKind kind) { - if (!isProcessed(operand)) { - return; - } - - Interval interval = getOrCreateInterval(operand); - if (!kind.equals(LIRKind.Illegal)) { - interval.setKind(kind); - } - - interval.addRange(from, to); - - // Register use position at even instruction id. - interval.addUsePos(to & ~1, registerPriority); - - Debug.log("add use: %s, from %d to %d (%s)", interval, from, to, registerPriority.name()); - } - - void addTemp(AllocatableValue operand, int tempPos, RegisterPriority registerPriority, LIRKind kind) { - if (!isProcessed(operand)) { - return; - } - - Interval interval = getOrCreateInterval(operand); - if (!kind.equals(LIRKind.Illegal)) { - interval.setKind(kind); - } - - interval.addRange(tempPos, tempPos + 1); - interval.addUsePos(tempPos, registerPriority); - interval.addMaterializationValue(null); - - Debug.log("add temp: %s tempPos %d (%s)", interval, tempPos, RegisterPriority.MustHaveRegister.name()); - } - - boolean isProcessed(Value operand) { - return !isRegister(operand) || attributes(asRegister(operand)).isAllocatable(); - } - - void addDef(AllocatableValue operand, LIRInstruction op, RegisterPriority registerPriority, LIRKind kind) { - if (!isProcessed(operand)) { - return; - } - int defPos = op.id(); - - Interval interval = getOrCreateInterval(operand); - if (!kind.equals(LIRKind.Illegal)) { - interval.setKind(kind); - } - - Range r = interval.first(); - if (r.from <= defPos) { - // Update the starting point (when a range is first created for a use, its - // start is the beginning of the current block until a def is encountered.) - r.from = defPos; - interval.addUsePos(defPos, registerPriority); - - } else { - // Dead value - make vacuous interval - // also add register priority for dead intervals - interval.addRange(defPos, defPos + 1); - interval.addUsePos(defPos, registerPriority); - Debug.log("Warning: def of operand %s at %d occurs without use", operand, defPos); - } - - changeSpillDefinitionPos(interval, defPos); - if (registerPriority == RegisterPriority.None && interval.spillState().ordinal() <= SpillState.StartInMemory.ordinal()) { - // detection of method-parameters and roundfp-results - interval.setSpillState(SpillState.StartInMemory); - } - interval.addMaterializationValue(LinearScan.getMaterializedValue(op, operand, interval)); - - Debug.log("add def: %s defPos %d (%s)", interval, defPos, registerPriority.name()); - } - - /** - * Determines the register priority for an instruction's output/result operand. - */ - static RegisterPriority registerPriorityOfOutputOperand(LIRInstruction op) { - if (op instanceof MoveOp) { - MoveOp move = (MoveOp) op; - if (optimizeMethodArgument(move.getInput())) { - return RegisterPriority.None; - } - } - - // all other operands require a register - return RegisterPriority.MustHaveRegister; - } - - /** - * Determines the priority which with an instruction's input operand will be allocated a - * register. 
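How a use and a later-visited definition shape an interval's first range, in the spirit of addUse and addDef above, can be sketched with a hypothetical class that keeps a single from/to pair instead of a real range list:

    // Hypothetical sketch: intervals are built while scanning a block backwards, so a use is
    // seen before its definition; the definition then trims the conservative range start.
    final class IntervalRangeSketch {
        int from = Integer.MAX_VALUE;   // no range yet
        int to = Integer.MIN_VALUE;

        /** A use seen while scanning backwards: conservatively extend back to the block start. */
        void addUse(int blockFrom, int usePos) {
            from = Math.min(from, blockFrom);
            to = Math.max(to, usePos);
        }

        /** The definition, visited after the uses: the range really starts here. */
        void addDef(int defPos) {
            if (from <= defPos) {
                from = defPos;          // trim the conservative start to the actual definition
            } else {
                from = defPos;          // dead value: give it a minimal one-position range
                to = defPos + 1;
            }
        }
    }
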
- */ - static RegisterPriority registerPriorityOfInputOperand(EnumSet flags) { - if (flags.contains(OperandFlag.STACK)) { - return RegisterPriority.ShouldHaveRegister; - } - // all other operands require a register - return RegisterPriority.MustHaveRegister; - } - - private static boolean optimizeMethodArgument(Value value) { - /* - * Object method arguments that are passed on the stack are currently not optimized because - * this requires that the runtime visits method arguments during stack walking. - */ - return isStackSlot(value) && asStackSlot(value).isInCallerFrame() && value.getKind() != Kind.Object; - } - - /** - * Optimizes moves related to incoming stack based arguments. The interval for the destination - * of such moves is assigned the stack slot (which is in the caller's frame) as its spill slot. - */ - void handleMethodArguments(LIRInstruction op) { - if (op instanceof MoveOp) { - MoveOp move = (MoveOp) op; - if (optimizeMethodArgument(move.getInput())) { - StackSlot slot = asStackSlot(move.getInput()); - if (DetailedAsserts.getValue()) { - assert op.id() > 0 : "invalid id"; - assert blockForId(op.id()).getPredecessorCount() == 0 : "move from stack must be in first block"; - assert isVariable(move.getResult()) : "result of move must be a variable"; - - Debug.log("found move from stack slot %s to %s", slot, move.getResult()); - } - - Interval interval = intervalFor(move.getResult()); - interval.setSpillSlot(slot); - interval.assignLocation(slot); - } - } - } - - void addRegisterHint(final LIRInstruction op, final Value targetValue, OperandMode mode, EnumSet flags, final boolean hintAtDef) { - if (flags.contains(OperandFlag.HINT) && isVariableOrRegister(targetValue)) { - - op.forEachRegisterHint(targetValue, mode, (registerHint, valueMode, valueFlags) -> { - if (isVariableOrRegister(registerHint)) { - Interval from = getOrCreateInterval((AllocatableValue) registerHint); - Interval to = getOrCreateInterval((AllocatableValue) targetValue); - - /* hints always point from def to use */ - if (hintAtDef) { - to.setLocationHint(from); - } else { - from.setLocationHint(to); - } - Debug.log("operation at opId %d: added hint from interval %d to %d", op.id(), from.operandNumber, to.operandNumber); - - return registerHint; - } - return null; - }); - } - } - - void buildIntervals() { - - try (Indent indent = Debug.logAndIndent("build intervals")) { - InstructionValueConsumer outputConsumer = (op, operand, mode, flags) -> { - if (isVariableOrRegister(operand)) { - addDef((AllocatableValue) operand, op, registerPriorityOfOutputOperand(op), operand.getLIRKind()); - addRegisterHint(op, operand, mode, flags, true); - } - }; - - InstructionValueConsumer tempConsumer = (op, operand, mode, flags) -> { - if (isVariableOrRegister(operand)) { - addTemp((AllocatableValue) operand, op.id(), RegisterPriority.MustHaveRegister, operand.getLIRKind()); - addRegisterHint(op, operand, mode, flags, false); - } - }; - - InstructionValueConsumer aliveConsumer = (op, operand, mode, flags) -> { - if (isVariableOrRegister(operand)) { - RegisterPriority p = registerPriorityOfInputOperand(flags); - int opId = op.id(); - int blockFrom = getFirstLirInstructionId((blockForId(opId))); - addUse((AllocatableValue) operand, blockFrom, opId + 1, p, operand.getLIRKind()); - addRegisterHint(op, operand, mode, flags, false); - } - }; - - InstructionValueConsumer inputConsumer = (op, operand, mode, flags) -> { - if (isVariableOrRegister(operand)) { - int opId = op.id(); - int blockFrom = 
getFirstLirInstructionId((blockForId(opId))); - RegisterPriority p = registerPriorityOfInputOperand(flags); - addUse((AllocatableValue) operand, blockFrom, opId, p, operand.getLIRKind()); - addRegisterHint(op, operand, mode, flags, false); - } - }; - - InstructionValueConsumer stateProc = (op, operand, mode, flags) -> { - if (isVariableOrRegister(operand)) { - int opId = op.id(); - int blockFrom = getFirstLirInstructionId((blockForId(opId))); - addUse((AllocatableValue) operand, blockFrom, opId + 1, RegisterPriority.None, operand.getLIRKind()); - } - }; - - // create a list with all caller-save registers (cpu, fpu, xmm) - Register[] callerSaveRegs = frameMapBuilder.getRegisterConfig().getCallerSaveRegisters(); - - // iterate all blocks in reverse order - for (int i = blockCount() - 1; i >= 0; i--) { - - AbstractBlock block = blockAt(i); - try (Indent indent2 = Debug.logAndIndent("handle block %d", block.getId())) { - - List instructions = ir.getLIRforBlock(block); - final int blockFrom = getFirstLirInstructionId(block); - int blockTo = getLastLirInstructionId(block); - - assert blockFrom == instructions.get(0).id(); - assert blockTo == instructions.get(instructions.size() - 1).id(); - - // Update intervals for operands live at the end of this block; - BitSet live = blockData.get(block).liveOut; - for (int operandNum = live.nextSetBit(0); operandNum >= 0; operandNum = live.nextSetBit(operandNum + 1)) { - assert live.get(operandNum) : "should not stop here otherwise"; - AllocatableValue operand = intervalFor(operandNum).operand; - Debug.log("live in %d: %s", operandNum, operand); - - addUse(operand, blockFrom, blockTo + 2, RegisterPriority.None, LIRKind.Illegal); - - // add special use positions for loop-end blocks when the - // interval is used anywhere inside this loop. It's possible - // that the block was part of a non-natural loop, so it might - // have an invalid loop index. - if (block.isLoopEnd() && block.getLoop() != null && isIntervalInLoop(operandNum, block.getLoop().getIndex())) { - intervalFor(operandNum).addUsePos(blockTo + 1, RegisterPriority.LiveAtLoopEnd); - } - } - - // iterate all instructions of the block in reverse order. - // definitions of intervals are processed before uses - for (int j = instructions.size() - 1; j >= 0; j--) { - final LIRInstruction op = instructions.get(j); - final int opId = op.id(); - - try (Indent indent3 = Debug.logAndIndent("handle inst %d: %s", opId, op)) { - - // add a temp range for each register if operation destroys - // caller-save registers - if (op.destroysCallerSavedRegisters()) { - for (Register r : callerSaveRegs) { - if (attributes(r).isAllocatable()) { - addTemp(r.asValue(), opId, RegisterPriority.None, LIRKind.Illegal); - } - } - Debug.log("operation destroys all caller-save registers"); - } - - op.visitEachOutput(outputConsumer); - op.visitEachTemp(tempConsumer); - op.visitEachAlive(aliveConsumer); - op.visitEachInput(inputConsumer); - - // Add uses of live locals from interpreter's point of view for proper - // debug information generation - // Treat these operands as temp values (if the live range is extended - // to a call site, the value would be in a register at - // the call otherwise) - op.visitEachState(stateProc); - - // special steps for some instructions (especially moves) - handleMethodArguments(op); - - } - - } // end of instruction iteration - } - } // end of block iteration - - // add the range [0, 1] to all fixed intervals. 
- // the register allocator need not handle unhandled fixed intervals - for (Interval interval : intervals) { - if (interval != null && isRegister(interval.operand)) { - interval.addRange(0, 1); - } - } - } - } - - // * Phase 5: actual register allocation - - private static boolean isSorted(Interval[] intervals) { - int from = -1; - for (Interval interval : intervals) { - assert interval != null; - assert from <= interval.from(); - from = interval.from(); - } - return true; - } - - static Interval addToList(Interval first, Interval prev, Interval interval) { - Interval newFirst = first; - if (prev != null) { - prev.next = interval; - } else { - newFirst = interval; - } - return newFirst; - } - - Interval.Pair createUnhandledLists(IntervalPredicate isList1, IntervalPredicate isList2) { - assert isSorted(sortedIntervals) : "interval list is not sorted"; - - Interval list1 = Interval.EndMarker; - Interval list2 = Interval.EndMarker; - - Interval list1Prev = null; - Interval list2Prev = null; - Interval v; - - int n = sortedIntervals.length; - for (int i = 0; i < n; i++) { - v = sortedIntervals[i]; - if (v == null) { - continue; - } - - if (isList1.apply(v)) { - list1 = addToList(list1, list1Prev, v); - list1Prev = v; - } else if (isList2 == null || isList2.apply(v)) { - list2 = addToList(list2, list2Prev, v); - list2Prev = v; - } - } - - if (list1Prev != null) { - list1Prev.next = Interval.EndMarker; - } - if (list2Prev != null) { - list2Prev.next = Interval.EndMarker; - } - - assert list1Prev == null || list1Prev.next == Interval.EndMarker : "linear list ends not with sentinel"; - assert list2Prev == null || list2Prev.next == Interval.EndMarker : "linear list ends not with sentinel"; - - return new Interval.Pair(list1, list2); - } - - void sortIntervalsBeforeAllocation() { - int sortedLen = 0; - for (Interval interval : intervals) { - if (interval != null) { - sortedLen++; - } - } - - Interval[] sortedList = new Interval[sortedLen]; - int sortedIdx = 0; - int sortedFromMax = -1; - - // special sorting algorithm: the original interval-list is almost sorted, - // only some intervals are swapped. 
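createUnhandledLists above partitions one sorted interval array into two order-preserving linked lists selected by a predicate. The same idea over plain integers, as an illustrative sketch with hypothetical names:

    import java.util.function.IntPredicate;

    // Hypothetical sketch: split a sorted array into two order-preserving singly-linked lists.
    final class PartitionSketch {
        static final class Node {
            final int value;
            Node next;
            Node(int value) { this.value = value; }
        }

        /** result[0] holds the values matching the predicate, result[1] the rest; input must be sorted. */
        static Node[] partition(int[] sorted, IntPredicate isFirstList) {
            Node[] heads = new Node[2];
            Node[] tails = new Node[2];
            for (int value : sorted) {
                int list = isFirstList.test(value) ? 0 : 1;
                Node node = new Node(value);
                if (tails[list] == null) {
                    heads[list] = node;
                } else {
                    tails[list].next = node;
                }
                tails[list] = node;
            }
            return heads;
        }
    }
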
So this is much faster than a complete QuickSort - for (Interval interval : intervals) { - if (interval != null) { - int from = interval.from(); - - if (sortedFromMax <= from) { - sortedList[sortedIdx++] = interval; - sortedFromMax = interval.from(); - } else { - // the assumption that the intervals are already sorted failed, - // so this interval must be sorted in manually - int j; - for (j = sortedIdx - 1; j >= 0 && from < sortedList[j].from(); j--) { - sortedList[j + 1] = sortedList[j]; - } - sortedList[j + 1] = interval; - sortedIdx++; - } - } - } - sortedIntervals = sortedList; - } - - void sortIntervalsAfterAllocation() { - if (firstDerivedIntervalIndex == -1) { - // no intervals have been added during allocation, so sorted list is already up to date - return; - } - - Interval[] oldList = sortedIntervals; - Interval[] newList = Arrays.copyOfRange(intervals, firstDerivedIntervalIndex, intervalsSize); - int oldLen = oldList.length; - int newLen = newList.length; - - // conventional sort-algorithm for new intervals - Arrays.sort(newList, (Interval a, Interval b) -> a.from() - b.from()); - - // merge old and new list (both already sorted) into one combined list - Interval[] combinedList = new Interval[oldLen + newLen]; - int oldIdx = 0; - int newIdx = 0; - - while (oldIdx + newIdx < combinedList.length) { - if (newIdx >= newLen || (oldIdx < oldLen && oldList[oldIdx].from() <= newList[newIdx].from())) { - combinedList[oldIdx + newIdx] = oldList[oldIdx]; - oldIdx++; - } else { - combinedList[oldIdx + newIdx] = newList[newIdx]; - newIdx++; - } - } - - sortedIntervals = combinedList; - } - - public void allocateRegisters() { - try (Indent indent = Debug.logAndIndent("allocate registers")) { - Interval precoloredIntervals; - Interval notPrecoloredIntervals; - - Interval.Pair result = createUnhandledLists(IS_PRECOLORED_INTERVAL, IS_VARIABLE_INTERVAL); - precoloredIntervals = result.first; - notPrecoloredIntervals = result.second; - - // allocate cpu registers - LinearScanWalker lsw; - if (OptimizingLinearScanWalker.Options.LSRAOptimization.getValue()) { - lsw = new OptimizingLinearScanWalker(this, precoloredIntervals, notPrecoloredIntervals); - } else { - lsw = new LinearScanWalker(this, precoloredIntervals, notPrecoloredIntervals); - } - lsw.walk(); - lsw.finishAllocation(); - } - } - - // * Phase 6: resolve data flow - // (insert moves at edges between blocks if intervals have been split) - - // wrapper for Interval.splitChildAtOpId that performs a bailout in product mode - // instead of returning null - Interval splitChildAtOpId(Interval interval, int opId, LIRInstruction.OperandMode mode) { - Interval result = interval.getSplitChildAtOpId(opId, mode, this); - - if (result != null) { - Debug.log("Split child at pos %d of interval %s is %s", opId, interval, result); - return result; - } - - throw new BailoutException("LinearScan: interval is null"); - } - - Interval intervalAtBlockBegin(AbstractBlock block, int operandNumber) { - return splitChildAtOpId(intervalFor(operandNumber), getFirstLirInstructionId(block), LIRInstruction.OperandMode.DEF); - } - - Interval intervalAtBlockEnd(AbstractBlock block, int operandNumber) { - return splitChildAtOpId(intervalFor(operandNumber), getLastLirInstructionId(block) + 1, LIRInstruction.OperandMode.DEF); - } - - void resolveCollectMappings(AbstractBlock fromBlock, AbstractBlock toBlock, MoveResolver moveResolver) { - assert moveResolver.checkEmpty(); - - int numOperands = operandSize(); - BitSet liveAtEdge = blockData.get(toBlock).liveIn; - - // visit 
all variables for which the liveAtEdge bit is set - for (int operandNum = liveAtEdge.nextSetBit(0); operandNum >= 0; operandNum = liveAtEdge.nextSetBit(operandNum + 1)) { - assert operandNum < numOperands : "live information set for not exisiting interval"; - assert blockData.get(fromBlock).liveOut.get(operandNum) && blockData.get(toBlock).liveIn.get(operandNum) : "interval not live at this edge"; - - Interval fromInterval = intervalAtBlockEnd(fromBlock, operandNum); - Interval toInterval = intervalAtBlockBegin(toBlock, operandNum); - - if (fromInterval != toInterval && !fromInterval.location().equals(toInterval.location())) { - // need to insert move instruction - moveResolver.addMapping(fromInterval, toInterval); - } - } - } - - void resolveFindInsertPos(AbstractBlock fromBlock, AbstractBlock toBlock, MoveResolver moveResolver) { - if (fromBlock.getSuccessorCount() <= 1) { - Debug.log("inserting moves at end of fromBlock B%d", fromBlock.getId()); - - List instructions = ir.getLIRforBlock(fromBlock); - LIRInstruction instr = instructions.get(instructions.size() - 1); - if (instr instanceof StandardOp.JumpOp) { - // insert moves before branch - moveResolver.setInsertPosition(instructions, instructions.size() - 1); - } else { - moveResolver.setInsertPosition(instructions, instructions.size()); - } - - } else { - Debug.log("inserting moves at beginning of toBlock B%d", toBlock.getId()); - - if (DetailedAsserts.getValue()) { - assert ir.getLIRforBlock(fromBlock).get(0) instanceof StandardOp.LabelOp : "block does not start with a label"; - - // because the number of predecessor edges matches the number of - // successor edges, blocks which are reached by switch statements - // may have be more than one predecessor but it will be guaranteed - // that all predecessors will be the same. - for (AbstractBlock predecessor : toBlock.getPredecessors()) { - assert fromBlock == predecessor : "all critical edges must be broken"; - } - } - - moveResolver.setInsertPosition(ir.getLIRforBlock(toBlock), 1); - } - } - - /** - * Inserts necessary moves (spilling or reloading) at edges between blocks for intervals that - * have been split. 
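The mapping collection that resolveCollectMappings performs for one control-flow edge can be sketched as follows: for every operand live across the edge, compare its location at the end of the predecessor with its location at the start of the successor, and record a move whenever they differ. The helper below is hypothetical; locations are modelled as plain strings and IntFunction stands in for the interval lookup at either end of the edge.

    import java.util.ArrayList;
    import java.util.BitSet;
    import java.util.List;
    import java.util.function.IntFunction;

    // Hypothetical sketch of collecting resolution moves for a single edge.
    final class EdgeResolutionSketch {
        static final class Move {
            final String fromLocation;
            final String toLocation;
            Move(String fromLocation, String toLocation) {
                this.fromLocation = fromLocation;
                this.toLocation = toLocation;
            }
        }

        /** Locations are plain strings here, e.g. "rax" or "stack:8". */
        static List<Move> collectMappings(BitSet liveAtEdge,
                                          IntFunction<String> locationAtEndOfFromBlock,
                                          IntFunction<String> locationAtStartOfToBlock) {
            List<Move> moves = new ArrayList<>();
            for (int operand = liveAtEdge.nextSetBit(0); operand >= 0; operand = liveAtEdge.nextSetBit(operand + 1)) {
                String from = locationAtEndOfFromBlock.apply(operand);
                String to = locationAtStartOfToBlock.apply(operand);
                if (!from.equals(to)) {
                    moves.add(new Move(from, to));   // the interval was split across this edge
                }
            }
            return moves;
        }
    }
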
- */ - void resolveDataFlow() { - try (Indent indent = Debug.logAndIndent("resolve data flow")) { - - int numBlocks = blockCount(); - MoveResolver moveResolver = new MoveResolver(this); - BitSet blockCompleted = new BitSet(numBlocks); - BitSet alreadyResolved = new BitSet(numBlocks); - - for (AbstractBlock block : sortedBlocks) { - - // check if block has only one predecessor and only one successor - if (block.getPredecessorCount() == 1 && block.getSuccessorCount() == 1) { - List instructions = ir.getLIRforBlock(block); - assert instructions.get(0) instanceof StandardOp.LabelOp : "block must start with label"; - assert instructions.get(instructions.size() - 1) instanceof StandardOp.JumpOp : "block with successor must end with unconditional jump"; - - // check if block is empty (only label and branch) - if (instructions.size() == 2) { - AbstractBlock pred = block.getPredecessors().iterator().next(); - AbstractBlock sux = block.getSuccessors().iterator().next(); - - // prevent optimization of two consecutive blocks - if (!blockCompleted.get(pred.getLinearScanNumber()) && !blockCompleted.get(sux.getLinearScanNumber())) { - Debug.log(" optimizing empty block B%d (pred: B%d, sux: B%d)", block.getId(), pred.getId(), sux.getId()); - - blockCompleted.set(block.getLinearScanNumber()); - - // directly resolve between pred and sux (without looking - // at the empty block - // between) - resolveCollectMappings(pred, sux, moveResolver); - if (moveResolver.hasMappings()) { - moveResolver.setInsertPosition(instructions, 1); - moveResolver.resolveAndAppendMoves(); - } - } - } - } - } - - for (AbstractBlock fromBlock : sortedBlocks) { - if (!blockCompleted.get(fromBlock.getLinearScanNumber())) { - alreadyResolved.clear(); - alreadyResolved.or(blockCompleted); - - for (AbstractBlock toBlock : fromBlock.getSuccessors()) { - - // check for duplicate edges between the same blocks (can happen with switch - // blocks) - if (!alreadyResolved.get(toBlock.getLinearScanNumber())) { - Debug.log("processing edge between B%d and B%d", fromBlock.getId(), toBlock.getId()); - - alreadyResolved.set(toBlock.getLinearScanNumber()); - - // collect all intervals that have been split between - // fromBlock and toBlock - resolveCollectMappings(fromBlock, toBlock, moveResolver); - if (moveResolver.hasMappings()) { - resolveFindInsertPos(fromBlock, toBlock, moveResolver); - moveResolver.resolveAndAppendMoves(); - } - } - } - } - } - } - } - - // * Phase 7: assign register numbers back to LIR - // (includes computation of debug information and oop maps) - - static StackSlotValue canonicalSpillOpr(Interval interval) { - assert interval.spillSlot() != null : "canonical spill slot not set"; - return interval.spillSlot(); - } - - /** - * Assigns the allocated location for an LIR instruction operand back into the instruction. - * - * @param operand an LIR instruction operand - * @param opId the id of the LIR instruction using {@code operand} - * @param mode the usage mode for {@code operand} by the instruction - * @return the location assigned for the operand - */ - private Value colorLirOperand(Variable operand, int opId, OperandMode mode) { - Interval interval = intervalFor(operand); - assert interval != null : "interval must exist"; - - if (opId != -1) { - if (DetailedAsserts.getValue()) { - AbstractBlock block = blockForId(opId); - if (block.getSuccessorCount() <= 1 && opId == getLastLirInstructionId(block)) { - // check if spill moves could have been appended at the end of this block, but - // before the branch instruction. 
So the split child information for this branch - // would - // be incorrect. - LIRInstruction instr = ir.getLIRforBlock(block).get(ir.getLIRforBlock(block).size() - 1); - if (instr instanceof StandardOp.JumpOp) { - if (blockData.get(block).liveOut.get(operandNumber(operand))) { - assert false : "can't get split child for the last branch of a block because the information would be incorrect (moves are inserted before the branch in resolveDataFlow)"; - } - } - } - } - - // operands are not changed when an interval is split during allocation, - // so search the right interval here - interval = splitChildAtOpId(interval, opId, mode); - } - - if (isIllegal(interval.location()) && interval.canMaterialize()) { - assert mode != OperandMode.DEF; - return interval.getMaterializedValue(); - } - return interval.location(); - } - - private boolean isMaterialized(AllocatableValue operand, int opId, OperandMode mode) { - Interval interval = intervalFor(operand); - assert interval != null : "interval must exist"; - - if (opId != -1) { - // operands are not changed when an interval is split during allocation, - // so search the right interval here - interval = splitChildAtOpId(interval, opId, mode); - } - - return isIllegal(interval.location()) && interval.canMaterialize(); - } - - protected IntervalWalker initIntervalWalker(IntervalPredicate predicate) { - // setup lists of potential oops for walking - Interval oopIntervals; - Interval nonOopIntervals; - - oopIntervals = createUnhandledLists(predicate, null).first; - - // intervals that have no oops inside need not to be processed. - // to ensure a walking until the last instruction id, add a dummy interval - // with a high operation id - nonOopIntervals = new Interval(Value.ILLEGAL, -1); - nonOopIntervals.addRange(Integer.MAX_VALUE - 2, Integer.MAX_VALUE - 1); - - return new IntervalWalker(this, oopIntervals, nonOopIntervals); - } - - private boolean isCallerSave(Value operand) { - return attributes(asRegister(operand)).isCallerSave(); - } - - /** - * @param op - * @param operand - * @param valueMode - * @param flags - * @see InstructionValueProcedure#doValue(LIRInstruction, Value, OperandMode, EnumSet) - */ - private Value debugInfoProcedure(LIRInstruction op, Value operand, OperandMode valueMode, EnumSet flags) { - if (isVirtualStackSlot(operand)) { - return operand; - } - int tempOpId = op.id(); - OperandMode mode = OperandMode.USE; - AbstractBlock block = blockForId(tempOpId); - if (block.getSuccessorCount() == 1 && tempOpId == getLastLirInstructionId(block)) { - // generating debug information for the last instruction of a block. - // if this instruction is a branch, spill moves are inserted before this branch - // and so the wrong operand would be returned (spill moves at block boundaries - // are not - // considered in the live ranges of intervals) - // Solution: use the first opId of the branch target block instead. 
- final LIRInstruction instr = ir.getLIRforBlock(block).get(ir.getLIRforBlock(block).size() - 1); - if (instr instanceof StandardOp.JumpOp) { - if (blockData.get(block).liveOut.get(operandNumber(operand))) { - tempOpId = getFirstLirInstructionId(block.getSuccessors().iterator().next()); - mode = OperandMode.DEF; - } - } - } - - // Get current location of operand - // The operand must be live because debug information is considered when building - // the intervals - // if the interval is not live, colorLirOperand will cause an assert on failure - Value result = colorLirOperand((Variable) operand, tempOpId, mode); - assert !hasCall(tempOpId) || isStackSlotValue(result) || isConstant(result) || !isCallerSave(result) : "cannot have caller-save register operands at calls"; - return result; - } - - private void computeDebugInfo(final LIRInstruction op, LIRFrameState info) { - info.forEachState(op, this::debugInfoProcedure); - } - - private void assignLocations(List instructions) { - int numInst = instructions.size(); - boolean hasDead = false; - - InstructionValueProcedure assignProc = (op, operand, mode, flags) -> isVariable(operand) ? colorLirOperand((Variable) operand, op.id(), mode) : operand; - for (int j = 0; j < numInst; j++) { - final LIRInstruction op = instructions.get(j); - if (op == null) { // this can happen when spill-moves are removed in eliminateSpillMoves - hasDead = true; - continue; - } - - // remove useless moves - MoveOp move = null; - if (op instanceof MoveOp) { - move = (MoveOp) op; - AllocatableValue result = move.getResult(); - if (isVariable(result) && isMaterialized(result, op.id(), OperandMode.DEF)) { - /* - * This happens if a materializable interval is originally not spilled but then - * kicked out in LinearScanWalker.splitForSpilling(). When kicking out such an - * interval this move operation was already generated. - */ - instructions.set(j, null); - hasDead = true; - continue; - } - } - - op.forEachInput(assignProc); - op.forEachAlive(assignProc); - op.forEachTemp(assignProc); - op.forEachOutput(assignProc); - - // compute reference map and debug information - op.forEachState((inst, state) -> computeDebugInfo(inst, state)); - - // remove useless moves - if (move != null) { - if (move.getInput().equals(move.getResult())) { - instructions.set(j, null); - hasDead = true; - } - } - } - - if (hasDead) { - // Remove null values from the list. - instructions.removeAll(Collections.singleton(null)); - } - } - - private void assignLocations() { - try (Indent indent = Debug.logAndIndent("assign locations")) { - for (AbstractBlock block : sortedBlocks) { - try (Indent indent2 = Debug.logAndIndent("assign locations in block B%d", block.getId())) { - assignLocations(ir.getLIRforBlock(block)); - } - } - } - } - - public static void allocate(TargetDescription target, LIRGenerationResult res) { - new LinearScan(target, res).allocate(); - } - - private void allocate() { - - /* - * This is the point to enable debug logging for the whole register allocation. 
- */ - try (Indent indent = Debug.logAndIndent("LinearScan allocate")) { - - try (Scope s = Debug.scope("LifetimeAnalysis")) { - numberInstructions(); - printLir("Before register allocation", true); - computeLocalLiveSets(); - computeGlobalLiveSets(); - buildIntervals(); - sortIntervalsBeforeAllocation(); - } catch (Throwable e) { - throw Debug.handle(e); - } - - try (Scope s = Debug.scope("RegisterAllocation")) { - printIntervals("Before register allocation"); - allocateRegisters(); - } catch (Throwable e) { - throw Debug.handle(e); - } - - if (Options.LSRAOptimizeSpillPosition.getValue()) { - try (Scope s = Debug.scope("OptimizeSpillPosition")) { - optimizeSpillPosition(); - } catch (Throwable e) { - throw Debug.handle(e); - } - } - - try (Scope s = Debug.scope("ResolveDataFlow")) { - resolveDataFlow(); - } catch (Throwable e) { - throw Debug.handle(e); - } - - try (Scope s = Debug.scope("DebugInfo")) { - printIntervals("After register allocation"); - printLir("After register allocation", true); - - sortIntervalsAfterAllocation(); - - if (DetailedAsserts.getValue()) { - verify(); - } - - try (Scope s1 = Debug.scope("EliminateSpillMove")) { - eliminateSpillMoves(); - } catch (Throwable e) { - throw Debug.handle(e); - } - printLir("After spill move elimination", true); - - try (Scope s1 = Debug.scope("AssignLocations")) { - assignLocations(); - } catch (Throwable e) { - throw Debug.handle(e); - } - - if (DetailedAsserts.getValue()) { - verifyIntervals(); - } - } catch (Throwable e) { - throw Debug.handle(e); - } - - printLir("After register number assignment", true); - } - } - - private DebugMetric betterSpillPos = Debug.metric("BetterSpillPosition"); - private DebugMetric betterSpillPosWithLowerProbability = Debug.metric("BetterSpillPositionWithLowerProbability"); - - private void optimizeSpillPosition() { - LIRInsertionBuffer[] insertionBuffers = new LIRInsertionBuffer[ir.linearScanOrder().size()]; - for (Interval interval : intervals) { - if (interval != null && interval.isSplitParent() && interval.spillState() == SpillState.SpillInDominator) { - AbstractBlock defBlock = blockForId(interval.spillDefinitionPos()); - AbstractBlock spillBlock = null; - Interval firstSpillChild = null; - try (Indent indent = Debug.logAndIndent("interval %s (%s)", interval, defBlock)) { - for (Interval splitChild : interval.getSplitChildren()) { - if (isStackSlotValue(splitChild.location())) { - if (firstSpillChild == null || splitChild.from() < firstSpillChild.from()) { - firstSpillChild = splitChild; - } else { - assert firstSpillChild.from() < splitChild.from(); - } - // iterate all blocks where the interval has use positions - for (AbstractBlock splitBlock : blocksForInterval(splitChild)) { - if (dominates(defBlock, splitBlock)) { - Debug.log("Split interval %s, block %s", splitChild, splitBlock); - if (spillBlock == null) { - spillBlock = splitBlock; - } else { - spillBlock = commonDominator(spillBlock, splitBlock); - assert spillBlock != null; - } - } - } - } - } - if (spillBlock == null) { - // no spill interval - interval.setSpillState(SpillState.StoreAtDefinition); - } else { - // move out of loops - if (defBlock.getLoopDepth() < spillBlock.getLoopDepth()) { - spillBlock = moveSpillOutOfLoop(defBlock, spillBlock); - } - - /* - * If the spill block is the begin of the first split child (aka the value - * is on the stack) spill in the dominator. 
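(optimizeSpillPosition places the spill store in a block that dominates every block where the value is used from the stack; spillBlock is therefore narrowed to the common dominator of all such blocks and then moved out of loops. A minimal sketch of the common-dominator walk, assuming, as a simplification, that each block carries an immediate-dominator link and its depth in the dominator tree:)

// Minimal sketch of a common-dominator computation; Block is a hypothetical
// stand-in for AbstractBlock with precomputed dominator information.
final class CommonDominator {

    static final class Block {
        final String name;
        final Block idom;     // immediate dominator, null for the entry block
        final int domDepth;   // depth in the dominator tree
        Block(String name, Block idom) {
            this.name = name;
            this.idom = idom;
            this.domDepth = idom == null ? 0 : idom.domDepth + 1;
        }
    }

    // Walk both blocks up the dominator tree until they meet.
    static Block commonDominator(Block a, Block b) {
        while (a != b) {
            if (a.domDepth >= b.domDepth) {
                a = a.idom;
            } else {
                b = b.idom;
            }
        }
        return a;
    }

    public static void main(String[] args) {
        Block entry = new Block("entry", null);
        Block loopHeader = new Block("loopHeader", entry);
        Block thenBlock = new Block("then", loopHeader);
        Block elseBlock = new Block("else", loopHeader);
        System.out.println(commonDominator(thenBlock, elseBlock).name); // loopHeader
    }
}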
- */ - assert firstSpillChild != null; - if (!defBlock.equals(spillBlock) && spillBlock.equals(blockForId(firstSpillChild.from()))) { - AbstractBlock dom = spillBlock.getDominator(); - Debug.log("Spill block (%s) is the beginning of a spill child -> use dominator (%s)", spillBlock, dom); - spillBlock = dom; - } - - if (!defBlock.equals(spillBlock)) { - assert dominates(defBlock, spillBlock); - betterSpillPos.increment(); - Debug.log("Better spill position found (Block %s)", spillBlock); - - if (defBlock.probability() <= spillBlock.probability()) { - // better spill block has the same probability -> do nothing - interval.setSpillState(SpillState.StoreAtDefinition); - } else { - LIRInsertionBuffer insertionBuffer = insertionBuffers[spillBlock.getId()]; - if (insertionBuffer == null) { - insertionBuffer = new LIRInsertionBuffer(); - insertionBuffers[spillBlock.getId()] = insertionBuffer; - insertionBuffer.init(ir.getLIRforBlock(spillBlock)); - } - int spillOpId = getFirstLirInstructionId(spillBlock); - // insert spill move - AllocatableValue fromLocation = interval.getSplitChildAtOpId(spillOpId, OperandMode.DEF, this).location(); - AllocatableValue toLocation = canonicalSpillOpr(interval); - LIRInstruction move = ir.getSpillMoveFactory().createMove(toLocation, fromLocation); - move.setId(DOMINATOR_SPILL_MOVE_ID); - /* - * We can use the insertion buffer directly because we always insert - * at position 1. - */ - insertionBuffer.append(1, move); - - betterSpillPosWithLowerProbability.increment(); - interval.setSpillDefinitionPos(spillOpId); - } - } else { - // definition is the best choice - interval.setSpillState(SpillState.StoreAtDefinition); - } - } - } - } - } - for (LIRInsertionBuffer insertionBuffer : insertionBuffers) { - if (insertionBuffer != null) { - assert insertionBuffer.initialized() : "Insertion buffer is nonnull but not initialized!"; - insertionBuffer.finish(); - } - } - } - - /** - * Iterate over all {@link AbstractBlock blocks} of an interval. 
- */ - private class IntervalBlockIterator implements Iterator> { - - Range range; - AbstractBlock block; - - public IntervalBlockIterator(Interval interval) { - range = interval.first(); - block = blockForId(range.from); - } - - public AbstractBlock next() { - AbstractBlock currentBlock = block; - int nextBlockIndex = block.getLinearScanNumber() + 1; - if (nextBlockIndex < sortedBlocks.size()) { - block = sortedBlocks.get(nextBlockIndex); - if (range.to <= getFirstLirInstructionId(block)) { - range = range.next; - if (range == Range.EndMarker) { - block = null; - } else { - block = blockForId(range.from); - } - } - } else { - block = null; - } - return currentBlock; - } - - public boolean hasNext() { - return block != null; - } - } - - private Iterable> blocksForInterval(Interval interval) { - return new Iterable>() { - public Iterator> iterator() { - return new IntervalBlockIterator(interval); - } - }; - } - - private static AbstractBlock moveSpillOutOfLoop(AbstractBlock defBlock, AbstractBlock spillBlock) { - int defLoopDepth = defBlock.getLoopDepth(); - for (AbstractBlock block = spillBlock.getDominator(); !defBlock.equals(block); block = block.getDominator()) { - assert block != null : "spill block not dominated by definition block?"; - if (block.getLoopDepth() <= defLoopDepth) { - assert block.getLoopDepth() == defLoopDepth : "Cannot spill an interval outside of the loop where it is defined!"; - return block; - } - } - return defBlock; - } - - void printIntervals(String label) { - if (Debug.isLogEnabled()) { - try (Indent indent = Debug.logAndIndent("intervals %s", label)) { - for (Interval interval : intervals) { - if (interval != null) { - Debug.log("%s", interval.logString(this)); - } - } - - try (Indent indent2 = Debug.logAndIndent("Basic Blocks")) { - for (int i = 0; i < blockCount(); i++) { - AbstractBlock block = blockAt(i); - Debug.log("B%d [%d, %d, %s] ", block.getId(), getFirstLirInstructionId(block), getLastLirInstructionId(block), block.getLoop()); - } - } - } - } - Debug.dump(Arrays.copyOf(intervals, intervalsSize), label); - } - - void printLir(String label, @SuppressWarnings("unused") boolean hirValid) { - Debug.dump(ir, label); - } - - boolean verify() { - // (check that all intervals have a correct register and that no registers are overwritten) - verifyIntervals(); - - verifyRegisters(); - - Debug.log("no errors found"); - - return true; - } - - private void verifyRegisters() { - // Enable this logging to get output for the verification process. 
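(The verification that follows includes a pairwise check that two intervals which intersect were never assigned the same location. A minimal sketch of that check, with a single-range interval and a string location standing in for the real Interval and Value types:)

import java.util.*;

// Simplified verification: intervals that overlap in time must not share a location.
final class IntervalOverlapCheck {

    static final class Interval {
        final int from, to;        // half-open live range
        final String location;     // assigned register or stack slot
        Interval(int from, int to, String location) {
            this.from = from; this.to = to; this.location = location;
        }
        boolean intersects(Interval other) {
            return from < other.to && other.from < to;
        }
    }

    static void verify(List<Interval> intervals) {
        for (int i = 0; i < intervals.size(); i++) {
            for (int j = i + 1; j < intervals.size(); j++) {
                Interval a = intervals.get(i);
                Interval b = intervals.get(j);
                if (a.intersects(b) && a.location.equals(b.location)) {
                    throw new AssertionError("intervals overlap with the same location: " + a.location);
                }
            }
        }
    }

    public static void main(String[] args) {
        verify(Arrays.asList(new Interval(0, 10, "rax"), new Interval(10, 20, "rax"))); // ok, ranges only touch
        verify(Arrays.asList(new Interval(0, 10, "rax"), new Interval(4, 8, "rbx")));   // ok, different locations
    }
}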
- try (Indent indent = Debug.logAndIndent("verifying register allocation")) { - RegisterVerifier verifier = new RegisterVerifier(this); - verifier.verify(blockAt(0)); - } - } - - void verifyIntervals() { - try (Indent indent = Debug.logAndIndent("verifying intervals")) { - int len = intervalsSize; - - for (int i = 0; i < len; i++) { - Interval i1 = intervals[i]; - if (i1 == null) { - continue; - } - - i1.checkSplitChildren(); - - if (i1.operandNumber != i) { - Debug.log("Interval %d is on position %d in list", i1.operandNumber, i); - Debug.log(i1.logString(this)); - throw new GraalInternalError(""); - } - - if (isVariable(i1.operand) && i1.kind().equals(LIRKind.Illegal)) { - Debug.log("Interval %d has no type assigned", i1.operandNumber); - Debug.log(i1.logString(this)); - throw new GraalInternalError(""); - } - - if (i1.location() == null) { - Debug.log("Interval %d has no register assigned", i1.operandNumber); - Debug.log(i1.logString(this)); - throw new GraalInternalError(""); - } - - if (i1.first() == Range.EndMarker) { - Debug.log("Interval %d has no Range", i1.operandNumber); - Debug.log(i1.logString(this)); - throw new GraalInternalError(""); - } - - for (Range r = i1.first(); r != Range.EndMarker; r = r.next) { - if (r.from >= r.to) { - Debug.log("Interval %d has zero length range", i1.operandNumber); - Debug.log(i1.logString(this)); - throw new GraalInternalError(""); - } - } - - for (int j = i + 1; j < len; j++) { - Interval i2 = intervals[j]; - if (i2 == null) { - continue; - } - - // special intervals that are created in MoveResolver - // . ignore them because the range information has no meaning there - if (i1.from() == 1 && i1.to() == 2) { - continue; - } - if (i2.from() == 1 && i2.to() == 2) { - continue; - } - Value l1 = i1.location(); - Value l2 = i2.location(); - if (i1.intersects(i2) && !isIllegal(l1) && (l1.equals(l2))) { - if (DetailedAsserts.getValue()) { - Debug.log("Intervals %d and %d overlap and have the same register assigned", i1.operandNumber, i2.operandNumber); - Debug.log(i1.logString(this)); - Debug.log(i2.logString(this)); - } - throw new BailoutException(""); - } - } - } - } - } - - class CheckConsumer implements ValueConsumer { - - boolean ok; - Interval curInterval; - - @Override - public void visitValue(Value operand, OperandMode mode, EnumSet flags) { - if (isRegister(operand)) { - if (intervalFor(operand) == curInterval) { - ok = true; - } - } - } - } - - void verifyNoOopsInFixedIntervals() { - try (Indent indent = Debug.logAndIndent("verifying that no oops are in fixed intervals *")) { - CheckConsumer checkConsumer = new CheckConsumer(); - - Interval fixedIntervals; - Interval otherIntervals; - fixedIntervals = createUnhandledLists(IS_PRECOLORED_INTERVAL, null).first; - // to ensure a walking until the last instruction id, add a dummy interval - // with a high operation id - otherIntervals = new Interval(Value.ILLEGAL, -1); - otherIntervals.addRange(Integer.MAX_VALUE - 2, Integer.MAX_VALUE - 1); - IntervalWalker iw = new IntervalWalker(this, fixedIntervals, otherIntervals); - - for (AbstractBlock block : sortedBlocks) { - List instructions = ir.getLIRforBlock(block); - - for (int j = 0; j < instructions.size(); j++) { - LIRInstruction op = instructions.get(j); - - if (op.hasState()) { - iw.walkBefore(op.id()); - boolean checkLive = true; - - // Make sure none of the fixed registers is live across an - // oopmap since we can't handle that correctly. 
- if (checkLive) { - for (Interval interval = iw.activeLists.get(RegisterBinding.Fixed); interval != Interval.EndMarker; interval = interval.next) { - if (interval.currentTo() > op.id() + 1) { - // This interval is live out of this op so make sure - // that this interval represents some value that's - // referenced by this op either as an input or output. - checkConsumer.curInterval = interval; - checkConsumer.ok = false; - - op.visitEachInput(checkConsumer); - op.visitEachAlive(checkConsumer); - op.visitEachTemp(checkConsumer); - op.visitEachOutput(checkConsumer); - - assert checkConsumer.ok : "fixed intervals should never be live across an oopmap point"; - } - } - } - } - } - } - } - } - - /** - * Returns a value for a interval definition, which can be used for re-materialization. - * - * @param op An instruction which defines a value - * @param operand The destination operand of the instruction - * @param interval The interval for this defined value. - * @return Returns the value which is moved to the instruction and which can be reused at all - * reload-locations in case the interval of this instruction is spilled. Currently this - * can only be a {@link JavaConstant}. - */ - public static JavaConstant getMaterializedValue(LIRInstruction op, Value operand, Interval interval) { - if (op instanceof MoveOp) { - MoveOp move = (MoveOp) op; - if (move.getInput() instanceof JavaConstant) { - /* - * Check if the interval has any uses which would accept an stack location (priority - * == ShouldHaveRegister). Rematerialization of such intervals can result in a - * degradation, because rematerialization always inserts a constant load, even if - * the value is not needed in a register. - */ - Interval.UsePosList usePosList = interval.usePosList(); - int numUsePos = usePosList.size(); - for (int useIdx = 0; useIdx < numUsePos; useIdx++) { - Interval.RegisterPriority priority = usePosList.registerPriority(useIdx); - if (priority == Interval.RegisterPriority.ShouldHaveRegister) { - return null; - } - } - return (JavaConstant) move.getInput(); - } - } - return null; - } -} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScanWalker.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScanWalker.java Sat Feb 07 02:34:43 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,903 +0,0 @@ -/* - * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ -package com.oracle.graal.compiler.alloc; - -import static com.oracle.graal.api.code.CodeUtil.*; -import static com.oracle.graal.api.code.ValueUtil.*; -import static com.oracle.graal.lir.LIRValueUtil.*; - -import java.util.*; - -import com.oracle.graal.api.code.*; -import com.oracle.graal.api.meta.*; -import com.oracle.graal.compiler.alloc.Interval.RegisterBinding; -import com.oracle.graal.compiler.alloc.Interval.RegisterPriority; -import com.oracle.graal.compiler.alloc.Interval.SpillState; -import com.oracle.graal.compiler.alloc.Interval.State; -import com.oracle.graal.compiler.common.cfg.*; -import com.oracle.graal.debug.*; -import com.oracle.graal.lir.*; -import com.oracle.graal.lir.StandardOp.MoveOp; -import com.oracle.graal.phases.util.*; - -/** - */ -class LinearScanWalker extends IntervalWalker { - - protected Register[] availableRegs; - - protected final int[] usePos; - protected final int[] blockPos; - - protected List[] spillIntervals; - - private MoveResolver moveResolver; // for ordering spill moves - - /** - * Only 10% of the lists in {@link #spillIntervals} are actually used. But when they are used, - * they can grow quite long. The maximum length observed was 45 (all numbers taken from a - * bootstrap run of Graal). Therefore, we initialize {@link #spillIntervals} with this marker - * value, and allocate a "real" list only on demand in {@link #setUsePos}. - */ - private static final List EMPTY_LIST = new ArrayList<>(0); - - // accessors mapped to same functions in class LinearScan - int blockCount() { - return allocator.blockCount(); - } - - AbstractBlock blockAt(int idx) { - return allocator.blockAt(idx); - } - - AbstractBlock blockOfOpWithId(int opId) { - return allocator.blockForId(opId); - } - - LinearScanWalker(LinearScan allocator, Interval unhandledFixedFirst, Interval unhandledAnyFirst) { - super(allocator, unhandledFixedFirst, unhandledAnyFirst); - - moveResolver = new MoveResolver(allocator); - spillIntervals = Util.uncheckedCast(new List[allocator.registers.length]); - for (int i = 0; i < allocator.registers.length; i++) { - spillIntervals[i] = EMPTY_LIST; - } - usePos = new int[allocator.registers.length]; - blockPos = new int[allocator.registers.length]; - } - - void initUseLists(boolean onlyProcessUsePos) { - for (Register register : availableRegs) { - int i = register.number; - usePos[i] = Integer.MAX_VALUE; - - if (!onlyProcessUsePos) { - blockPos[i] = Integer.MAX_VALUE; - spillIntervals[i].clear(); - } - } - } - - void excludeFromUse(Interval i) { - Value location = i.location(); - int i1 = asRegister(location).number; - if (i1 >= availableRegs[0].number && i1 <= availableRegs[availableRegs.length - 1].number) { - usePos[i1] = 0; - } - } - - void setUsePos(Interval interval, int usePos, boolean onlyProcessUsePos) { - if (usePos != -1) { - assert usePos != 0 : "must use excludeFromUse to set usePos to 0"; - int i = asRegister(interval.location()).number; - if (i >= availableRegs[0].number && i <= availableRegs[availableRegs.length - 1].number) { - if (this.usePos[i] > usePos) { - this.usePos[i] = usePos; - } - if (!onlyProcessUsePos) { - List list = spillIntervals[i]; - if (list == EMPTY_LIST) { - list = new ArrayList<>(2); - spillIntervals[i] = list; - } - list.add(interval); - } - } - } - } - - void setBlockPos(Interval i, int blockPos) { - if (blockPos != -1) { - int reg = asRegister(i.location()).number; - if (reg >= availableRegs[0].number && reg <= availableRegs[availableRegs.length - 1].number) { - if (this.blockPos[reg] > blockPos) { - 
this.blockPos[reg] = blockPos; - } - if (usePos[reg] > blockPos) { - usePos[reg] = blockPos; - } - } - } - } - - void freeExcludeActiveFixed() { - Interval interval = activeLists.get(RegisterBinding.Fixed); - while (interval != Interval.EndMarker) { - assert isRegister(interval.location()) : "active interval must have a register assigned"; - excludeFromUse(interval); - interval = interval.next; - } - } - - void freeExcludeActiveAny() { - Interval interval = activeLists.get(RegisterBinding.Any); - while (interval != Interval.EndMarker) { - assert isRegister(interval.location()) : "active interval must have a register assigned"; - excludeFromUse(interval); - interval = interval.next; - } - } - - void freeCollectInactiveFixed(Interval current) { - Interval interval = inactiveLists.get(RegisterBinding.Fixed); - while (interval != Interval.EndMarker) { - if (current.to() <= interval.currentFrom()) { - assert interval.currentIntersectsAt(current) == -1 : "must not intersect"; - setUsePos(interval, interval.currentFrom(), true); - } else { - setUsePos(interval, interval.currentIntersectsAt(current), true); - } - interval = interval.next; - } - } - - void freeCollectInactiveAny(Interval current) { - Interval interval = inactiveLists.get(RegisterBinding.Any); - while (interval != Interval.EndMarker) { - setUsePos(interval, interval.currentIntersectsAt(current), true); - interval = interval.next; - } - } - - void freeCollectUnhandled(RegisterBinding kind, Interval current) { - Interval interval = unhandledLists.get(kind); - while (interval != Interval.EndMarker) { - setUsePos(interval, interval.intersectsAt(current), true); - if (kind == RegisterBinding.Fixed && current.to() <= interval.from()) { - setUsePos(interval, interval.from(), true); - } - interval = interval.next; - } - } - - void spillExcludeActiveFixed() { - Interval interval = activeLists.get(RegisterBinding.Fixed); - while (interval != Interval.EndMarker) { - excludeFromUse(interval); - interval = interval.next; - } - } - - void spillBlockUnhandledFixed(Interval current) { - Interval interval = unhandledLists.get(RegisterBinding.Fixed); - while (interval != Interval.EndMarker) { - setBlockPos(interval, interval.intersectsAt(current)); - interval = interval.next; - } - } - - void spillBlockInactiveFixed(Interval current) { - Interval interval = inactiveLists.get(RegisterBinding.Fixed); - while (interval != Interval.EndMarker) { - if (current.to() > interval.currentFrom()) { - setBlockPos(interval, interval.currentIntersectsAt(current)); - } else { - assert interval.currentIntersectsAt(current) == -1 : "invalid optimization: intervals intersect"; - } - - interval = interval.next; - } - } - - void spillCollectActiveAny() { - Interval interval = activeLists.get(RegisterBinding.Any); - while (interval != Interval.EndMarker) { - setUsePos(interval, Math.min(interval.nextUsage(RegisterPriority.LiveAtLoopEnd, currentPosition), interval.to()), false); - interval = interval.next; - } - } - - void spillCollectInactiveAny(Interval current) { - Interval interval = inactiveLists.get(RegisterBinding.Any); - while (interval != Interval.EndMarker) { - if (interval.currentIntersects(current)) { - setUsePos(interval, Math.min(interval.nextUsage(RegisterPriority.LiveAtLoopEnd, currentPosition), interval.to()), false); - } - interval = interval.next; - } - } - - void insertMove(int operandId, Interval srcIt, Interval dstIt) { - // output all moves here. 
When source and target are equal, the move is - // optimized away later in assignRegNums - - int opId = (operandId + 1) & ~1; - AbstractBlock opBlock = allocator.blockForId(opId); - assert opId > 0 && allocator.blockForId(opId - 2) == opBlock : "cannot insert move at block boundary"; - - // calculate index of instruction inside instruction list of current block - // the minimal index (for a block with no spill moves) can be calculated because the - // numbering of instructions is known. - // When the block already contains spill moves, the index must be increased until the - // correct index is reached. - List instructions = allocator.ir.getLIRforBlock(opBlock); - int index = (opId - instructions.get(0).id()) >> 1; - assert instructions.get(index).id() <= opId : "error in calculation"; - - while (instructions.get(index).id() != opId) { - index++; - assert 0 <= index && index < instructions.size() : "index out of bounds"; - } - assert 1 <= index && index < instructions.size() : "index out of bounds"; - assert instructions.get(index).id() == opId : "error in calculation"; - - // insert new instruction before instruction at position index - moveResolver.moveInsertPosition(instructions, index); - moveResolver.addMapping(srcIt, dstIt); - } - - int findOptimalSplitPos(AbstractBlock minBlock, AbstractBlock maxBlock, int maxSplitPos) { - int fromBlockNr = minBlock.getLinearScanNumber(); - int toBlockNr = maxBlock.getLinearScanNumber(); - - assert 0 <= fromBlockNr && fromBlockNr < blockCount() : "out of range"; - assert 0 <= toBlockNr && toBlockNr < blockCount() : "out of range"; - assert fromBlockNr < toBlockNr : "must cross block boundary"; - - // Try to split at end of maxBlock. If this would be after - // maxSplitPos, then use the begin of maxBlock - int optimalSplitPos = allocator.getLastLirInstructionId(maxBlock) + 2; - if (optimalSplitPos > maxSplitPos) { - optimalSplitPos = allocator.getFirstLirInstructionId(maxBlock); - } - - int minLoopDepth = maxBlock.getLoopDepth(); - for (int i = toBlockNr - 1; i >= fromBlockNr; i--) { - AbstractBlock cur = blockAt(i); - - if (cur.getLoopDepth() < minLoopDepth) { - // block with lower loop-depth found . split at the end of this block - minLoopDepth = cur.getLoopDepth(); - optimalSplitPos = allocator.getLastLirInstructionId(cur) + 2; - } - } - assert optimalSplitPos > allocator.maxOpId() || allocator.isBlockBegin(optimalSplitPos) : "algorithm must move split pos to block boundary"; - - return optimalSplitPos; - } - - int findOptimalSplitPos(Interval interval, int minSplitPos, int maxSplitPos, boolean doLoopOptimization) { - int optimalSplitPos = -1; - if (minSplitPos == maxSplitPos) { - // trivial case, no optimization of split position possible - Debug.log("min-pos and max-pos are equal, no optimization possible"); - optimalSplitPos = minSplitPos; - - } else { - assert minSplitPos < maxSplitPos : "must be true then"; - assert minSplitPos > 0 : "cannot access minSplitPos - 1 otherwise"; - - // reason for using minSplitPos - 1: when the minimal split pos is exactly at the - // beginning of a block, then minSplitPos is also a possible split position. 
- // Use the block before as minBlock, because then minBlock.lastLirInstructionId() + 2 == - // minSplitPos - AbstractBlock minBlock = allocator.blockForId(minSplitPos - 1); - - // reason for using maxSplitPos - 1: otherwise there would be an assert on failure - // when an interval ends at the end of the last block of the method - // (in this case, maxSplitPos == allocator().maxLirOpId() + 2, and there is no - // block at this opId) - AbstractBlock maxBlock = allocator.blockForId(maxSplitPos - 1); - - assert minBlock.getLinearScanNumber() <= maxBlock.getLinearScanNumber() : "invalid order"; - if (minBlock == maxBlock) { - // split position cannot be moved to block boundary : so split as late as possible - Debug.log("cannot move split pos to block boundary because minPos and maxPos are in same block"); - optimalSplitPos = maxSplitPos; - - } else { - if (interval.hasHoleBetween(maxSplitPos - 1, maxSplitPos) && !allocator.isBlockBegin(maxSplitPos)) { - // Do not move split position if the interval has a hole before maxSplitPos. - // Intervals resulting from Phi-Functions have more than one definition (marked - // as mustHaveRegister) with a hole before each definition. When the register is - // needed - // for the second definition : an earlier reloading is unnecessary. - Debug.log("interval has hole just before maxSplitPos, so splitting at maxSplitPos"); - optimalSplitPos = maxSplitPos; - - } else { - // seach optimal block boundary between minSplitPos and maxSplitPos - Debug.log("moving split pos to optimal block boundary between block B%d and B%d", minBlock.getId(), maxBlock.getId()); - - if (doLoopOptimization) { - // Loop optimization: if a loop-end marker is found between min- and - // max-position : - // then split before this loop - int loopEndPos = interval.nextUsageExact(RegisterPriority.LiveAtLoopEnd, allocator.getLastLirInstructionId(minBlock) + 2); - Debug.log("loop optimization: loop end found at pos %d", loopEndPos); - - assert loopEndPos > minSplitPos : "invalid order"; - if (loopEndPos < maxSplitPos) { - // loop-end marker found between min- and max-position - // if it is not the end marker for the same loop as the min-position : - // then move - // the max-position to this loop block. 
- // Desired result: uses tagged as shouldHaveRegister inside a loop cause - // a reloading - // of the interval (normally, only mustHaveRegister causes a reloading) - AbstractBlock loopBlock = allocator.blockForId(loopEndPos); - - Debug.log("interval is used in loop that ends in block B%d, so trying to move maxBlock back from B%d to B%d", loopBlock.getId(), maxBlock.getId(), loopBlock.getId()); - assert loopBlock != minBlock : "loopBlock and minBlock must be different because block boundary is needed between"; - - optimalSplitPos = findOptimalSplitPos(minBlock, loopBlock, allocator.getLastLirInstructionId(loopBlock) + 2); - if (optimalSplitPos == allocator.getLastLirInstructionId(loopBlock) + 2) { - optimalSplitPos = -1; - Debug.log("loop optimization not necessary"); - } else { - Debug.log("loop optimization successful"); - } - } - } - - if (optimalSplitPos == -1) { - // not calculated by loop optimization - optimalSplitPos = findOptimalSplitPos(minBlock, maxBlock, maxSplitPos); - } - } - } - } - Debug.log("optimal split position: %d", optimalSplitPos); - - return optimalSplitPos; - } - - // split an interval at the optimal position between minSplitPos and - // maxSplitPos in two parts: - // 1) the left part has already a location assigned - // 2) the right part is sorted into to the unhandled-list - void splitBeforeUsage(Interval interval, int minSplitPos, int maxSplitPos) { - - try (Indent indent = Debug.logAndIndent("splitting interval %s between %d and %d", interval, minSplitPos, maxSplitPos)) { - - assert interval.from() < minSplitPos : "cannot split at start of interval"; - assert currentPosition < minSplitPos : "cannot split before current position"; - assert minSplitPos <= maxSplitPos : "invalid order"; - assert maxSplitPos <= interval.to() : "cannot split after end of interval"; - - int optimalSplitPos = findOptimalSplitPos(interval, minSplitPos, maxSplitPos, true); - - assert minSplitPos <= optimalSplitPos && optimalSplitPos <= maxSplitPos : "out of range"; - assert optimalSplitPos <= interval.to() : "cannot split after end of interval"; - assert optimalSplitPos > interval.from() : "cannot split at start of interval"; - - if (optimalSplitPos == interval.to() && interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos) == Integer.MAX_VALUE) { - // the split position would be just before the end of the interval - // . 
no split at all necessary - Debug.log("no split necessary because optimal split position is at end of interval"); - return; - } - - // must calculate this before the actual split is performed and before split position is - // moved to odd opId - boolean moveNecessary = !allocator.isBlockBegin(optimalSplitPos) && !interval.hasHoleBetween(optimalSplitPos - 1, optimalSplitPos); - - if (!allocator.isBlockBegin(optimalSplitPos)) { - // move position before actual instruction (odd opId) - optimalSplitPos = (optimalSplitPos - 1) | 1; - } - - Debug.log("splitting at position %d", optimalSplitPos); - - assert allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 1) : "split pos must be odd when not on block boundary"; - assert !allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 0) : "split pos must be even on block boundary"; - - Interval splitPart = interval.split(optimalSplitPos, allocator); - - splitPart.setInsertMoveWhenActivated(moveNecessary); - - assert splitPart.from() >= currentPosition : "cannot append new interval before current walk position"; - unhandledLists.addToListSortedByStartAndUsePositions(RegisterBinding.Any, splitPart); - - if (Debug.isLogEnabled()) { - Debug.log("left interval %s: %s", moveNecessary ? " " : "", interval.logString(allocator)); - Debug.log("right interval %s: %s", moveNecessary ? "(move)" : "", splitPart.logString(allocator)); - } - } - } - - // split an interval at the optimal position between minSplitPos and - // maxSplitPos in two parts: - // 1) the left part has already a location assigned - // 2) the right part is always on the stack and therefore ignored in further processing - - void splitForSpilling(Interval interval) { - // calculate allowed range of splitting position - int maxSplitPos = currentPosition; - int minSplitPos = Math.max(interval.previousUsage(RegisterPriority.ShouldHaveRegister, maxSplitPos) + 1, interval.from()); - - try (Indent indent = Debug.logAndIndent("splitting and spilling interval %s between %d and %d", interval, minSplitPos, maxSplitPos)) { - - assert interval.state == State.Active : "why spill interval that is not active?"; - assert interval.from() <= minSplitPos : "cannot split before start of interval"; - assert minSplitPos <= maxSplitPos : "invalid order"; - assert maxSplitPos < interval.to() : "cannot split at end end of interval"; - assert currentPosition < interval.to() : "interval must not end before current position"; - - if (minSplitPos == interval.from()) { - // the whole interval is never used, so spill it entirely to memory - - try (Indent indent2 = Debug.logAndIndent("spilling entire interval because split pos is at beginning of interval (use positions: %d)", interval.usePosList().size())) { - - assert interval.firstUsage(RegisterPriority.ShouldHaveRegister) > currentPosition : "interval must not have use position before currentPosition"; - - allocator.assignSpillSlot(interval); - handleSpillSlot(interval); - allocator.changeSpillState(interval, minSplitPos); - - // Also kick parent intervals out of register to memory when they have no use - // position. This avoids short interval in register surrounded by intervals in - // memory . 
avoid useless moves from memory to register and back - Interval parent = interval; - while (parent != null && parent.isSplitChild()) { - parent = parent.getSplitChildBeforeOpId(parent.from()); - - if (isRegister(parent.location())) { - if (parent.firstUsage(RegisterPriority.ShouldHaveRegister) == Integer.MAX_VALUE) { - // parent is never used, so kick it out of its assigned register - Debug.log("kicking out interval %d out of its register because it is never used", parent.operandNumber); - allocator.assignSpillSlot(parent); - handleSpillSlot(parent); - } else { - // do not go further back because the register is actually used by - // the interval - parent = null; - } - } - } - } - - } else { - // search optimal split pos, split interval and spill only the right hand part - int optimalSplitPos = findOptimalSplitPos(interval, minSplitPos, maxSplitPos, false); - - assert minSplitPos <= optimalSplitPos && optimalSplitPos <= maxSplitPos : "out of range"; - assert optimalSplitPos < interval.to() : "cannot split at end of interval"; - assert optimalSplitPos >= interval.from() : "cannot split before start of interval"; - - if (!allocator.isBlockBegin(optimalSplitPos)) { - // move position before actual instruction (odd opId) - optimalSplitPos = (optimalSplitPos - 1) | 1; - } - - try (Indent indent2 = Debug.logAndIndent("splitting at position %d", optimalSplitPos)) { - assert allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 1) : "split pos must be odd when not on block boundary"; - assert !allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 0) : "split pos must be even on block boundary"; - - Interval spilledPart = interval.split(optimalSplitPos, allocator); - allocator.assignSpillSlot(spilledPart); - handleSpillSlot(spilledPart); - allocator.changeSpillState(spilledPart, optimalSplitPos); - - if (!allocator.isBlockBegin(optimalSplitPos)) { - Debug.log("inserting move from interval %d to %d", interval.operandNumber, spilledPart.operandNumber); - insertMove(optimalSplitPos, interval, spilledPart); - } - - // the currentSplitChild is needed later when moves are inserted for reloading - assert spilledPart.currentSplitChild() == interval : "overwriting wrong currentSplitChild"; - spilledPart.makeCurrentSplitChild(); - - if (Debug.isLogEnabled()) { - Debug.log("left interval: %s", interval.logString(allocator)); - Debug.log("spilled interval : %s", spilledPart.logString(allocator)); - } - } - } - } - } - - /** - * This is called for every interval that is assigned to a stack slot. - */ - protected void handleSpillSlot(Interval interval) { - assert interval.location() != null && (interval.canMaterialize() || isStackSlotValue(interval.location())) : "interval not assigned to a stack slot " + interval; - // Do nothing. Stack slots are not processed in this implementation. 
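(Both splitting paths above move a split position that is not a block begin to the odd id just before the instruction it precedes, via (pos - 1) | 1; this relies on instructions being numbered with even ids, which is the assumption in this tiny illustration of the arithmetic:)

// LIR instructions are assumed to carry even ids (0, 2, 4, ...); a split position
// that is not a block begin is moved to the odd slot just before an instruction.
final class SplitPosAdjustment {

    static int beforeInstruction(int splitPos) {
        return (splitPos - 1) | 1;
    }

    public static void main(String[] args) {
        System.out.println(beforeInstruction(8));  // 7: just before the instruction with id 8
        System.out.println(beforeInstruction(7));  // 7: already an odd "between instructions" slot
        System.out.println(beforeInstruction(10)); // 9
    }
}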
- } - - void splitStackInterval(Interval interval) { - int minSplitPos = currentPosition + 1; - int maxSplitPos = Math.min(interval.firstUsage(RegisterPriority.ShouldHaveRegister), interval.to()); - - splitBeforeUsage(interval, minSplitPos, maxSplitPos); - } - - void splitWhenPartialRegisterAvailable(Interval interval, int registerAvailableUntil) { - int minSplitPos = Math.max(interval.previousUsage(RegisterPriority.ShouldHaveRegister, registerAvailableUntil), interval.from() + 1); - splitBeforeUsage(interval, minSplitPos, registerAvailableUntil); - } - - void splitAndSpillInterval(Interval interval) { - assert interval.state == State.Active || interval.state == State.Inactive : "other states not allowed"; - - int currentPos = currentPosition; - if (interval.state == State.Inactive) { - // the interval is currently inactive, so no spill slot is needed for now. - // when the split part is activated, the interval has a new chance to get a register, - // so in the best case no stack slot is necessary - assert interval.hasHoleBetween(currentPos - 1, currentPos + 1) : "interval can not be inactive otherwise"; - splitBeforeUsage(interval, currentPos + 1, currentPos + 1); - - } else { - // search the position where the interval must have a register and split - // at the optimal position before. - // The new created part is added to the unhandled list and will get a register - // when it is activated - int minSplitPos = currentPos + 1; - int maxSplitPos = Math.min(interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos), interval.to()); - - splitBeforeUsage(interval, minSplitPos, maxSplitPos); - - assert interval.nextUsage(RegisterPriority.MustHaveRegister, currentPos) == Integer.MAX_VALUE : "the remaining part is spilled to stack and therefore has no register"; - splitForSpilling(interval); - } - } - - boolean allocFreeRegister(Interval interval) { - try (Indent indent = Debug.logAndIndent("trying to find free register for %s", interval)) { - - initUseLists(true); - freeExcludeActiveFixed(); - freeExcludeActiveAny(); - freeCollectInactiveFixed(interval); - freeCollectInactiveAny(interval); - // freeCollectUnhandled(fixedKind, cur); - assert unhandledLists.get(RegisterBinding.Fixed) == Interval.EndMarker : "must not have unhandled fixed intervals because all fixed intervals have a use at position 0"; - - // usePos contains the start of the next interval that has this register assigned - // (either as a fixed register or a normal allocated register in the past) - // only intervals overlapping with cur are processed, non-overlapping invervals can be - // ignored safely - if (Debug.isLogEnabled()) { - // Enable this logging to see all register states - try (Indent indent2 = Debug.logAndIndent("state of registers:")) { - for (Register register : availableRegs) { - int i = register.number; - Debug.log("reg %d: usePos: %d", register.number, usePos[i]); - } - } - } - - Register hint = null; - Interval locationHint = interval.locationHint(true); - if (locationHint != null && locationHint.location() != null && isRegister(locationHint.location())) { - hint = asRegister(locationHint.location()); - Debug.log("hint register %d from interval %s", hint.number, locationHint); - } - assert interval.location() == null : "register already assigned to interval"; - - // the register must be free at least until this position - int regNeededUntil = interval.from() + 1; - int intervalTo = interval.to(); - - boolean needSplit = false; - int splitPos = -1; - - Register reg = null; - Register minFullReg = null; - 
Register maxPartialReg = null; - - for (int i = 0; i < availableRegs.length; ++i) { - Register availableReg = availableRegs[i]; - int number = availableReg.number; - if (usePos[number] >= intervalTo) { - // this register is free for the full interval - if (minFullReg == null || availableReg.equals(hint) || (usePos[number] < usePos[minFullReg.number] && !minFullReg.equals(hint))) { - minFullReg = availableReg; - } - } else if (usePos[number] > regNeededUntil) { - // this register is at least free until regNeededUntil - if (maxPartialReg == null || availableReg.equals(hint) || (usePos[number] > usePos[maxPartialReg.number] && !maxPartialReg.equals(hint))) { - maxPartialReg = availableReg; - } - } - } - - if (minFullReg != null) { - reg = minFullReg; - } else if (maxPartialReg != null) { - needSplit = true; - reg = maxPartialReg; - } else { - return false; - } - - splitPos = usePos[reg.number]; - interval.assignLocation(reg.asValue(interval.kind())); - Debug.log("selected register %d", reg.number); - - assert splitPos > 0 : "invalid splitPos"; - if (needSplit) { - // register not available for full interval, so split it - splitWhenPartialRegisterAvailable(interval, splitPos); - } - // only return true if interval is completely assigned - return true; - } - } - - void splitAndSpillIntersectingIntervals(Register reg) { - assert reg != null : "no register assigned"; - - for (int i = 0; i < spillIntervals[reg.number].size(); i++) { - Interval interval = spillIntervals[reg.number].get(i); - removeFromList(interval); - splitAndSpillInterval(interval); - } - } - - // Split an Interval and spill it to memory so that cur can be placed in a register - void allocLockedRegister(Interval interval) { - try (Indent indent = Debug.logAndIndent("alloc locked register: need to split and spill to get register for %s", interval)) { - - // collect current usage of registers - initUseLists(false); - spillExcludeActiveFixed(); - // spillBlockUnhandledFixed(cur); - assert unhandledLists.get(RegisterBinding.Fixed) == Interval.EndMarker : "must not have unhandled fixed intervals because all fixed intervals have a use at position 0"; - spillBlockInactiveFixed(interval); - spillCollectActiveAny(); - spillCollectInactiveAny(interval); - - if (Debug.isLogEnabled()) { - try (Indent indent2 = Debug.logAndIndent("state of registers:")) { - for (Register reg : availableRegs) { - int i = reg.number; - try (Indent indent3 = Debug.logAndIndent("reg %d: usePos: %d, blockPos: %d, intervals: ", i, usePos[i], blockPos[i])) { - for (int j = 0; j < spillIntervals[i].size(); j++) { - Debug.log("%d ", spillIntervals[i].get(j).operandNumber); - } - } - } - } - } - - // the register must be free at least until this position - int firstUsage = interval.firstUsage(RegisterPriority.MustHaveRegister); - int regNeededUntil = Math.min(firstUsage, interval.from() + 1); - int intervalTo = interval.to(); - assert regNeededUntil > 0 && regNeededUntil < Integer.MAX_VALUE : "interval has no use"; - - Register reg = null; - Register ignore = interval.location() != null && isRegister(interval.location()) ? asRegister(interval.location()) : null; - for (Register availableReg : availableRegs) { - int number = availableReg.number; - if (availableReg.equals(ignore)) { - // this register must be ignored - } else if (usePos[number] > regNeededUntil) { - if (reg == null || (usePos[number] > usePos[reg.number])) { - reg = availableReg; - } - } - } - - int regUsePos = (reg == null ? 
0 : usePos[reg.number]); - if (regUsePos <= firstUsage) { - Debug.log("able to spill current interval. firstUsage(register): %d, usePos: %d", firstUsage, regUsePos); - - if (firstUsage <= interval.from() + 1) { - assert false : "cannot spill interval that is used in first instruction (possible reason: no register found) firstUsage=" + firstUsage + ", interval.from()=" + interval.from(); - // assign a reasonable register and do a bailout in product mode to avoid errors - allocator.assignSpillSlot(interval); - throw new BailoutException("LinearScan: no register found"); - } - - splitAndSpillInterval(interval); - return; - } - - boolean needSplit = blockPos[reg.number] <= intervalTo; - - int splitPos = blockPos[reg.number]; - - Debug.log("decided to use register %d", reg.number); - assert splitPos > 0 : "invalid splitPos"; - assert needSplit || splitPos > interval.from() : "splitting interval at from"; - - interval.assignLocation(reg.asValue(interval.kind())); - if (needSplit) { - // register not available for full interval : so split it - splitWhenPartialRegisterAvailable(interval, splitPos); - } - - // perform splitting and spilling for all affected intervals - splitAndSpillIntersectingIntervals(reg); - } - } - - boolean noAllocationPossible(Interval interval) { - if (allocator.callKillsRegisters) { - // fast calculation of intervals that can never get a register because the - // the next instruction is a call that blocks all registers - // Note: this only works if a call kills all registers - - // check if this interval is the result of a split operation - // (an interval got a register until this position) - int pos = interval.from(); - if (isOdd(pos)) { - // the current instruction is a call that blocks all registers - if (pos < allocator.maxOpId() && allocator.hasCall(pos + 1) && interval.to() > pos + 1) { - Debug.log("free register cannot be available because all registers blocked by following call"); - - // safety check that there is really no register available - assert !allocFreeRegister(interval) : "found a register for this interval"; - return true; - } - } - } - return false; - } - - void initVarsForAlloc(Interval interval) { - availableRegs = allocator.frameMapBuilder.getRegisterConfig().getAllocatableRegisters(interval.kind().getPlatformKind()); - } - - static boolean isMove(LIRInstruction op, Interval from, Interval to) { - if (op instanceof MoveOp) { - MoveOp move = (MoveOp) op; - if (isVariable(move.getInput()) && isVariable(move.getResult())) { - return move.getInput() != null && move.getInput().equals(from.operand) && move.getResult() != null && move.getResult().equals(to.operand); - } - } - return false; - } - - // optimization (especially for phi functions of nested loops): - // assign same spill slot to non-intersecting intervals - void combineSpilledIntervals(Interval interval) { - if (interval.isSplitChild()) { - // optimization is only suitable for split parents - return; - } - - Interval registerHint = interval.locationHint(false); - if (registerHint == null) { - // cur is not the target of a move : otherwise registerHint would be set - return; - } - assert registerHint.isSplitParent() : "register hint must be split parent"; - - if (interval.spillState() != SpillState.NoOptimization || registerHint.spillState() != SpillState.NoOptimization) { - // combining the stack slots for intervals where spill move optimization is applied - // is not benefitial and would cause problems - return; - } - - int beginPos = interval.from(); - int endPos = interval.to(); - if (endPos 
> allocator.maxOpId() || isOdd(beginPos) || isOdd(endPos)) { - // safety check that lirOpWithId is allowed - return; - } - - if (!isMove(allocator.instructionForId(beginPos), registerHint, interval) || !isMove(allocator.instructionForId(endPos), interval, registerHint)) { - // cur and registerHint are not connected with two moves - return; - } - - Interval beginHint = registerHint.getSplitChildAtOpId(beginPos, LIRInstruction.OperandMode.USE, allocator); - Interval endHint = registerHint.getSplitChildAtOpId(endPos, LIRInstruction.OperandMode.DEF, allocator); - if (beginHint == endHint || beginHint.to() != beginPos || endHint.from() != endPos) { - // registerHint must be split : otherwise the re-writing of use positions does not work - return; - } - - assert beginHint.location() != null : "must have register assigned"; - assert endHint.location() == null : "must not have register assigned"; - assert interval.firstUsage(RegisterPriority.MustHaveRegister) == beginPos : "must have use position at begin of interval because of move"; - assert endHint.firstUsage(RegisterPriority.MustHaveRegister) == endPos : "must have use position at begin of interval because of move"; - - if (isRegister(beginHint.location())) { - // registerHint is not spilled at beginPos : so it would not be benefitial to - // immediately spill cur - return; - } - assert registerHint.spillSlot() != null : "must be set when part of interval was spilled"; - - // modify intervals such that cur gets the same stack slot as registerHint - // delete use positions to prevent the intervals to get a register at beginning - interval.setSpillSlot(registerHint.spillSlot()); - interval.removeFirstUsePos(); - endHint.removeFirstUsePos(); - } - - // allocate a physical register or memory location to an interval - @Override - protected boolean activateCurrent(Interval interval) { - boolean result = true; - - try (Indent indent = Debug.logAndIndent("activating interval %s, splitParent: %d", interval, interval.splitParent().operandNumber)) { - - final Value operand = interval.operand; - if (interval.location() != null && isStackSlotValue(interval.location())) { - // activating an interval that has a stack slot assigned . split it at first use - // position - // used for method parameters - Debug.log("interval has spill slot assigned (method parameter) . split it before first use"); - splitStackInterval(interval); - result = false; - - } else { - if (interval.location() == null) { - // interval has not assigned register . normal allocation - // (this is the normal case for most intervals) - Debug.log("normal allocation of register"); - - // assign same spill slot to non-intersecting intervals - combineSpilledIntervals(interval); - - initVarsForAlloc(interval); - if (noAllocationPossible(interval) || !allocFreeRegister(interval)) { - // no empty register available. 
- // split and spill another interval so that this interval gets a register - allocLockedRegister(interval); - } - - // spilled intervals need not be move to active-list - if (!isRegister(interval.location())) { - result = false; - } - } - } - - // load spilled values that become active from stack slot to register - if (interval.insertMoveWhenActivated()) { - assert interval.isSplitChild(); - assert interval.currentSplitChild() != null; - assert !interval.currentSplitChild().operand.equals(operand) : "cannot insert move between same interval"; - Debug.log("Inserting move from interval %d to %d because insertMoveWhenActivated is set", interval.currentSplitChild().operandNumber, interval.operandNumber); - - insertMove(interval.from(), interval.currentSplitChild(), interval); - } - interval.makeCurrentSplitChild(); - - } - - return result; // true = interval is moved to active list - } - - public void finishAllocation() { - // must be called when all intervals are allocated - moveResolver.resolveAndAppendMoves(); - } -} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LocationMarker.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LocationMarker.java Sat Feb 07 02:34:43 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,221 +0,0 @@ -/* - * Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.compiler.alloc; - -import static com.oracle.graal.api.code.ValueUtil.*; - -import java.util.*; - -import com.oracle.graal.api.code.*; -import com.oracle.graal.api.meta.*; -import com.oracle.graal.compiler.common.cfg.*; -import com.oracle.graal.debug.*; -import com.oracle.graal.lir.*; -import com.oracle.graal.lir.LIRInstruction.OperandFlag; -import com.oracle.graal.lir.LIRInstruction.OperandMode; -import com.oracle.graal.lir.framemap.*; -import com.oracle.graal.options.*; - -public final class LocationMarker { - - public static class Options { - // @formatter:off - @Option(help = "Use decoupled pass for location marking (instead of using LSRA marking)", type = OptionType.Debug) - public static final OptionValue UseLocationMarker = new OptionValue<>(true); - // @formatter:on - } - - /** - * Mark all live references for a frame state. The frame state use this information to build the - * OOP maps. 
- */ - public static void markLocations(LIR lir, FrameMap frameMap) { - new LocationMarker(lir, frameMap).build(); - } - - private final LIR lir; - private final FrameMap frameMap; - private final RegisterAttributes[] registerAttributes; - private final BlockMap liveInMap; - private final BlockMap liveOutMap; - - private LocationMarker(LIR lir, FrameMap frameMap) { - this.lir = lir; - this.frameMap = frameMap; - this.registerAttributes = frameMap.getRegisterConfig().getAttributesMap(); - liveInMap = new BlockMap<>(lir.getControlFlowGraph()); - liveOutMap = new BlockMap<>(lir.getControlFlowGraph()); - } - - private void build() { - Deque> worklist = new ArrayDeque<>(); - for (int i = lir.getControlFlowGraph().getBlocks().size() - 1; i >= 0; i--) { - worklist.add(lir.getControlFlowGraph().getBlocks().get(i)); - } - for (AbstractBlock block : lir.getControlFlowGraph().getBlocks()) { - liveInMap.put(block, frameMap.initReferenceMap(true)); - } - while (!worklist.isEmpty()) { - AbstractBlock block = worklist.poll(); - processBlock(block, worklist); - } - // finish states - for (AbstractBlock block : lir.getControlFlowGraph().getBlocks()) { - List instructions = lir.getLIRforBlock(block); - for (int i = instructions.size() - 1; i >= 0; i--) { - LIRInstruction inst = instructions.get(i); - inst.forEachState((op, info) -> info.finish(op, frameMap)); - } - - } - } - - /** - * Merge outSet with in-set of successors. - */ - private boolean updateOutBlock(AbstractBlock block) { - ReferenceMap union = frameMap.initReferenceMap(true); - block.getSuccessors().forEach(succ -> union.updateUnion(liveInMap.get(succ))); - ReferenceMap outSet = liveOutMap.get(block); - // check if changed - if (outSet == null || !union.equals(outSet)) { - liveOutMap.put(block, union); - return true; - } - return false; - } - - private void processBlock(AbstractBlock block, Deque> worklist) { - if (updateOutBlock(block)) { - try (Indent indent = Debug.logAndIndent("handle block %s", block)) { - BlockClosure closure = new BlockClosure(liveOutMap.get(block).clone()); - List instructions = lir.getLIRforBlock(block); - for (int i = instructions.size() - 1; i >= 0; i--) { - LIRInstruction inst = instructions.get(i); - closure.processInstructionBottomUp(inst); - } - liveInMap.put(block, closure.getCurrentSet()); - worklist.addAll(block.getPredecessors()); - } - } - } - - private static final EnumSet REGISTER_FLAG_SET = EnumSet.of(OperandFlag.REG); - private static final LIRKind REFERENCE_KIND = LIRKind.reference(Kind.Object); - - private void forEachDestroyedCallerSavedRegister(LIRInstruction op, ValueConsumer consumer) { - if (op.destroysCallerSavedRegisters()) { - for (Register reg : frameMap.getRegisterConfig().getCallerSaveRegisters()) { - consumer.visitValue(reg.asValue(REFERENCE_KIND), OperandMode.TEMP, REGISTER_FLAG_SET); - } - } - } - - private final class BlockClosure { - private final ReferenceMap currentSet; - - private BlockClosure(ReferenceMap set) { - currentSet = set; - } - - private ReferenceMap getCurrentSet() { - return currentSet; - } - - /** - * Process all values of an instruction bottom-up, i.e. definitions before usages. Values - * that start or end at the current operation are not included. 
- */ - private void processInstructionBottomUp(LIRInstruction op) { - try (Indent indent = Debug.logAndIndent("handle op %d, %s", op.id(), op)) { - // kills - op.visitEachTemp(this::defConsumer); - op.visitEachOutput(this::defConsumer); - forEachDestroyedCallerSavedRegister(op, this::defConsumer); - - // gen - values that are considered alive for this state - op.visitEachAlive(this::useConsumer); - op.visitEachState(this::useConsumer); - // mark locations - op.forEachState((inst, info) -> markLocation(inst, info, this.getCurrentSet())); - // gen - op.visitEachInput(this::useConsumer); - } - } - - /** - * @see InstructionValueConsumer - * @param operand - * @param mode - * @param flags - */ - private void useConsumer(Value operand, OperandMode mode, EnumSet flags) { - LIRKind kind = operand.getLIRKind(); - if (shouldProcessValue(operand) && !kind.isValue() && !kind.isDerivedReference()) { - // no need to insert values and derived reference - Debug.log("set operand: %s", operand); - frameMap.setReference(operand, currentSet); - } - } - - /** - * @see InstructionValueConsumer - * @param operand - * @param mode - * @param flags - */ - private void defConsumer(Value operand, OperandMode mode, EnumSet flags) { - if (shouldProcessValue(operand)) { - Debug.log("clear operand: %s", operand); - frameMap.clearReference(operand, currentSet); - } else { - assert isIllegal(operand) || operand.getPlatformKind() != Kind.Illegal || mode == OperandMode.TEMP : String.format("Illegal PlatformKind is only allowed for TEMP mode: %s, %s", - operand, mode); - } - } - - protected boolean shouldProcessValue(Value operand) { - return (isRegister(operand) && attributes(asRegister(operand)).isAllocatable() || isStackSlot(operand)) && operand.getPlatformKind() != Kind.Illegal; - } - } - - /** - * This method does the actual marking. - */ - private void markLocation(LIRInstruction op, LIRFrameState info, ReferenceMap refMap) { - if (!info.hasDebugInfo()) { - info.initDebugInfo(frameMap, !op.destroysCallerSavedRegisters() || !frameMap.getRegisterConfig().areAllAllocatableRegistersCallerSaved()); - } - info.updateUnion(refMap); - } - - /** - * Gets an object describing the attributes of a given register according to this register - * configuration. - * - * @see LinearScan#attributes - */ - private RegisterAttributes attributes(Register reg) { - return registerAttributes[reg.number]; - } -} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/MoveResolver.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/MoveResolver.java Sat Feb 07 02:34:43 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,364 +0,0 @@ -/* - * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.compiler.alloc; - -import static com.oracle.graal.api.code.ValueUtil.*; - -import java.util.*; - -import com.oracle.graal.api.code.*; -import com.oracle.graal.api.meta.*; -import com.oracle.graal.debug.*; -import com.oracle.graal.lir.*; - -/** - */ -final class MoveResolver { - - private final LinearScan allocator; - - private int insertIdx; - private LIRInsertionBuffer insertionBuffer; // buffer where moves are inserted - - private final List mappingFrom; - private final List mappingFromOpr; - private final List mappingTo; - private boolean multipleReadsAllowed; - private final int[] registerBlocked; - - private int registerBlocked(int reg) { - return registerBlocked[reg]; - } - - private void setRegisterBlocked(int reg, int direction) { - assert direction == 1 || direction == -1 : "out of bounds"; - registerBlocked[reg] += direction; - } - - void setMultipleReadsAllowed() { - multipleReadsAllowed = true; - } - - boolean hasMappings() { - return mappingFrom.size() > 0; - } - - MoveResolver(LinearScan allocator) { - - this.allocator = allocator; - this.multipleReadsAllowed = false; - this.mappingFrom = new ArrayList<>(8); - this.mappingFromOpr = new ArrayList<>(8); - this.mappingTo = new ArrayList<>(8); - this.insertIdx = -1; - this.insertionBuffer = new LIRInsertionBuffer(); - this.registerBlocked = new int[allocator.registers.length]; - assert checkEmpty(); - } - - boolean checkEmpty() { - assert mappingFrom.size() == 0 && mappingFromOpr.size() == 0 && mappingTo.size() == 0 : "list must be empty before and after processing"; - for (int i = 0; i < allocator.registers.length; i++) { - assert registerBlocked(i) == 0 : "register map must be empty before and after processing"; - } - assert !multipleReadsAllowed : "must have default value"; - return true; - } - - private boolean verifyBeforeResolve() { - assert mappingFrom.size() == mappingFromOpr.size() : "length must be equal"; - assert mappingFrom.size() == mappingTo.size() : "length must be equal"; - assert insertIdx != -1 : "insert position not set"; - - int i; - int j; - if (!multipleReadsAllowed) { - for (i = 0; i < mappingFrom.size(); i++) { - for (j = i + 1; j < mappingFrom.size(); j++) { - assert mappingFrom.get(i) == null || mappingFrom.get(i) != mappingFrom.get(j) : "cannot read from same interval twice"; - } - } - } - - for (i = 0; i < mappingTo.size(); i++) { - for (j = i + 1; j < mappingTo.size(); j++) { - assert mappingTo.get(i) != mappingTo.get(j) : "cannot write to same interval twice"; - } - } - - HashSet usedRegs = new HashSet<>(); - if (!multipleReadsAllowed) { - for (i = 0; i < mappingFrom.size(); i++) { - Interval interval = mappingFrom.get(i); - if (interval != null && !isIllegal(interval.location())) { - boolean unique = usedRegs.add(interval.location()); - assert unique : "cannot read from same register twice"; - } - } - } - - usedRegs.clear(); - for (i = 0; i < mappingTo.size(); i++) { - Interval interval = mappingTo.get(i); - if (isIllegal(interval.location())) { - // After insertion the location may become illegal, so don't check it since multiple - // intervals might be illegal. 
- continue; - } - boolean unique = usedRegs.add(interval.location()); - assert unique : "cannot write to same register twice"; - } - - usedRegs.clear(); - for (i = 0; i < mappingFrom.size(); i++) { - Interval interval = mappingFrom.get(i); - if (interval != null && !isRegister(interval.location())) { - usedRegs.add(interval.location()); - } - } - for (i = 0; i < mappingTo.size(); i++) { - Interval interval = mappingTo.get(i); - assert !usedRegs.contains(interval.location()) || interval.location().equals(mappingFrom.get(i).location()) : "stack slots used in mappingFrom must be disjoint to mappingTo"; - } - - return true; - } - - // mark assignedReg and assignedRegHi of the interval as blocked - private void blockRegisters(Interval interval) { - Value location = interval.location(); - if (isRegister(location)) { - int reg = asRegister(location).number; - assert multipleReadsAllowed || registerBlocked(reg) == 0 : "register already marked as used"; - setRegisterBlocked(reg, 1); - } - } - - // mark assignedReg and assignedRegHi of the interval as unblocked - private void unblockRegisters(Interval interval) { - Value location = interval.location(); - if (isRegister(location)) { - int reg = asRegister(location).number; - assert registerBlocked(reg) > 0 : "register already marked as unused"; - setRegisterBlocked(reg, -1); - } - } - - /** - * Checks if the {@linkplain Interval#location() location} of {@code to} is not blocked or is - * only blocked by {@code from}. - */ - private boolean safeToProcessMove(Interval from, Interval to) { - Value fromReg = from != null ? from.location() : null; - - Value reg = to.location(); - if (isRegister(reg)) { - if (registerBlocked(asRegister(reg).number) > 1 || (registerBlocked(asRegister(reg).number) == 1 && !reg.equals(fromReg))) { - return false; - } - } - - return true; - } - - private void createInsertionBuffer(List list) { - assert !insertionBuffer.initialized() : "overwriting existing buffer"; - insertionBuffer.init(list); - } - - private void appendInsertionBuffer() { - if (insertionBuffer.initialized()) { - insertionBuffer.finish(); - } - assert !insertionBuffer.initialized() : "must be uninitialized now"; - - insertIdx = -1; - } - - private void insertMove(Interval fromInterval, Interval toInterval) { - assert !fromInterval.operand.equals(toInterval.operand) : "from and to interval equal: " + fromInterval; - assert fromInterval.kind().equals(toInterval.kind()) : "move between different types"; - assert insertIdx != -1 : "must setup insert position first"; - - AllocatableValue fromOpr = fromInterval.operand; - AllocatableValue toOpr = toInterval.operand; - - insertionBuffer.append(insertIdx, allocator.ir.getSpillMoveFactory().createMove(toOpr, fromOpr)); - - Debug.log("insert move from %s to %s at %d", fromInterval, toInterval, insertIdx); - } - - private void insertMove(Value fromOpr, Interval toInterval) { - assert fromOpr.getLIRKind().equals(toInterval.kind()) : "move between different types"; - assert insertIdx != -1 : "must setup insert position first"; - - AllocatableValue toOpr = toInterval.operand; - insertionBuffer.append(insertIdx, allocator.ir.getSpillMoveFactory().createMove(toOpr, fromOpr)); - - Debug.log("insert move from value %s to %s at %d", fromOpr, toInterval, insertIdx); - } - - private void resolveMappings() { - assert verifyBeforeResolve(); - - // Block all registers that are used as input operands of a move. - // When a register is blocked, no move to this register is emitted. - // This is necessary for detecting cycles in moves. 
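
// Illustrative aside, not part of this changeset (all names below are made up): the register
// blocking described in the comment above is one way to sequentialize a set of parallel moves.
// A minimal stand-alone sketch of the same idea emits every move whose target is no longer read
// by a pending move and breaks cycles such as r1 <- r2, r2 <- r1 through a scratch location:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

final class ParallelMoveSketch {

    /** Orders parallel moves (dst <- src) so they can be emitted one after another. */
    static List<String> sequentialize(Map<String, String> dstToSrc, String scratch) {
        Map<String, String> pending = new LinkedHashMap<>(dstToSrc);
        List<String> emitted = new ArrayList<>();
        while (!pending.isEmpty()) {
            boolean progress = false;
            for (String dst : new ArrayList<>(pending.keySet())) {
                String src = pending.get(dst);
                if (dst.equals(src)) {
                    pending.remove(dst); // self-move, nothing to emit
                    progress = true;
                } else if (!pending.containsValue(dst)) {
                    // dst is not read by any remaining move, so writing it is safe now.
                    emitted.add("mov " + dst + ", " + src);
                    pending.remove(dst);
                    progress = true;
                }
            }
            if (!progress) {
                // Every remaining target is still read by another move, i.e. there is a cycle.
                // Save one target in the scratch location and redirect its readers there,
                // which makes that move emittable on the next pass.
                String blocked = pending.keySet().iterator().next();
                emitted.add("mov " + scratch + ", " + blocked);
                pending.replaceAll((dst, src) -> src.equals(blocked) ? scratch : src);
            }
        }
        return emitted;
    }

    public static void main(String[] args) {
        Map<String, String> moves = new LinkedHashMap<>();
        moves.put("r1", "r2"); // r1 <- r2
        moves.put("r2", "r1"); // r2 <- r1
        sequentialize(moves, "spillSlot").forEach(System.out::println);
    }
}
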
- int i; - for (i = mappingFrom.size() - 1; i >= 0; i--) { - Interval fromInterval = mappingFrom.get(i); - if (fromInterval != null) { - blockRegisters(fromInterval); - } - } - - int spillCandidate = -1; - while (mappingFrom.size() > 0) { - boolean processedInterval = false; - - for (i = mappingFrom.size() - 1; i >= 0; i--) { - Interval fromInterval = mappingFrom.get(i); - Interval toInterval = mappingTo.get(i); - - if (safeToProcessMove(fromInterval, toInterval)) { - // this interval can be processed because target is free - if (fromInterval != null) { - insertMove(fromInterval, toInterval); - unblockRegisters(fromInterval); - } else { - insertMove(mappingFromOpr.get(i), toInterval); - } - mappingFrom.remove(i); - mappingFromOpr.remove(i); - mappingTo.remove(i); - - processedInterval = true; - } else if (fromInterval != null && isRegister(fromInterval.location())) { - // this interval cannot be processed now because target is not free - // it starts in a register, so it is a possible candidate for spilling - spillCandidate = i; - } - } - - if (!processedInterval) { - // no move could be processed because there is a cycle in the move list - // (e.g. r1 . r2, r2 . r1), so one interval must be spilled to memory - assert spillCandidate != -1 : "no interval in register for spilling found"; - - // create a new spill interval and assign a stack slot to it - Interval fromInterval = mappingFrom.get(spillCandidate); - Interval spillInterval = allocator.createDerivedInterval(fromInterval); - spillInterval.setKind(fromInterval.kind()); - - // add a dummy range because real position is difficult to calculate - // Note: this range is a special case when the integrity of the allocation is - // checked - spillInterval.addRange(1, 2); - - // do not allocate a new spill slot for temporary interval, but - // use spill slot assigned to fromInterval. Otherwise moves from - // one stack slot to another can happen (not allowed by LIRAssembler - StackSlotValue spillSlot = fromInterval.spillSlot(); - if (spillSlot == null) { - spillSlot = allocator.frameMapBuilder.allocateSpillSlot(spillInterval.kind()); - fromInterval.setSpillSlot(spillSlot); - } - spillInterval.assignLocation(spillSlot); - - Debug.log("created new Interval for spilling: %s", spillInterval); - - // insert a move from register to stack and update the mapping - insertMove(fromInterval, spillInterval); - mappingFrom.set(spillCandidate, spillInterval); - unblockRegisters(fromInterval); - } - } - - // reset to default value - multipleReadsAllowed = false; - - // check that all intervals have been processed - assert checkEmpty(); - } - - void setInsertPosition(List insertList, int insertIdx) { - assert this.insertIdx == -1 : "use moveInsertPosition instead of setInsertPosition when data already set"; - - createInsertionBuffer(insertList); - this.insertIdx = insertIdx; - } - - void moveInsertPosition(List newInsertList, int newInsertIdx) { - if (insertionBuffer.lirList() != null && (insertionBuffer.lirList() != newInsertList || this.insertIdx != newInsertIdx)) { - // insert position changed . resolve current mappings - resolveMappings(); - } - - if (insertionBuffer.lirList() != newInsertList) { - // block changed . 
append insertionBuffer because it is - // bound to a specific block and create a new insertionBuffer - appendInsertionBuffer(); - createInsertionBuffer(newInsertList); - } - - this.insertIdx = newInsertIdx; - } - - void addMapping(Interval fromInterval, Interval toInterval) { - - if (isIllegal(toInterval.location()) && toInterval.canMaterialize()) { - Debug.log("no store to rematerializable interval %s needed", toInterval); - return; - } - if (isIllegal(fromInterval.location()) && fromInterval.canMaterialize()) { - // Instead of a reload, re-materialize the value - Value rematValue = fromInterval.getMaterializedValue(); - addMapping(rematValue, toInterval); - return; - } - Debug.log("add move mapping from %s to %s", fromInterval, toInterval); - - assert !fromInterval.operand.equals(toInterval.operand) : "from and to interval equal: " + fromInterval; - assert fromInterval.kind().equals(toInterval.kind()); - mappingFrom.add(fromInterval); - mappingFromOpr.add(Value.ILLEGAL); - mappingTo.add(toInterval); - } - - void addMapping(Value fromOpr, Interval toInterval) { - Debug.log("add move mapping from %s to %s", fromOpr, toInterval); - - assert isConstant(fromOpr) : "only for constants"; - - mappingFrom.add(null); - mappingFromOpr.add(fromOpr); - mappingTo.add(toInterval); - } - - void resolveAndAppendMoves() { - if (hasMappings()) { - resolveMappings(); - } - appendInsertionBuffer(); - } -} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/OptimizingLinearScanWalker.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/OptimizingLinearScanWalker.java Sat Feb 07 02:34:43 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,251 +0,0 @@ -/* - * Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ -package com.oracle.graal.compiler.alloc; - -import static com.oracle.graal.api.code.ValueUtil.*; - -import com.oracle.graal.api.code.*; -import com.oracle.graal.api.meta.*; -import com.oracle.graal.compiler.alloc.Interval.RegisterBinding; -import com.oracle.graal.compiler.alloc.Interval.RegisterBindingLists; -import com.oracle.graal.compiler.alloc.Interval.State; -import com.oracle.graal.compiler.common.cfg.*; -import com.oracle.graal.debug.*; -import com.oracle.graal.debug.Debug.Scope; -import com.oracle.graal.options.*; - -public class OptimizingLinearScanWalker extends LinearScanWalker { - - public static class Options { - // @formatter:off - @Option(help = "Enable LSRA optimization", type = OptionType.Debug) - public static final OptionValue LSRAOptimization = new OptionValue<>(true); - @Option(help = "LSRA optimization: Only split but do not reassign", type = OptionType.Debug) - public static final OptionValue LSRAOptSplitOnly = new OptionValue<>(false); - // @formatter:on - } - - OptimizingLinearScanWalker(LinearScan allocator, Interval unhandledFixedFirst, Interval unhandledAnyFirst) { - super(allocator, unhandledFixedFirst, unhandledAnyFirst); - } - - @Override - protected void handleSpillSlot(Interval interval) { - assert interval.location() != null : "interval not assigned " + interval; - if (interval.canMaterialize()) { - assert !isStackSlotValue(interval.location()) : "interval can materialize but assigned to a stack slot " + interval; - return; - } - assert isStackSlotValue(interval.location()) : "interval not assigned to a stack slot " + interval; - try (Scope s1 = Debug.scope("LSRAOptimization")) { - Debug.log("adding stack to unhandled list %s", interval); - unhandledLists.addToListSortedByStartAndUsePositions(RegisterBinding.Stack, interval); - } - } - - @SuppressWarnings("unused") - private static void printRegisterBindingList(RegisterBindingLists list, RegisterBinding binding) { - for (Interval interval = list.get(binding); interval != Interval.EndMarker; interval = interval.next) { - Debug.log("%s", interval); - } - } - - @Override - void walk() { - try (Scope s = Debug.scope("OptimizingLinearScanWalker")) { - for (AbstractBlock block : allocator.sortedBlocks) { - optimizeBlock(block); - } - } - super.walk(); - } - - private void optimizeBlock(AbstractBlock block) { - if (block.getPredecessorCount() == 1) { - int nextBlock = allocator.getFirstLirInstructionId(block); - try (Scope s1 = Debug.scope("LSRAOptimization")) { - Debug.log("next block: %s (%d)", block, nextBlock); - } - try (Indent indent0 = Debug.indent()) { - walkTo(nextBlock); - - try (Scope s1 = Debug.scope("LSRAOptimization")) { - boolean changed = true; - // we need to do this because the active lists might change - loop: while (changed) { - changed = false; - try (Indent indent1 = Debug.logAndIndent("Active intervals: (block %s [%d])", block, nextBlock)) { - for (Interval active = activeLists.get(RegisterBinding.Any); active != Interval.EndMarker; active = active.next) { - Debug.log("active (any): %s", active); - if (optimize(nextBlock, block, active, RegisterBinding.Any)) { - changed = true; - break loop; - } - } - for (Interval active = activeLists.get(RegisterBinding.Stack); active != Interval.EndMarker; active = active.next) { - Debug.log("active (stack): %s", active); - if (optimize(nextBlock, block, active, RegisterBinding.Stack)) { - changed = true; - break loop; - } - } - } - } - } - } - } - } - - private boolean optimize(int currentPos, AbstractBlock currentBlock, Interval currentInterval, 
RegisterBinding binding) { - // BEGIN initialize and sanity checks - assert currentBlock != null : "block must not be null"; - assert currentInterval != null : "interval must not be null"; - - assert currentBlock.getPredecessorCount() == 1 : "more than one predecessors -> optimization not possible"; - - if (!currentInterval.isSplitChild()) { - // interval is not a split child -> no need for optimization - return false; - } - - if (currentInterval.from() == currentPos) { - // the interval starts at the current position so no need for splitting - return false; - } - - // get current location - AllocatableValue currentLocation = currentInterval.location(); - assert currentLocation != null : "active intervals must have a location assigned!"; - - // get predecessor stuff - AbstractBlock predecessorBlock = currentBlock.getPredecessors().get(0); - int predEndId = allocator.getLastLirInstructionId(predecessorBlock); - Interval predecessorInterval = currentInterval.getIntervalCoveringOpId(predEndId); - assert predecessorInterval != null : "variable not live at the end of the only predecessor! " + predecessorBlock + " -> " + currentBlock + " interval: " + currentInterval; - AllocatableValue predecessorLocation = predecessorInterval.location(); - assert predecessorLocation != null : "handled intervals must have a location assigned!"; - - // END initialize and sanity checks - - if (currentLocation.equals(predecessorLocation)) { - // locations are already equal -> nothing to optimize - return false; - } - - if (!isStackSlotValue(predecessorLocation) && !isRegister(predecessorLocation)) { - assert predecessorInterval.canMaterialize(); - // value is materialized -> no need for optimization - return false; - } - - assert isStackSlotValue(currentLocation) || isRegister(currentLocation) : "current location not a register or stack slot " + currentLocation; - - try (Indent indent = Debug.logAndIndent("location differs: %s vs. 
%s", predecessorLocation, currentLocation)) { - // split current interval at current position - Debug.log("splitting at position %d", currentPos); - - assert allocator.isBlockBegin(currentPos) && ((currentPos & 1) == 0) : "split pos must be even when on block boundary"; - - Interval splitPart = currentInterval.split(currentPos, allocator); - activeLists.remove(binding, currentInterval); - - assert splitPart.from() >= currentPosition : "cannot append new interval before current walk position"; - - // the currentSplitChild is needed later when moves are inserted for reloading - assert splitPart.currentSplitChild() == currentInterval : "overwriting wrong currentSplitChild"; - splitPart.makeCurrentSplitChild(); - - if (Debug.isLogEnabled()) { - Debug.log("left interval : %s", currentInterval.logString(allocator)); - Debug.log("right interval : %s", splitPart.logString(allocator)); - } - - if (Options.LSRAOptSplitOnly.getValue()) { - // just add the split interval to the unhandled list - unhandledLists.addToListSortedByStartAndUsePositions(RegisterBinding.Any, splitPart); - } else { - if (isRegister(predecessorLocation)) { - splitRegisterInterval(splitPart, asRegister(predecessorLocation)); - } else { - assert isStackSlotValue(predecessorLocation); - Debug.log("assigning interval %s to %s", splitPart, predecessorLocation); - splitPart.assignLocation(predecessorLocation); - // activate interval - activeLists.addToListSortedByCurrentFromPositions(RegisterBinding.Stack, splitPart); - splitPart.state = State.Active; - - splitStackInterval(splitPart); - } - } - } - return true; - } - - private void splitRegisterInterval(Interval interval, Register reg) { - // collect current usage of registers - initVarsForAlloc(interval); - initUseLists(false); - spillExcludeActiveFixed(); - // spillBlockUnhandledFixed(cur); - assert unhandledLists.get(RegisterBinding.Fixed) == Interval.EndMarker : "must not have unhandled fixed intervals because all fixed intervals have a use at position 0"; - spillBlockInactiveFixed(interval); - spillCollectActiveAny(); - spillCollectInactiveAny(interval); - - if (Debug.isLogEnabled()) { - try (Indent indent2 = Debug.logAndIndent("state of registers:")) { - for (Register register : availableRegs) { - int i = register.number; - try (Indent indent3 = Debug.logAndIndent("reg %d: usePos: %d, blockPos: %d, intervals: ", i, usePos[i], blockPos[i])) { - for (int j = 0; j < spillIntervals[i].size(); j++) { - Debug.log("%d ", spillIntervals[i].get(j).operandNumber); - } - } - } - } - } - - // the register must be free at least until this position - boolean needSplit = blockPos[reg.number] <= interval.to(); - - int splitPos = blockPos[reg.number]; - - assert splitPos > 0 : "invalid splitPos"; - assert needSplit || splitPos > interval.from() : "splitting interval at from"; - - Debug.log("assigning interval %s to %s", interval, reg); - interval.assignLocation(reg.asValue(interval.kind())); - if (needSplit) { - // register not available for full interval : so split it - splitWhenPartialRegisterAvailable(interval, splitPos); - } - - // perform splitting and spilling for all affected intervals - splitAndSpillIntersectingIntervals(reg); - - // activate interval - activeLists.addToListSortedByCurrentFromPositions(RegisterBinding.Any, interval); - interval.state = State.Active; - - } -} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/Range.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/Range.java Sat Feb 07 
02:34:43 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,116 +0,0 @@ -/* - * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.compiler.alloc; - -/** - * Represents a range of integers from a start (inclusive) to an end (exclusive. - */ -public final class Range { - - public static final Range EndMarker = new Range(Integer.MAX_VALUE, Integer.MAX_VALUE, null); - - /** - * The start of the range, inclusive. - */ - public int from; - - /** - * The end of the range, exclusive. - */ - public int to; - - /** - * A link to allow the range to be put into a singly linked list. - */ - public Range next; - - boolean intersects(Range r) { - return intersectsAt(r) != -1; - } - - /** - * Creates a new range. - * - * @param from the start of the range, inclusive - * @param to the end of the range, exclusive - * @param next link to the next range in a linked list - */ - Range(int from, int to, Range next) { - this.from = from; - this.to = to; - this.next = next; - } - - int intersectsAt(Range other) { - Range r1 = this; - Range r2 = other; - - assert r2 != null : "null ranges not allowed"; - assert r1 != EndMarker && r2 != EndMarker : "empty ranges not allowed"; - - do { - if (r1.from < r2.from) { - if (r1.to <= r2.from) { - r1 = r1.next; - if (r1 == EndMarker) { - return -1; - } - } else { - return r2.from; - } - } else { - if (r2.from < r1.from) { - if (r2.to <= r1.from) { - r2 = r2.next; - if (r2 == EndMarker) { - return -1; - } - } else { - return r1.from; - } - } else { // r1.from() == r2.from() - if (r1.from == r1.to) { - r1 = r1.next; - if (r1 == EndMarker) { - return -1; - } - } else { - if (r2.from == r2.to) { - r2 = r2.next; - if (r2 == EndMarker) { - return -1; - } - } else { - return r1.from; - } - } - } - } - } while (true); - } - - @Override - public String toString() { - return "[" + from + ", " + to + "]"; - } -} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/RegisterVerifier.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/RegisterVerifier.java Sat Feb 07 02:34:43 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,240 +0,0 @@ -/* - * Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.compiler.alloc; - -import static com.oracle.graal.api.code.ValueUtil.*; - -import java.util.*; - -import com.oracle.graal.api.code.*; -import com.oracle.graal.api.meta.*; -import com.oracle.graal.compiler.common.*; -import com.oracle.graal.compiler.common.cfg.*; -import com.oracle.graal.debug.*; -import com.oracle.graal.lir.*; -import com.oracle.graal.lir.LIRInstruction.OperandFlag; -import com.oracle.graal.lir.LIRInstruction.OperandMode; -import com.oracle.graal.phases.util.*; - -/** - */ -final class RegisterVerifier { - - LinearScan allocator; - List> workList; // all blocks that must be processed - ArrayMap savedStates; // saved information of previous check - - // simplified access to methods of LinearScan - Interval intervalAt(Value operand) { - return allocator.intervalFor(operand); - } - - // currently, only registers are processed - int stateSize() { - return allocator.maxRegisterNumber() + 1; - } - - // accessors - Interval[] stateForBlock(AbstractBlock block) { - return savedStates.get(block.getId()); - } - - void setStateForBlock(AbstractBlock block, Interval[] savedState) { - savedStates.put(block.getId(), savedState); - } - - void addToWorkList(AbstractBlock block) { - if (!workList.contains(block)) { - workList.add(block); - } - } - - RegisterVerifier(LinearScan allocator) { - this.allocator = allocator; - workList = new ArrayList<>(16); - this.savedStates = new ArrayMap<>(); - - } - - void verify(AbstractBlock start) { - // setup input registers (method arguments) for first block - Interval[] inputState = new Interval[stateSize()]; - setStateForBlock(start, inputState); - addToWorkList(start); - - // main loop for verification - do { - AbstractBlock block = workList.get(0); - workList.remove(0); - - processBlock(block); - } while (!workList.isEmpty()); - } - - private void processBlock(AbstractBlock block) { - try (Indent indent = Debug.logAndIndent("processBlock B%d", block.getId())) { - // must copy state because it is modified - Interval[] inputState = copy(stateForBlock(block)); - - try (Indent indent2 = Debug.logAndIndent("Input-State of intervals:")) { - for (int i = 0; i < stateSize(); i++) { - if (inputState[i] != null) { - Debug.log(" %4d", inputState[i].operandNumber); - } else { - Debug.log(" __"); - } - } - } - - // process all operations of the block - processOperations(allocator.ir.getLIRforBlock(block), inputState); - - // iterate all successors - for (AbstractBlock succ : block.getSuccessors()) { - processSuccessor(succ, inputState); - } - } - } - - private void processSuccessor(AbstractBlock block, Interval[] inputState) { 
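
// Illustrative aside, not part of this changeset (types below are hypothetical): processSuccessor
// follows the usual verification worklist pattern, merging the incoming per-register state into
// the state saved at the successor and re-queuing the block only when the merge weakened
// something. A stand-alone sketch of that pattern:

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;
import java.util.HashMap;
import java.util.Map;

final class SavedStateMergeSketch {

    private final Map<Integer, String[]> savedStates = new HashMap<>();
    private final Deque<Integer> worklist = new ArrayDeque<>();

    /** Merges the incoming per-register state into the state saved for the block. */
    void mergeInto(int blockId, String[] incoming) {
        String[] saved = savedStates.get(blockId);
        if (saved == null) {
            // First visit: remember a copy of the incoming state and schedule the block.
            savedStates.put(blockId, incoming.clone());
            worklist.add(blockId);
            return;
        }
        boolean changed = false;
        for (int i = 0; i < saved.length; i++) {
            if (saved[i] != null && !saved[i].equals(incoming[i])) {
                saved[i] = null; // the register no longer holds a single known value
                changed = true;
            }
        }
        if (changed) {
            worklist.add(blockId); // weaker state: the block must be visited again
        }
    }

    public static void main(String[] args) {
        SavedStateMergeSketch sketch = new SavedStateMergeSketch();
        sketch.mergeInto(1, new String[]{"v1", "v2"});
        sketch.mergeInto(1, new String[]{"v1", "v3"}); // slot 1 disagrees and is invalidated
        System.out.println(Arrays.toString(sketch.savedStates.get(1)) + " worklist=" + sketch.worklist);
    }
}
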
- Interval[] savedState = stateForBlock(block); - - if (savedState != null) { - // this block was already processed before. - // check if new inputState is consistent with savedState - - boolean savedStateCorrect = true; - for (int i = 0; i < stateSize(); i++) { - if (inputState[i] != savedState[i]) { - // current inputState and previous savedState assume a different - // interval in this register . assume that this register is invalid - if (savedState[i] != null) { - // invalidate old calculation only if it assumed that - // register was valid. when the register was already invalid, - // then the old calculation was correct. - savedStateCorrect = false; - savedState[i] = null; - - Debug.log("processSuccessor B%d: invalidating slot %d", block.getId(), i); - } - } - } - - if (savedStateCorrect) { - // already processed block with correct inputState - Debug.log("processSuccessor B%d: previous visit already correct", block.getId()); - } else { - // must re-visit this block - Debug.log("processSuccessor B%d: must re-visit because input state changed", block.getId()); - addToWorkList(block); - } - - } else { - // block was not processed before, so set initial inputState - Debug.log("processSuccessor B%d: initial visit", block.getId()); - - setStateForBlock(block, copy(inputState)); - addToWorkList(block); - } - } - - static Interval[] copy(Interval[] inputState) { - return inputState.clone(); - } - - static void statePut(Interval[] inputState, Value location, Interval interval) { - if (location != null && isRegister(location)) { - Register reg = asRegister(location); - int regNum = reg.number; - if (interval != null) { - Debug.log("%s = %s", reg, interval.operand); - } else if (inputState[regNum] != null) { - Debug.log("%s = null", reg); - } - - inputState[regNum] = interval; - } - } - - static boolean checkState(Interval[] inputState, Value reg, Interval interval) { - if (reg != null && isRegister(reg)) { - if (inputState[asRegister(reg).number] != interval) { - throw new GraalInternalError("!! 
Error in register allocation: register %s does not contain interval %s but interval %s", reg, interval.operand, inputState[asRegister(reg).number]); - } - } - return true; - } - - void processOperations(List ops, final Interval[] inputState) { - InstructionValueConsumer useConsumer = new InstructionValueConsumer() { - - @Override - public void visitValue(LIRInstruction op, Value operand, OperandMode mode, EnumSet flags) { - // we skip spill moves inserted by the spill position optimization - if (LinearScan.isVariableOrRegister(operand) && allocator.isProcessed(operand) && op.id() != LinearScan.DOMINATOR_SPILL_MOVE_ID) { - Interval interval = intervalAt(operand); - if (op.id() != -1) { - interval = interval.getSplitChildAtOpId(op.id(), mode, allocator); - } - - assert checkState(inputState, interval.location(), interval.splitParent()); - } - } - }; - - InstructionValueConsumer defConsumer = (op, operand, mode, flags) -> { - if (LinearScan.isVariableOrRegister(operand) && allocator.isProcessed(operand)) { - Interval interval = intervalAt(operand); - if (op.id() != -1) { - interval = interval.getSplitChildAtOpId(op.id(), mode, allocator); - } - - statePut(inputState, interval.location(), interval.splitParent()); - } - }; - - // visit all instructions of the block - for (int i = 0; i < ops.size(); i++) { - final LIRInstruction op = ops.get(i); - - if (Debug.isLogEnabled()) { - Debug.log("%s", op.toStringWithIdPrefix()); - } - - // check if input operands are correct - op.visitEachInput(useConsumer); - // invalidate all caller save registers at calls - if (op.destroysCallerSavedRegisters()) { - for (Register r : allocator.frameMapBuilder.getRegisterConfig().getCallerSaveRegisters()) { - statePut(inputState, r.asValue(), null); - } - } - op.visitEachAlive(useConsumer); - // set temp operands (some operations use temp operands also as output operands, so - // can't set them null) - op.visitEachTemp(defConsumer); - // set output operands - op.visitEachOutput(defConsumer); - } - } -} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/gen/NodeLIRBuilder.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/gen/NodeLIRBuilder.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/gen/NodeLIRBuilder.java Sat Feb 07 02:47:00 2015 +0100 @@ -42,6 +42,7 @@ import com.oracle.graal.graph.*; import com.oracle.graal.lir.*; import com.oracle.graal.lir.StandardOp.JumpOp; +import com.oracle.graal.lir.debug.*; import com.oracle.graal.lir.gen.*; import com.oracle.graal.lir.gen.LIRGenerator.Options; import com.oracle.graal.nodes.*; @@ -81,7 +82,7 @@ @MatchableNode(nodeClass = XorNode.class, inputs = {"x", "y"}, commutative = true) @MatchableNode(nodeClass = PiNode.class, inputs = {"object"}) @MatchableNode(nodeClass = ConstantLocationNode.class, shareable = true) -public abstract class NodeLIRBuilder implements NodeLIRBuilderTool { +public abstract class NodeLIRBuilder implements NodeLIRBuilderTool, LIRGenerationDebugContext { private final NodeMap nodeOperands; private final DebugInfoBuilder debugInfoBuilder; @@ -143,6 +144,11 @@ } @Override + public Object getSourceForOperand(Value value) { + return valueForOperand(value); + } + + @Override public Value setResult(ValueNode x, Value operand) { assert (!isRegister(operand) || !gen.attributes(asRegister(operand)).isAllocatable()); assert nodeOperands != null && (nodeOperands.get(x) == null || nodeOperands.get(x) instanceof ComplexMatchValue) : "operand 
cannot be set twice"; diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.graph/src/com/oracle/graal/graph/NodeClass.java --- a/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/NodeClass.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/NodeClass.java Sat Feb 07 02:47:00 2015 +0100 @@ -166,7 +166,7 @@ isLeafNode = inputs.getCount() + successors.getCount() == 0; canGVN = Node.ValueNumberable.class.isAssignableFrom(clazz); - startGVNNumber = clazz.hashCode(); + startGVNNumber = clazz.getName().hashCode(); NodeInfo info = getAnnotationTimed(clazz, NodeInfo.class); assert info != null : "Missing NodeInfo annotation on " + clazz; diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java --- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java Sat Feb 07 02:47:00 2015 +0100 @@ -527,7 +527,7 @@ } } - public Value emitCompareAndSwap(Value address, Value expectedValue, Value newValue, Value trueValue, Value falseValue) { + public Variable emitCompareAndSwap(Value address, Value expectedValue, Value newValue, Value trueValue, Value falseValue) { LIRKind kind = newValue.getLIRKind(); assert kind.equals(expectedValue.getLIRKind()); Kind memKind = (Kind) kind.getPlatformKind(); diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLIRGenerator.java --- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLIRGenerator.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLIRGenerator.java Sat Feb 07 02:47:00 2015 +0100 @@ -172,7 +172,7 @@ LIRKind wordKind = LIRKind.value(getProviders().getCodeCache().getTarget().wordKind); RegisterValue thread = getProviders().getRegisters().getThreadRegister().asValue(wordKind); SPARCAddressValue pendingDeoptAddress = new SPARCAddressValue(wordKind, thread, offset); - append(new StoreOp(v.getKind(), pendingDeoptAddress, emitMove(v), null)); + append(new StoreOp(v.getKind(), pendingDeoptAddress, load(v), null)); } @Override @@ -220,16 +220,15 @@ append(new StoreOp((Kind) kind.getPlatformKind(), storeAddress, input, state)); } - public Value emitCompareAndSwap(Value address, Value expectedValue, Value newValue, Value trueValue, Value falseValue) { - Variable newValueTemp = newVariable(newValue.getLIRKind()); - emitMove(newValueTemp, newValue); + public Variable emitCompareAndSwap(Value address, Value expectedValue, Value newValue, Value trueValue, Value falseValue) { LIRKind kind = newValue.getLIRKind(); assert kind.equals(expectedValue.getLIRKind()); Kind memKind = (Kind) kind.getPlatformKind(); SPARCAddressValue addressValue = asAddressValue(address); - append(new CompareAndSwapOp(asAllocatable(addressValue), asAllocatable(expectedValue), asAllocatable(newValueTemp))); - return emitConditionalMove(memKind, expectedValue, newValueTemp, Condition.EQ, true, trueValue, falseValue); + Variable result = newVariable(newValue.getLIRKind()); + append(new CompareAndSwapOp(result, asAllocatable(addressValue), asAllocatable(expectedValue), asAllocatable(newValue))); + return emitConditionalMove(memKind, expectedValue, result, Condition.EQ, true, trueValue, falseValue); } public 
StackSlot getDeoptimizationRescueSlot() { diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotNodeLIRBuilder.java --- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotNodeLIRBuilder.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotNodeLIRBuilder.java Sat Feb 07 02:47:00 2015 +0100 @@ -76,8 +76,6 @@ Value offset = operand(x.offset()); Variable cmpValue = (Variable) gen.loadNonConst(operand(x.expectedValue())); Variable newValue = gen.load(operand(x.newValue())); - Variable newValueTemp = gen.newVariable(newValue.getLIRKind()); - getGen().emitMove(newValueTemp, newValue); LIRKind kind = cmpValue.getLIRKind(); assert kind.equals(newValue.getLIRKind()); @@ -92,8 +90,9 @@ } } - append(new CompareAndSwapOp(address, cmpValue, newValueTemp)); - setResult(x, gen.emitMove(newValueTemp)); + Variable result = gen.newVariable(newValue.getLIRKind()); + append(new CompareAndSwapOp(result, address, cmpValue, newValue)); + setResult(x, result); } @Override diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotSuitesProvider.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotSuitesProvider.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotSuitesProvider.java Sat Feb 07 02:47:00 2015 +0100 @@ -26,11 +26,14 @@ import java.util.function.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.compiler.common.*; import com.oracle.graal.hotspot.*; import com.oracle.graal.hotspot.bridge.*; import com.oracle.graal.hotspot.phases.*; import com.oracle.graal.java.*; import com.oracle.graal.java.GraphBuilderConfiguration.DebugInfoMode; +import com.oracle.graal.java.GraphBuilderPlugins.InlineInvokePlugin; import com.oracle.graal.options.*; import com.oracle.graal.phases.*; import com.oracle.graal.phases.tiers.*; @@ -84,7 +87,12 @@ protected PhaseSuite createGraphBuilderSuite() { PhaseSuite suite = new PhaseSuite<>(); GraphBuilderConfiguration config = GraphBuilderConfiguration.getDefault(); - config = config.withInlineTrivial(true); + config.setInlineInvokePlugin(new InlineInvokePlugin() { + public boolean shouldInlineInvoke(ResolvedJavaMethod method, int depth) { + return GraalOptions.InlineDuringParsing.getValue() && method.getCode().length <= GraalOptions.TrivialInliningSize.getValue() && + depth < GraalOptions.InlineDuringParsingMaxDepth.getValue(); + } + }); suite.appendPhase(new GraphBuilderPhase(config)); return suite; } diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/CurrentLockNode.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/CurrentLockNode.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/CurrentLockNode.java Sat Feb 07 02:47:00 2015 +0100 @@ -49,7 +49,7 @@ HotSpotLIRGenerator hsGen = (HotSpotLIRGenerator) gen.getLIRGeneratorTool(); StackSlotValue slot = hsGen.getLockSlot(lockDepth); // The register allocator cannot handle stack -> register moves so we use an LEA here - Value result = gen.getLIRGeneratorTool().emitMove(gen.getLIRGeneratorTool().emitAddress(slot)); + Value result = gen.getLIRGeneratorTool().emitAddress(slot); gen.setResult(this, result); } diff -r 3059a52d9614 -r 98967b613c88 
graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/HotSpotGraphBuilderPluginsProvider.java --- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/HotSpotGraphBuilderPluginsProvider.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/HotSpotGraphBuilderPluginsProvider.java Sat Feb 07 02:47:00 2015 +0100 @@ -22,9 +22,15 @@ */ package com.oracle.graal.hotspot.replacements; +import static com.oracle.graal.java.GraphBuilderContext.*; + import com.oracle.graal.api.meta.*; import com.oracle.graal.api.runtime.*; +import com.oracle.graal.compiler.common.type.*; import com.oracle.graal.java.*; +import com.oracle.graal.java.GraphBuilderPlugins.InvocationPlugin; +import com.oracle.graal.java.GraphBuilderPlugins.Registration; +import com.oracle.graal.java.GraphBuilderPlugins.Registration.Receiver; import com.oracle.graal.nodes.*; import com.oracle.graal.nodes.extended.*; import com.oracle.graal.nodes.spi.*; @@ -35,28 +41,21 @@ @ServiceProvider(GraphBuilderPluginsProvider.class) public class HotSpotGraphBuilderPluginsProvider implements GraphBuilderPluginsProvider { public void registerPlugins(MetaAccessProvider metaAccess, GraphBuilderPlugins plugins) { - plugins.register(metaAccess, ObjectPlugin.class); - } - - /** - * HotSpot specific plugins for {@link Object}. - */ - enum ObjectPlugin implements GraphBuilderPlugin { - getClass() { - public boolean handleInvocation(GraphBuilderContext builder, ValueNode[] args) { - assert args.length == 1; - ValueNode rcvr = args[0]; - GuardingPiNode pi = builder.append(new GuardingPiNode(rcvr)); - StampProvider stampProvider = builder.getStampProvider(); - LoadHubNode hub = builder.append(new LoadHubNode(stampProvider, pi)); - HubGetClassNode mirror = builder.append(new HubGetClassNode(builder.getMetaAccess(), hub)); + Registration r = new Registration(plugins, metaAccess, Object.class); + r.register1("getClass", Receiver.class, new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder, ValueNode rcvr) { + ObjectStamp objectStamp = (ObjectStamp) rcvr.stamp(); + ValueNode mirror; + if (objectStamp.isExactType() && objectStamp.nonNull()) { + mirror = builder.append(ConstantNode.forConstant(objectStamp.type().getJavaClass(), metaAccess)); + } else { + StampProvider stampProvider = builder.getStampProvider(); + LoadHubNode hub = builder.append(new LoadHubNode(stampProvider, nullCheckedValue(builder, rcvr))); + mirror = builder.append(new HubGetClassNode(builder.getMetaAccess(), hub)); + } builder.push(Kind.Object, mirror); return true; } - }; - - public ResolvedJavaMethod getInvocationTarget(MetaAccessProvider metaAccess) { - return GraphBuilderPlugin.resolveTarget(metaAccess, Object.class, name()); - } + }); } } diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.java/src/com/oracle/graal/java/AbstractBytecodeParser.java --- a/graal/com.oracle.graal.java/src/com/oracle/graal/java/AbstractBytecodeParser.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.java/src/com/oracle/graal/java/AbstractBytecodeParser.java Sat Feb 07 02:47:00 2015 +0100 @@ -36,6 +36,7 @@ import com.oracle.graal.compiler.common.calc.*; import com.oracle.graal.debug.*; import com.oracle.graal.java.BciBlockMapping.BciBlock; +import com.oracle.graal.nodes.*; import com.oracle.graal.options.*; import com.oracle.graal.phases.*; @@ -687,7 +688,10 @@ Kind kind = field.getKind(); T receiver = frameState.apop(); if ((field instanceof ResolvedJavaField) && 
((ResolvedJavaField) field).getDeclaringClass().isInitialized()) { - appendOptimizedLoadField(kind, genLoadField(receiver, (ResolvedJavaField) field)); + GraphBuilderPlugins.LoadFieldPlugin loadFieldPlugin = this.graphBuilderConfig.getLoadFieldPlugin(); + if (loadFieldPlugin == null || !loadFieldPlugin.apply((GraphBuilderContext) this, (ValueNode) receiver, (ResolvedJavaField) field)) { + appendOptimizedLoadField(kind, genLoadField(receiver, (ResolvedJavaField) field)); + } } else { handleUnresolvedLoadField(field, receiver); } @@ -733,7 +737,10 @@ private void genGetStatic(JavaField field) { Kind kind = field.getKind(); if (field instanceof ResolvedJavaField && ((ResolvedJavaType) field.getDeclaringClass()).isInitialized()) { - appendOptimizedLoadField(kind, genLoadField(null, (ResolvedJavaField) field)); + GraphBuilderPlugins.LoadFieldPlugin loadFieldPlugin = this.graphBuilderConfig.getLoadFieldPlugin(); + if (loadFieldPlugin == null || !loadFieldPlugin.apply((GraphBuilderContext) this, (ResolvedJavaField) field)) { + appendOptimizedLoadField(kind, genLoadField(null, (ResolvedJavaField) field)); + } } else { handleUnresolvedLoadField(field, null); } diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.java/src/com/oracle/graal/java/BciBlockMapping.java --- a/graal/com.oracle.graal.java/src/com/oracle/graal/java/BciBlockMapping.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.java/src/com/oracle/graal/java/BciBlockMapping.java Sat Feb 07 02:47:00 2015 +0100 @@ -82,6 +82,7 @@ public boolean isExceptionEntry; public boolean isLoopHeader; public int loopId; + public int loopEnd; /** * XXX to be removed - currently only used by baseline compiler. @@ -89,8 +90,10 @@ public Loop loop; public boolean isLoopEnd; - public FixedWithNextNode firstInstruction; - public AbstractFrameStateBuilder entryState; + private FixedWithNextNode firstInstruction; + private AbstractFrameStateBuilder entryState; + private FixedWithNextNode[] firstInstructionArray; + private AbstractFrameStateBuilder[] entryStateArray; private boolean visited; private boolean active; @@ -329,6 +332,66 @@ public void setJsrReturnBci(int bci) { this.getOrCreateJSRData().jsrReturnBci = bci; } + + public FixedWithNextNode getFirstInstruction(int dimension) { + if (dimension == 0) { + return firstInstruction; + } else { + if (firstInstructionArray != null && dimension - 1 < firstInstructionArray.length) { + return firstInstructionArray[dimension - 1]; + } else { + return null; + } + } + } + + public void setFirstInstruction(int dimension, FixedWithNextNode firstInstruction) { + if (dimension == 0) { + this.firstInstruction = firstInstruction; + } else { + if (firstInstructionArray == null) { + firstInstructionArray = new FixedWithNextNode[4]; + } + if (dimension - 1 < firstInstructionArray.length) { + // We are within bounds. + } else { + // We are out of bounds. 
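
// Illustrative aside, not part of this changeset (class below is hypothetical): the new
// per-dimension firstInstruction/entryState storage keeps dimension 0 in a plain field and
// spills higher dimensions into an array that is allocated lazily and grown on demand, as in
// the copyOf call that follows. A minimal sketch of that pattern:

import java.util.Arrays;

final class DimensionSlotsSketch<T> {

    private T slot0;       // dimension 0, by far the common case
    private T[] overflow;  // dimensions 1..n, allocated and grown only when needed

    @SuppressWarnings("unchecked")
    void set(int dimension, T value) {
        if (dimension == 0) {
            slot0 = value;
            return;
        }
        if (overflow == null) {
            overflow = (T[]) new Object[4];
        }
        if (dimension - 1 >= overflow.length) {
            // Grow to at least the requested dimension, doubling to amortize the copies.
            overflow = Arrays.copyOf(overflow, Math.max(overflow.length * 2, dimension));
        }
        overflow[dimension - 1] = value;
    }

    T get(int dimension) {
        if (dimension == 0) {
            return slot0;
        }
        if (overflow == null || dimension - 1 >= overflow.length) {
            return null;
        }
        return overflow[dimension - 1];
    }

    public static void main(String[] args) {
        DimensionSlotsSketch<String> slots = new DimensionSlotsSketch<>();
        slots.set(0, "a");
        slots.set(7, "b"); // forces the overflow array to grow
        System.out.println(slots.get(0) + " " + slots.get(7) + " " + slots.get(3));
    }
}
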
+ firstInstructionArray = Arrays.copyOf(firstInstructionArray, Math.max(firstInstructionArray.length * 2, dimension)); + } + + firstInstructionArray[dimension - 1] = firstInstruction; + } + } + + public AbstractFrameStateBuilder getEntryState(int dimension) { + if (dimension == 0) { + return entryState; + } else { + if (entryStateArray != null && dimension - 1 < entryStateArray.length) { + return entryStateArray[dimension - 1]; + } else { + return null; + } + } + } + + public void setEntryState(int dimension, AbstractFrameStateBuilder entryState) { + if (dimension == 0) { + this.entryState = entryState; + } else { + if (entryStateArray == null) { + entryStateArray = new AbstractFrameStateBuilder[4]; + } + if (dimension - 1 < entryStateArray.length) { + // We are within bounds. + } else { + // We are out of bounds. + entryStateArray = Arrays.copyOf(entryStateArray, Math.max(entryStateArray.length * 2, dimension)); + } + + entryStateArray[dimension - 1] = entryState; + } + } } public static class ExceptionDispatchBlock extends BciBlock { @@ -358,14 +421,16 @@ private final boolean doLivenessAnalysis; public LocalLiveness liveness; private int blocksNotYetAssignedId; + private final boolean consecutiveLoopBlocks; /** * Creates a new BlockMap instance from bytecode of the given method . * * @param method the compiler interface method containing the code */ - private BciBlockMapping(ResolvedJavaMethod method, boolean doLivenessAnalysis) { + private BciBlockMapping(ResolvedJavaMethod method, boolean doLivenessAnalysis, boolean consecutiveLoopBlocks) { this.doLivenessAnalysis = doLivenessAnalysis; + this.consecutiveLoopBlocks = consecutiveLoopBlocks; this.method = method; this.exceptionHandlers = method.getExceptionHandlers(); this.stream = new BytecodeStream(method.getCode()); @@ -737,6 +802,7 @@ } private void computeBlockOrder() { + int maxBlocks = blocksNotYetAssignedId; this.blocks = new BciBlock[blocksNotYetAssignedId]; long loop = computeBlockOrder(blockMap[0]); @@ -747,23 +813,49 @@ throw new BailoutException("Non-reducible loop"); } - if (blocks[0] == null) { - purgeLeadingNullBlocks(); + if (blocks[0] != null && this.nextLoop == 0) { + // No unreached blocks and no loops + for (int i = 0; i < blocks.length; ++i) { + blocks[i].setId(i); + } + return; } + + // Purge null entries for unreached blocks and sort blocks such that loop bodies are always + // consecutively in the array. + int blockCount = maxBlocks - blocksNotYetAssignedId; + BciBlock[] newBlocks = new BciBlock[blockCount]; + int next = 0; + for (int i = 0; i < blocks.length; ++i) { + BciBlock b = blocks[i]; + if (b != null) { + b.setId(next); + newBlocks[next++] = b; + if (consecutiveLoopBlocks && b.isLoopHeader) { + next = handleLoopHeader(newBlocks, next, i, b); + } + } + } + blocks = newBlocks; } - private void purgeLeadingNullBlocks() { - // Purge leading null values due to unreachable blocks. 
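
// Illustrative aside, not part of this changeset (Block type below is hypothetical): with
// consecutiveLoopBlocks enabled, the rewritten computeBlockOrder above purges the entries of
// unreached blocks and places every block of a loop directly after its loop header, recursing
// into nested loops via handleLoopHeader. A stand-alone sketch of that grouping step, assuming
// loop membership is kept as a bit set and every member appears after its header in the input:

import java.util.ArrayList;
import java.util.List;

final class LoopGroupingSketch {

    static final class Block {
        final String name;
        final boolean isLoopHeader;
        final int loopId;   // only meaningful when isLoopHeader is true
        final long loops;   // bit i is set if the block belongs to loop i

        Block(String name, boolean isLoopHeader, int loopId, long loops) {
            this.name = name;
            this.isLoopHeader = isLoopHeader;
            this.loopId = loopId;
            this.loops = loops;
        }
    }

    /** Returns the blocks with nulls (unreached blocks) removed and loop bodies made contiguous. */
    static List<Block> order(Block[] input) {
        Block[] work = input.clone();
        List<Block> result = new ArrayList<>();
        for (int i = 0; i < work.length; i++) {
            Block b = work[i];
            if (b != null) {
                work[i] = null;
                result.add(b);
                if (b.isLoopHeader) {
                    placeLoopBody(work, result, i, b);
                }
            }
        }
        return result;
    }

    private static void placeLoopBody(Block[] work, List<Block> result, int headerIndex, Block header) {
        for (int j = headerIndex + 1; j < work.length; j++) {
            Block other = work[j];
            if (other != null && (other.loops & (1L << header.loopId)) != 0) {
                work[j] = null;
                result.add(other);
                if (other.isLoopHeader) {
                    placeLoopBody(work, result, j, other); // nested loop: keep its body contiguous too
                }
            }
        }
    }

    public static void main(String[] args) {
        Block entry = new Block("entry", false, -1, 0L);
        Block header = new Block("loopHeader", true, 0, 1L);
        Block exit = new Block("exit", false, -1, 0L);
        Block body = new Block("loopBody", false, -1, 1L);
        // 'exit' sits between the header and the body in the input order; grouping moves it after the loop.
        List<Block> ordered = order(new Block[]{entry, header, null, exit, body});
        ordered.forEach(b -> System.out.println(b.name));
    }
}
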
- int i = 0; - for (; i < blocks.length; ++i) { - if (blocks[i] != null) { - break; + private int handleLoopHeader(BciBlock[] newBlocks, int nextStart, int i, BciBlock loopHeader) { + int next = nextStart; + int endOfLoop = nextStart - 1; + for (int j = i + 1; j < blocks.length; ++j) { + BciBlock other = blocks[j]; + if (other != null && (other.loops & (1L << loopHeader.loopId)) != 0) { + other.setId(next); + endOfLoop = next; + newBlocks[next++] = other; + blocks[j] = null; + if (other.isLoopHeader) { + next = handleLoopHeader(newBlocks, next, j, other); + } } } - blocks = Arrays.copyOfRange(blocks, i, blocks.length); - for (i = 0; i < blocks.length; ++i) { - blocks[i].setId(i); - } + loopHeader.loopEnd = endOfLoop; + return next; } public void log(String name) { @@ -885,6 +977,7 @@ if (successor.active) { // Reached block via backward branch. block.isLoopEnd = true; + loops |= (1L << successor.loopId); } } @@ -898,7 +991,6 @@ block.active = false; blocksNotYetAssignedId--; blocks[blocksNotYetAssignedId] = block; - block.setId(blocksNotYetAssignedId); return loops; } @@ -988,6 +1080,11 @@ public abstract boolean localIsLiveIn(BciBlock block, int local); /** + * Returns whether the local is set in the given loop. + */ + public abstract boolean localIsChangedInLoop(int loopId, int local); + + /** * Returns whether the local is live at the end of the given block. */ public abstract boolean localIsLiveOut(BciBlock block, int local); @@ -1042,6 +1139,7 @@ return; } int blockID = block.getId(); + int localIndex; stream.setBCI(block.startBci); while (stream.currentBCI() <= block.endBci) { switch (stream.currentBC()) { @@ -1065,8 +1163,12 @@ case DLOAD_3: loadTwo(blockID, 3); break; + case IINC: + localIndex = stream.readLocalIndex(); + loadOne(blockID, localIndex); + storeOne(blockID, localIndex); + break; case ILOAD: - case IINC: case FLOAD: case ALOAD: case RET: @@ -1154,8 +1256,8 @@ } } - public static BciBlockMapping create(ResolvedJavaMethod method, boolean doLivenessAnalysis) { - BciBlockMapping map = new BciBlockMapping(method, doLivenessAnalysis); + public static BciBlockMapping create(ResolvedJavaMethod method, boolean doLivenessAnalysis, boolean consecutiveLoopBlocks) { + BciBlockMapping map = new BciBlockMapping(method, doLivenessAnalysis, consecutiveLoopBlocks); map.build(); if (Debug.isDumpEnabled()) { Debug.dump(map, method.format("After block building %f %R %H.%n(%P)")); @@ -1173,6 +1275,7 @@ private final long[] localsLiveOut; private final long[] localsLiveGen; private final long[] localsLiveKill; + private final long[] localsChangedInLoop; public SmallLocalLiveness() { int blockSize = blocks.length; @@ -1180,6 +1283,7 @@ localsLiveOut = new long[blockSize]; localsLiveGen = new long[blockSize]; localsLiveKill = new long[blockSize]; + localsChangedInLoop = new long[BciBlockMapping.this.nextLoop]; } private String debugString(long value) { @@ -1246,6 +1350,17 @@ if ((localsLiveGen[blockID] & bit) == 0L) { localsLiveKill[blockID] |= bit; } + + BciBlock block = blocks[blockID]; + long tmp = block.loops; + int pos = 0; + while (tmp != 0) { + if ((tmp & 1L) == 1L) { + this.localsChangedInLoop[pos] |= bit; + } + tmp >>= 1; + ++pos; + } } @Override @@ -1259,6 +1374,11 @@ int blockID = block.getId(); return blockID >= Integer.MAX_VALUE ? 
false : (localsLiveOut[blockID] & (1L << local)) != 0L; } + + @Override + public boolean localIsChangedInLoop(int loopId, int local) { + return (localsChangedInLoop[loopId] & (1L << local)) != 0L; + } } public final class LargeLocalLiveness extends LocalLiveness { @@ -1266,6 +1386,7 @@ private BitSet[] localsLiveOut; private BitSet[] localsLiveGen; private BitSet[] localsLiveKill; + private BitSet[] localsChangedInLoop; public LargeLocalLiveness() { int blocksSize = blocks.length; @@ -1273,11 +1394,16 @@ localsLiveOut = new BitSet[blocksSize]; localsLiveGen = new BitSet[blocksSize]; localsLiveKill = new BitSet[blocksSize]; + int maxLocals = method.getMaxLocals(); for (int i = 0; i < blocksSize; i++) { - localsLiveIn[i] = new BitSet(method.getMaxLocals()); - localsLiveOut[i] = new BitSet(method.getMaxLocals()); - localsLiveGen[i] = new BitSet(method.getMaxLocals()); - localsLiveKill[i] = new BitSet(method.getMaxLocals()); + localsLiveIn[i] = new BitSet(maxLocals); + localsLiveOut[i] = new BitSet(maxLocals); + localsLiveGen[i] = new BitSet(maxLocals); + localsLiveKill[i] = new BitSet(maxLocals); + } + localsChangedInLoop = new BitSet[nextLoop]; + for (int i = 0; i < nextLoop; ++i) { + localsChangedInLoop[i] = new BitSet(maxLocals); } } @@ -1332,6 +1458,17 @@ if (!localsLiveGen[blockID].get(local)) { localsLiveKill[blockID].set(local); } + + BciBlock block = blocks[blockID]; + long tmp = block.loops; + int pos = 0; + while (tmp != 0) { + if ((tmp & 1L) == 1L) { + this.localsChangedInLoop[pos].set(local); + } + tmp >>= 1; + ++pos; + } } @Override @@ -1343,6 +1480,11 @@ public boolean localIsLiveOut(BciBlock block, int local) { return block.getId() >= Integer.MAX_VALUE ? true : localsLiveOut[block.getId()].get(local); } + + @Override + public boolean localIsChangedInLoop(int loopId, int local) { + return localsChangedInLoop[loopId].get(local); + } } public BciBlock[] getLoopHeaders() { diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.java/src/com/oracle/graal/java/DefaultGraphBuilderPlugins.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.java/src/com/oracle/graal/java/DefaultGraphBuilderPlugins.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.java; + +import java.util.*; +import java.util.stream.*; + +import com.oracle.graal.api.meta.*; + +/** + * Default implementation of {@link GraphBuilderPlugins} that uses a map. 
+ */ +public class DefaultGraphBuilderPlugins implements GraphBuilderPlugins { + + private final Map plugins = new HashMap<>(); + + /** + * Registers an invocation plugin for a given method. There must be no plugin currently + * registered for {@code method}. + */ + public void register(ResolvedJavaMethod method, InvocationPlugin plugin) { + assert InvocationPluginChecker.check(method, plugin); + GraphBuilderPlugin oldValue = plugins.put(method, plugin); + // System.out.println("registered: " + plugin); + assert oldValue == null; + } + + /** + * Gets the plugin for a given method. + * + * @param method the method to lookup + * @return the plugin associated with {@code method} or {@code null} if none exists + */ + public InvocationPlugin lookupInvocation(ResolvedJavaMethod method) { + return plugins.get(method); + } + + @Override + public String toString() { + return plugins.keySet().stream().map(m -> m.format("%H.%n(%p)")).collect(Collectors.joining(", ")); + } +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.java/src/com/oracle/graal/java/DefaultGraphBuilderPluginsProvider.java --- a/graal/com.oracle.graal.java/src/com/oracle/graal/java/DefaultGraphBuilderPluginsProvider.java Sat Feb 07 02:34:43 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,110 +0,0 @@ -/* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.java; - -import com.oracle.graal.api.meta.*; -import com.oracle.graal.api.runtime.*; -import com.oracle.graal.nodes.*; -import com.oracle.graal.nodes.extended.*; -import com.oracle.graal.nodes.java.*; - -/** - * Provider of non-runtime specific {@link GraphBuilderPlugin}s. - */ -@ServiceProvider(GraphBuilderPluginsProvider.class) -public class DefaultGraphBuilderPluginsProvider implements GraphBuilderPluginsProvider { - public void registerPlugins(MetaAccessProvider metaAccess, GraphBuilderPlugins plugins) { - plugins.register(metaAccess, ObjectPlugin.class); - plugins.register(metaAccess, BoxingPlugin.class); - } - - /** - * Plugins for {@link Object}. 
- */ - enum ObjectPlugin implements GraphBuilderPlugin { - init() { - public boolean handleInvocation(GraphBuilderContext builder, ValueNode[] args) { - ValueNode object = args[0]; - if (RegisterFinalizerNode.mayHaveFinalizer(object, builder.getAssumptions())) { - builder.append(new RegisterFinalizerNode(object)); - } - return true; - } - - public ResolvedJavaMethod getInvocationTarget(MetaAccessProvider metaAccess) { - return GraphBuilderPlugin.resolveTarget(metaAccess, Object.class, ""); - } - }; - - @Override - public String toString() { - return Object.class.getName() + "." + name() + "()"; - } - } - - /** - * Plugins for the standard primitive box classes (e.g., {@link Integer} and friends). - */ - enum BoxingPlugin implements GraphBuilderPlugin { - valueOf$Boolean(Kind.Boolean), - booleanValue$Boolean(Kind.Boolean), - valueOf$Byte(Kind.Byte), - byteValue$Byte(Kind.Byte), - valueOf$Short(Kind.Short), - shortValue$Short(Kind.Short), - valueOf$Char(Kind.Char), - charValue$Char(Kind.Char), - valueOf$Int(Kind.Int), - intValue$Int(Kind.Int), - valueOf$Long(Kind.Long), - longValue$Long(Kind.Long), - valueOf$Float(Kind.Float), - floatValue$Float(Kind.Float), - valueOf$Double(Kind.Double), - doubleValue$Double(Kind.Double); - - BoxingPlugin(Kind kind) { - assert name().startsWith("valueOf$") || name().startsWith(kind.getJavaName() + "Value$"); - this.kind = kind; - this.box = name().charAt(0) == 'v'; - } - - private final Kind kind; - private final boolean box; - - public final boolean handleInvocation(GraphBuilderContext builder, ValueNode[] args) { - if (box) { - ResolvedJavaType resultType = builder.getMetaAccess().lookupJavaType(kind.toBoxedJavaClass()); - builder.push(Kind.Object, builder.append(new BoxNode(args[0], resultType, kind))); - } else { - builder.push(kind, builder.append(new UnboxNode(args[0], kind))); - } - return true; - } - - public ResolvedJavaMethod getInvocationTarget(MetaAccessProvider metaAccess) { - Class[] parameterTypes = box ? new Class[]{kind.toJavaClass()} : new Class[0]; - return GraphBuilderPlugin.resolveTarget(metaAccess, kind.toBoxedJavaClass(), name(), parameterTypes); - } - } -} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.java/src/com/oracle/graal/java/GraalDirectivePlugins.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraalDirectivePlugins.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.graal.java; + +import com.oracle.graal.api.directives.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.java.GraphBuilderPlugins.InvocationPlugin; +import com.oracle.graal.java.GraphBuilderPlugins.Registration; +import com.oracle.graal.nodes.*; +import com.oracle.graal.nodes.debug.*; +import com.oracle.graal.nodes.extended.*; + +public class GraalDirectivePlugins { + + public static void registerPlugins(MetaAccessProvider metaAccess, GraphBuilderPlugins plugins) { + Registration r = new Registration(plugins, metaAccess, GraalDirectives.class); + r.register0("deoptimize", new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder) { + builder.append(new DeoptimizeNode(DeoptimizationAction.None, DeoptimizationReason.TransferToInterpreter)); + return true; + } + }); + + r.register0("deoptimizeAndInvalidate", new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder) { + builder.append(new DeoptimizeNode(DeoptimizationAction.InvalidateReprofile, DeoptimizationReason.TransferToInterpreter)); + return true; + } + }); + + r.register0("inCompiledCode", new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder) { + builder.push(Kind.Int, builder.append(ConstantNode.forInt(1))); + return true; + } + }); + + r.register0("controlFlowAnchor", new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder) { + builder.append(new ControlFlowAnchorNode()); + return true; + } + }); + + r.register2("injectBranchProbability", double.class, boolean.class, new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder, ValueNode probability, ValueNode condition) { + builder.push(Kind.Int, builder.append(new BranchProbabilityNode(probability, condition))); + return true; + } + }); + + InvocationPlugin blackholePlugin = new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder, ValueNode value) { + builder.append(new BlackholeNode(value)); + return true; + } + }; + + for (Kind kind : Kind.values()) { + Class cls = null; + switch (kind) { + case Object: + cls = Object.class; + break; + case Void: + case Illegal: + continue; + default: + cls = kind.toJavaClass(); + } + + r.register1("blackhole", cls, blackholePlugin); + + final Kind stackKind = kind.getStackKind(); + r.register1("opaque", cls, new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder, ValueNode value) { + builder.push(stackKind, builder.append(new OpaqueNode(value))); + return true; + } + }); + } + } +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderConfiguration.java --- a/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderConfiguration.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderConfiguration.java Sat Feb 07 02:47:00 2015 +0100 @@ -36,7 +36,10 @@ private final ResolvedJavaType[] skippedExceptionTypes; private final DebugInfoMode debugInfoMode; private final boolean doLivenessAnalysis; - private final boolean inlineTrivial; + private GraphBuilderPlugins.LoadFieldPlugin loadFieldPlugin; + private GraphBuilderPlugins.ParameterPlugin parameterPlugin; + private GraphBuilderPlugins.InlineInvokePlugin inlineInvokePlugin; + private GraphBuilderPlugins.LoopExplosionPlugin loopExplosionPlugin; public static enum DebugInfoMode { SafePointsOnly, @@ -62,35 +65,43 @@ Full, } - protected GraphBuilderConfiguration(boolean eagerResolving, boolean omitAllExceptionEdges, 
DebugInfoMode debugInfoMode, ResolvedJavaType[] skippedExceptionTypes, boolean doLivenessAnalysis, - boolean inlineTrivial) { + protected GraphBuilderConfiguration(boolean eagerResolving, boolean omitAllExceptionEdges, DebugInfoMode debugInfoMode, ResolvedJavaType[] skippedExceptionTypes, boolean doLivenessAnalysis) { this.eagerResolving = eagerResolving; this.omitAllExceptionEdges = omitAllExceptionEdges; this.debugInfoMode = debugInfoMode; this.skippedExceptionTypes = skippedExceptionTypes; this.doLivenessAnalysis = doLivenessAnalysis; - this.inlineTrivial = inlineTrivial; + } + + public GraphBuilderConfiguration copy() { + GraphBuilderConfiguration result = new GraphBuilderConfiguration(eagerResolving, omitAllExceptionEdges, debugInfoMode, skippedExceptionTypes, doLivenessAnalysis); + result.loadFieldPlugin = loadFieldPlugin; + return result; } public GraphBuilderConfiguration withSkippedExceptionTypes(ResolvedJavaType[] newSkippedExceptionTypes) { - return new GraphBuilderConfiguration(eagerResolving, omitAllExceptionEdges, debugInfoMode, newSkippedExceptionTypes, doLivenessAnalysis, inlineTrivial); + return new GraphBuilderConfiguration(eagerResolving, omitAllExceptionEdges, debugInfoMode, newSkippedExceptionTypes, doLivenessAnalysis); } public GraphBuilderConfiguration withOmitAllExceptionEdges(boolean newOmitAllExceptionEdges) { - return new GraphBuilderConfiguration(eagerResolving, newOmitAllExceptionEdges, debugInfoMode, skippedExceptionTypes, doLivenessAnalysis, inlineTrivial); + return new GraphBuilderConfiguration(eagerResolving, newOmitAllExceptionEdges, debugInfoMode, skippedExceptionTypes, doLivenessAnalysis); } public GraphBuilderConfiguration withDebugInfoMode(DebugInfoMode newDebugInfoMode) { ResolvedJavaType[] newSkippedExceptionTypes = skippedExceptionTypes == EMPTY ? 
EMPTY : Arrays.copyOf(skippedExceptionTypes, skippedExceptionTypes.length); - return new GraphBuilderConfiguration(eagerResolving, omitAllExceptionEdges, newDebugInfoMode, newSkippedExceptionTypes, doLivenessAnalysis, inlineTrivial); + return new GraphBuilderConfiguration(eagerResolving, omitAllExceptionEdges, newDebugInfoMode, newSkippedExceptionTypes, doLivenessAnalysis); } public GraphBuilderConfiguration withDoLivenessAnalysis(boolean newLivenessAnalysis) { - return new GraphBuilderConfiguration(eagerResolving, omitAllExceptionEdges, debugInfoMode, skippedExceptionTypes, newLivenessAnalysis, inlineTrivial); + return new GraphBuilderConfiguration(eagerResolving, omitAllExceptionEdges, debugInfoMode, skippedExceptionTypes, newLivenessAnalysis); } - public GraphBuilderConfiguration withInlineTrivial(boolean newInlineTrivial) { - return new GraphBuilderConfiguration(eagerResolving, omitAllExceptionEdges, debugInfoMode, skippedExceptionTypes, doLivenessAnalysis, newInlineTrivial); + public GraphBuilderPlugins.LoadFieldPlugin getLoadFieldPlugin() { + return loadFieldPlugin; + } + + public void setLoadFieldPlugin(GraphBuilderPlugins.LoadFieldPlugin loadFieldPlugin) { + this.loadFieldPlugin = loadFieldPlugin; } public ResolvedJavaType[] getSkippedExceptionTypes() { @@ -118,19 +129,19 @@ } public static GraphBuilderConfiguration getDefault() { - return new GraphBuilderConfiguration(false, false, DebugInfoMode.SafePointsOnly, EMPTY, GraalOptions.OptLivenessAnalysis.getValue(), false); + return new GraphBuilderConfiguration(false, false, DebugInfoMode.SafePointsOnly, EMPTY, GraalOptions.OptLivenessAnalysis.getValue()); } public static GraphBuilderConfiguration getEagerDefault() { - return new GraphBuilderConfiguration(true, false, DebugInfoMode.SafePointsOnly, EMPTY, GraalOptions.OptLivenessAnalysis.getValue(), false); + return new GraphBuilderConfiguration(true, false, DebugInfoMode.SafePointsOnly, EMPTY, GraalOptions.OptLivenessAnalysis.getValue()); } public static GraphBuilderConfiguration getSnippetDefault() { - return new GraphBuilderConfiguration(true, true, DebugInfoMode.SafePointsOnly, EMPTY, GraalOptions.OptLivenessAnalysis.getValue(), false); + return new GraphBuilderConfiguration(true, true, DebugInfoMode.SafePointsOnly, EMPTY, GraalOptions.OptLivenessAnalysis.getValue()); } public static GraphBuilderConfiguration getFullDebugDefault() { - return new GraphBuilderConfiguration(true, false, DebugInfoMode.Full, EMPTY, GraalOptions.OptLivenessAnalysis.getValue(), false); + return new GraphBuilderConfiguration(true, false, DebugInfoMode.Full, EMPTY, GraalOptions.OptLivenessAnalysis.getValue()); } /** @@ -142,7 +153,27 @@ return eagerResolving; } - public boolean shouldInlineTrivial() { - return inlineTrivial; + public GraphBuilderPlugins.ParameterPlugin getParameterPlugin() { + return parameterPlugin; + } + + public void setParameterPlugin(GraphBuilderPlugins.ParameterPlugin parameterPlugin) { + this.parameterPlugin = parameterPlugin; + } + + public GraphBuilderPlugins.InlineInvokePlugin getInlineInvokePlugin() { + return inlineInvokePlugin; + } + + public void setInlineInvokePlugin(GraphBuilderPlugins.InlineInvokePlugin inlineInvokePlugin) { + this.inlineInvokePlugin = inlineInvokePlugin; + } + + public GraphBuilderPlugins.LoopExplosionPlugin getLoopExplosionPlugin() { + return loopExplosionPlugin; + } + + public void setLoopExplosionPlugin(GraphBuilderPlugins.LoopExplosionPlugin loopExplosionPlugin) { + this.loopExplosionPlugin = loopExplosionPlugin; } } diff -r 3059a52d9614 -r 
98967b613c88 graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderContext.java --- a/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderContext.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderContext.java Sat Feb 07 02:47:00 2015 +0100 @@ -39,7 +39,7 @@ T append(T fixed); - T append(T v); + T append(T value); StampProvider getStampProvider(); @@ -47,5 +47,18 @@ Assumptions getAssumptions(); + ConstantReflectionProvider getConstantReflection(); + void push(Kind kind, ValueNode value); + + /** + * @see GuardingPiNode#nullCheckedValue(ValueNode) + */ + static ValueNode nullCheckedValue(GraphBuilderContext builder, ValueNode value) { + ValueNode nonNullValue = GuardingPiNode.nullCheckedValue(value); + if (nonNullValue != value) { + builder.append((FixedWithNextNode) nonNullValue); + } + return nonNullValue; + } } diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPhase.java --- a/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPhase.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPhase.java Sat Feb 07 02:47:00 2015 +0100 @@ -37,11 +37,16 @@ import com.oracle.graal.compiler.common.calc.*; import com.oracle.graal.compiler.common.type.*; import com.oracle.graal.debug.*; +import com.oracle.graal.graph.Graph.Mark; import com.oracle.graal.graph.*; import com.oracle.graal.graph.Node.ValueNumberable; +import com.oracle.graal.graph.iterators.*; import com.oracle.graal.java.BciBlockMapping.BciBlock; import com.oracle.graal.java.BciBlockMapping.ExceptionDispatchBlock; import com.oracle.graal.java.BciBlockMapping.LocalLiveness; +import com.oracle.graal.java.GraphBuilderPlugins.InlineInvokePlugin; +import com.oracle.graal.java.GraphBuilderPlugins.InvocationPlugin; +import com.oracle.graal.java.GraphBuilderPlugins.LoopExplosionPlugin; import com.oracle.graal.nodes.*; import com.oracle.graal.nodes.CallTargetNode.InvokeKind; import com.oracle.graal.nodes.calc.*; @@ -62,13 +67,18 @@ private final GraphBuilderPlugins graphBuilderPlugins; public GraphBuilderPhase(GraphBuilderConfiguration config) { + this(config, new DefaultGraphBuilderPlugins()); + } + + public GraphBuilderPhase(GraphBuilderConfiguration config, GraphBuilderPlugins graphBuilderPlugins) { this.graphBuilderConfig = config; - this.graphBuilderPlugins = new GraphBuilderPlugins(); + this.graphBuilderPlugins = graphBuilderPlugins; } @Override protected void run(StructuredGraph graph, HighTierContext context) { - new Instance(context.getMetaAccess(), context.getStampProvider(), context.getAssumptions(), graphBuilderConfig, graphBuilderPlugins, context.getOptimisticOptimizations()).run(graph); + new Instance(context.getMetaAccess(), context.getStampProvider(), context.getAssumptions(), context.getConstantReflection(), graphBuilderConfig, graphBuilderPlugins, + context.getOptimisticOptimizations()).run(graph); } public GraphBuilderConfiguration getGraphBuilderConfig() { @@ -92,6 +102,7 @@ private final OptimisticOptimizations optimisticOpts; private final StampProvider stampProvider; private final Assumptions assumptions; + private final ConstantReflectionProvider constantReflection; /** * Gets the graph being processed by this builder. 
@@ -100,19 +111,21 @@ return currentGraph; } - public Instance(MetaAccessProvider metaAccess, StampProvider stampProvider, Assumptions assumptions, GraphBuilderConfiguration graphBuilderConfig, GraphBuilderPlugins graphBuilderPlugins, - OptimisticOptimizations optimisticOpts) { + public Instance(MetaAccessProvider metaAccess, StampProvider stampProvider, Assumptions assumptions, ConstantReflectionProvider constantReflection, + GraphBuilderConfiguration graphBuilderConfig, GraphBuilderPlugins graphBuilderPlugins, OptimisticOptimizations optimisticOpts) { this.graphBuilderConfig = graphBuilderConfig; this.optimisticOpts = optimisticOpts; this.metaAccess = metaAccess; this.stampProvider = stampProvider; this.assumptions = assumptions; this.graphBuilderPlugins = graphBuilderPlugins; + this.constantReflection = constantReflection; assert metaAccess != null; } - public Instance(MetaAccessProvider metaAccess, StampProvider stampProvider, Assumptions assumptions, GraphBuilderConfiguration graphBuilderConfig, OptimisticOptimizations optimisticOpts) { - this(metaAccess, stampProvider, assumptions, graphBuilderConfig, null, optimisticOpts); + public Instance(MetaAccessProvider metaAccess, StampProvider stampProvider, Assumptions assumptions, ConstantReflectionProvider constantReflection, + GraphBuilderConfiguration graphBuilderConfig, OptimisticOptimizations optimisticOpts) { + this(metaAccess, stampProvider, assumptions, constantReflection, graphBuilderConfig, null, optimisticOpts); } @Override @@ -123,7 +136,7 @@ assert method.getCode() != null : "method must contain bytecodes: " + method; this.currentGraph = graph; HIRFrameStateBuilder frameState = new HIRFrameStateBuilder(method, graph, null); - frameState.initializeForMethodStart(graphBuilderConfig.eagerResolving()); + frameState.initializeForMethodStart(graphBuilderConfig.eagerResolving(), this.graphBuilderConfig.getParameterPlugin()); TTY.Filter filter = new TTY.Filter(PrintFilter.getValue(), method); try { BytecodeParser parser = new BytecodeParser(metaAccess, method, graphBuilderConfig, optimisticOpts, entryBCI); @@ -161,6 +174,12 @@ } } + private static class ExplodedLoopContext { + private BciBlock header; + private int targetPeelIteration; + private int peelIteration; + } + public class BytecodeParser extends AbstractBytecodeParser implements GraphBuilderContext { private BciBlock[] loopHeaders; @@ -182,6 +201,9 @@ private FixedWithNextNode beforeUnwindNode; private FixedWithNextNode lastInstr; // the last instruction added + private final boolean explodeLoops; + private Stack explodeLoopsContext; + private int nextPeelIteration = 1; public BytecodeParser(MetaAccessProvider metaAccess, ResolvedJavaMethod method, GraphBuilderConfiguration graphBuilderConfig, OptimisticOptimizations optimisticOpts, int entryBCI) { super(metaAccess, method, graphBuilderConfig, optimisticOpts); @@ -191,6 +213,13 @@ lnt = method.getLineNumberTable(); previousLineNumber = -1; } + + LoopExplosionPlugin loopExplosionPlugin = graphBuilderConfig.getLoopExplosionPlugin(); + if (loopExplosionPlugin != null) { + explodeLoops = loopExplosionPlugin.shouldExplodeLoops(method); + } else { + explodeLoops = false; + } } public ValueNode getReturnValue() { @@ -219,7 +248,7 @@ try (Indent indent = Debug.logAndIndent("build graph for %s", method)) { // compute the block map, setup exception handlers and get the entrypoint(s) - BciBlockMapping blockMap = BciBlockMapping.create(method, graphBuilderConfig.doLivenessAnalysis()); + BciBlockMapping blockMap = 
BciBlockMapping.create(method, graphBuilderConfig.doLivenessAnalysis(), explodeLoops); loopHeaders = blockMap.getLoopHeaders(); liveness = blockMap.liveness; @@ -251,15 +280,18 @@ } currentBlock = blockMap.startBlock; - blockMap.startBlock.entryState = frameState; - if (blockMap.startBlock.isLoopHeader) { + blockMap.startBlock.setEntryState(0, frameState); + if (blockMap.startBlock.isLoopHeader && !explodeLoops) { appendGoto(createTarget(blockMap.startBlock, frameState)); } else { - blockMap.startBlock.firstInstruction = lastInstr; + blockMap.startBlock.setFirstInstruction(0, lastInstr); } - for (BciBlock block : blockMap.getBlocks()) { - processBlock(this, block); + int index = 0; + BciBlock[] blocks = blockMap.getBlocks(); + while (index < blocks.length) { + BciBlock block = blocks[index]; + index = iterateBlock(blocks, block); } processBlock(this, returnBlock); processBlock(this, unwindBlock); @@ -269,6 +301,50 @@ } } + private int iterateBlock(BciBlock[] blocks, BciBlock block) { + if (block.isLoopHeader && this.explodeLoops) { + return iterateExplodedLoopHeader(blocks, block); + } else { + processBlock(this, block); + return block.getId() + 1; + } + } + + private int iterateExplodedLoopHeader(BciBlock[] blocks, BciBlock header) { + if (explodeLoopsContext == null) { + explodeLoopsContext = new Stack<>(); + } + + ExplodedLoopContext context = new ExplodedLoopContext(); + context.header = header; + context.peelIteration = this.getCurrentDimension(); + context.targetPeelIteration = -1; + explodeLoopsContext.push(context); + Debug.dump(currentGraph, "before loop explosion " + context.peelIteration); + + while (true) { + + processBlock(this, header); + for (int j = header.getId() + 1; j <= header.loopEnd; ++j) { + BciBlock block = blocks[j]; + iterateBlock(blocks, block); + } + + if (context.targetPeelIteration != -1) { + // We were reaching the backedge during explosion. Explode further. + Debug.dump(currentGraph, "Before loop explosion " + context.targetPeelIteration); + context.peelIteration = context.targetPeelIteration; + context.targetPeelIteration = -1; + } else { + // We did not reach the backedge. Exit. 
+ Debug.dump(currentGraph, "after loop explosion " + context.peelIteration); + break; + } + } + explodeLoopsContext.pop(); + return header.loopEnd + 1; + } + private BciBlock returnBlock(int bci) { if (returnBlock == null) { returnBlock = new BciBlock(); @@ -431,7 +507,7 @@ @Override protected ValueNode genLoadIndexed(ValueNode array, ValueNode index, Kind kind) { - return new LoadIndexedNode(array, index, kind); + return LoadIndexedNode.create(array, index, kind, metaAccess, constantReflection); } @Override @@ -441,7 +517,7 @@ @Override protected ValueNode genIntegerAdd(Kind kind, ValueNode x, ValueNode y) { - return new AddNode(x, y); + return AddNode.create(x, y); } @Override @@ -557,17 +633,17 @@ @Override protected ValueNode genObjectEquals(ValueNode x, ValueNode y) { - return new ObjectEqualsNode(x, y); + return ObjectEqualsNode.create(x, y, constantReflection); } @Override protected ValueNode genIntegerEquals(ValueNode x, ValueNode y) { - return new IntegerEqualsNode(x, y); + return IntegerEqualsNode.create(x, y, constantReflection); } @Override protected ValueNode genIntegerLessThan(ValueNode x, ValueNode y) { - return new IntegerLessThanNode(x, y); + return IntegerLessThanNode.create(x, y, constantReflection); } @Override @@ -639,7 +715,7 @@ protected void emitBoundsCheck(ValueNode index, ValueNode length) { AbstractBeginNode trueSucc = currentGraph.add(new BeginNode()); BytecodeExceptionNode exception = currentGraph.add(new BytecodeExceptionNode(metaAccess, ArrayIndexOutOfBoundsException.class, index)); - append(new IfNode(currentGraph.unique(new IntegerBelowNode(index, length)), trueSucc, exception, 0.99)); + append(new IfNode(currentGraph.unique(IntegerBelowNode.create(index, length, constantReflection)), trueSucc, exception, 0.99)); lastInstr = trueSucc; exception.setStateAfter(frameState.create(bci())); @@ -648,7 +724,7 @@ @Override protected ValueNode genArrayLength(ValueNode x) { - return new ArrayLengthNode(x); + return ArrayLengthNode.create(x, constantReflection); } @Override @@ -751,7 +827,18 @@ } } - private void appendInvoke(InvokeKind invokeKind, ResolvedJavaMethod targetMethod, ValueNode[] args) { + private void appendInvoke(InvokeKind initialInvokeKind, ResolvedJavaMethod initialTargetMethod, ValueNode[] args) { + ResolvedJavaMethod targetMethod = initialTargetMethod; + InvokeKind invokeKind = initialInvokeKind; + if (initialInvokeKind.isIndirect()) { + ResolvedJavaType contextType = this.frameState.method.getDeclaringClass(); + ResolvedJavaMethod specialCallTarget = MethodCallTargetNode.findSpecialCallTarget(initialInvokeKind, args[0], initialTargetMethod, assumptions, contextType); + if (specialCallTarget != null) { + invokeKind = InvokeKind.Special; + targetMethod = specialCallTarget; + } + } + Kind resultType = targetMethod.getSignature().getReturnKind(); if (DeoptALot.getValue()) { append(new DeoptimizeNode(DeoptimizationAction.None, RuntimeConstraint)); @@ -770,54 +857,18 @@ args[0] = TypeProfileProxyNode.proxify(args[0], profile); } } - if (GraalOptions.InlineDuringParsing.getValue() && invokeKind.isDirect()) { - - if (graphBuilderPlugins != null) { - GraphBuilderPlugin plugin = graphBuilderPlugins.getPlugin(targetMethod); - if (plugin != null) { - int beforeStackSize = frameState.stackSize; - if (plugin.handleInvocation(this, args)) { - // System.out.println("used plugin: " + plugin); - assert beforeStackSize + resultType.getSlotCount() == frameState.stackSize; - return; - } - assert beforeStackSize == frameState.stackSize; - } - } - if 
(targetMethod.canBeInlined() && targetMethod.hasBytecodes()) { - if (targetMethod.getCode().length <= GraalOptions.TrivialInliningSize.getValue() && currentDepth < GraalOptions.InlineDuringParsingMaxDepth.getValue() && - graphBuilderConfig.shouldInlineTrivial()) { - BytecodeParser parser = new BytecodeParser(metaAccess, targetMethod, graphBuilderConfig, optimisticOpts, StructuredGraph.INVOCATION_ENTRY_BCI); - final FrameState[] lazyFrameState = new FrameState[1]; - HIRFrameStateBuilder startFrameState = new HIRFrameStateBuilder(targetMethod, currentGraph, () -> { - if (lazyFrameState[0] == null) { - lazyFrameState[0] = frameState.create(bci()); - } - return lazyFrameState[0]; - }); - startFrameState.initializeFromArgumentsArray(args); - parser.build(currentDepth + 1, this.lastInstr, startFrameState); + if (graphBuilderPlugins != null) { + if (tryUsingInvocationPlugin(args, targetMethod, resultType)) { + return; + } + } - FixedWithNextNode calleeBeforeReturnNode = parser.getBeforeReturnNode(); - this.lastInstr = calleeBeforeReturnNode; - if (calleeBeforeReturnNode != null) { - ValueNode calleeReturnValue = parser.getReturnValue(); - if (calleeReturnValue != null) { - frameState.push(calleeReturnValue.getKind().getStackKind(), calleeReturnValue); - } - } - - FixedWithNextNode calleeBeforeUnwindNode = parser.getBeforeUnwindNode(); - if (calleeBeforeUnwindNode != null) { - ValueNode calleeUnwindValue = parser.getUnwindValue(); - assert calleeUnwindValue != null; - calleeBeforeUnwindNode.setNext(handleException(calleeUnwindValue, bci())); - } - - return; - } - } + InlineInvokePlugin inlineInvokePlugin = graphBuilderConfig.getInlineInvokePlugin(); + if (inlineInvokePlugin != null && invokeKind.isDirect() && targetMethod.canBeInlined() && targetMethod.hasBytecodes() && + inlineInvokePlugin.shouldInlineInvoke(targetMethod, currentDepth)) { + parseAndInlineCallee(targetMethod, args); + return; } MethodCallTargetNode callTarget = currentGraph.add(createMethodCallTarget(invokeKind, targetMethod, args, returnType)); @@ -834,6 +885,65 @@ } } + private boolean tryUsingInvocationPlugin(ValueNode[] args, ResolvedJavaMethod targetMethod, Kind resultType) { + InvocationPlugin plugin = graphBuilderPlugins.lookupInvocation(targetMethod); + if (plugin != null) { + int beforeStackSize = frameState.stackSize; + boolean needsNullCheck = !targetMethod.isStatic() && !StampTool.isPointerNonNull(args[0].stamp()); + int nodeCount = currentGraph.getNodeCount(); + Mark mark = needsNullCheck ? 
currentGraph.getMark() : null; + if (InvocationPlugin.execute(this, plugin, args)) { + assert beforeStackSize + resultType.getSlotCount() == frameState.stackSize : "plugin manipulated the stack incorrectly"; + assert !needsNullCheck || containsNullCheckOf(currentGraph.getNewNodes(mark), args[0]) : "plugin needs to null check the receiver of " + targetMethod + ": " + args[0]; + return true; + } + assert nodeCount == currentGraph.getNodeCount() : "plugin that returns false must not create new nodes"; + assert beforeStackSize == frameState.stackSize : "plugin that returns false must not modify the stack"; + } + return false; + } + + private boolean containsNullCheckOf(NodeIterable nodes, Node value) { + for (Node n : nodes) { + if (n instanceof GuardingPiNode) { + GuardingPiNode pi = (GuardingPiNode) n; + if (pi.condition() instanceof IsNullNode) { + return ((IsNullNode) pi.condition()).getValue() == value; + } + } + } + return false; + } + + private void parseAndInlineCallee(ResolvedJavaMethod targetMethod, ValueNode[] args) { + BytecodeParser parser = new BytecodeParser(metaAccess, targetMethod, graphBuilderConfig, optimisticOpts, StructuredGraph.INVOCATION_ENTRY_BCI); + final FrameState[] lazyFrameState = new FrameState[1]; + HIRFrameStateBuilder startFrameState = new HIRFrameStateBuilder(targetMethod, currentGraph, () -> { + if (lazyFrameState[0] == null) { + lazyFrameState[0] = frameState.create(bci()); + } + return lazyFrameState[0]; + }); + startFrameState.initializeFromArgumentsArray(args); + parser.build(currentDepth + 1, this.lastInstr, startFrameState); + + FixedWithNextNode calleeBeforeReturnNode = parser.getBeforeReturnNode(); + this.lastInstr = calleeBeforeReturnNode; + if (calleeBeforeReturnNode != null) { + ValueNode calleeReturnValue = parser.getReturnValue(); + if (calleeReturnValue != null) { + frameState.push(calleeReturnValue.getKind().getStackKind(), calleeReturnValue); + } + } + + FixedWithNextNode calleeBeforeUnwindNode = parser.getBeforeUnwindNode(); + if (calleeBeforeUnwindNode != null) { + ValueNode calleeUnwindValue = parser.getUnwindValue(); + assert calleeUnwindValue != null; + calleeBeforeUnwindNode.setNext(handleException(calleeUnwindValue, bci())); + } + } + protected MethodCallTargetNode createMethodCallTarget(InvokeKind invokeKind, ResolvedJavaMethod targetMethod, ValueNode[] args, JavaType returnType) { return new MethodCallTargetNode(invokeKind, targetMethod, args, returnType); } @@ -922,7 +1032,7 @@ JsrScope scope = currentBlock.getJsrScope(); int retAddress = scope.nextReturnAddress(); ConstantNode returnBciNode = getJsrConstant(retAddress); - LogicNode guard = new IntegerEqualsNode(local, returnBciNode); + LogicNode guard = IntegerEqualsNode.create(local, returnBciNode, constantReflection); guard = currentGraph.unique(guard); append(new FixedGuardNode(guard, JavaSubroutineMismatch, InvalidateReprofile)); if (!successor.getJsrScope().equals(scope.pop())) { @@ -998,13 +1108,12 @@ } public T append(T v) { - assert !(v instanceof ConstantNode); T added = currentGraph.unique(v); return added; } private Target checkLoopExit(FixedNode target, BciBlock targetBlock, HIRFrameStateBuilder state) { - if (currentBlock != null) { + if (currentBlock != null && !explodeLoops) { long exits = currentBlock.loops & ~targetBlock.loops; if (exits != 0) { LoopExitNode firstLoopExit = null; @@ -1035,7 +1144,7 @@ } HIRFrameStateBuilder newState = state.copy(); for (BciBlock loop : exitLoops) { - LoopBeginNode 
loopBegin = (LoopBeginNode) loop.firstInstruction; + LoopBeginNode loopBegin = (LoopBeginNode) loop.getFirstInstruction(this.getCurrentDimension()); LoopExitNode loopExit = currentGraph.add(new LoopExitNode(loopBegin)); if (lastLoopExit != null) { lastLoopExit.setNext(loopExit); @@ -1045,7 +1154,7 @@ } lastLoopExit = loopExit; Debug.log("Target %s (%s) Exits %s, scanning framestates...", targetBlock, target, loop); - newState.insertLoopProxies(loopExit, (HIRFrameStateBuilder) loop.entryState); + newState.insertLoopProxies(loopExit, (HIRFrameStateBuilder) loop.getEntryState(this.getCurrentDimension())); loopExit.setStateAfter(newState.create(bci)); } @@ -1070,53 +1179,88 @@ assert block != null && state != null; assert !block.isExceptionEntry || state.stackSize() == 1; - if (block.firstInstruction == null) { + int operatingDimension = this.getCurrentDimension(); + if (this.explodeLoops && this.explodeLoopsContext != null && !this.explodeLoopsContext.isEmpty()) { + int i; + for (i = explodeLoopsContext.size() - 1; i >= 0; --i) { + ExplodedLoopContext context = explodeLoopsContext.elementAt(i); + if (context.header == block) { + + // We have a hit on our current explosion context loop begin. + if (context.targetPeelIteration == -1) { + // This is the first hit => allocate a new dimension and at the same + // time mark the context loop begin as hit during the current + // iteration. + context.targetPeelIteration = nextPeelIteration++; + if (nextPeelIteration > GraalOptions.MaximumLoopExplosionCount.getValue()) { + throw new BailoutException("too many loop explosion iterations - does the explosion not terminate?"); + } + } + + // Operate on the target dimension. + operatingDimension = context.targetPeelIteration; + break; + } else if (block.getId() > context.header.getId() && block.getId() <= context.header.loopEnd) { + // We hit the range of this context. + operatingDimension = context.peelIteration; + break; + } + } + + if (i == -1) { + // No dimension was found, so operate on the default dimension. + operatingDimension = 0; + } + } + + if (block.getFirstInstruction(operatingDimension) == null) { /* * This is the first time we see this block as a branch target. Create and * return a placeholder that later can be replaced with a MergeNode when we see * this block again. */ FixedNode targetNode; - block.firstInstruction = currentGraph.add(new BeginNode()); - targetNode = block.firstInstruction; + block.setFirstInstruction(operatingDimension, currentGraph.add(new BeginNode())); + targetNode = block.getFirstInstruction(operatingDimension); Target target = checkLoopExit(targetNode, block, state); FixedNode result = target.fixed; - block.entryState = target.state == state ? state.copy() : target.state; - block.entryState.clearNonLiveLocals(block, liveness, true); + AbstractFrameStateBuilder entryState = target.state == state ? state.copy() : target.state; + block.setEntryState(operatingDimension, entryState); + entryState.clearNonLiveLocals(block, liveness, true); Debug.log("createTarget %s: first visit, result: %s", block, targetNode); return result; } // We already saw this block before, so we have to merge states. 
- if (!((HIRFrameStateBuilder) block.entryState).isCompatibleWith(state)) { + if (!((HIRFrameStateBuilder) block.getEntryState(operatingDimension)).isCompatibleWith(state)) { throw new BailoutException("stacks do not match; bytecodes would not verify"); } - if (block.firstInstruction instanceof LoopBeginNode) { - assert block.isLoopHeader && currentBlock.getId() >= block.getId() : "must be backward branch"; + if (block.getFirstInstruction(operatingDimension) instanceof LoopBeginNode) { + assert this.explodeLoops || (block.isLoopHeader && currentBlock.getId() >= block.getId()) : "must be backward branch"; /* * Backward loop edge. We need to create a special LoopEndNode and merge with * the loop begin node created before. */ - LoopBeginNode loopBegin = (LoopBeginNode) block.firstInstruction; + LoopBeginNode loopBegin = (LoopBeginNode) block.getFirstInstruction(operatingDimension); Target target = checkLoopExit(currentGraph.add(new LoopEndNode(loopBegin)), block, state); FixedNode result = target.fixed; - ((HIRFrameStateBuilder) block.entryState).merge(loopBegin, target.state); + ((HIRFrameStateBuilder) block.getEntryState(operatingDimension)).merge(loopBegin, target.state); Debug.log("createTarget %s: merging backward branch to loop header %s, result: %s", block, loopBegin, result); return result; } assert currentBlock == null || currentBlock.getId() < block.getId() : "must not be backward branch"; - assert block.firstInstruction.next() == null : "bytecodes already parsed for block"; + assert block.getFirstInstruction(operatingDimension).next() == null : "bytecodes already parsed for block"; - if (block.firstInstruction instanceof AbstractBeginNode && !(block.firstInstruction instanceof AbstractMergeNode)) { + if (block.getFirstInstruction(operatingDimension) instanceof AbstractBeginNode && !(block.getFirstInstruction(operatingDimension) instanceof AbstractMergeNode)) { /* * This is the second time we see this block. Create the actual MergeNode and * the End Node for the already existing edge. For simplicity, we leave the * placeholder in the graph and just append the new nodes after the placeholder. */ - AbstractBeginNode placeholder = (AbstractBeginNode) block.firstInstruction; + AbstractBeginNode placeholder = (AbstractBeginNode) block.getFirstInstruction(operatingDimension); // The EndNode for the already existing edge. AbstractEndNode end = currentGraph.add(new EndNode()); @@ -1134,16 +1278,16 @@ mergeNode.addForwardEnd(end); mergeNode.setNext(next); - block.firstInstruction = mergeNode; + block.setFirstInstruction(operatingDimension, mergeNode); } - AbstractMergeNode mergeNode = (AbstractMergeNode) block.firstInstruction; + AbstractMergeNode mergeNode = (AbstractMergeNode) block.getFirstInstruction(operatingDimension); // The EndNode for the newly merged edge. 
AbstractEndNode newEnd = currentGraph.add(new EndNode()); Target target = checkLoopExit(newEnd, block, state); FixedNode result = target.fixed; - ((HIRFrameStateBuilder) block.entryState).merge(mergeNode, target.state); + ((HIRFrameStateBuilder) block.getEntryState(operatingDimension)).merge(mergeNode, target.state); mergeNode.addForwardEnd(newEnd); Debug.log("createTarget %s: merging state, result: %s", block, result); @@ -1173,14 +1317,14 @@ protected void processBlock(BytecodeParser parser, BciBlock block) { // Ignore blocks that have no predecessors by the time their bytecodes are parsed - if (block == null || block.firstInstruction == null) { + if (block == null || block.getFirstInstruction(this.getCurrentDimension()) == null) { Debug.log("Ignoring block %s", block); return; } - try (Indent indent = Debug.logAndIndent("Parsing block %s firstInstruction: %s loopHeader: %b", block, block.firstInstruction, block.isLoopHeader)) { + try (Indent indent = Debug.logAndIndent("Parsing block %s firstInstruction: %s loopHeader: %b", block, block.getFirstInstruction(this.getCurrentDimension()), block.isLoopHeader)) { - lastInstr = block.firstInstruction; - frameState = (HIRFrameStateBuilder) block.entryState; + lastInstr = block.getFirstInstruction(this.getCurrentDimension()); + frameState = (HIRFrameStateBuilder) block.getEntryState(this.getCurrentDimension()); parser.setCurrentFrameState(frameState); currentBlock = block; @@ -1315,7 +1459,7 @@ @Override protected void iterateBytecodesForBlock(BciBlock block) { - if (block.isLoopHeader) { + if (block.isLoopHeader && !explodeLoops) { // Create the loop header block, which later will merge the backward branches of // the loop. AbstractEndNode preLoopEnd = currentGraph.add(new EndNode()); @@ -1326,7 +1470,7 @@ lastInstr = loopBegin; // Create phi functions for all local variables and operand stack slots. - frameState.insertLoopPhis(loopBegin); + frameState.insertLoopPhis(liveness, block.loopId, loopBegin); loopBegin.setStateAfter(frameState.create(block.startBci)); /* @@ -1334,12 +1478,12 @@ * merge to the loop header. This ensures that the loop header has exactly one * non-loop predecessor. */ - block.firstInstruction = loopBegin; + block.setFirstInstruction(this.getCurrentDimension(), loopBegin); /* * We need to preserve the frame state builder of the loop header so that we can * merge values for phi functions, so make a copy of it. */ - block.entryState = frameState.copy(); + block.setEntryState(this.getCurrentDimension(), frameState.copy()); Debug.log(" created loop header %s", loopBegin); } @@ -1482,11 +1626,24 @@ } condition = genUnique(condition); - ValueNode trueSuccessor = createBlockTarget(probability, trueBlock, frameState); - ValueNode falseSuccessor = createBlockTarget(1 - probability, falseBlock, frameState); + if (condition instanceof LogicConstantNode) { + LogicConstantNode constantLogicNode = (LogicConstantNode) condition; + boolean value = constantLogicNode.getValue(); + if (negate) { + value = !value; + } + BciBlock nextBlock = falseBlock; + if (value) { + nextBlock = trueBlock; + } + appendGoto(createTarget(nextBlock, frameState)); + } else { + ValueNode trueSuccessor = createBlockTarget(probability, trueBlock, frameState); + ValueNode falseSuccessor = createBlockTarget(1 - probability, falseBlock, frameState); - ValueNode ifNode = negate ? genIfNode(condition, falseSuccessor, trueSuccessor, 1 - probability) : genIfNode(condition, trueSuccessor, falseSuccessor, probability); - append(ifNode); + ValueNode ifNode = negate ? 
genIfNode(condition, falseSuccessor, trueSuccessor, 1 - probability) : genIfNode(condition, trueSuccessor, falseSuccessor, probability); + append(ifNode); + } } public StampProvider getStampProvider() { @@ -1502,9 +1659,22 @@ } public void push(Kind kind, ValueNode value) { + assert kind == kind.getStackKind(); frameState.push(kind, value); } + private int getCurrentDimension() { + if (this.explodeLoopsContext == null || this.explodeLoopsContext.isEmpty()) { + return 0; + } else { + return this.explodeLoopsContext.peek().peelIteration; + } + } + + public ConstantReflectionProvider getConstantReflection() { + return constantReflection; + } + } } } diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPlugin.java --- a/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPlugin.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPlugin.java Sat Feb 07 02:47:00 2015 +0100 @@ -22,43 +22,14 @@ */ package com.oracle.graal.java; -import com.oracle.graal.api.meta.*; -import com.oracle.graal.compiler.common.*; -import com.oracle.graal.nodes.*; - /** - * Extensions for handling certain bytecode instructions while building a - * {@linkplain StructuredGraph graph} from a bytecode stream. + * Marker interface for graph builder plugins. + * + * Concrete plugins implement one of the sub-interfaces of this interface. + * + * @see GraphBuilderPluginsProvider + * @see GraphBuilderPlugins + * @see GraphBuilderPlugins.Registration */ public interface GraphBuilderPlugin { - - /** - * Processes an invocation parsed in a bytecode stream and add nodes to a graph being - * constructed that implement the semantics of the invocation. - * - * @param builder object being used to build a graph - * @param args the arguments to the invocation - */ - boolean handleInvocation(GraphBuilderContext builder, ValueNode[] args); - - /** - * Gets the method handled by {@link #handleInvocation(GraphBuilderContext, ValueNode[])} . - */ - ResolvedJavaMethod getInvocationTarget(MetaAccessProvider metaAccess); - - /** - * Looks up a {@link ResolvedJavaMethod}. - * - * @param methodNameBase the name of the method is the prefix of this value up to the first '$' - * character - */ - static ResolvedJavaMethod resolveTarget(MetaAccessProvider metaAccess, Class declaringClass, String methodNameBase, Class... parameterTypes) { - int index = methodNameBase.indexOf('$'); - String methodName = index == -1 ? methodNameBase : methodNameBase.substring(0, index); - try { - return metaAccess.lookupJavaMethod(methodName.equals("") ? 
declaringClass.getDeclaredConstructor(parameterTypes) : declaringClass.getDeclaredMethod(methodName, parameterTypes)); - } catch (NoSuchMethodException | SecurityException e) { - throw new GraalInternalError(e); - } - } } diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPlugins.java --- a/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPlugins.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPlugins.java Sat Feb 07 02:47:00 2015 +0100 @@ -22,45 +22,278 @@ */ package com.oracle.graal.java; +import static java.lang.String.*; + +import java.lang.reflect.*; import java.util.*; -import java.util.stream.*; import com.oracle.graal.api.meta.*; +import com.oracle.graal.compiler.common.*; +import com.oracle.graal.nodes.*; +import com.oracle.graal.nodes.calc.*; /** - * A repository of {@link GraphBuilderPlugin}s. + * Interface for managing a set of graph builder {@link GraphBuilderPlugin}s. */ -public class GraphBuilderPlugins { +public interface GraphBuilderPlugins { + + public interface LoadFieldPlugin extends GraphBuilderPlugin { + @SuppressWarnings("unused") + default boolean apply(GraphBuilderContext builder, ValueNode receiver, ResolvedJavaField field) { + return false; + } - private final Map map = new HashMap<>(); + @SuppressWarnings("unused") + default boolean apply(GraphBuilderContext graphBuilderContext, ResolvedJavaField staticField) { + return false; + } + } + + public interface ParameterPlugin extends GraphBuilderPlugin { + FloatingNode interceptParameter(int index); + } + + public interface InlineInvokePlugin extends GraphBuilderPlugin { + boolean shouldInlineInvoke(ResolvedJavaMethod method, int depth); + } + + public interface LoopExplosionPlugin extends GraphBuilderPlugin { + boolean shouldExplodeLoops(ResolvedJavaMethod method); + } /** - * Registers all the constants of an enum that implements {@link GraphBuilderPlugin}. + * Plugin for handling a method invocation. 
*/ - public & GraphBuilderPlugin> void register(MetaAccessProvider metaAccess, Class enumClass) { - assert Enum.class.isAssignableFrom(enumClass); - Object[] enumConstants = enumClass.getEnumConstants(); - for (Object o : enumConstants) { - GraphBuilderPlugin gbp = (GraphBuilderPlugin) o; - ResolvedJavaMethod target = gbp.getInvocationTarget(metaAccess); - GraphBuilderPlugin oldValue = map.put(target, gbp); - // System.out.println("registered: " + gbp); - assert oldValue == null; + public interface InvocationPlugin extends GraphBuilderPlugin { + /** + * @see #execute(GraphBuilderContext, InvocationPlugin, ValueNode[]) + */ + default boolean apply(GraphBuilderContext builder) { + throw invalidHandler(builder); + } + + /** + * @see #execute(GraphBuilderContext, InvocationPlugin, ValueNode[]) + */ + default boolean apply(GraphBuilderContext builder, ValueNode arg) { + throw invalidHandler(builder, arg); + } + + /** + * @see #execute(GraphBuilderContext, InvocationPlugin, ValueNode[]) + */ + default boolean apply(GraphBuilderContext builder, ValueNode arg1, ValueNode arg2) { + throw invalidHandler(builder, arg1, arg2); + } + + /** + * @see #execute(GraphBuilderContext, InvocationPlugin, ValueNode[]) + */ + default boolean apply(GraphBuilderContext builder, ValueNode arg1, ValueNode arg2, ValueNode arg3) { + throw invalidHandler(builder, arg1, arg2, arg3); + } + + /** + * @see #execute(GraphBuilderContext, InvocationPlugin, ValueNode[]) + */ + default boolean apply(GraphBuilderContext builder, ValueNode arg1, ValueNode arg2, ValueNode arg3, ValueNode arg4) { + throw invalidHandler(builder, arg1, arg2, arg3, arg4); + } + + /** + * Executes a given plugin against a set of invocation arguments by dispatching to the + * plugin's {@code apply(...)} method that matches the number of arguments. + * + * @return {@code true} if the plugin handled the invocation, {@code false} if the graph + * builder should process the invoke further (e.g., by inlining it or creating an + * {@link Invoke} node). A plugin that does not handle an invocation must not modify + * the graph being constructed. + */ + static boolean execute(GraphBuilderContext builder, InvocationPlugin plugin, ValueNode[] args) { + if (args.length == 0) { + return plugin.apply(builder); + } else if (args.length == 1) { + return plugin.apply(builder, args[0]); + } else if (args.length == 2) { + return plugin.apply(builder, args[0], args[1]); + } else if (args.length == 3) { + return plugin.apply(builder, args[0], args[1], args[2]); + } else if (args.length == 4) { + return plugin.apply(builder, args[0], args[1], args[2], args[3]); + } else { + throw plugin.invalidHandler(builder, args); + } + } + + default Error invalidHandler(@SuppressWarnings("unused") GraphBuilderContext builder, ValueNode... args) { + return new GraalInternalError("Invocation plugin %s does not handle invocations with %d arguments", getClass().getSimpleName(), args.length); } } /** - * Gets the plugin for a given method registered in the object. + * Utility for {@linkplain GraphBuilderPlugins#register(ResolvedJavaMethod, InvocationPlugin) + * registration} of plugins. + */ + public static class Registration { + + /** + * Sentinel class for use with {@link Registration#register1}, + * {@link Registration#register2} or {@link Registration#register3} to denote the receiver + * argument for a non-static method. 
+ */ + public static final class Receiver { + private Receiver() { + throw GraalInternalError.shouldNotReachHere(); + } + } + + private final GraphBuilderPlugins plugins; + private final MetaAccessProvider metaAccess; + private final Class declaringClass; + + /** + * Creates an object for registering plugins for methods declared by a given class. + * + * @param plugins where to register the plugins + * @param metaAccess used to resolve classes and methods + * @param declaringClass the class declaring the methods for which plugins will be + * registered via this object + */ + public Registration(GraphBuilderPlugins plugins, MetaAccessProvider metaAccess, Class declaringClass) { + this.plugins = plugins; + this.metaAccess = metaAccess; + this.declaringClass = declaringClass; + } + + /** + * Registers a plugin for a method with no arguments. + * + * @param name the name of the method + * @param plugin the plugin to be registered + */ + public void register0(String name, InvocationPlugin plugin) { + plugins.register(resolve(metaAccess, declaringClass, name), plugin); + } + + /** + * Registers a plugin for a method with 1 argument. + * + * @param name the name of the method + * @param plugin the plugin to be registered + */ + public void register1(String name, Class arg, InvocationPlugin plugin) { + ResolvedJavaMethod method = arg == Receiver.class ? resolve(metaAccess, declaringClass, name) : resolve(metaAccess, declaringClass, name, arg); + plugins.register(method, plugin); + } + + /** + * Registers a plugin for a method with 2 arguments. + * + * @param name the name of the method + * @param plugin the plugin to be registered + */ + public void register2(String name, Class arg1, Class arg2, InvocationPlugin plugin) { + ResolvedJavaMethod method = arg1 == Receiver.class ? resolve(metaAccess, declaringClass, name, arg2) : resolve(metaAccess, declaringClass, name, arg1, arg2); + plugins.register(method, plugin); + } + + /** + * Registers a plugin for a method with 3 arguments. + * + * @param name the name of the method + * @param plugin the plugin to be registered + */ + public void register3(String name, Class arg1, Class arg2, Class arg3, InvocationPlugin plugin) { + ResolvedJavaMethod method = arg1 == Receiver.class ? resolve(metaAccess, declaringClass, name, arg2, arg3) : resolve(metaAccess, declaringClass, name, arg1, arg2, arg3); + plugins.register(method, plugin); + } + + /** + * Registers a plugin for a method with 4 arguments. + * + * @param name the name of the method + * @param plugin the plugin to be registered + */ + public void register4(String name, Class arg1, Class arg2, Class arg3, Class arg4, InvocationPlugin plugin) { + ResolvedJavaMethod method = arg1 == Receiver.class ? resolve(metaAccess, declaringClass, name, arg2, arg3, arg4) : resolve(metaAccess, declaringClass, name, arg1, arg2, arg3, arg4); + plugins.register(method, plugin); + } + + /** + * Resolves a method given a declaring class, name and parameter types. + */ + public static ResolvedJavaMethod resolve(MetaAccessProvider metaAccess, Class declaringClass, String name, Class... parameterTypes) { + try { + return metaAccess.lookupJavaMethod(name.equals("") ? declaringClass.getDeclaredConstructor(parameterTypes) : declaringClass.getDeclaredMethod(name, parameterTypes)); + } catch (NoSuchMethodException | SecurityException e) { + throw new GraalInternalError(e); + } + } + } + + public static class InvocationPluginChecker { + /** + * The set of all {@link InvocationPlugin#apply} method signatures. 
+ */ + static final Class[][] SIGS; + static { + ArrayList[]> sigs = new ArrayList<>(5); + for (Method method : InvocationPlugin.class.getDeclaredMethods()) { + if (!Modifier.isStatic(method.getModifiers()) && method.getName().equals("apply")) { + Class[] sig = method.getParameterTypes(); + assert sig[0] == GraphBuilderContext.class; + assert Arrays.asList(Arrays.copyOfRange(sig, 1, sig.length)).stream().allMatch(c -> c == ValueNode.class); + while (sigs.size() < sig.length) { + sigs.add(null); + } + sigs.set(sig.length - 1, sig); + } + } + assert sigs.indexOf(null) == -1 : format("need to add an apply() method to %s that takes %d %s arguments ", InvocationPlugin.class.getName(), sigs.indexOf(null), + ValueNode.class.getSimpleName()); + SIGS = sigs.toArray(new Class[sigs.size()][]); + } + + public static boolean check(ResolvedJavaMethod method, InvocationPlugin plugin) { + int arguments = method.getSignature().getParameterCount(!method.isStatic()); + assert arguments < SIGS.length : format("need to extend %s to support method with %d arguments: %s", InvocationPlugin.class.getSimpleName(), arguments, method.format("%H.%n(%p)")); + Method expected = null; + for (Method m : plugin.getClass().getDeclaredMethods()) { + if (m.getName().equals("apply")) { + Class[] parameterTypes = m.getParameterTypes(); + assert Arrays.equals(SIGS[arguments], parameterTypes) : format("graph builder plugin for %s has wrong signature%nexpected: (%s)%n actual: (%s)", method.format("%H.%n(%p)"), + sigString(SIGS[arguments]), sigString(m.getParameterTypes())); + expected = m; + } + } + assert expected != null : format("graph builder plugin %s must define exactly one \"apply\" method, none found", plugin); + return true; + } + + protected static String sigString(Class... sig) { + StringBuilder sb = new StringBuilder(); + for (Class t : sig) { + if (sb.length() != 0) { + sb.append(", "); + } + sb.append(t.getSimpleName()); + } + return sb.toString(); + } + + } + + /** + * Registers an {@link InvocationPlugin} for a given method. There must be no plugin currently + * registered for {@code method}. + */ + void register(ResolvedJavaMethod method, InvocationPlugin plugin); + + /** + * Gets the {@link InvocationPlugin} for a given method. * * @param method the method to lookup * @return the plugin associated with {@code method} or {@code null} if none exists */ - public GraphBuilderPlugin getPlugin(ResolvedJavaMethod method) { - return map.get(method); - } - - @Override - public String toString() { - return map.keySet().stream().map(m -> m.format("%H.%n(%p)")).collect(Collectors.joining(", ")); - } + InvocationPlugin lookupInvocation(ResolvedJavaMethod method); } diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPluginsProvider.java --- a/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPluginsProvider.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPluginsProvider.java Sat Feb 07 02:47:00 2015 +0100 @@ -30,9 +30,7 @@ */ public interface GraphBuilderPluginsProvider extends Service { /** - * Registers the plugins provided by this object with a plugins registry. - * - * @param plugins registry of plugins + * Registers the plugins provided by this object. 
*/ void registerPlugins(MetaAccessProvider metaAccess, GraphBuilderPlugins plugins); } diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.java/src/com/oracle/graal/java/HIRFrameStateBuilder.java --- a/graal/com.oracle.graal.java/src/com/oracle/graal/java/HIRFrameStateBuilder.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.java/src/com/oracle/graal/java/HIRFrameStateBuilder.java Sat Feb 07 02:47:00 2015 +0100 @@ -31,6 +31,8 @@ import com.oracle.graal.api.meta.*; import com.oracle.graal.compiler.common.type.*; import com.oracle.graal.debug.*; +import com.oracle.graal.java.BciBlockMapping.LocalLiveness; +import com.oracle.graal.java.GraphBuilderPlugins.ParameterPlugin; import com.oracle.graal.nodeinfo.*; import com.oracle.graal.nodes.*; import com.oracle.graal.nodes.calc.*; @@ -81,14 +83,20 @@ } } - public final void initializeForMethodStart(boolean eagerResolve) { + public final void initializeForMethodStart(boolean eagerResolve, ParameterPlugin parameterPlugin) { int javaIndex = 0; int index = 0; if (!method.isStatic()) { // add the receiver - ParameterNode receiver = graph.unique(new ParameterNode(javaIndex, StampFactory.declaredNonNull(method.getDeclaringClass()))); - storeLocal(javaIndex, receiver); + FloatingNode receiver = null; + if (parameterPlugin != null) { + receiver = parameterPlugin.interceptParameter(index); + } + if (receiver == null) { + receiver = new ParameterNode(javaIndex, StampFactory.declaredNonNull(method.getDeclaringClass())); + } + storeLocal(javaIndex, graph.unique(receiver)); javaIndex = 1; index = 1; } @@ -107,8 +115,14 @@ } else { stamp = StampFactory.forKind(kind); } - ParameterNode param = graph.unique(new ParameterNode(index, stamp)); - storeLocal(javaIndex, param); + FloatingNode param = null; + if (parameterPlugin != null) { + param = parameterPlugin.interceptParameter(index); + } + if (param == null) { + param = new ParameterNode(index, stamp); + } + storeLocal(javaIndex, graph.unique(param)); javaIndex += kind.getSlotCount(); index++; } @@ -221,7 +235,7 @@ return currentValue; } else if (currentValue != otherValue) { - assert !(block instanceof LoopBeginNode) : "Phi functions for loop headers are create eagerly for all locals and stack slots"; + assert !(block instanceof LoopBeginNode) : "Phi functions for loop headers are create eagerly for changed locals and all stack slots"; if (otherValue == null || otherValue.isDeleted() || currentValue.getKind() != otherValue.getKind()) { return null; } @@ -258,9 +272,11 @@ } } - public void insertLoopPhis(LoopBeginNode loopBegin) { + public void insertLoopPhis(LocalLiveness liveness, int loopId, LoopBeginNode loopBegin) { for (int i = 0; i < localsSize(); i++) { - storeLocal(i, createLoopPhi(loopBegin, localAt(i))); + if (loopBegin.graph().isOSR() || liveness.localIsChangedInLoop(loopId, i)) { + storeLocal(i, createLoopPhi(loopBegin, localAt(i))); + } } for (int i = 0; i < stackSize(); i++) { storeStack(i, createLoopPhi(loopBegin, stackAt(i))); diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.java/src/com/oracle/graal/java/StandardGraphBuilderPluginsProvider.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.java/src/com/oracle/graal/java/StandardGraphBuilderPluginsProvider.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.java; + +import static com.oracle.graal.java.GraphBuilderContext.*; + +import com.oracle.graal.api.meta.*; +import com.oracle.graal.api.runtime.*; +import com.oracle.graal.java.GraphBuilderPlugins.InvocationPlugin; +import com.oracle.graal.java.GraphBuilderPlugins.Registration; +import com.oracle.graal.java.GraphBuilderPlugins.Registration.Receiver; +import com.oracle.graal.nodes.*; +import com.oracle.graal.nodes.extended.*; +import com.oracle.graal.nodes.java.*; + +/** + * Provider of non-runtime specific {@link GraphBuilderPlugin}s. + */ +@ServiceProvider(GraphBuilderPluginsProvider.class) +public class StandardGraphBuilderPluginsProvider implements GraphBuilderPluginsProvider { + public void registerPlugins(MetaAccessProvider metaAccess, GraphBuilderPlugins plugins) { + Registration r = new Registration(plugins, metaAccess, Object.class); + r.register1("", Receiver.class, new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder, ValueNode object) { + if (RegisterFinalizerNode.mayHaveFinalizer(object, builder.getAssumptions())) { + builder.append(new RegisterFinalizerNode(object)); + } + return true; + } + }); + + for (Kind kind : Kind.values()) { + if (kind.isPrimitive() && kind != Kind.Void) { + new BoxPlugin(kind).register(metaAccess, plugins); + new UnboxPlugin(kind).register(metaAccess, plugins); + } + } + + GraalDirectivePlugins.registerPlugins(metaAccess, plugins); + } + + static class BoxPlugin implements InvocationPlugin { + + private final Kind kind; + + BoxPlugin(Kind kind) { + this.kind = kind; + } + + public boolean apply(GraphBuilderContext builder, ValueNode value) { + ResolvedJavaType resultType = builder.getMetaAccess().lookupJavaType(kind.toBoxedJavaClass()); + builder.push(Kind.Object, builder.append(new BoxNode(value, resultType, kind))); + return true; + } + + void register(MetaAccessProvider metaAccess, GraphBuilderPlugins plugins) { + ResolvedJavaMethod method = Registration.resolve(metaAccess, kind.toBoxedJavaClass(), "valueOf", kind.toJavaClass()); + plugins.register(method, this); + } + } + + static class UnboxPlugin implements InvocationPlugin { + + private final Kind kind; + + UnboxPlugin(Kind kind) { + this.kind = kind; + } + + public boolean apply(GraphBuilderContext builder, ValueNode value) { + builder.push(kind.getStackKind(), builder.append(new UnboxNode(nullCheckedValue(builder, value), kind))); + return true; + } + + void register(MetaAccessProvider metaAccess, GraphBuilderPlugins plugins) { + String name = kind.toJavaClass().getSimpleName() + "Value"; + ResolvedJavaMethod method = 
Registration.resolve(metaAccess, kind.toBoxedJavaClass(), name); + plugins.register(method, this); + } + } +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCMove.java --- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCMove.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCMove.java Sat Feb 07 02:47:00 2015 +0100 @@ -436,12 +436,13 @@ @Opcode("CAS") public static class CompareAndSwapOp extends SPARCLIRInstruction { - // @Def protected AllocatableValue result; - @Use protected AllocatableValue address; - @Use protected AllocatableValue cmpValue; - @Use protected AllocatableValue newValue; + @Def({REG, HINT}) protected AllocatableValue result; + @Alive({REG}) protected AllocatableValue address; + @Alive({REG}) protected AllocatableValue cmpValue; + @Use({REG}) protected AllocatableValue newValue; - public CompareAndSwapOp(AllocatableValue address, AllocatableValue cmpValue, AllocatableValue newValue) { + public CompareAndSwapOp(AllocatableValue result, AllocatableValue address, AllocatableValue cmpValue, AllocatableValue newValue) { + this.result = result; this.address = address; this.cmpValue = cmpValue; this.newValue = newValue; @@ -449,7 +450,8 @@ @Override public void emitCode(CompilationResultBuilder crb, SPARCMacroAssembler masm) { - compareAndSwap(masm, address, cmpValue, newValue); + move(crb, masm, result, newValue, delayedControlTransfer); + compareAndSwap(masm, address, cmpValue, result); } } diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/Interval.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/Interval.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,1303 @@ +/* + * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.lir.alloc.lsra; + +import static com.oracle.graal.api.code.ValueUtil.*; +import static com.oracle.graal.compiler.common.GraalOptions.*; +import static com.oracle.graal.lir.LIRValueUtil.*; + +import java.util.*; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.compiler.common.*; +import com.oracle.graal.compiler.common.util.*; +import com.oracle.graal.debug.*; +import com.oracle.graal.lir.*; + +/** + * Represents an interval in the {@linkplain LinearScan linear scan register allocator}. 
+ */ +public final class Interval { + + /** + * A pair of intervals. + */ + static final class Pair { + + public final Interval first; + public final Interval second; + + public Pair(Interval first, Interval second) { + this.first = first; + this.second = second; + } + } + + /** + * A set of interval lists, one per {@linkplain RegisterBinding binding} type. + */ + static final class RegisterBindingLists { + + /** + * List of intervals whose binding is currently {@link RegisterBinding#Fixed}. + */ + public Interval fixed; + + /** + * List of intervals whose binding is currently {@link RegisterBinding#Any}. + */ + public Interval any; + + /** + * List of intervals whose binding is currently {@link RegisterBinding#Stack}. + */ + public Interval stack; + + public RegisterBindingLists(Interval fixed, Interval any, Interval stack) { + this.fixed = fixed; + this.any = any; + this.stack = stack; + } + + /** + * Gets the list for a specified binding. + * + * @param binding specifies the list to be returned + * @return the list of intervals whose binding is {@code binding} + */ + public Interval get(RegisterBinding binding) { + switch (binding) { + case Any: + return any; + case Fixed: + return fixed; + case Stack: + return stack; + } + throw GraalInternalError.shouldNotReachHere(); + } + + /** + * Sets the list for a specified binding. + * + * @param binding specifies the list to be replaced + * @param list a list of intervals whose binding is {@code binding} + */ + public void set(RegisterBinding binding, Interval list) { + assert list != null; + switch (binding) { + case Any: + any = list; + break; + case Fixed: + fixed = list; + break; + case Stack: + stack = list; + break; + } + } + + /** + * Adds an interval to a list sorted by {@linkplain Interval#currentFrom() current from} + * positions. + * + * @param binding specifies the list to be updated + * @param interval the interval to add + */ + public void addToListSortedByCurrentFromPositions(RegisterBinding binding, Interval interval) { + Interval list = get(binding); + Interval prev = null; + Interval cur = list; + while (cur.currentFrom() < interval.currentFrom()) { + prev = cur; + cur = cur.next; + } + Interval result = list; + if (prev == null) { + // add to head of list + result = interval; + } else { + // add before 'cur' + prev.next = interval; + } + interval.next = cur; + set(binding, result); + } + + /** + * Adds an interval to a list sorted by {@linkplain Interval#from() start} positions and + * {@linkplain Interval#firstUsage(RegisterPriority) first usage} positions. + * + * @param binding specifies the list to be updated + * @param interval the interval to add + */ + public void addToListSortedByStartAndUsePositions(RegisterBinding binding, Interval interval) { + Interval list = get(binding); + Interval prev = null; + Interval cur = list; + while (cur.from() < interval.from() || (cur.from() == interval.from() && cur.firstUsage(RegisterPriority.None) < interval.firstUsage(RegisterPriority.None))) { + prev = cur; + cur = cur.next; + } + if (prev == null) { + list = interval; + } else { + prev.next = interval; + } + interval.next = cur; + set(binding, list); + } + + /** + * Removes an interval from a list. 
+ * + * @param binding specifies the list to be updated + * @param i the interval to remove + */ + public void remove(RegisterBinding binding, Interval i) { + Interval list = get(binding); + Interval prev = null; + Interval cur = list; + while (cur != i) { + assert cur != null && cur != Interval.EndMarker : "interval has not been found in list: " + i; + prev = cur; + cur = cur.next; + } + if (prev == null) { + set(binding, cur.next); + } else { + prev.next = cur.next; + } + } + } + + /** + * Constants denoting the register usage priority for an interval. The constants are declared in + * increasing order of priority are are used to optimize spilling when multiple overlapping + * intervals compete for limited registers. + */ + public enum RegisterPriority { + /** + * No special reason for an interval to be allocated a register. + */ + None, + + /** + * Priority level for intervals live at the end of a loop. + */ + LiveAtLoopEnd, + + /** + * Priority level for intervals that should be allocated to a register. + */ + ShouldHaveRegister, + + /** + * Priority level for intervals that must be allocated to a register. + */ + MustHaveRegister; + + public static final RegisterPriority[] VALUES = values(); + + /** + * Determines if this priority is higher than or equal to a given priority. + */ + public boolean greaterEqual(RegisterPriority other) { + return ordinal() >= other.ordinal(); + } + + /** + * Determines if this priority is lower than a given priority. + */ + public boolean lessThan(RegisterPriority other) { + return ordinal() < other.ordinal(); + } + } + + /** + * Constants denoting whether an interval is bound to a specific register. This models platform + * dependencies on register usage for certain instructions. + */ + enum RegisterBinding { + /** + * Interval is bound to a specific register as required by the platform. + */ + Fixed, + + /** + * Interval has no specific register requirements. + */ + Any, + + /** + * Interval is bound to a stack slot. + */ + Stack; + + public static final RegisterBinding[] VALUES = values(); + } + + /** + * Constants denoting the linear-scan states an interval may be in with respect to the + * {@linkplain Interval#from() start} {@code position} of the interval being processed. + */ + enum State { + /** + * An interval that starts after {@code position}. + */ + Unhandled, + + /** + * An interval that {@linkplain Interval#covers covers} {@code position} and has an assigned + * register. + */ + Active, + + /** + * An interval that starts before and ends after {@code position} but does not + * {@linkplain Interval#covers cover} it due to a lifetime hole. + */ + Inactive, + + /** + * An interval that ends before {@code position} or is spilled to memory. + */ + Handled; + } + + /** + * Constants used in optimization of spilling of an interval. + */ + enum SpillState { + /** + * Starting state of calculation: no definition found yet. + */ + NoDefinitionFound, + + /** + * One definition has already been found. Two consecutive definitions are treated as one + * (e.g. a consecutive move and add because of two-operand LIR form). The position of this + * definition is given by {@link Interval#spillDefinitionPos()}. + */ + NoSpillStore, + + /** + * One spill move has already been inserted. + */ + OneSpillStore, + + /** + * The interval is spilled multiple times or is spilled in a loop. Place the store somewhere + * on the dominator path between the definition and the usages. 
+ */ + SpillInDominator, + + /** + * The interval should be stored immediately after its definition to prevent multiple + * redundant stores. + */ + StoreAtDefinition, + + /** + * The interval starts in memory (e.g. method parameter), so a store is never necessary. + */ + StartInMemory, + + /** + * The interval has more than one definition (e.g. resulting from phi moves), so stores to + * memory are not optimized. + */ + NoOptimization + } + + /** + * List of use positions. Each entry in the list records the use position and register priority + * associated with the use position. The entries in the list are in descending order of use + * position. + * + */ + public static final class UsePosList { + + private IntList list; + + /** + * Creates a use list. + * + * @param initialCapacity the initial capacity of the list in terms of entries + */ + public UsePosList(int initialCapacity) { + list = new IntList(initialCapacity * 2); + } + + private UsePosList(IntList list) { + this.list = list; + } + + /** + * Splits this list around a given position. All entries in this list with a use position + * greater or equal than {@code splitPos} are removed from this list and added to the + * returned list. + * + * @param splitPos the position for the split + * @return a use position list containing all entries removed from this list that have a use + * position greater or equal than {@code splitPos} + */ + public UsePosList splitAt(int splitPos) { + int i = size() - 1; + int len = 0; + while (i >= 0 && usePos(i) < splitPos) { + --i; + len += 2; + } + int listSplitIndex = (i + 1) * 2; + IntList childList = list; + list = IntList.copy(this.list, listSplitIndex, len); + childList.setSize(listSplitIndex); + UsePosList child = new UsePosList(childList); + return child; + } + + /** + * Gets the use position at a specified index in this list. + * + * @param index the index of the entry for which the use position is returned + * @return the use position of entry {@code index} in this list + */ + public int usePos(int index) { + return list.get(index << 1); + } + + /** + * Gets the register priority for the use position at a specified index in this list. + * + * @param index the index of the entry for which the register priority is returned + * @return the register priority of entry {@code index} in this list + */ + public RegisterPriority registerPriority(int index) { + return RegisterPriority.VALUES[list.get((index << 1) + 1)]; + } + + public void add(int usePos, RegisterPriority registerPriority) { + assert list.size() == 0 || usePos(size() - 1) > usePos; + list.add(usePos); + list.add(registerPriority.ordinal()); + } + + public int size() { + return list.size() >> 1; + } + + public void removeLowestUsePos() { + list.setSize(list.size() - 2); + } + + public void setRegisterPriority(int index, RegisterPriority registerPriority) { + list.set(index * 2, registerPriority.ordinal()); + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder("["); + for (int i = size() - 1; i >= 0; --i) { + if (buf.length() != 1) { + buf.append(", "); + } + RegisterPriority prio = registerPriority(i); + buf.append(usePos(i)).append(" -> ").append(prio.ordinal()).append(':').append(prio); + } + return buf.append("]").toString(); + } + } + + /** + * The {@linkplain RegisterValue register} or {@linkplain Variable variable} for this interval + * prior to register allocation. + */ + public final AllocatableValue operand; + + /** + * The operand number for this interval's {@linkplain #operand operand}. 
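
A minimal sketch of how a use-position list behaves (the positions and priorities below are made up): entries must be appended in descending order of position, and splitAt moves every entry at or above the split position into the returned list:

    Interval.UsePosList uses = new Interval.UsePosList(4);
    uses.add(40, Interval.RegisterPriority.MustHaveRegister);
    uses.add(24, Interval.RegisterPriority.ShouldHaveRegister);
    uses.add(8, Interval.RegisterPriority.None);    // positions are added highest first

    Interval.UsePosList upper = uses.splitAt(16);
    // 'uses' now holds only the entry at position 8; 'upper' holds the entries at 24 and 40.
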
+ */ + public final int operandNumber; + + /** + * The {@linkplain RegisterValue register} or {@linkplain StackSlot spill slot} assigned to this + * interval. In case of a spilled interval which is re-materialized this is + * {@link Value#ILLEGAL}. + */ + private AllocatableValue location; + + /** + * The stack slot to which all splits of this interval are spilled if necessary. + */ + private StackSlotValue spillSlot; + + /** + * The kind of this interval. + */ + private LIRKind kind; + + /** + * The head of the list of ranges describing this interval. This list is sorted by + * {@linkplain LIRInstruction#id instruction ids}. + */ + private Range first; + + /** + * List of (use-positions, register-priorities) pairs, sorted by use-positions. + */ + private UsePosList usePosList; + + /** + * Iterator used to traverse the ranges of an interval. + */ + private Range current; + + /** + * Link to next interval in a sorted list of intervals that ends with {@link #EndMarker}. + */ + Interval next; + + /** + * The linear-scan state of this interval. + */ + State state; + + private int cachedTo; // cached value: to of last range (-1: not cached) + + /** + * The interval from which this one is derived. If this is a {@linkplain #isSplitParent() split + * parent}, it points to itself. + */ + private Interval splitParent; + + /** + * List of all intervals that are split off from this interval. This is only used if this is a + * {@linkplain #isSplitParent() split parent}. + */ + private List splitChildren = Collections.emptyList(); + + /** + * Current split child that has been active or inactive last (always stored in split parents). + */ + private Interval currentSplitChild; + + /** + * Specifies if move is inserted between currentSplitChild and this interval when interval gets + * active the first time. + */ + private boolean insertMoveWhenActivated; + + /** + * For spill move optimization. + */ + private SpillState spillState; + + /** + * Position where this interval is defined (if defined only once). + */ + private int spillDefinitionPos; + + /** + * This interval should be assigned the same location as the hint interval. + */ + private Interval locationHint; + + /** + * The value with which a spilled child interval can be re-materialized. Currently this must be + * a Constant. + */ + private JavaConstant materializedValue; + + /** + * The number of times {@link #addMaterializationValue(JavaConstant)} is called. + */ + private int numMaterializationValuesAdded; + + void assignLocation(AllocatableValue newLocation) { + if (isRegister(newLocation)) { + assert this.location == null : "cannot re-assign location for " + this; + if (newLocation.getLIRKind().equals(LIRKind.Illegal) && !kind.equals(LIRKind.Illegal)) { + this.location = asRegister(newLocation).asValue(kind); + return; + } + } else if (isIllegal(newLocation)) { + assert canMaterialize(); + } else { + assert this.location == null || isRegister(this.location) || (isVirtualStackSlot(this.location) && isStackSlot(newLocation)) : "cannot re-assign location for " + this; + assert isStackSlotValue(newLocation); + assert !newLocation.getLIRKind().equals(LIRKind.Illegal); + assert newLocation.getLIRKind().equals(this.kind); + } + this.location = newLocation; + } + + /** + * Gets the {@linkplain RegisterValue register} or {@linkplain StackSlot spill slot} assigned to + * this interval. 
+ */ + public AllocatableValue location() { + return location; + } + + public LIRKind kind() { + assert !isRegister(operand) : "cannot access type for fixed interval"; + return kind; + } + + void setKind(LIRKind kind) { + assert isRegister(operand) || this.kind().equals(LIRKind.Illegal) || this.kind().equals(kind) : "overwriting existing type"; + this.kind = kind; + } + + public Range first() { + return first; + } + + int from() { + return first.from; + } + + int to() { + if (cachedTo == -1) { + cachedTo = calcTo(); + } + assert cachedTo == calcTo() : "invalid cached value"; + return cachedTo; + } + + int numUsePositions() { + return usePosList.size(); + } + + void setLocationHint(Interval interval) { + locationHint = interval; + } + + boolean isSplitParent() { + return splitParent == this; + } + + boolean isSplitChild() { + return splitParent != this; + } + + /** + * Gets the split parent for this interval. + */ + public Interval splitParent() { + assert splitParent.isSplitParent() : "not a split parent: " + this; + return splitParent; + } + + /** + * Gets the canonical spill slot for this interval. + */ + StackSlotValue spillSlot() { + return splitParent().spillSlot; + } + + void setSpillSlot(StackSlotValue slot) { + assert splitParent().spillSlot == null || (isVirtualStackSlot(splitParent().spillSlot) && isStackSlot(slot)) : "connot overwrite existing spill slot"; + splitParent().spillSlot = slot; + } + + Interval currentSplitChild() { + return splitParent().currentSplitChild; + } + + void makeCurrentSplitChild() { + splitParent().currentSplitChild = this; + } + + boolean insertMoveWhenActivated() { + return insertMoveWhenActivated; + } + + void setInsertMoveWhenActivated(boolean b) { + insertMoveWhenActivated = b; + } + + // for spill optimization + public SpillState spillState() { + return splitParent().spillState; + } + + int spillDefinitionPos() { + return splitParent().spillDefinitionPos; + } + + void setSpillState(SpillState state) { + assert state.ordinal() >= spillState().ordinal() : "state cannot decrease"; + splitParent().spillState = state; + } + + void setSpillDefinitionPos(int pos) { + assert spillState() == SpillState.SpillInDominator || spillDefinitionPos() == -1 : "cannot set the position twice"; + splitParent().spillDefinitionPos = pos; + } + + // returns true if this interval has a shadow copy on the stack that is always correct + boolean alwaysInMemory() { + return (splitParent().spillState == SpillState.SpillInDominator || splitParent().spillState == SpillState.StoreAtDefinition || splitParent().spillState == SpillState.StartInMemory) && + !canMaterialize(); + } + + void removeFirstUsePos() { + usePosList.removeLowestUsePos(); + } + + // test intersection + boolean intersects(Interval i) { + return first.intersects(i.first); + } + + int intersectsAt(Interval i) { + return first.intersectsAt(i.first); + } + + // range iteration + void rewindRange() { + current = first; + } + + void nextRange() { + assert this != EndMarker : "not allowed on sentinel"; + current = current.next; + } + + int currentFrom() { + return current.from; + } + + int currentTo() { + return current.to; + } + + boolean currentAtEnd() { + return current == Range.EndMarker; + } + + boolean currentIntersects(Interval it) { + return current.intersects(it.current); + } + + int currentIntersectsAt(Interval it) { + return current.intersectsAt(it.current); + } + + /** + * Sentinel interval to denote the end of an interval list. 
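
Since interval lists are terminated by this sentinel rather than by null, traversals compare against EndMarker; a minimal sketch (package-private fields, so assumed to run inside this package, with 'listHead' standing for the head of any of the allocator's sorted interval lists):

    for (Interval cur = listHead; cur != Interval.EndMarker; cur = cur.next) {
        // visit 'cur'; the 'next' field of the last element is Interval.EndMarker, never null
    }
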
+ */ + static final Interval EndMarker = new Interval(Value.ILLEGAL, -1); + + Interval(AllocatableValue operand, int operandNumber) { + assert operand != null; + this.operand = operand; + this.operandNumber = operandNumber; + if (isRegister(operand)) { + location = operand; + } else { + assert isIllegal(operand) || isVariable(operand); + } + this.kind = LIRKind.Illegal; + this.first = Range.EndMarker; + this.usePosList = new UsePosList(4); + this.current = Range.EndMarker; + this.next = EndMarker; + this.cachedTo = -1; + this.spillState = SpillState.NoDefinitionFound; + this.spillDefinitionPos = -1; + splitParent = this; + currentSplitChild = this; + } + + /** + * Sets the value which is used for re-materialization. + */ + void addMaterializationValue(JavaConstant value) { + if (numMaterializationValuesAdded == 0) { + materializedValue = value; + } else { + // Interval is defined on multiple places -> no materialization is possible. + materializedValue = null; + } + numMaterializationValuesAdded++; + } + + /** + * Returns true if this interval can be re-materialized when spilled. This means that no + * spill-moves are needed. Instead of restore-moves the {@link #materializedValue} is restored. + */ + public boolean canMaterialize() { + return getMaterializedValue() != null; + } + + /** + * Returns a value which can be moved to a register instead of a restore-move from stack. + */ + public JavaConstant getMaterializedValue() { + return splitParent().materializedValue; + } + + int calcTo() { + assert first != Range.EndMarker : "interval has no range"; + + Range r = first; + while (r.next != Range.EndMarker) { + r = r.next; + } + return r.to; + } + + // consistency check of split-children + boolean checkSplitChildren() { + if (!splitChildren.isEmpty()) { + assert isSplitParent() : "only split parents can have children"; + + for (int i = 0; i < splitChildren.size(); i++) { + Interval i1 = splitChildren.get(i); + + assert i1.splitParent() == this : "not a split child of this interval"; + assert i1.kind().equals(kind()) : "must be equal for all split children"; + assert (i1.spillSlot() == null && spillSlot == null) || i1.spillSlot().equals(spillSlot()) : "must be equal for all split children"; + + for (int j = i + 1; j < splitChildren.size(); j++) { + Interval i2 = splitChildren.get(j); + + assert !i1.operand.equals(i2.operand) : "same register number"; + + if (i1.from() < i2.from()) { + assert i1.to() <= i2.from() && i1.to() < i2.to() : "intervals overlapping"; + } else { + assert i2.from() < i1.from() : "intervals start at same opId"; + assert i2.to() <= i1.from() && i2.to() < i1.to() : "intervals overlapping"; + } + } + } + } + + return true; + } + + public Interval locationHint(boolean searchSplitChild) { + if (!searchSplitChild) { + return locationHint; + } + + if (locationHint != null) { + assert locationHint.isSplitParent() : "ony split parents are valid hint registers"; + + if (locationHint.location != null && isRegister(locationHint.location)) { + return locationHint; + } else if (!locationHint.splitChildren.isEmpty()) { + // search the first split child that has a register assigned + int len = locationHint.splitChildren.size(); + for (int i = 0; i < len; i++) { + Interval interval = locationHint.splitChildren.get(i); + if (interval.location != null && isRegister(interval.location)) { + return interval; + } + } + } + } + + // no hint interval found that has a register assigned + return null; + } + + Interval getSplitChildAtOpId(int opId, LIRInstruction.OperandMode mode, LinearScan 
allocator) { + assert isSplitParent() : "can only be called for split parents"; + assert opId >= 0 : "invalid opId (method cannot be called for spill moves)"; + + if (splitChildren.isEmpty()) { + assert this.covers(opId, mode) : this + " does not cover " + opId; + return this; + } else { + Interval result = null; + int len = splitChildren.size(); + + // in outputMode, the end of the interval (opId == cur.to()) is not valid + int toOffset = (mode == LIRInstruction.OperandMode.DEF ? 0 : 1); + + int i; + for (i = 0; i < len; i++) { + Interval cur = splitChildren.get(i); + if (cur.from() <= opId && opId < cur.to() + toOffset) { + if (i > 0) { + // exchange current split child to start of list (faster access for next + // call) + Util.atPutGrow(splitChildren, i, splitChildren.get(0), null); + Util.atPutGrow(splitChildren, 0, cur, null); + } + + // interval found + result = cur; + break; + } + } + + assert checkSplitChild(result, opId, allocator, toOffset, mode); + return result; + } + } + + private boolean checkSplitChild(Interval result, int opId, LinearScan allocator, int toOffset, LIRInstruction.OperandMode mode) { + if (result == null) { + // this is an error + StringBuilder msg = new StringBuilder(this.toString()).append(" has no child at ").append(opId); + if (!splitChildren.isEmpty()) { + Interval firstChild = splitChildren.get(0); + Interval lastChild = splitChildren.get(splitChildren.size() - 1); + msg.append(" (first = ").append(firstChild).append(", last = ").append(lastChild).append(")"); + } + throw new GraalInternalError("Linear Scan Error: %s", msg); + } + + if (!splitChildren.isEmpty()) { + for (Interval interval : splitChildren) { + if (interval != result && interval.from() <= opId && opId < interval.to() + toOffset) { + TTY.println(String.format("two valid result intervals found for opId %d: %d and %d", opId, result.operandNumber, interval.operandNumber)); + TTY.println(result.logString(allocator)); + TTY.println(interval.logString(allocator)); + throw new BailoutException("two valid result intervals found"); + } + } + } + assert result.covers(opId, mode) : "opId not covered by interval"; + return true; + } + + // returns the interval that covers the given opId or null if there is none + Interval getIntervalCoveringOpId(int opId) { + assert opId >= 0 : "invalid opId"; + assert opId < to() : "can only look into the past"; + + if (opId >= from()) { + return this; + } + + Interval parent = splitParent(); + Interval result = null; + + assert !parent.splitChildren.isEmpty() : "no split children available"; + int len = parent.splitChildren.size(); + + for (int i = len - 1; i >= 0; i--) { + Interval cur = parent.splitChildren.get(i); + if (cur.from() <= opId && opId < cur.to()) { + assert result == null : "covered by multiple split children " + result + " and " + cur; + result = cur; + } + } + + return result; + } + + // returns the last split child that ends before the given opId + Interval getSplitChildBeforeOpId(int opId) { + assert opId >= 0 : "invalid opId"; + + Interval parent = splitParent(); + Interval result = null; + + assert !parent.splitChildren.isEmpty() : "no split children available"; + int len = parent.splitChildren.size(); + + for (int i = len - 1; i >= 0; i--) { + Interval cur = parent.splitChildren.get(i); + if (cur.to() <= opId && (result == null || result.to() < cur.to())) { + result = cur; + } + } + + assert result != null : "no split child found"; + return result; + } + + // checks if opId is covered by any split child + boolean splitChildCovers(int opId, 
LIRInstruction.OperandMode mode) { + assert isSplitParent() : "can only be called for split parents"; + assert opId >= 0 : "invalid opId (method can not be called for spill moves)"; + + if (splitChildren.isEmpty()) { + // simple case if interval was not split + return covers(opId, mode); + + } else { + // extended case: check all split children + int len = splitChildren.size(); + for (int i = 0; i < len; i++) { + Interval cur = splitChildren.get(i); + if (cur.covers(opId, mode)) { + return true; + } + } + return false; + } + } + + private RegisterPriority adaptPriority(RegisterPriority priority) { + /* + * In case of re-materialized values we require that use-operands are registers, because we + * don't have the value in a stack location. (Note that ShouldHaveRegister means that the + * operand can also be a StackSlot). + */ + if (priority == RegisterPriority.ShouldHaveRegister && canMaterialize()) { + return RegisterPriority.MustHaveRegister; + } + return priority; + } + + // Note: use positions are sorted descending . first use has highest index + int firstUsage(RegisterPriority minRegisterPriority) { + assert isVariable(operand) : "cannot access use positions for fixed intervals"; + + for (int i = usePosList.size() - 1; i >= 0; --i) { + RegisterPriority registerPriority = adaptPriority(usePosList.registerPriority(i)); + if (registerPriority.greaterEqual(minRegisterPriority)) { + return usePosList.usePos(i); + } + } + return Integer.MAX_VALUE; + } + + int nextUsage(RegisterPriority minRegisterPriority, int from) { + assert isVariable(operand) : "cannot access use positions for fixed intervals"; + + for (int i = usePosList.size() - 1; i >= 0; --i) { + int usePos = usePosList.usePos(i); + if (usePos >= from && adaptPriority(usePosList.registerPriority(i)).greaterEqual(minRegisterPriority)) { + return usePos; + } + } + return Integer.MAX_VALUE; + } + + int nextUsageExact(RegisterPriority exactRegisterPriority, int from) { + assert isVariable(operand) : "cannot access use positions for fixed intervals"; + + for (int i = usePosList.size() - 1; i >= 0; --i) { + int usePos = usePosList.usePos(i); + if (usePos >= from && adaptPriority(usePosList.registerPriority(i)) == exactRegisterPriority) { + return usePos; + } + } + return Integer.MAX_VALUE; + } + + int previousUsage(RegisterPriority minRegisterPriority, int from) { + assert isVariable(operand) : "cannot access use positions for fixed intervals"; + + int prev = 0; + for (int i = usePosList.size() - 1; i >= 0; --i) { + int usePos = usePosList.usePos(i); + if (usePos > from) { + return prev; + } + if (adaptPriority(usePosList.registerPriority(i)).greaterEqual(minRegisterPriority)) { + prev = usePos; + } + } + return prev; + } + + void addUsePos(int pos, RegisterPriority registerPriority) { + assert covers(pos, LIRInstruction.OperandMode.USE) : "use position not covered by live range"; + + // do not add use positions for precolored intervals because they are never used + if (registerPriority != RegisterPriority.None && isVariable(operand)) { + if (DetailedAsserts.getValue()) { + for (int i = 0; i < usePosList.size(); i++) { + assert pos <= usePosList.usePos(i) : "already added a use-position with lower position"; + if (i > 0) { + assert usePosList.usePos(i) < usePosList.usePos(i - 1) : "not sorted descending"; + } + } + } + + // Note: addUse is called in descending order, so list gets sorted + // automatically by just appending new use positions + int len = usePosList.size(); + if (len == 0 || usePosList.usePos(len - 1) > pos) { + 
usePosList.add(pos, registerPriority); + } else if (usePosList.registerPriority(len - 1).lessThan(registerPriority)) { + assert usePosList.usePos(len - 1) == pos : "list not sorted correctly"; + usePosList.setRegisterPriority(len - 1, registerPriority); + } + } + } + + void addRange(int from, int to) { + assert from < to : "invalid range"; + assert first() == Range.EndMarker || to < first().next.from : "not inserting at begin of interval"; + assert from <= first().to : "not inserting at begin of interval"; + + if (first.from <= to) { + assert first != Range.EndMarker; + // join intersecting ranges + first.from = Math.min(from, first().from); + first.to = Math.max(to, first().to); + } else { + // insert new range + first = new Range(from, to, first()); + } + } + + Interval newSplitChild(LinearScan allocator) { + // allocate new interval + Interval parent = splitParent(); + Interval result = allocator.createDerivedInterval(parent); + result.setKind(kind()); + + result.splitParent = parent; + result.setLocationHint(parent); + + // insert new interval in children-list of parent + if (parent.splitChildren.isEmpty()) { + assert isSplitParent() : "list must be initialized at first split"; + + // Create new non-shared list + parent.splitChildren = new ArrayList<>(4); + parent.splitChildren.add(this); + } + parent.splitChildren.add(result); + + return result; + } + + /** + * Splits this interval at a specified position and returns the remainder as a new child + * interval of this interval's {@linkplain #splitParent() parent} interval. + *

+ * When an interval is split, a bi-directional link is established between the original + * parent interval and the children intervals that are split off this interval. + * When a split child is split again, the new created interval is a direct child of the original + * parent. That is, there is no tree of split children stored, just a flat list. All split + * children are spilled to the same {@linkplain #spillSlot spill slot}. + * + * @param splitPos the position at which to split this interval + * @param allocator the register allocator context + * @return the child interval split off from this interval + */ + Interval split(int splitPos, LinearScan allocator) { + assert isVariable(operand) : "cannot split fixed intervals"; + + // allocate new interval + Interval result = newSplitChild(allocator); + + // split the ranges + Range prev = null; + Range cur = first; + while (cur != Range.EndMarker && cur.to <= splitPos) { + prev = cur; + cur = cur.next; + } + assert cur != Range.EndMarker : "split interval after end of last range"; + + if (cur.from < splitPos) { + result.first = new Range(splitPos, cur.to, cur.next); + cur.to = splitPos; + cur.next = Range.EndMarker; + + } else { + assert prev != null : "split before start of first range"; + result.first = cur; + prev.next = Range.EndMarker; + } + result.current = result.first; + cachedTo = -1; // clear cached value + + // split list of use positions + result.usePosList = usePosList.splitAt(splitPos); + + if (DetailedAsserts.getValue()) { + for (int i = 0; i < usePosList.size(); i++) { + assert usePosList.usePos(i) < splitPos; + } + for (int i = 0; i < result.usePosList.size(); i++) { + assert result.usePosList.usePos(i) >= splitPos; + } + } + return result; + } + + /** + * Splits this interval at a specified position and returns the head as a new interval (this + * interval is the tail). 
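
A minimal sketch of the relationships a split establishes, assuming a variable interval 'it' that has not been split before, a split position strictly inside its range, and the in-progress LinearScan 'allocator' (package-private API):

    Interval child = it.split(splitPos, allocator);

    assert child.splitParent() == it;                // the child links back to the original parent
    assert it.getSplitChildren().contains(child);    // the parent keeps a flat list of all split children
    assert child.spillSlot() == it.spillSlot();      // all splits share the parent's canonical spill slot
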
+ * + * Currently, only the first range can be split, and the new interval must not have split + * positions + */ + Interval splitFromStart(int splitPos, LinearScan allocator) { + assert isVariable(operand) : "cannot split fixed intervals"; + assert splitPos > from() && splitPos < to() : "can only split inside interval"; + assert splitPos > first.from && splitPos <= first.to : "can only split inside first range"; + assert firstUsage(RegisterPriority.None) > splitPos : "can not split when use positions are present"; + + // allocate new interval + Interval result = newSplitChild(allocator); + + // the new interval has only one range (checked by assertion above, + // so the splitting of the ranges is very simple + result.addRange(first.from, splitPos); + + if (splitPos == first.to) { + assert first.next != Range.EndMarker : "must not be at end"; + first = first.next; + } else { + first.from = splitPos; + } + + return result; + } + + // returns true if the opId is inside the interval + boolean covers(int opId, LIRInstruction.OperandMode mode) { + Range cur = first; + + while (cur != Range.EndMarker && cur.to < opId) { + cur = cur.next; + } + if (cur != Range.EndMarker) { + assert cur.to != cur.next.from : "ranges not separated"; + + if (mode == LIRInstruction.OperandMode.DEF) { + return cur.from <= opId && opId < cur.to; + } else { + return cur.from <= opId && opId <= cur.to; + } + } + return false; + } + + // returns true if the interval has any hole between holeFrom and holeTo + // (even if the hole has only the length 1) + boolean hasHoleBetween(int holeFrom, int holeTo) { + assert holeFrom < holeTo : "check"; + assert from() <= holeFrom && holeTo <= to() : "index out of interval"; + + Range cur = first; + while (cur != Range.EndMarker) { + assert cur.to < cur.next.from : "no space between ranges"; + + // hole-range starts before this range . hole + if (holeFrom < cur.from) { + return true; + + // hole-range completely inside this range . no hole + } else { + if (holeTo <= cur.to) { + return false; + + // overlapping of hole-range with this range . hole + } else { + if (holeFrom <= cur.to) { + return true; + } + } + } + + cur = cur.next; + } + + return false; + } + + @Override + public String toString() { + String from = "?"; + String to = "?"; + if (first != null && first != Range.EndMarker) { + from = String.valueOf(from()); + // to() may cache a computed value, modifying the current object, which is a bad idea + // for a printing function. Compute it directly instead. + to = String.valueOf(calcTo()); + } + String locationString = this.location == null ? "" : "@" + this.location; + return operandNumber + ":" + operand + (isRegister(operand) ? "" : locationString) + "[" + from + "," + to + "]"; + } + + /** + * Gets the use position information for this interval. + */ + public UsePosList usePosList() { + return usePosList; + } + + /** + * Gets a single line string for logging the details of this interval to a log stream. 
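
One subtlety of covers() worth spelling out: the end position of the interval counts as covered for uses but not for definitions. A sketch under the assumption that 'it' has at least one range (package-private accessors):

    assert it.covers(it.from(), LIRInstruction.OperandMode.DEF);   // the start position is covered
    assert !it.covers(it.to(), LIRInstruction.OperandMode.DEF);    // a definition at the end lies outside
    assert it.covers(it.to(), LIRInstruction.OperandMode.USE);     // a use at the end is still covered
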
+ * + * @param allocator the register allocator context + */ + public String logString(LinearScan allocator) { + StringBuilder buf = new StringBuilder(100); + buf.append(operandNumber).append(':').append(operand).append(' '); + if (!isRegister(operand)) { + if (location != null) { + buf.append("location{").append(location).append("} "); + } + } + + buf.append("hints{").append(splitParent.operandNumber); + Interval hint = locationHint(false); + if (hint != null && hint.operandNumber != splitParent.operandNumber) { + buf.append(", ").append(hint.operandNumber); + } + buf.append("} ranges{"); + + // print ranges + Range cur = first; + while (cur != Range.EndMarker) { + if (cur != first) { + buf.append(", "); + } + buf.append(cur); + cur = cur.next; + assert cur != null : "range list not closed with range sentinel"; + } + buf.append("} uses{"); + + // print use positions + int prev = 0; + for (int i = usePosList.size() - 1; i >= 0; --i) { + assert prev < usePosList.usePos(i) : "use positions not sorted"; + if (i != usePosList.size() - 1) { + buf.append(", "); + } + buf.append(usePosList.usePos(i)).append(':').append(usePosList.registerPriority(i)); + prev = usePosList.usePos(i); + } + buf.append("} spill-state{").append(spillState()).append("}"); + if (canMaterialize()) { + buf.append(" (remat:").append(getMaterializedValue().toString()).append(")"); + } + return buf.toString(); + } + + List getSplitChildren() { + return Collections.unmodifiableList(splitChildren); + } +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/IntervalWalker.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/IntervalWalker.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,282 @@ +/* + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.lir.alloc.lsra; + +import com.oracle.graal.debug.*; +import com.oracle.graal.lir.alloc.lsra.Interval.*; + +/** + */ +public class IntervalWalker { + + protected final LinearScan allocator; + + /** + * Sorted list of intervals, not live before the current position. + */ + protected RegisterBindingLists unhandledLists; + + /** + * Sorted list of intervals, live at the current position. + */ + protected RegisterBindingLists activeLists; + + /** + * Sorted list of intervals in a life time hole at the current position. + */ + protected RegisterBindingLists inactiveLists; + + /** + * The current position (intercept point through the intervals). 
+ */ + protected int currentPosition; + + /** + * The binding of the current interval being processed. + */ + protected RegisterBinding currentBinding; + + /** + * Processes the {@code currentInterval} interval in an attempt to allocate a physical register + * to it and thus allow it to be moved to a list of {@linkplain #activeLists active} intervals. + * + * @return {@code true} if a register was allocated to the {@code currentInterval} interval + */ + protected boolean activateCurrent(@SuppressWarnings({"unused"}) Interval currentInterval) { + return true; + } + + void walkBefore(int lirOpId) { + walkTo(lirOpId - 1); + } + + void walk() { + walkTo(Integer.MAX_VALUE); + } + + /** + * Creates a new interval walker. + * + * @param allocator the register allocator context + * @param unhandledFixed the list of unhandled {@linkplain RegisterBinding#Fixed fixed} + * intervals + * @param unhandledAny the list of unhandled {@linkplain RegisterBinding#Any non-fixed} + * intervals + */ + IntervalWalker(LinearScan allocator, Interval unhandledFixed, Interval unhandledAny) { + this.allocator = allocator; + + unhandledLists = new RegisterBindingLists(unhandledFixed, unhandledAny, Interval.EndMarker); + activeLists = new RegisterBindingLists(Interval.EndMarker, Interval.EndMarker, Interval.EndMarker); + inactiveLists = new RegisterBindingLists(Interval.EndMarker, Interval.EndMarker, Interval.EndMarker); + currentPosition = -1; + } + + protected void removeFromList(Interval interval) { + if (interval.state == State.Active) { + activeLists.remove(RegisterBinding.Any, interval); + } else { + assert interval.state == State.Inactive : "invalid state"; + inactiveLists.remove(RegisterBinding.Any, interval); + } + } + + private void walkTo(State state, int from) { + assert state == State.Active || state == State.Inactive : "wrong state"; + for (RegisterBinding binding : RegisterBinding.VALUES) { + Interval prevprev = null; + Interval prev = (state == State.Active) ? 
activeLists.get(binding) : inactiveLists.get(binding); + Interval next = prev; + while (next.currentFrom() <= from) { + Interval cur = next; + next = cur.next; + + boolean rangeHasChanged = false; + while (cur.currentTo() <= from) { + cur.nextRange(); + rangeHasChanged = true; + } + + // also handle move from inactive list to active list + rangeHasChanged = rangeHasChanged || (state == State.Inactive && cur.currentFrom() <= from); + + if (rangeHasChanged) { + // remove cur from list + if (prevprev == null) { + if (state == State.Active) { + activeLists.set(binding, next); + } else { + inactiveLists.set(binding, next); + } + } else { + prevprev.next = next; + } + prev = next; + if (cur.currentAtEnd()) { + // move to handled state (not maintained as a list) + cur.state = State.Handled; + intervalMoved(cur, state, State.Handled); + } else if (cur.currentFrom() <= from) { + // sort into active list + activeLists.addToListSortedByCurrentFromPositions(binding, cur); + cur.state = State.Active; + if (prev == cur) { + assert state == State.Active : "check"; + prevprev = prev; + prev = cur.next; + } + intervalMoved(cur, state, State.Active); + } else { + // sort into inactive list + inactiveLists.addToListSortedByCurrentFromPositions(binding, cur); + cur.state = State.Inactive; + if (prev == cur) { + assert state == State.Inactive : "check"; + prevprev = prev; + prev = cur.next; + } + intervalMoved(cur, state, State.Inactive); + } + } else { + prevprev = prev; + prev = cur.next; + } + } + } + } + + /** + * Get the next interval from {@linkplain #unhandledLists} which starts before or at + * {@code toOpId}. The returned interval is removed and {@link #currentBinding} is set. + * + * @postcondition all intervals in {@linkplain #unhandledLists} start after {@code toOpId}. + * + * @return The next interval or null if there is no {@linkplain #unhandledLists unhandled} + * interval at position {@code toOpId}. + */ + private Interval nextInterval(int toOpId) { + RegisterBinding binding; + Interval any = unhandledLists.any; + Interval fixed = unhandledLists.fixed; + + if (any != Interval.EndMarker) { + // intervals may start at same position . prefer fixed interval + binding = fixed != Interval.EndMarker && fixed.from() <= any.from() ? RegisterBinding.Fixed : RegisterBinding.Any; + + assert binding == RegisterBinding.Fixed && fixed.from() <= any.from() || binding == RegisterBinding.Any && any.from() <= fixed.from() : "wrong interval!!!"; + assert any == Interval.EndMarker || fixed == Interval.EndMarker || any.from() != fixed.from() || binding == RegisterBinding.Fixed : "if fixed and any-Interval start at same position, fixed must be processed first"; + + } else if (fixed != Interval.EndMarker) { + binding = RegisterBinding.Fixed; + } else { + return null; + } + Interval currentInterval = unhandledLists.get(binding); + + if (toOpId < currentInterval.from()) { + return null; + } + + currentBinding = binding; + unhandledLists.set(binding, currentInterval.next); + currentInterval.next = Interval.EndMarker; + currentInterval.rewindRange(); + return currentInterval; + } + + /** + * Walk up to {@code toOpId}. + * + * @postcondition {@link #currentPosition} is set to {@code toOpId}, {@link #activeLists} and + * {@link #inactiveLists} are populated and {@link Interval#state}s are up to + * date. 
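
A minimal usage sketch of the walker (same package; 'unhandledFixed' and 'unhandledAny' are assumed to be the sorted unhandled lists produced by the allocator):

    IntervalWalker walker = new IntervalWalker(allocator, unhandledFixed, unhandledAny);
    walker.walkTo(opId);
    // All intervals starting at or before opId are now Active, Inactive or Handled;
    // walkBefore(opId) is the convenience form equivalent to walkTo(opId - 1), and the
    // position passed to successive calls may only increase.
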
+ */ + protected void walkTo(int toOpId) { + assert currentPosition <= toOpId : "can not walk backwards"; + for (Interval currentInterval = nextInterval(toOpId); currentInterval != null; currentInterval = nextInterval(toOpId)) { + int opId = currentInterval.from(); + + // set currentPosition prior to call of walkTo + currentPosition = opId; + + // update unhandled stack intervals + updateUnhandledStackIntervals(opId); + + // call walkTo even if currentPosition == id + walkTo(State.Active, opId); + walkTo(State.Inactive, opId); + + try (Indent indent = Debug.logAndIndent("walk to op %d", opId)) { + currentInterval.state = State.Active; + if (activateCurrent(currentInterval)) { + activeLists.addToListSortedByCurrentFromPositions(currentBinding, currentInterval); + intervalMoved(currentInterval, State.Unhandled, State.Active); + } + } + } + // set currentPosition prior to call of walkTo + currentPosition = toOpId; + + if (currentPosition <= allocator.maxOpId()) { + // update unhandled stack intervals + updateUnhandledStackIntervals(toOpId); + + // call walkTo if still in range + walkTo(State.Active, toOpId); + walkTo(State.Inactive, toOpId); + } + } + + private void intervalMoved(Interval interval, State from, State to) { + // intervalMoved() is called whenever an interval moves from one interval list to another. + // In the implementation of this method it is prohibited to move the interval to any list. + if (Debug.isLogEnabled()) { + Debug.log("interval moved from %s to %s: %s", from, to, interval.logString(allocator)); + } + } + + /** + * Move {@linkplain #unhandledLists unhandled} stack intervals to + * {@linkplain IntervalWalker #activeLists active}. + * + * Note that for {@linkplain RegisterBinding#Fixed fixed} and {@linkplain RegisterBinding#Any + * any} intervals this is done in {@link #nextInterval(int)}. + */ + private void updateUnhandledStackIntervals(int opId) { + Interval currentInterval = unhandledLists.get(RegisterBinding.Stack); + while (currentInterval != Interval.EndMarker && currentInterval.from() <= opId) { + Interval next = currentInterval.next; + if (currentInterval.to() > opId) { + currentInterval.state = State.Active; + activeLists.addToListSortedByCurrentFromPositions(RegisterBinding.Stack, currentInterval); + intervalMoved(currentInterval, State.Unhandled, State.Active); + } else { + currentInterval.state = State.Handled; + intervalMoved(currentInterval, State.Unhandled, State.Handled); + } + currentInterval = next; + } + unhandledLists.set(RegisterBinding.Stack, currentInterval); + } + +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScan.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScan.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,2179 @@ +/* + * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.lir.alloc.lsra; + +import static com.oracle.graal.api.code.CodeUtil.*; +import static com.oracle.graal.api.code.ValueUtil.*; +import static com.oracle.graal.compiler.common.GraalOptions.*; +import static com.oracle.graal.compiler.common.cfg.AbstractControlFlowGraph.*; +import static com.oracle.graal.lir.LIRValueUtil.*; +import static com.oracle.graal.lir.debug.LIRGenerationDebugContext.*; + +import java.util.*; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.compiler.common.*; +import com.oracle.graal.compiler.common.alloc.*; +import com.oracle.graal.compiler.common.cfg.*; +import com.oracle.graal.compiler.common.util.*; +import com.oracle.graal.debug.*; +import com.oracle.graal.debug.Debug.Scope; +import com.oracle.graal.lir.*; +import com.oracle.graal.lir.LIRInstruction.OperandFlag; +import com.oracle.graal.lir.LIRInstruction.OperandMode; +import com.oracle.graal.lir.StandardOp.MoveOp; +import com.oracle.graal.lir.alloc.lsra.Interval.RegisterBinding; +import com.oracle.graal.lir.alloc.lsra.Interval.RegisterPriority; +import com.oracle.graal.lir.alloc.lsra.Interval.SpillState; +import com.oracle.graal.lir.framemap.*; +import com.oracle.graal.lir.gen.*; +import com.oracle.graal.options.*; + +/** + * An implementation of the linear scan register allocator algorithm described in "Optimized Interval Splitting in a Linear Scan Register Allocator" by Christian Wimmer and + * Hanspeter Moessenboeck. + */ +public final class LinearScan { + + final TargetDescription target; + final LIRGenerationResult res; + final LIR ir; + final FrameMapBuilder frameMapBuilder; + final RegisterAttributes[] registerAttributes; + final Register[] registers; + + final boolean callKillsRegisters; + + public static final int DOMINATOR_SPILL_MOVE_ID = -2; + private static final int SPLIT_INTERVALS_CAPACITY_RIGHT_SHIFT = 1; + + public static class Options { + // @formatter:off + @Option(help = "Enable spill position optimization", type = OptionType.Debug) + public static final OptionValue LSRAOptimizeSpillPosition = new OptionValue<>(true); + // @formatter:on + } + + public static class BlockData { + + /** + * Bit map specifying which operands are live upon entry to this block. These are values + * used in this block or any of its successors where such value are not defined in this + * block. The bit index of an operand is its {@linkplain LinearScan#operandNumber(Value) + * operand number}. + */ + public BitSet liveIn; + + /** + * Bit map specifying which operands are live upon exit from this block. These are values + * used in a successor block that are either defined in this block or were live upon entry + * to this block. The bit index of an operand is its + * {@linkplain LinearScan#operandNumber(Value) operand number}. + */ + public BitSet liveOut; + + /** + * Bit map specifying which operands are used (before being defined) in this block. That is, + * these are the values that are live upon entry to the block. The bit index of an operand + * is its {@linkplain LinearScan#operandNumber(Value) operand number}. 
+ */ + public BitSet liveGen; + + /** + * Bit map specifying which operands are defined/overwritten in this block. The bit index of + * an operand is its {@linkplain LinearScan#operandNumber(Value) operand number}. + */ + public BitSet liveKill; + } + + public final BlockMap blockData; + + /** + * List of blocks in linear-scan order. This is only correct as long as the CFG does not change. + */ + final List> sortedBlocks; + + /** + * Map from {@linkplain #operandNumber(Value) operand numbers} to intervals. + */ + Interval[] intervals; + + /** + * The number of valid entries in {@link #intervals}. + */ + int intervalsSize; + + /** + * The index of the first entry in {@link #intervals} for a + * {@linkplain #createDerivedInterval(Interval) derived interval}. + */ + int firstDerivedIntervalIndex = -1; + + /** + * Intervals sorted by {@link Interval#from()}. + */ + Interval[] sortedIntervals; + + /** + * Map from an instruction {@linkplain LIRInstruction#id id} to the instruction. Entries should + * be retrieved with {@link #instructionForId(int)} as the id is not simply an index into this + * array. + */ + LIRInstruction[] opIdToInstructionMap; + + /** + * Map from an instruction {@linkplain LIRInstruction#id id} to the {@linkplain AbstractBlock + * block} containing the instruction. Entries should be retrieved with {@link #blockForId(int)} + * as the id is not simply an index into this array. + */ + AbstractBlock[] opIdToBlockMap; + + /** + * Bit set for each variable that is contained in each loop. + */ + BitMap2D intervalInLoop; + + /** + * The {@linkplain #operandNumber(Value) number} of the first variable operand allocated. + */ + private final int firstVariableNumber; + + public LinearScan(TargetDescription target, LIRGenerationResult res) { + this.target = target; + this.res = res; + this.ir = res.getLIR(); + this.frameMapBuilder = res.getFrameMapBuilder(); + this.sortedBlocks = ir.linearScanOrder(); + this.registerAttributes = frameMapBuilder.getRegisterConfig().getAttributesMap(); + + this.registers = target.arch.getRegisters(); + this.firstVariableNumber = registers.length; + this.blockData = new BlockMap<>(ir.getControlFlowGraph()); + + // If all allocatable registers are caller saved, then no registers are live across a call + // site. The register allocator can save time not trying to find a register at a call site. + this.callKillsRegisters = this.frameMapBuilder.getRegisterConfig().areAllAllocatableRegistersCallerSaved(); + } + + public int getFirstLirInstructionId(AbstractBlock block) { + int result = ir.getLIRforBlock(block).get(0).id(); + assert result >= 0; + return result; + } + + public int getLastLirInstructionId(AbstractBlock block) { + List instructions = ir.getLIRforBlock(block); + int result = instructions.get(instructions.size() - 1).id(); + assert result >= 0; + return result; + } + + public static boolean isVariableOrRegister(Value value) { + return isVariable(value) || isRegister(value); + } + + /** + * Converts an operand (variable or register) to an index in a flat address space covering all + * the {@linkplain Variable variables} and {@linkplain RegisterValue registers} being processed + * by this allocator. + */ + private int operandNumber(Value operand) { + if (isRegister(operand)) { + int number = asRegister(operand).number; + assert number < firstVariableNumber; + return number; + } + assert isVariable(operand) : operand; + return firstVariableNumber + ((Variable) operand).index; + } + + /** + * Gets the number of operands. 
This value will increase by 1 for new variable. + */ + private int operandSize() { + return firstVariableNumber + ir.numVariables(); + } + + /** + * Gets the highest operand number for a register operand. This value will never change. + */ + public int maxRegisterNumber() { + return firstVariableNumber - 1; + } + + static final IntervalPredicate IS_PRECOLORED_INTERVAL = new IntervalPredicate() { + + @Override + public boolean apply(Interval i) { + return isRegister(i.operand); + } + }; + + static final IntervalPredicate IS_VARIABLE_INTERVAL = new IntervalPredicate() { + + @Override + public boolean apply(Interval i) { + return isVariable(i.operand); + } + }; + + static final IntervalPredicate IS_STACK_INTERVAL = new IntervalPredicate() { + + @Override + public boolean apply(Interval i) { + return !isRegister(i.operand); + } + }; + + /** + * Gets an object describing the attributes of a given register according to this register + * configuration. + */ + RegisterAttributes attributes(Register reg) { + return registerAttributes[reg.number]; + } + + void assignSpillSlot(Interval interval) { + // assign the canonical spill slot of the parent (if a part of the interval + // is already spilled) or allocate a new spill slot + if (interval.canMaterialize()) { + interval.assignLocation(Value.ILLEGAL); + } else if (interval.spillSlot() != null) { + interval.assignLocation(interval.spillSlot()); + } else { + VirtualStackSlot slot = frameMapBuilder.allocateSpillSlot(interval.kind()); + interval.setSpillSlot(slot); + interval.assignLocation(slot); + } + } + + /** + * Creates a new interval. + * + * @param operand the operand for the interval + * @return the created interval + */ + Interval createInterval(AllocatableValue operand) { + assert isLegal(operand); + int operandNumber = operandNumber(operand); + Interval interval = new Interval(operand, operandNumber); + assert operandNumber < intervalsSize; + assert intervals[operandNumber] == null; + intervals[operandNumber] = interval; + return interval; + } + + /** + * Creates an interval as a result of splitting or spilling another interval. + * + * @param source an interval being split of spilled + * @return a new interval derived from {@code source} + */ + Interval createDerivedInterval(Interval source) { + if (firstDerivedIntervalIndex == -1) { + firstDerivedIntervalIndex = intervalsSize; + } + if (intervalsSize == intervals.length) { + intervals = Arrays.copyOf(intervals, intervals.length + (intervals.length >> SPLIT_INTERVALS_CAPACITY_RIGHT_SHIFT)); + } + intervalsSize++; + Variable variable = new Variable(source.kind(), ir.nextVariable()); + + Interval interval = createInterval(variable); + assert intervals[intervalsSize - 1] == interval; + return interval; + } + + // access to block list (sorted in linear scan order) + int blockCount() { + return sortedBlocks.size(); + } + + AbstractBlock blockAt(int index) { + return sortedBlocks.get(index); + } + + /** + * Gets the size of the {@link BlockData#liveIn} and {@link BlockData#liveOut} sets for a basic + * block. These sets do not include any operands allocated as a result of creating + * {@linkplain #createDerivedInterval(Interval) derived intervals}. + */ + int liveSetSize() { + return firstDerivedIntervalIndex == -1 ? 
operandSize() : firstDerivedIntervalIndex; + } + + int numLoops() { + return ir.getControlFlowGraph().getLoops().size(); + } + + boolean isIntervalInLoop(int interval, int loop) { + return intervalInLoop.at(interval, loop); + } + + Interval intervalFor(int operandNumber) { + return intervals[operandNumber]; + } + + Interval intervalFor(Value operand) { + int operandNumber = operandNumber(operand); + assert operandNumber < intervalsSize; + return intervals[operandNumber]; + } + + Interval getOrCreateInterval(AllocatableValue operand) { + Interval ret = intervalFor(operand); + if (ret == null) { + return createInterval(operand); + } else { + return ret; + } + } + + /** + * Gets the highest instruction id allocated by this object. + */ + int maxOpId() { + assert opIdToInstructionMap.length > 0 : "no operations"; + return (opIdToInstructionMap.length - 1) << 1; + } + + /** + * Converts an {@linkplain LIRInstruction#id instruction id} to an instruction index. All LIR + * instructions in a method have an index one greater than their linear-scan order predecesor + * with the first instruction having an index of 0. + */ + static int opIdToIndex(int opId) { + return opId >> 1; + } + + /** + * Retrieves the {@link LIRInstruction} based on its {@linkplain LIRInstruction#id id}. + * + * @param opId an instruction {@linkplain LIRInstruction#id id} + * @return the instruction whose {@linkplain LIRInstruction#id} {@code == id} + */ + LIRInstruction instructionForId(int opId) { + assert isEven(opId) : "opId not even"; + LIRInstruction instr = opIdToInstructionMap[opIdToIndex(opId)]; + assert instr.id() == opId; + return instr; + } + + /** + * Gets the block containing a given instruction. + * + * @param opId an instruction {@linkplain LIRInstruction#id id} + * @return the block containing the instruction denoted by {@code opId} + */ + AbstractBlock blockForId(int opId) { + assert opIdToBlockMap.length > 0 && opId >= 0 && opId <= maxOpId() + 1 : "opId out of range"; + return opIdToBlockMap[opIdToIndex(opId)]; + } + + boolean isBlockBegin(int opId) { + return opId == 0 || blockForId(opId) != blockForId(opId - 1); + } + + boolean coversBlockBegin(int opId1, int opId2) { + return blockForId(opId1) != blockForId(opId2); + } + + /** + * Determines if an {@link LIRInstruction} destroys all caller saved registers. + * + * @param opId an instruction {@linkplain LIRInstruction#id id} + * @return {@code true} if the instruction denoted by {@code id} destroys all caller saved + * registers. + */ + boolean hasCall(int opId) { + assert isEven(opId) : "opId not even"; + return instructionForId(opId).destroysCallerSavedRegisters(); + } + + /** + * Eliminates moves from register to stack if the stack slot is known to be correct. 
+ */ + void changeSpillDefinitionPos(Interval interval, int defPos) { + assert interval.isSplitParent() : "can only be called for split parents"; + + switch (interval.spillState()) { + case NoDefinitionFound: + assert interval.spillDefinitionPos() == -1 : "must not be set before"; + interval.setSpillDefinitionPos(defPos); + interval.setSpillState(SpillState.NoSpillStore); + break; + + case NoSpillStore: + assert defPos <= interval.spillDefinitionPos() : "positions are processed in reverse order when intervals are created"; + if (defPos < interval.spillDefinitionPos() - 2) { + // second definition found, so no spill optimization possible for this interval + interval.setSpillState(SpillState.NoOptimization); + } else { + // two consecutive definitions (because of two-operand LIR form) + assert blockForId(defPos) == blockForId(interval.spillDefinitionPos()) : "block must be equal"; + } + break; + + case NoOptimization: + // nothing to do + break; + + default: + throw new BailoutException("other states not allowed at this time"); + } + } + + // called during register allocation + void changeSpillState(Interval interval, int spillPos) { + switch (interval.spillState()) { + case NoSpillStore: { + int defLoopDepth = blockForId(interval.spillDefinitionPos()).getLoopDepth(); + int spillLoopDepth = blockForId(spillPos).getLoopDepth(); + + if (defLoopDepth < spillLoopDepth) { + // the loop depth of the spilling position is higher than the loop depth + // at the definition of the interval, so move the write to memory out of the loop. + if (Options.LSRAOptimizeSpillPosition.getValue()) { + // find the best spill position in the dominator tree + interval.setSpillState(SpillState.SpillInDominator); + } else { + // store at definition of the interval + interval.setSpillState(SpillState.StoreAtDefinition); + } + } else { + // the interval is currently spilled only once, so for now there is no + // reason to store the interval at the definition + interval.setSpillState(SpillState.OneSpillStore); + } + break; + } + + case OneSpillStore: { + if (Options.LSRAOptimizeSpillPosition.getValue()) { + // the interval is spilled more than once + interval.setSpillState(SpillState.SpillInDominator); + } else { + // it is better to store it to + // memory at the definition + interval.setSpillState(SpillState.StoreAtDefinition); + } + break; + } + + case SpillInDominator: + case StoreAtDefinition: + case StartInMemory: + case NoOptimization: + case NoDefinitionFound: + // nothing to do + break; + + default: + throw new BailoutException("other states not allowed at this time"); + } + } + + abstract static class IntervalPredicate { + + abstract boolean apply(Interval i); + } + + private static final IntervalPredicate mustStoreAtDefinition = new IntervalPredicate() { + + @Override + public boolean apply(Interval i) { + return i.isSplitParent() && i.spillState() == SpillState.StoreAtDefinition; + } + }; + + // called once before assignment of register numbers + void eliminateSpillMoves() { + try (Indent indent = Debug.logAndIndent("Eliminating unnecessary spill moves")) { + + // collect all intervals that must be stored after their definition.
+ // the list is sorted by Interval.spillDefinitionPos + Interval interval; + interval = createUnhandledLists(mustStoreAtDefinition, null).first; + if (DetailedAsserts.getValue()) { + checkIntervals(interval); + } + + LIRInsertionBuffer insertionBuffer = new LIRInsertionBuffer(); + for (AbstractBlock block : sortedBlocks) { + List instructions = ir.getLIRforBlock(block); + int numInst = instructions.size(); + + // iterate all instructions of the block. skip the first + // because it is always a label + for (int j = 1; j < numInst; j++) { + LIRInstruction op = instructions.get(j); + int opId = op.id(); + + if (opId == -1) { + MoveOp move = (MoveOp) op; + // remove move from register to stack if the stack slot is guaranteed to be + // correct. + // only moves that have been inserted by LinearScan can be removed. + assert isVariable(move.getResult()) : "LinearScan inserts only moves to variables"; + + Interval curInterval = intervalFor(move.getResult()); + + if (!isRegister(curInterval.location()) && curInterval.alwaysInMemory()) { + // move target is a stack slot that is always correct, so eliminate + // instruction + if (Debug.isLogEnabled()) { + Debug.log("eliminating move from interval %d to %d", operandNumber(move.getInput()), operandNumber(move.getResult())); + } + // null-instructions are deleted by assignRegNum + instructions.set(j, null); + } + + } else { + // insert move from register to stack just after + // the beginning of the interval + assert interval == Interval.EndMarker || interval.spillDefinitionPos() >= opId : "invalid order"; + assert interval == Interval.EndMarker || (interval.isSplitParent() && interval.spillState() == SpillState.StoreAtDefinition) : "invalid interval"; + + while (interval != Interval.EndMarker && interval.spillDefinitionPos() == opId) { + if (!interval.canMaterialize()) { + if (!insertionBuffer.initialized()) { + // prepare insertion buffer (appended when all instructions in + // the block are processed) + insertionBuffer.init(instructions); + } + + AllocatableValue fromLocation = interval.location(); + AllocatableValue toLocation = canonicalSpillOpr(interval); + + assert isRegister(fromLocation) : "from operand must be a register but is: " + fromLocation + " toLocation=" + toLocation + " spillState=" + interval.spillState(); + assert isStackSlotValue(toLocation) : "to operand must be a stack slot"; + + insertionBuffer.append(j + 1, ir.getSpillMoveFactory().createMove(toLocation, fromLocation)); + + Debug.log("inserting move after definition of interval %d to stack slot %s at opId %d", interval.operandNumber, interval.spillSlot(), opId); + } + interval = interval.next; + } + } + } // end of instruction iteration + + if (insertionBuffer.initialized()) { + insertionBuffer.finish(); + } + } // end of block iteration + + assert interval == Interval.EndMarker : "missed an interval"; + } + } + + private static void checkIntervals(Interval interval) { + Interval prev = null; + Interval temp = interval; + while (temp != Interval.EndMarker) { + assert temp.spillDefinitionPos() > 0 : "invalid spill definition pos"; + if (prev != null) { + assert temp.from() >= prev.from() : "intervals not sorted"; + assert temp.spillDefinitionPos() >= prev.spillDefinitionPos() : "when intervals are sorted by from : then they must also be sorted by spillDefinitionPos"; + } + + assert temp.spillSlot() != null || temp.canMaterialize() : "interval has no spill slot assigned"; + assert temp.spillDefinitionPos() >= temp.from() : "invalid order"; + assert temp.spillDefinitionPos() <= 
temp.from() + 2 : "only intervals defined once at their start-pos can be optimized"; + + Debug.log("interval %d (from %d to %d) must be stored at %d", temp.operandNumber, temp.from(), temp.to(), temp.spillDefinitionPos()); + + prev = temp; + temp = temp.next; + } + } + + /** + * Numbers all instructions in all blocks. The numbering follows the + * {@linkplain ComputeBlockOrder linear scan order}. + */ + void numberInstructions() { + + intervalsSize = operandSize(); + intervals = new Interval[intervalsSize + (intervalsSize >> SPLIT_INTERVALS_CAPACITY_RIGHT_SHIFT)]; + + ValueConsumer setVariableConsumer = (value, mode, flags) -> { + if (isVariable(value)) { + getOrCreateInterval(asVariable(value)); + } + }; + + // Assign IDs to LIR nodes and build a mapping, lirOps, from ID to LIRInstruction node. + int numInstructions = 0; + for (AbstractBlock block : sortedBlocks) { + numInstructions += ir.getLIRforBlock(block).size(); + } + + // initialize with correct length + opIdToInstructionMap = new LIRInstruction[numInstructions]; + opIdToBlockMap = new AbstractBlock[numInstructions]; + + int opId = 0; + int index = 0; + for (AbstractBlock block : sortedBlocks) { + blockData.put(block, new BlockData()); + + List instructions = ir.getLIRforBlock(block); + + int numInst = instructions.size(); + for (int j = 0; j < numInst; j++) { + LIRInstruction op = instructions.get(j); + op.setId(opId); + + opIdToInstructionMap[index] = op; + opIdToBlockMap[index] = block; + assert instructionForId(opId) == op : "must match"; + + op.visitEachTemp(setVariableConsumer); + op.visitEachOutput(setVariableConsumer); + + index++; + opId += 2; // numbering of lirOps by two + } + } + assert index == numInstructions : "must match"; + assert (index << 1) == opId : "must match: " + (index << 1); + } + + /** + * Computes local live sets (i.e. {@link BlockData#liveGen} and {@link BlockData#liveKill}) + * separately for each block. 
+ */ + void computeLocalLiveSets() { + int liveSize = liveSetSize(); + + intervalInLoop = new BitMap2D(operandSize(), numLoops()); + + // iterate all blocks + for (final AbstractBlock block : sortedBlocks) { + try (Indent indent = Debug.logAndIndent("compute local live sets for block %d", block.getId())) { + + final BitSet liveGen = new BitSet(liveSize); + final BitSet liveKill = new BitSet(liveSize); + + List instructions = ir.getLIRforBlock(block); + int numInst = instructions.size(); + + ValueConsumer useConsumer = (operand, mode, flags) -> { + if (isVariable(operand)) { + int operandNum = operandNumber(operand); + if (!liveKill.get(operandNum)) { + liveGen.set(operandNum); + Debug.log("liveGen for operand %d", operandNum); + } + if (block.getLoop() != null) { + intervalInLoop.setBit(operandNum, block.getLoop().getIndex()); + } + } + + if (DetailedAsserts.getValue()) { + verifyInput(block, liveKill, operand); + } + }; + ValueConsumer stateConsumer = (operand, mode, flags) -> { + if (isVariableOrRegister(operand)) { + int operandNum = operandNumber(operand); + if (!liveKill.get(operandNum)) { + liveGen.set(operandNum); + Debug.log("liveGen in state for operand %d", operandNum); + } + } + }; + ValueConsumer defConsumer = (operand, mode, flags) -> { + if (isVariable(operand)) { + int varNum = operandNumber(operand); + liveKill.set(varNum); + Debug.log("liveKill for operand %d", varNum); + if (block.getLoop() != null) { + intervalInLoop.setBit(varNum, block.getLoop().getIndex()); + } + } + + if (DetailedAsserts.getValue()) { + // fixed intervals are never live at block boundaries, so + // they need not be processed in live sets + // process them only in debug mode so that this can be checked + verifyTemp(liveKill, operand); + } + }; + + // iterate all instructions of the block + for (int j = 0; j < numInst; j++) { + final LIRInstruction op = instructions.get(j); + + try (Indent indent2 = Debug.logAndIndent("handle op %d", op.id())) { + op.visitEachInput(useConsumer); + op.visitEachAlive(useConsumer); + // Add uses of live locals from interpreter's point of view for proper debug + // information generation + op.visitEachState(stateConsumer); + op.visitEachTemp(defConsumer); + op.visitEachOutput(defConsumer); + } + } // end of instruction iteration + + BlockData blockSets = blockData.get(block); + blockSets.liveGen = liveGen; + blockSets.liveKill = liveKill; + blockSets.liveIn = new BitSet(liveSize); + blockSets.liveOut = new BitSet(liveSize); + + Debug.log("liveGen B%d %s", block.getId(), blockSets.liveGen); + Debug.log("liveKill B%d %s", block.getId(), blockSets.liveKill); + + } + } // end of block iteration + } + + private void verifyTemp(BitSet liveKill, Value operand) { + // fixed intervals are never live at block boundaries, so + // they need not be processed in live sets + // process them only in debug mode so that this can be checked + if (isRegister(operand)) { + if (isProcessed(operand)) { + liveKill.set(operandNumber(operand)); + } + } + } + + private void verifyInput(AbstractBlock block, BitSet liveKill, Value operand) { + // fixed intervals are never live at block boundaries, so + // they need not be processed in live sets. + // this is checked by these assertions to be sure about it. + // the entry block may have incoming + // values in registers, which is ok. 
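+ // isProcessed(operand) restricts the check to allocatable registers; non-allocatable fixed registers are ignored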
+ if (isRegister(operand) && block != ir.getControlFlowGraph().getStartBlock()) { + if (isProcessed(operand)) { + assert liveKill.get(operandNumber(operand)) : "using fixed register that is not defined in this block"; + } + } + } + + /** + * Performs a backward dataflow analysis to compute global live sets (i.e. + * {@link BlockData#liveIn} and {@link BlockData#liveOut}) for each block. + */ + void computeGlobalLiveSets() { + try (Indent indent = Debug.logAndIndent("compute global live sets")) { + int numBlocks = blockCount(); + boolean changeOccurred; + boolean changeOccurredInBlock; + int iterationCount = 0; + BitSet liveOut = new BitSet(liveSetSize()); // scratch set for calculations + + // Perform a backward dataflow analysis to compute liveOut and liveIn for each block. + // The loop is executed until a fixpoint is reached (no changes in an iteration) + do { + changeOccurred = false; + + try (Indent indent2 = Debug.logAndIndent("new iteration %d", iterationCount)) { + + // iterate all blocks in reverse order + for (int i = numBlocks - 1; i >= 0; i--) { + AbstractBlock block = blockAt(i); + BlockData blockSets = blockData.get(block); + + changeOccurredInBlock = false; + + // liveOut(block) is the union of liveIn(sux), for successors sux of block + int n = block.getSuccessorCount(); + if (n > 0) { + liveOut.clear(); + // block has successors + if (n > 0) { + for (AbstractBlock successor : block.getSuccessors()) { + liveOut.or(blockData.get(successor).liveIn); + } + } + + if (!blockSets.liveOut.equals(liveOut)) { + // A change occurred. Swap the old and new live out + // sets to avoid copying. + BitSet temp = blockSets.liveOut; + blockSets.liveOut = liveOut; + liveOut = temp; + + changeOccurred = true; + changeOccurredInBlock = true; + } + } + + if (iterationCount == 0 || changeOccurredInBlock) { + // liveIn(block) is the union of liveGen(block) with (liveOut(block) & + // !liveKill(block)) + // note: liveIn has to be computed only in first iteration + // or if liveOut has changed! + BitSet liveIn = blockSets.liveIn; + liveIn.clear(); + liveIn.or(blockSets.liveOut); + liveIn.andNot(blockSets.liveKill); + liveIn.or(blockSets.liveGen); + + Debug.log("block %d: livein = %s, liveout = %s", block.getId(), liveIn, blockSets.liveOut); + } + } + iterationCount++; + + if (changeOccurred && iterationCount > 50) { + throw new BailoutException("too many iterations in computeGlobalLiveSets"); + } + } + } while (changeOccurred); + + if (DetailedAsserts.getValue()) { + verifyLiveness(); + } + + // check that the liveIn set of the first block is empty + AbstractBlock startBlock = ir.getControlFlowGraph().getStartBlock(); + if (blockData.get(startBlock).liveIn.cardinality() != 0) { + if (DetailedAsserts.getValue()) { + reportFailure(numBlocks); + } + // bailout if this occurs in product mode. 
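+ // a non-empty liveIn set at the start block means that some value is used before it is defined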
+ throw new GraalInternalError("liveIn set of first block must be empty: " + blockData.get(startBlock).liveIn); + } + } + } + + private void reportFailure(int numBlocks) { + try (Scope s = Debug.forceLog()) { + try (Indent indent = Debug.logAndIndent("report failure")) { + + BitSet startBlockLiveIn = blockData.get(ir.getControlFlowGraph().getStartBlock()).liveIn; + try (Indent indent2 = Debug.logAndIndent("Error: liveIn set of first block must be empty (when this fails, variables are used before they are defined):")) { + for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) { + Interval interval = intervalFor(operandNum); + if (interval != null) { + Value operand = interval.operand; + Debug.log("var %d; operand=%s; node=%s", operandNum, operand, getSourceForOperandFromDebugContext(operand)); + } else { + Debug.log("var %d; missing operand", operandNum); + } + } + } + + // print some additional information to simplify debugging + for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) { + Interval interval = intervalFor(operandNum); + Value operand = null; + Object valueForOperandFromDebugContext = null; + if (interval != null) { + operand = interval.operand; + valueForOperandFromDebugContext = getSourceForOperandFromDebugContext(operand); + } + try (Indent indent2 = Debug.logAndIndent("---- Detailed information for var %d; operand=%s; node=%s ----", operandNum, operand, valueForOperandFromDebugContext)) { + + Deque> definedIn = new ArrayDeque<>(); + HashSet> usedIn = new HashSet<>(); + for (AbstractBlock block : sortedBlocks) { + if (blockData.get(block).liveGen.get(operandNum)) { + usedIn.add(block); + try (Indent indent3 = Debug.logAndIndent("used in block B%d", block.getId())) { + for (LIRInstruction ins : ir.getLIRforBlock(block)) { + try (Indent indent4 = Debug.logAndIndent("%d: %s", ins.id(), ins)) { + ins.forEachState((liveStateOperand, mode, flags) -> { + Debug.log("operand=%s", liveStateOperand); + return liveStateOperand; + }); + } + } + } + } + if (blockData.get(block).liveKill.get(operandNum)) { + definedIn.add(block); + try (Indent indent3 = Debug.logAndIndent("defined in block B%d", block.getId())) { + for (LIRInstruction ins : ir.getLIRforBlock(block)) { + Debug.log("%d: %s", ins.id(), ins); + } + } + } + } + + int[] hitCount = new int[numBlocks]; + + while (!definedIn.isEmpty()) { + AbstractBlock block = definedIn.removeFirst(); + usedIn.remove(block); + for (AbstractBlock successor : block.getSuccessors()) { + if (successor.isLoopHeader()) { + if (!block.isLoopEnd()) { + definedIn.add(successor); + } + } else { + if (++hitCount[successor.getId()] == successor.getPredecessorCount()) { + definedIn.add(successor); + } + } + } + } + try (Indent indent3 = Debug.logAndIndent("**** offending usages are in: ")) { + for (AbstractBlock block : usedIn) { + Debug.log("B%d", block.getId()); + } + } + } + } + } + } catch (Throwable e) { + throw Debug.handle(e); + } + } + + private void verifyLiveness() { + // check that fixed intervals are not live at block boundaries + // (live set must be empty at fixed intervals) + for (AbstractBlock block : sortedBlocks) { + for (int j = 0; j <= maxRegisterNumber(); j++) { + assert !blockData.get(block).liveIn.get(j) : "liveIn set of fixed register must be empty"; + assert !blockData.get(block).liveOut.get(j) : "liveOut set of fixed register must be empty"; + assert !blockData.get(block).liveGen.get(j) : 
"liveGen set of fixed register must be empty"; + } + } + } + + void addUse(AllocatableValue operand, int from, int to, RegisterPriority registerPriority, LIRKind kind) { + if (!isProcessed(operand)) { + return; + } + + Interval interval = getOrCreateInterval(operand); + if (!kind.equals(LIRKind.Illegal)) { + interval.setKind(kind); + } + + interval.addRange(from, to); + + // Register use position at even instruction id. + interval.addUsePos(to & ~1, registerPriority); + + Debug.log("add use: %s, from %d to %d (%s)", interval, from, to, registerPriority.name()); + } + + void addTemp(AllocatableValue operand, int tempPos, RegisterPriority registerPriority, LIRKind kind) { + if (!isProcessed(operand)) { + return; + } + + Interval interval = getOrCreateInterval(operand); + if (!kind.equals(LIRKind.Illegal)) { + interval.setKind(kind); + } + + interval.addRange(tempPos, tempPos + 1); + interval.addUsePos(tempPos, registerPriority); + interval.addMaterializationValue(null); + + Debug.log("add temp: %s tempPos %d (%s)", interval, tempPos, RegisterPriority.MustHaveRegister.name()); + } + + boolean isProcessed(Value operand) { + return !isRegister(operand) || attributes(asRegister(operand)).isAllocatable(); + } + + void addDef(AllocatableValue operand, LIRInstruction op, RegisterPriority registerPriority, LIRKind kind) { + if (!isProcessed(operand)) { + return; + } + int defPos = op.id(); + + Interval interval = getOrCreateInterval(operand); + if (!kind.equals(LIRKind.Illegal)) { + interval.setKind(kind); + } + + Range r = interval.first(); + if (r.from <= defPos) { + // Update the starting point (when a range is first created for a use, its + // start is the beginning of the current block until a def is encountered.) + r.from = defPos; + interval.addUsePos(defPos, registerPriority); + + } else { + // Dead value - make vacuous interval + // also add register priority for dead intervals + interval.addRange(defPos, defPos + 1); + interval.addUsePos(defPos, registerPriority); + Debug.log("Warning: def of operand %s at %d occurs without use", operand, defPos); + } + + changeSpillDefinitionPos(interval, defPos); + if (registerPriority == RegisterPriority.None && interval.spillState().ordinal() <= SpillState.StartInMemory.ordinal()) { + // detection of method-parameters and roundfp-results + interval.setSpillState(SpillState.StartInMemory); + } + interval.addMaterializationValue(LinearScan.getMaterializedValue(op, operand, interval)); + + Debug.log("add def: %s defPos %d (%s)", interval, defPos, registerPriority.name()); + } + + /** + * Determines the register priority for an instruction's output/result operand. + */ + static RegisterPriority registerPriorityOfOutputOperand(LIRInstruction op) { + if (op instanceof MoveOp) { + MoveOp move = (MoveOp) op; + if (optimizeMethodArgument(move.getInput())) { + return RegisterPriority.None; + } + } + + // all other operands require a register + return RegisterPriority.MustHaveRegister; + } + + /** + * Determines the priority which with an instruction's input operand will be allocated a + * register. 
+ */ + static RegisterPriority registerPriorityOfInputOperand(EnumSet flags) { + if (flags.contains(OperandFlag.STACK)) { + return RegisterPriority.ShouldHaveRegister; + } + // all other operands require a register + return RegisterPriority.MustHaveRegister; + } + + private static boolean optimizeMethodArgument(Value value) { + /* + * Object method arguments that are passed on the stack are currently not optimized because + * this requires that the runtime visits method arguments during stack walking. + */ + return isStackSlot(value) && asStackSlot(value).isInCallerFrame() && value.getKind() != Kind.Object; + } + + /** + * Optimizes moves related to incoming stack based arguments. The interval for the destination + * of such moves is assigned the stack slot (which is in the caller's frame) as its spill slot. + */ + void handleMethodArguments(LIRInstruction op) { + if (op instanceof MoveOp) { + MoveOp move = (MoveOp) op; + if (optimizeMethodArgument(move.getInput())) { + StackSlot slot = asStackSlot(move.getInput()); + if (DetailedAsserts.getValue()) { + assert op.id() > 0 : "invalid id"; + assert blockForId(op.id()).getPredecessorCount() == 0 : "move from stack must be in first block"; + assert isVariable(move.getResult()) : "result of move must be a variable"; + + Debug.log("found move from stack slot %s to %s", slot, move.getResult()); + } + + Interval interval = intervalFor(move.getResult()); + interval.setSpillSlot(slot); + interval.assignLocation(slot); + } + } + } + + void addRegisterHint(final LIRInstruction op, final Value targetValue, OperandMode mode, EnumSet flags, final boolean hintAtDef) { + if (flags.contains(OperandFlag.HINT) && isVariableOrRegister(targetValue)) { + + op.forEachRegisterHint(targetValue, mode, (registerHint, valueMode, valueFlags) -> { + if (isVariableOrRegister(registerHint)) { + Interval from = getOrCreateInterval((AllocatableValue) registerHint); + Interval to = getOrCreateInterval((AllocatableValue) targetValue); + + /* hints always point from def to use */ + if (hintAtDef) { + to.setLocationHint(from); + } else { + from.setLocationHint(to); + } + Debug.log("operation at opId %d: added hint from interval %d to %d", op.id(), from.operandNumber, to.operandNumber); + + return registerHint; + } + return null; + }); + } + } + + void buildIntervals() { + + try (Indent indent = Debug.logAndIndent("build intervals")) { + InstructionValueConsumer outputConsumer = (op, operand, mode, flags) -> { + if (isVariableOrRegister(operand)) { + addDef((AllocatableValue) operand, op, registerPriorityOfOutputOperand(op), operand.getLIRKind()); + addRegisterHint(op, operand, mode, flags, true); + } + }; + + InstructionValueConsumer tempConsumer = (op, operand, mode, flags) -> { + if (isVariableOrRegister(operand)) { + addTemp((AllocatableValue) operand, op.id(), RegisterPriority.MustHaveRegister, operand.getLIRKind()); + addRegisterHint(op, operand, mode, flags, false); + } + }; + + InstructionValueConsumer aliveConsumer = (op, operand, mode, flags) -> { + if (isVariableOrRegister(operand)) { + RegisterPriority p = registerPriorityOfInputOperand(flags); + int opId = op.id(); + int blockFrom = getFirstLirInstructionId((blockForId(opId))); + addUse((AllocatableValue) operand, blockFrom, opId + 1, p, operand.getLIRKind()); + addRegisterHint(op, operand, mode, flags, false); + } + }; + + InstructionValueConsumer inputConsumer = (op, operand, mode, flags) -> { + if (isVariableOrRegister(operand)) { + int opId = op.id(); + int blockFrom = 
getFirstLirInstructionId((blockForId(opId))); + RegisterPriority p = registerPriorityOfInputOperand(flags); + addUse((AllocatableValue) operand, blockFrom, opId, p, operand.getLIRKind()); + addRegisterHint(op, operand, mode, flags, false); + } + }; + + InstructionValueConsumer stateProc = (op, operand, mode, flags) -> { + if (isVariableOrRegister(operand)) { + int opId = op.id(); + int blockFrom = getFirstLirInstructionId((blockForId(opId))); + addUse((AllocatableValue) operand, blockFrom, opId + 1, RegisterPriority.None, operand.getLIRKind()); + } + }; + + // create a list with all caller-save registers (cpu, fpu, xmm) + Register[] callerSaveRegs = frameMapBuilder.getRegisterConfig().getCallerSaveRegisters(); + + // iterate all blocks in reverse order + for (int i = blockCount() - 1; i >= 0; i--) { + + AbstractBlock block = blockAt(i); + try (Indent indent2 = Debug.logAndIndent("handle block %d", block.getId())) { + + List instructions = ir.getLIRforBlock(block); + final int blockFrom = getFirstLirInstructionId(block); + int blockTo = getLastLirInstructionId(block); + + assert blockFrom == instructions.get(0).id(); + assert blockTo == instructions.get(instructions.size() - 1).id(); + + // Update intervals for operands live at the end of this block; + BitSet live = blockData.get(block).liveOut; + for (int operandNum = live.nextSetBit(0); operandNum >= 0; operandNum = live.nextSetBit(operandNum + 1)) { + assert live.get(operandNum) : "should not stop here otherwise"; + AllocatableValue operand = intervalFor(operandNum).operand; + Debug.log("live in %d: %s", operandNum, operand); + + addUse(operand, blockFrom, blockTo + 2, RegisterPriority.None, LIRKind.Illegal); + + // add special use positions for loop-end blocks when the + // interval is used anywhere inside this loop. It's possible + // that the block was part of a non-natural loop, so it might + // have an invalid loop index. + if (block.isLoopEnd() && block.getLoop() != null && isIntervalInLoop(operandNum, block.getLoop().getIndex())) { + intervalFor(operandNum).addUsePos(blockTo + 1, RegisterPriority.LiveAtLoopEnd); + } + } + + // iterate all instructions of the block in reverse order. + // definitions of intervals are processed before uses + for (int j = instructions.size() - 1; j >= 0; j--) { + final LIRInstruction op = instructions.get(j); + final int opId = op.id(); + + try (Indent indent3 = Debug.logAndIndent("handle inst %d: %s", opId, op)) { + + // add a temp range for each register if operation destroys + // caller-save registers + if (op.destroysCallerSavedRegisters()) { + for (Register r : callerSaveRegs) { + if (attributes(r).isAllocatable()) { + addTemp(r.asValue(), opId, RegisterPriority.None, LIRKind.Illegal); + } + } + Debug.log("operation destroys all caller-save registers"); + } + + op.visitEachOutput(outputConsumer); + op.visitEachTemp(tempConsumer); + op.visitEachAlive(aliveConsumer); + op.visitEachInput(inputConsumer); + + // Add uses of live locals from interpreter's point of view for proper + // debug information generation + // Treat these operands as temp values (if the live range is extended + // to a call site, the value would be in a register at + // the call otherwise) + op.visitEachState(stateProc); + + // special steps for some instructions (especially moves) + handleMethodArguments(op); + + } + + } // end of instruction iteration + } + } // end of block iteration + + // add the range [0, 1] to all fixed intervals. 
+ // the register allocator need not handle unhandled fixed intervals + for (Interval interval : intervals) { + if (interval != null && isRegister(interval.operand)) { + interval.addRange(0, 1); + } + } + } + } + + // * Phase 5: actual register allocation + + private static boolean isSorted(Interval[] intervals) { + int from = -1; + for (Interval interval : intervals) { + assert interval != null; + assert from <= interval.from(); + from = interval.from(); + } + return true; + } + + static Interval addToList(Interval first, Interval prev, Interval interval) { + Interval newFirst = first; + if (prev != null) { + prev.next = interval; + } else { + newFirst = interval; + } + return newFirst; + } + + Interval.Pair createUnhandledLists(IntervalPredicate isList1, IntervalPredicate isList2) { + assert isSorted(sortedIntervals) : "interval list is not sorted"; + + Interval list1 = Interval.EndMarker; + Interval list2 = Interval.EndMarker; + + Interval list1Prev = null; + Interval list2Prev = null; + Interval v; + + int n = sortedIntervals.length; + for (int i = 0; i < n; i++) { + v = sortedIntervals[i]; + if (v == null) { + continue; + } + + if (isList1.apply(v)) { + list1 = addToList(list1, list1Prev, v); + list1Prev = v; + } else if (isList2 == null || isList2.apply(v)) { + list2 = addToList(list2, list2Prev, v); + list2Prev = v; + } + } + + if (list1Prev != null) { + list1Prev.next = Interval.EndMarker; + } + if (list2Prev != null) { + list2Prev.next = Interval.EndMarker; + } + + assert list1Prev == null || list1Prev.next == Interval.EndMarker : "linear list ends not with sentinel"; + assert list2Prev == null || list2Prev.next == Interval.EndMarker : "linear list ends not with sentinel"; + + return new Interval.Pair(list1, list2); + } + + void sortIntervalsBeforeAllocation() { + int sortedLen = 0; + for (Interval interval : intervals) { + if (interval != null) { + sortedLen++; + } + } + + Interval[] sortedList = new Interval[sortedLen]; + int sortedIdx = 0; + int sortedFromMax = -1; + + // special sorting algorithm: the original interval-list is almost sorted, + // only some intervals are swapped. 
So this is much faster than a complete QuickSort + for (Interval interval : intervals) { + if (interval != null) { + int from = interval.from(); + + if (sortedFromMax <= from) { + sortedList[sortedIdx++] = interval; + sortedFromMax = interval.from(); + } else { + // the assumption that the intervals are already sorted failed, + // so this interval must be sorted in manually + int j; + for (j = sortedIdx - 1; j >= 0 && from < sortedList[j].from(); j--) { + sortedList[j + 1] = sortedList[j]; + } + sortedList[j + 1] = interval; + sortedIdx++; + } + } + } + sortedIntervals = sortedList; + } + + void sortIntervalsAfterAllocation() { + if (firstDerivedIntervalIndex == -1) { + // no intervals have been added during allocation, so sorted list is already up to date + return; + } + + Interval[] oldList = sortedIntervals; + Interval[] newList = Arrays.copyOfRange(intervals, firstDerivedIntervalIndex, intervalsSize); + int oldLen = oldList.length; + int newLen = newList.length; + + // conventional sort-algorithm for new intervals + Arrays.sort(newList, (Interval a, Interval b) -> a.from() - b.from()); + + // merge old and new list (both already sorted) into one combined list + Interval[] combinedList = new Interval[oldLen + newLen]; + int oldIdx = 0; + int newIdx = 0; + + while (oldIdx + newIdx < combinedList.length) { + if (newIdx >= newLen || (oldIdx < oldLen && oldList[oldIdx].from() <= newList[newIdx].from())) { + combinedList[oldIdx + newIdx] = oldList[oldIdx]; + oldIdx++; + } else { + combinedList[oldIdx + newIdx] = newList[newIdx]; + newIdx++; + } + } + + sortedIntervals = combinedList; + } + + public void allocateRegisters() { + try (Indent indent = Debug.logAndIndent("allocate registers")) { + Interval precoloredIntervals; + Interval notPrecoloredIntervals; + + Interval.Pair result = createUnhandledLists(IS_PRECOLORED_INTERVAL, IS_VARIABLE_INTERVAL); + precoloredIntervals = result.first; + notPrecoloredIntervals = result.second; + + // allocate cpu registers + LinearScanWalker lsw; + if (OptimizingLinearScanWalker.Options.LSRAOptimization.getValue()) { + lsw = new OptimizingLinearScanWalker(this, precoloredIntervals, notPrecoloredIntervals); + } else { + lsw = new LinearScanWalker(this, precoloredIntervals, notPrecoloredIntervals); + } + lsw.walk(); + lsw.finishAllocation(); + } + } + + // * Phase 6: resolve data flow + // (insert moves at edges between blocks if intervals have been split) + + // wrapper for Interval.splitChildAtOpId that performs a bailout in product mode + // instead of returning null + Interval splitChildAtOpId(Interval interval, int opId, LIRInstruction.OperandMode mode) { + Interval result = interval.getSplitChildAtOpId(opId, mode, this); + + if (result != null) { + Debug.log("Split child at pos %d of interval %s is %s", opId, interval, result); + return result; + } + + throw new BailoutException("LinearScan: interval is null"); + } + + Interval intervalAtBlockBegin(AbstractBlock block, int operandNumber) { + return splitChildAtOpId(intervalFor(operandNumber), getFirstLirInstructionId(block), LIRInstruction.OperandMode.DEF); + } + + Interval intervalAtBlockEnd(AbstractBlock block, int operandNumber) { + return splitChildAtOpId(intervalFor(operandNumber), getLastLirInstructionId(block) + 1, LIRInstruction.OperandMode.DEF); + } + + void resolveCollectMappings(AbstractBlock fromBlock, AbstractBlock toBlock, MoveResolver moveResolver) { + assert moveResolver.checkEmpty(); + + int numOperands = operandSize(); + BitSet liveAtEdge = blockData.get(toBlock).liveIn; + + // visit 
all variables for which the liveAtEdge bit is set + for (int operandNum = liveAtEdge.nextSetBit(0); operandNum >= 0; operandNum = liveAtEdge.nextSetBit(operandNum + 1)) { + assert operandNum < numOperands : "live information set for non-existing interval"; + assert blockData.get(fromBlock).liveOut.get(operandNum) && blockData.get(toBlock).liveIn.get(operandNum) : "interval not live at this edge"; + + Interval fromInterval = intervalAtBlockEnd(fromBlock, operandNum); + Interval toInterval = intervalAtBlockBegin(toBlock, operandNum); + + if (fromInterval != toInterval && !fromInterval.location().equals(toInterval.location())) { + // need to insert move instruction + moveResolver.addMapping(fromInterval, toInterval); + } + } + } + + void resolveFindInsertPos(AbstractBlock fromBlock, AbstractBlock toBlock, MoveResolver moveResolver) { + if (fromBlock.getSuccessorCount() <= 1) { + Debug.log("inserting moves at end of fromBlock B%d", fromBlock.getId()); + + List instructions = ir.getLIRforBlock(fromBlock); + LIRInstruction instr = instructions.get(instructions.size() - 1); + if (instr instanceof StandardOp.JumpOp) { + // insert moves before branch + moveResolver.setInsertPosition(instructions, instructions.size() - 1); + } else { + moveResolver.setInsertPosition(instructions, instructions.size()); + } + + } else { + Debug.log("inserting moves at beginning of toBlock B%d", toBlock.getId()); + + if (DetailedAsserts.getValue()) { + assert ir.getLIRforBlock(fromBlock).get(0) instanceof StandardOp.LabelOp : "block does not start with a label"; + + // because the number of predecessor edges matches the number of + // successor edges, blocks which are reached by switch statements + may have more than one predecessor, but it is guaranteed + that all predecessors are the same. + for (AbstractBlock predecessor : toBlock.getPredecessors()) { + assert fromBlock == predecessor : "all critical edges must be broken"; + } + } + + moveResolver.setInsertPosition(ir.getLIRforBlock(toBlock), 1); + } + } + + /** + * Inserts necessary moves (spilling or reloading) at edges between blocks for intervals that + have been split.
+ */ + void resolveDataFlow() { + try (Indent indent = Debug.logAndIndent("resolve data flow")) { + + int numBlocks = blockCount(); + MoveResolver moveResolver = new MoveResolver(this); + BitSet blockCompleted = new BitSet(numBlocks); + BitSet alreadyResolved = new BitSet(numBlocks); + + for (AbstractBlock block : sortedBlocks) { + + // check if block has only one predecessor and only one successor + if (block.getPredecessorCount() == 1 && block.getSuccessorCount() == 1) { + List instructions = ir.getLIRforBlock(block); + assert instructions.get(0) instanceof StandardOp.LabelOp : "block must start with label"; + assert instructions.get(instructions.size() - 1) instanceof StandardOp.JumpOp : "block with successor must end with unconditional jump"; + + // check if block is empty (only label and branch) + if (instructions.size() == 2) { + AbstractBlock pred = block.getPredecessors().iterator().next(); + AbstractBlock sux = block.getSuccessors().iterator().next(); + + // prevent optimization of two consecutive blocks + if (!blockCompleted.get(pred.getLinearScanNumber()) && !blockCompleted.get(sux.getLinearScanNumber())) { + Debug.log(" optimizing empty block B%d (pred: B%d, sux: B%d)", block.getId(), pred.getId(), sux.getId()); + + blockCompleted.set(block.getLinearScanNumber()); + + // directly resolve between pred and sux (without looking + // at the empty block + // between) + resolveCollectMappings(pred, sux, moveResolver); + if (moveResolver.hasMappings()) { + moveResolver.setInsertPosition(instructions, 1); + moveResolver.resolveAndAppendMoves(); + } + } + } + } + } + + for (AbstractBlock fromBlock : sortedBlocks) { + if (!blockCompleted.get(fromBlock.getLinearScanNumber())) { + alreadyResolved.clear(); + alreadyResolved.or(blockCompleted); + + for (AbstractBlock toBlock : fromBlock.getSuccessors()) { + + // check for duplicate edges between the same blocks (can happen with switch + // blocks) + if (!alreadyResolved.get(toBlock.getLinearScanNumber())) { + Debug.log("processing edge between B%d and B%d", fromBlock.getId(), toBlock.getId()); + + alreadyResolved.set(toBlock.getLinearScanNumber()); + + // collect all intervals that have been split between + // fromBlock and toBlock + resolveCollectMappings(fromBlock, toBlock, moveResolver); + if (moveResolver.hasMappings()) { + resolveFindInsertPos(fromBlock, toBlock, moveResolver); + moveResolver.resolveAndAppendMoves(); + } + } + } + } + } + } + } + + // * Phase 7: assign register numbers back to LIR + // (includes computation of debug information and oop maps) + + static StackSlotValue canonicalSpillOpr(Interval interval) { + assert interval.spillSlot() != null : "canonical spill slot not set"; + return interval.spillSlot(); + } + + /** + * Assigns the allocated location for an LIR instruction operand back into the instruction. + * + * @param operand an LIR instruction operand + * @param opId the id of the LIR instruction using {@code operand} + * @param mode the usage mode for {@code operand} by the instruction + * @return the location assigned for the operand + */ + private Value colorLirOperand(Variable operand, int opId, OperandMode mode) { + Interval interval = intervalFor(operand); + assert interval != null : "interval must exist"; + + if (opId != -1) { + if (DetailedAsserts.getValue()) { + AbstractBlock block = blockForId(opId); + if (block.getSuccessorCount() <= 1 && opId == getLastLirInstructionId(block)) { + // check if spill moves could have been appended at the end of this block, but + // before the branch instruction. 
So the split child information for this branch + // would + // be incorrect. + LIRInstruction instr = ir.getLIRforBlock(block).get(ir.getLIRforBlock(block).size() - 1); + if (instr instanceof StandardOp.JumpOp) { + if (blockData.get(block).liveOut.get(operandNumber(operand))) { + assert false : "can't get split child for the last branch of a block because the information would be incorrect (moves are inserted before the branch in resolveDataFlow)"; + } + } + } + } + + // operands are not changed when an interval is split during allocation, + // so search the right interval here + interval = splitChildAtOpId(interval, opId, mode); + } + + if (isIllegal(interval.location()) && interval.canMaterialize()) { + assert mode != OperandMode.DEF; + return interval.getMaterializedValue(); + } + return interval.location(); + } + + private boolean isMaterialized(AllocatableValue operand, int opId, OperandMode mode) { + Interval interval = intervalFor(operand); + assert interval != null : "interval must exist"; + + if (opId != -1) { + // operands are not changed when an interval is split during allocation, + // so search the right interval here + interval = splitChildAtOpId(interval, opId, mode); + } + + return isIllegal(interval.location()) && interval.canMaterialize(); + } + + protected IntervalWalker initIntervalWalker(IntervalPredicate predicate) { + // setup lists of potential oops for walking + Interval oopIntervals; + Interval nonOopIntervals; + + oopIntervals = createUnhandledLists(predicate, null).first; + + // intervals that have no oops inside need not to be processed. + // to ensure a walking until the last instruction id, add a dummy interval + // with a high operation id + nonOopIntervals = new Interval(Value.ILLEGAL, -1); + nonOopIntervals.addRange(Integer.MAX_VALUE - 2, Integer.MAX_VALUE - 1); + + return new IntervalWalker(this, oopIntervals, nonOopIntervals); + } + + private boolean isCallerSave(Value operand) { + return attributes(asRegister(operand)).isCallerSave(); + } + + /** + * @param op + * @param operand + * @param valueMode + * @param flags + * @see InstructionValueProcedure#doValue(LIRInstruction, Value, OperandMode, EnumSet) + */ + private Value debugInfoProcedure(LIRInstruction op, Value operand, OperandMode valueMode, EnumSet flags) { + if (isVirtualStackSlot(operand)) { + return operand; + } + int tempOpId = op.id(); + OperandMode mode = OperandMode.USE; + AbstractBlock block = blockForId(tempOpId); + if (block.getSuccessorCount() == 1 && tempOpId == getLastLirInstructionId(block)) { + // generating debug information for the last instruction of a block. + // if this instruction is a branch, spill moves are inserted before this branch + // and so the wrong operand would be returned (spill moves at block boundaries + // are not + // considered in the live ranges of intervals) + // Solution: use the first opId of the branch target block instead. 
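+ // the switch to the successor block below only happens for an unconditional jump when the operand is live out of this block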
+ final LIRInstruction instr = ir.getLIRforBlock(block).get(ir.getLIRforBlock(block).size() - 1); + if (instr instanceof StandardOp.JumpOp) { + if (blockData.get(block).liveOut.get(operandNumber(operand))) { + tempOpId = getFirstLirInstructionId(block.getSuccessors().iterator().next()); + mode = OperandMode.DEF; + } + } + } + + // Get current location of operand + // The operand must be live because debug information is considered when building + // the intervals + // if the interval is not live, colorLirOperand will cause an assert on failure + Value result = colorLirOperand((Variable) operand, tempOpId, mode); + assert !hasCall(tempOpId) || isStackSlotValue(result) || isConstant(result) || !isCallerSave(result) : "cannot have caller-save register operands at calls"; + return result; + } + + private void computeDebugInfo(final LIRInstruction op, LIRFrameState info) { + info.forEachState(op, this::debugInfoProcedure); + } + + private void assignLocations(List instructions) { + int numInst = instructions.size(); + boolean hasDead = false; + + InstructionValueProcedure assignProc = (op, operand, mode, flags) -> isVariable(operand) ? colorLirOperand((Variable) operand, op.id(), mode) : operand; + for (int j = 0; j < numInst; j++) { + final LIRInstruction op = instructions.get(j); + if (op == null) { // this can happen when spill-moves are removed in eliminateSpillMoves + hasDead = true; + continue; + } + + // remove useless moves + MoveOp move = null; + if (op instanceof MoveOp) { + move = (MoveOp) op; + AllocatableValue result = move.getResult(); + if (isVariable(result) && isMaterialized(result, op.id(), OperandMode.DEF)) { + /* + * This happens if a materializable interval is originally not spilled but then + * kicked out in LinearScanWalker.splitForSpilling(). When kicking out such an + * interval this move operation was already generated. + */ + instructions.set(j, null); + hasDead = true; + continue; + } + } + + op.forEachInput(assignProc); + op.forEachAlive(assignProc); + op.forEachTemp(assignProc); + op.forEachOutput(assignProc); + + // compute reference map and debug information + op.forEachState((inst, state) -> computeDebugInfo(inst, state)); + + // remove useless moves + if (move != null) { + if (move.getInput().equals(move.getResult())) { + instructions.set(j, null); + hasDead = true; + } + } + } + + if (hasDead) { + // Remove null values from the list. + instructions.removeAll(Collections.singleton(null)); + } + } + + private void assignLocations() { + try (Indent indent = Debug.logAndIndent("assign locations")) { + for (AbstractBlock block : sortedBlocks) { + try (Indent indent2 = Debug.logAndIndent("assign locations in block B%d", block.getId())) { + assignLocations(ir.getLIRforBlock(block)); + } + } + } + } + + public static void allocate(TargetDescription target, LIRGenerationResult res) { + new LinearScan(target, res).allocate(); + } + + private void allocate() { + + /* + * This is the point to enable debug logging for the whole register allocation. 
+ */ + try (Indent indent = Debug.logAndIndent("LinearScan allocate")) { + + try (Scope s = Debug.scope("LifetimeAnalysis")) { + numberInstructions(); + printLir("Before register allocation", true); + computeLocalLiveSets(); + computeGlobalLiveSets(); + buildIntervals(); + sortIntervalsBeforeAllocation(); + } catch (Throwable e) { + throw Debug.handle(e); + } + + try (Scope s = Debug.scope("RegisterAllocation")) { + printIntervals("Before register allocation"); + allocateRegisters(); + } catch (Throwable e) { + throw Debug.handle(e); + } + + if (Options.LSRAOptimizeSpillPosition.getValue()) { + try (Scope s = Debug.scope("OptimizeSpillPosition")) { + optimizeSpillPosition(); + } catch (Throwable e) { + throw Debug.handle(e); + } + } + + try (Scope s = Debug.scope("ResolveDataFlow")) { + resolveDataFlow(); + } catch (Throwable e) { + throw Debug.handle(e); + } + + try (Scope s = Debug.scope("DebugInfo")) { + printIntervals("After register allocation"); + printLir("After register allocation", true); + + sortIntervalsAfterAllocation(); + + if (DetailedAsserts.getValue()) { + verify(); + } + + try (Scope s1 = Debug.scope("EliminateSpillMove")) { + eliminateSpillMoves(); + } catch (Throwable e) { + throw Debug.handle(e); + } + printLir("After spill move elimination", true); + + try (Scope s1 = Debug.scope("AssignLocations")) { + assignLocations(); + } catch (Throwable e) { + throw Debug.handle(e); + } + + if (DetailedAsserts.getValue()) { + verifyIntervals(); + } + } catch (Throwable e) { + throw Debug.handle(e); + } + + printLir("After register number assignment", true); + } + } + + private DebugMetric betterSpillPos = Debug.metric("BetterSpillPosition"); + private DebugMetric betterSpillPosWithLowerProbability = Debug.metric("BetterSpillPositionWithLowerProbability"); + + private void optimizeSpillPosition() { + LIRInsertionBuffer[] insertionBuffers = new LIRInsertionBuffer[ir.linearScanOrder().size()]; + for (Interval interval : intervals) { + if (interval != null && interval.isSplitParent() && interval.spillState() == SpillState.SpillInDominator) { + AbstractBlock defBlock = blockForId(interval.spillDefinitionPos()); + AbstractBlock spillBlock = null; + Interval firstSpillChild = null; + try (Indent indent = Debug.logAndIndent("interval %s (%s)", interval, defBlock)) { + for (Interval splitChild : interval.getSplitChildren()) { + if (isStackSlotValue(splitChild.location())) { + if (firstSpillChild == null || splitChild.from() < firstSpillChild.from()) { + firstSpillChild = splitChild; + } else { + assert firstSpillChild.from() < splitChild.from(); + } + // iterate all blocks where the interval has use positions + for (AbstractBlock splitBlock : blocksForInterval(splitChild)) { + if (dominates(defBlock, splitBlock)) { + Debug.log("Split interval %s, block %s", splitChild, splitBlock); + if (spillBlock == null) { + spillBlock = splitBlock; + } else { + spillBlock = commonDominator(spillBlock, splitBlock); + assert spillBlock != null; + } + } + } + } + } + if (spillBlock == null) { + // no spill interval + interval.setSpillState(SpillState.StoreAtDefinition); + } else { + // move out of loops + if (defBlock.getLoopDepth() < spillBlock.getLoopDepth()) { + spillBlock = moveSpillOutOfLoop(defBlock, spillBlock); + } + + /* + * If the spill block is the begin of the first split child (aka the value + * is on the stack) spill in the dominator. 
+ */ + assert firstSpillChild != null; + if (!defBlock.equals(spillBlock) && spillBlock.equals(blockForId(firstSpillChild.from()))) { + AbstractBlock dom = spillBlock.getDominator(); + Debug.log("Spill block (%s) is the beginning of a spill child -> use dominator (%s)", spillBlock, dom); + spillBlock = dom; + } + + if (!defBlock.equals(spillBlock)) { + assert dominates(defBlock, spillBlock); + betterSpillPos.increment(); + Debug.log("Better spill position found (Block %s)", spillBlock); + + if (defBlock.probability() <= spillBlock.probability()) { + // better spill block has the same probability -> do nothing + interval.setSpillState(SpillState.StoreAtDefinition); + } else { + LIRInsertionBuffer insertionBuffer = insertionBuffers[spillBlock.getId()]; + if (insertionBuffer == null) { + insertionBuffer = new LIRInsertionBuffer(); + insertionBuffers[spillBlock.getId()] = insertionBuffer; + insertionBuffer.init(ir.getLIRforBlock(spillBlock)); + } + int spillOpId = getFirstLirInstructionId(spillBlock); + // insert spill move + AllocatableValue fromLocation = interval.getSplitChildAtOpId(spillOpId, OperandMode.DEF, this).location(); + AllocatableValue toLocation = canonicalSpillOpr(interval); + LIRInstruction move = ir.getSpillMoveFactory().createMove(toLocation, fromLocation); + move.setId(DOMINATOR_SPILL_MOVE_ID); + /* + * We can use the insertion buffer directly because we always insert + * at position 1. + */ + insertionBuffer.append(1, move); + + betterSpillPosWithLowerProbability.increment(); + interval.setSpillDefinitionPos(spillOpId); + } + } else { + // definition is the best choice + interval.setSpillState(SpillState.StoreAtDefinition); + } + } + } + } + } + for (LIRInsertionBuffer insertionBuffer : insertionBuffers) { + if (insertionBuffer != null) { + assert insertionBuffer.initialized() : "Insertion buffer is nonnull but not initialized!"; + insertionBuffer.finish(); + } + } + } + + /** + * Iterate over all {@link AbstractBlock blocks} of an interval. 
+ */ + private class IntervalBlockIterator implements Iterator> { + + Range range; + AbstractBlock block; + + public IntervalBlockIterator(Interval interval) { + range = interval.first(); + block = blockForId(range.from); + } + + public AbstractBlock next() { + AbstractBlock currentBlock = block; + int nextBlockIndex = block.getLinearScanNumber() + 1; + if (nextBlockIndex < sortedBlocks.size()) { + block = sortedBlocks.get(nextBlockIndex); + if (range.to <= getFirstLirInstructionId(block)) { + range = range.next; + if (range == Range.EndMarker) { + block = null; + } else { + block = blockForId(range.from); + } + } + } else { + block = null; + } + return currentBlock; + } + + public boolean hasNext() { + return block != null; + } + } + + private Iterable> blocksForInterval(Interval interval) { + return new Iterable>() { + public Iterator> iterator() { + return new IntervalBlockIterator(interval); + } + }; + } + + private static AbstractBlock moveSpillOutOfLoop(AbstractBlock defBlock, AbstractBlock spillBlock) { + int defLoopDepth = defBlock.getLoopDepth(); + for (AbstractBlock block = spillBlock.getDominator(); !defBlock.equals(block); block = block.getDominator()) { + assert block != null : "spill block not dominated by definition block?"; + if (block.getLoopDepth() <= defLoopDepth) { + assert block.getLoopDepth() == defLoopDepth : "Cannot spill an interval outside of the loop where it is defined!"; + return block; + } + } + return defBlock; + } + + void printIntervals(String label) { + if (Debug.isLogEnabled()) { + try (Indent indent = Debug.logAndIndent("intervals %s", label)) { + for (Interval interval : intervals) { + if (interval != null) { + Debug.log("%s", interval.logString(this)); + } + } + + try (Indent indent2 = Debug.logAndIndent("Basic Blocks")) { + for (int i = 0; i < blockCount(); i++) { + AbstractBlock block = blockAt(i); + Debug.log("B%d [%d, %d, %s] ", block.getId(), getFirstLirInstructionId(block), getLastLirInstructionId(block), block.getLoop()); + } + } + } + } + Debug.dump(Arrays.copyOf(intervals, intervalsSize), label); + } + + void printLir(String label, @SuppressWarnings("unused") boolean hirValid) { + Debug.dump(ir, label); + } + + boolean verify() { + // (check that all intervals have a correct register and that no registers are overwritten) + verifyIntervals(); + + verifyRegisters(); + + Debug.log("no errors found"); + + return true; + } + + private void verifyRegisters() { + // Enable this logging to get output for the verification process. 
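/*
 * Sketch of the pairwise consistency check that the interval verification below relies on:
 * two intervals whose live ranges intersect must not have been assigned the same register.
 * The Assigned type and its half-open ranges are simplified placeholders, not the Graal
 * Interval API.
 */
final class OverlapCheckSketch {

    static final class Assigned {
        final int from;     // start of the live range (inclusive)
        final int to;       // end of the live range (exclusive)
        final int register; // assigned register number

        Assigned(int from, int to, int register) {
            this.from = from;
            this.to = to;
            this.register = register;
        }

        boolean intersects(Assigned other) {
            return this.from < other.to && other.from < this.to;
        }
    }

    /** Returns true if no two intersecting intervals share the same register. */
    static boolean noConflicts(Assigned[] intervals) {
        for (int i = 0; i < intervals.length; i++) {
            for (int j = i + 1; j < intervals.length; j++) {
                if (intervals[i].intersects(intervals[j]) && intervals[i].register == intervals[j].register) {
                    return false;
                }
            }
        }
        return true;
    }
}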
+ try (Indent indent = Debug.logAndIndent("verifying register allocation")) { + RegisterVerifier verifier = new RegisterVerifier(this); + verifier.verify(blockAt(0)); + } + } + + void verifyIntervals() { + try (Indent indent = Debug.logAndIndent("verifying intervals")) { + int len = intervalsSize; + + for (int i = 0; i < len; i++) { + Interval i1 = intervals[i]; + if (i1 == null) { + continue; + } + + i1.checkSplitChildren(); + + if (i1.operandNumber != i) { + Debug.log("Interval %d is on position %d in list", i1.operandNumber, i); + Debug.log(i1.logString(this)); + throw new GraalInternalError(""); + } + + if (isVariable(i1.operand) && i1.kind().equals(LIRKind.Illegal)) { + Debug.log("Interval %d has no type assigned", i1.operandNumber); + Debug.log(i1.logString(this)); + throw new GraalInternalError(""); + } + + if (i1.location() == null) { + Debug.log("Interval %d has no register assigned", i1.operandNumber); + Debug.log(i1.logString(this)); + throw new GraalInternalError(""); + } + + if (i1.first() == Range.EndMarker) { + Debug.log("Interval %d has no Range", i1.operandNumber); + Debug.log(i1.logString(this)); + throw new GraalInternalError(""); + } + + for (Range r = i1.first(); r != Range.EndMarker; r = r.next) { + if (r.from >= r.to) { + Debug.log("Interval %d has zero length range", i1.operandNumber); + Debug.log(i1.logString(this)); + throw new GraalInternalError(""); + } + } + + for (int j = i + 1; j < len; j++) { + Interval i2 = intervals[j]; + if (i2 == null) { + continue; + } + + // special intervals that are created in MoveResolver + // . ignore them because the range information has no meaning there + if (i1.from() == 1 && i1.to() == 2) { + continue; + } + if (i2.from() == 1 && i2.to() == 2) { + continue; + } + Value l1 = i1.location(); + Value l2 = i2.location(); + if (i1.intersects(i2) && !isIllegal(l1) && (l1.equals(l2))) { + if (DetailedAsserts.getValue()) { + Debug.log("Intervals %d and %d overlap and have the same register assigned", i1.operandNumber, i2.operandNumber); + Debug.log(i1.logString(this)); + Debug.log(i2.logString(this)); + } + throw new BailoutException(""); + } + } + } + } + } + + class CheckConsumer implements ValueConsumer { + + boolean ok; + Interval curInterval; + + @Override + public void visitValue(Value operand, OperandMode mode, EnumSet flags) { + if (isRegister(operand)) { + if (intervalFor(operand) == curInterval) { + ok = true; + } + } + } + } + + void verifyNoOopsInFixedIntervals() { + try (Indent indent = Debug.logAndIndent("verifying that no oops are in fixed intervals *")) { + CheckConsumer checkConsumer = new CheckConsumer(); + + Interval fixedIntervals; + Interval otherIntervals; + fixedIntervals = createUnhandledLists(IS_PRECOLORED_INTERVAL, null).first; + // to ensure a walking until the last instruction id, add a dummy interval + // with a high operation id + otherIntervals = new Interval(Value.ILLEGAL, -1); + otherIntervals.addRange(Integer.MAX_VALUE - 2, Integer.MAX_VALUE - 1); + IntervalWalker iw = new IntervalWalker(this, fixedIntervals, otherIntervals); + + for (AbstractBlock block : sortedBlocks) { + List instructions = ir.getLIRforBlock(block); + + for (int j = 0; j < instructions.size(); j++) { + LIRInstruction op = instructions.get(j); + + if (op.hasState()) { + iw.walkBefore(op.id()); + boolean checkLive = true; + + // Make sure none of the fixed registers is live across an + // oopmap since we can't handle that correctly. 
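/*
 * Standalone sketch of the question the assertion below asks: a fixed (pre-colored)
 * interval that stays live across an instruction carrying a state must be referenced by
 * that instruction, otherwise the register could not be described correctly at that point.
 * FixedInterval and unreferencedLiveAcross are illustrative names, not the Graal API.
 */
import java.util.*;

final class FixedLiveAcrossSketch {

    static final class FixedInterval {
        final int register; // the fixed register this interval is bound to
        final int to;       // end of the current live range (exclusive)

        FixedInterval(int register, int to) {
            this.register = register;
            this.to = to;
        }
    }

    /**
     * Returns the registers of fixed intervals that are live across the op at opId
     * but are not among the registers that op itself reads or writes.
     */
    static Set<Integer> unreferencedLiveAcross(List<FixedInterval> active, int opId, Set<Integer> referencedByOp) {
        Set<Integer> offenders = new HashSet<>();
        for (FixedInterval interval : active) {
            if (interval.to > opId + 1 && !referencedByOp.contains(interval.register)) {
                offenders.add(interval.register);
            }
        }
        return offenders; // expected to be empty when the invariant holds
    }
}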
+ if (checkLive) { + for (Interval interval = iw.activeLists.get(RegisterBinding.Fixed); interval != Interval.EndMarker; interval = interval.next) { + if (interval.currentTo() > op.id() + 1) { + // This interval is live out of this op so make sure + // that this interval represents some value that's + // referenced by this op either as an input or output. + checkConsumer.curInterval = interval; + checkConsumer.ok = false; + + op.visitEachInput(checkConsumer); + op.visitEachAlive(checkConsumer); + op.visitEachTemp(checkConsumer); + op.visitEachOutput(checkConsumer); + + assert checkConsumer.ok : "fixed intervals should never be live across an oopmap point"; + } + } + } + } + } + } + } + } + + /** + * Returns a value for a interval definition, which can be used for re-materialization. + * + * @param op An instruction which defines a value + * @param operand The destination operand of the instruction + * @param interval The interval for this defined value. + * @return Returns the value which is moved to the instruction and which can be reused at all + * reload-locations in case the interval of this instruction is spilled. Currently this + * can only be a {@link JavaConstant}. + */ + public static JavaConstant getMaterializedValue(LIRInstruction op, Value operand, Interval interval) { + if (op instanceof MoveOp) { + MoveOp move = (MoveOp) op; + if (move.getInput() instanceof JavaConstant) { + /* + * Check if the interval has any uses which would accept an stack location (priority + * == ShouldHaveRegister). Rematerialization of such intervals can result in a + * degradation, because rematerialization always inserts a constant load, even if + * the value is not needed in a register. + */ + Interval.UsePosList usePosList = interval.usePosList(); + int numUsePos = usePosList.size(); + for (int useIdx = 0; useIdx < numUsePos; useIdx++) { + Interval.RegisterPriority priority = usePosList.registerPriority(useIdx); + if (priority == Interval.RegisterPriority.ShouldHaveRegister) { + return null; + } + } + return (JavaConstant) move.getInput(); + } + } + return null; + } +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScanWalker.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LinearScanWalker.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,900 @@ +/* + * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.graal.lir.alloc.lsra; + +import static com.oracle.graal.api.code.CodeUtil.*; +import static com.oracle.graal.api.code.ValueUtil.*; +import static com.oracle.graal.lir.LIRValueUtil.*; + +import java.util.*; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.compiler.common.cfg.*; +import com.oracle.graal.compiler.common.util.*; +import com.oracle.graal.debug.*; +import com.oracle.graal.lir.*; +import com.oracle.graal.lir.StandardOp.MoveOp; +import com.oracle.graal.lir.alloc.lsra.Interval.*; + +/** + */ +class LinearScanWalker extends IntervalWalker { + + protected Register[] availableRegs; + + protected final int[] usePos; + protected final int[] blockPos; + + protected List[] spillIntervals; + + private MoveResolver moveResolver; // for ordering spill moves + + /** + * Only 10% of the lists in {@link #spillIntervals} are actually used. But when they are used, + * they can grow quite long. The maximum length observed was 45 (all numbers taken from a + * bootstrap run of Graal). Therefore, we initialize {@link #spillIntervals} with this marker + * value, and allocate a "real" list only on demand in {@link #setUsePos}. + */ + private static final List EMPTY_LIST = new ArrayList<>(0); + + // accessors mapped to same functions in class LinearScan + int blockCount() { + return allocator.blockCount(); + } + + AbstractBlock blockAt(int idx) { + return allocator.blockAt(idx); + } + + AbstractBlock blockOfOpWithId(int opId) { + return allocator.blockForId(opId); + } + + LinearScanWalker(LinearScan allocator, Interval unhandledFixedFirst, Interval unhandledAnyFirst) { + super(allocator, unhandledFixedFirst, unhandledAnyFirst); + + moveResolver = new MoveResolver(allocator); + spillIntervals = Util.uncheckedCast(new List[allocator.registers.length]); + for (int i = 0; i < allocator.registers.length; i++) { + spillIntervals[i] = EMPTY_LIST; + } + usePos = new int[allocator.registers.length]; + blockPos = new int[allocator.registers.length]; + } + + void initUseLists(boolean onlyProcessUsePos) { + for (Register register : availableRegs) { + int i = register.number; + usePos[i] = Integer.MAX_VALUE; + + if (!onlyProcessUsePos) { + blockPos[i] = Integer.MAX_VALUE; + spillIntervals[i].clear(); + } + } + } + + void excludeFromUse(Interval i) { + Value location = i.location(); + int i1 = asRegister(location).number; + if (i1 >= availableRegs[0].number && i1 <= availableRegs[availableRegs.length - 1].number) { + usePos[i1] = 0; + } + } + + void setUsePos(Interval interval, int usePos, boolean onlyProcessUsePos) { + if (usePos != -1) { + assert usePos != 0 : "must use excludeFromUse to set usePos to 0"; + int i = asRegister(interval.location()).number; + if (i >= availableRegs[0].number && i <= availableRegs[availableRegs.length - 1].number) { + if (this.usePos[i] > usePos) { + this.usePos[i] = usePos; + } + if (!onlyProcessUsePos) { + List list = spillIntervals[i]; + if (list == EMPTY_LIST) { + list = new ArrayList<>(2); + spillIntervals[i] = list; + } + list.add(interval); + } + } + } + } + + void setBlockPos(Interval i, int blockPos) { + if (blockPos != -1) { + int reg = asRegister(i.location()).number; + if (reg >= availableRegs[0].number && reg <= availableRegs[availableRegs.length - 1].number) { + if (this.blockPos[reg] > blockPos) { + this.blockPos[reg] = blockPos; + } + if (usePos[reg] > blockPos) { + usePos[reg] = blockPos; + } + } + } + } + + void freeExcludeActiveFixed() { + Interval interval = 
activeLists.get(RegisterBinding.Fixed); + while (interval != Interval.EndMarker) { + assert isRegister(interval.location()) : "active interval must have a register assigned"; + excludeFromUse(interval); + interval = interval.next; + } + } + + void freeExcludeActiveAny() { + Interval interval = activeLists.get(RegisterBinding.Any); + while (interval != Interval.EndMarker) { + assert isRegister(interval.location()) : "active interval must have a register assigned"; + excludeFromUse(interval); + interval = interval.next; + } + } + + void freeCollectInactiveFixed(Interval current) { + Interval interval = inactiveLists.get(RegisterBinding.Fixed); + while (interval != Interval.EndMarker) { + if (current.to() <= interval.currentFrom()) { + assert interval.currentIntersectsAt(current) == -1 : "must not intersect"; + setUsePos(interval, interval.currentFrom(), true); + } else { + setUsePos(interval, interval.currentIntersectsAt(current), true); + } + interval = interval.next; + } + } + + void freeCollectInactiveAny(Interval current) { + Interval interval = inactiveLists.get(RegisterBinding.Any); + while (interval != Interval.EndMarker) { + setUsePos(interval, interval.currentIntersectsAt(current), true); + interval = interval.next; + } + } + + void freeCollectUnhandled(RegisterBinding kind, Interval current) { + Interval interval = unhandledLists.get(kind); + while (interval != Interval.EndMarker) { + setUsePos(interval, interval.intersectsAt(current), true); + if (kind == RegisterBinding.Fixed && current.to() <= interval.from()) { + setUsePos(interval, interval.from(), true); + } + interval = interval.next; + } + } + + void spillExcludeActiveFixed() { + Interval interval = activeLists.get(RegisterBinding.Fixed); + while (interval != Interval.EndMarker) { + excludeFromUse(interval); + interval = interval.next; + } + } + + void spillBlockUnhandledFixed(Interval current) { + Interval interval = unhandledLists.get(RegisterBinding.Fixed); + while (interval != Interval.EndMarker) { + setBlockPos(interval, interval.intersectsAt(current)); + interval = interval.next; + } + } + + void spillBlockInactiveFixed(Interval current) { + Interval interval = inactiveLists.get(RegisterBinding.Fixed); + while (interval != Interval.EndMarker) { + if (current.to() > interval.currentFrom()) { + setBlockPos(interval, interval.currentIntersectsAt(current)); + } else { + assert interval.currentIntersectsAt(current) == -1 : "invalid optimization: intervals intersect"; + } + + interval = interval.next; + } + } + + void spillCollectActiveAny() { + Interval interval = activeLists.get(RegisterBinding.Any); + while (interval != Interval.EndMarker) { + setUsePos(interval, Math.min(interval.nextUsage(RegisterPriority.LiveAtLoopEnd, currentPosition), interval.to()), false); + interval = interval.next; + } + } + + void spillCollectInactiveAny(Interval current) { + Interval interval = inactiveLists.get(RegisterBinding.Any); + while (interval != Interval.EndMarker) { + if (interval.currentIntersects(current)) { + setUsePos(interval, Math.min(interval.nextUsage(RegisterPriority.LiveAtLoopEnd, currentPosition), interval.to()), false); + } + interval = interval.next; + } + } + + void insertMove(int operandId, Interval srcIt, Interval dstIt) { + // output all moves here. 
When source and target are equal, the move is + // optimized away later in assignRegNums + + int opId = (operandId + 1) & ~1; + AbstractBlock opBlock = allocator.blockForId(opId); + assert opId > 0 && allocator.blockForId(opId - 2) == opBlock : "cannot insert move at block boundary"; + + // calculate index of instruction inside instruction list of current block + // the minimal index (for a block with no spill moves) can be calculated because the + // numbering of instructions is known. + // When the block already contains spill moves, the index must be increased until the + // correct index is reached. + List instructions = allocator.ir.getLIRforBlock(opBlock); + int index = (opId - instructions.get(0).id()) >> 1; + assert instructions.get(index).id() <= opId : "error in calculation"; + + while (instructions.get(index).id() != opId) { + index++; + assert 0 <= index && index < instructions.size() : "index out of bounds"; + } + assert 1 <= index && index < instructions.size() : "index out of bounds"; + assert instructions.get(index).id() == opId : "error in calculation"; + + // insert new instruction before instruction at position index + moveResolver.moveInsertPosition(instructions, index); + moveResolver.addMapping(srcIt, dstIt); + } + + int findOptimalSplitPos(AbstractBlock minBlock, AbstractBlock maxBlock, int maxSplitPos) { + int fromBlockNr = minBlock.getLinearScanNumber(); + int toBlockNr = maxBlock.getLinearScanNumber(); + + assert 0 <= fromBlockNr && fromBlockNr < blockCount() : "out of range"; + assert 0 <= toBlockNr && toBlockNr < blockCount() : "out of range"; + assert fromBlockNr < toBlockNr : "must cross block boundary"; + + // Try to split at end of maxBlock. If this would be after + // maxSplitPos, then use the begin of maxBlock + int optimalSplitPos = allocator.getLastLirInstructionId(maxBlock) + 2; + if (optimalSplitPos > maxSplitPos) { + optimalSplitPos = allocator.getFirstLirInstructionId(maxBlock); + } + + int minLoopDepth = maxBlock.getLoopDepth(); + for (int i = toBlockNr - 1; i >= fromBlockNr; i--) { + AbstractBlock cur = blockAt(i); + + if (cur.getLoopDepth() < minLoopDepth) { + // block with lower loop-depth found . split at the end of this block + minLoopDepth = cur.getLoopDepth(); + optimalSplitPos = allocator.getLastLirInstructionId(cur) + 2; + } + } + assert optimalSplitPos > allocator.maxOpId() || allocator.isBlockBegin(optimalSplitPos) : "algorithm must move split pos to block boundary"; + + return optimalSplitPos; + } + + int findOptimalSplitPos(Interval interval, int minSplitPos, int maxSplitPos, boolean doLoopOptimization) { + int optimalSplitPos = -1; + if (minSplitPos == maxSplitPos) { + // trivial case, no optimization of split position possible + Debug.log("min-pos and max-pos are equal, no optimization possible"); + optimalSplitPos = minSplitPos; + + } else { + assert minSplitPos < maxSplitPos : "must be true then"; + assert minSplitPos > 0 : "cannot access minSplitPos - 1 otherwise"; + + // reason for using minSplitPos - 1: when the minimal split pos is exactly at the + // beginning of a block, then minSplitPos is also a possible split position. 
+ // Use the block before as minBlock, because then minBlock.lastLirInstructionId() + 2 == + // minSplitPos + AbstractBlock minBlock = allocator.blockForId(minSplitPos - 1); + + // reason for using maxSplitPos - 1: otherwise there would be an assert on failure + // when an interval ends at the end of the last block of the method + // (in this case, maxSplitPos == allocator().maxLirOpId() + 2, and there is no + // block at this opId) + AbstractBlock maxBlock = allocator.blockForId(maxSplitPos - 1); + + assert minBlock.getLinearScanNumber() <= maxBlock.getLinearScanNumber() : "invalid order"; + if (minBlock == maxBlock) { + // split position cannot be moved to block boundary : so split as late as possible + Debug.log("cannot move split pos to block boundary because minPos and maxPos are in same block"); + optimalSplitPos = maxSplitPos; + + } else { + if (interval.hasHoleBetween(maxSplitPos - 1, maxSplitPos) && !allocator.isBlockBegin(maxSplitPos)) { + // Do not move split position if the interval has a hole before maxSplitPos. + // Intervals resulting from Phi-Functions have more than one definition (marked + // as mustHaveRegister) with a hole before each definition. When the register is + // needed + // for the second definition : an earlier reloading is unnecessary. + Debug.log("interval has hole just before maxSplitPos, so splitting at maxSplitPos"); + optimalSplitPos = maxSplitPos; + + } else { + // seach optimal block boundary between minSplitPos and maxSplitPos + Debug.log("moving split pos to optimal block boundary between block B%d and B%d", minBlock.getId(), maxBlock.getId()); + + if (doLoopOptimization) { + // Loop optimization: if a loop-end marker is found between min- and + // max-position : + // then split before this loop + int loopEndPos = interval.nextUsageExact(RegisterPriority.LiveAtLoopEnd, allocator.getLastLirInstructionId(minBlock) + 2); + Debug.log("loop optimization: loop end found at pos %d", loopEndPos); + + assert loopEndPos > minSplitPos : "invalid order"; + if (loopEndPos < maxSplitPos) { + // loop-end marker found between min- and max-position + // if it is not the end marker for the same loop as the min-position : + // then move + // the max-position to this loop block. 
+ // Desired result: uses tagged as shouldHaveRegister inside a loop cause + // a reloading + // of the interval (normally, only mustHaveRegister causes a reloading) + AbstractBlock loopBlock = allocator.blockForId(loopEndPos); + + Debug.log("interval is used in loop that ends in block B%d, so trying to move maxBlock back from B%d to B%d", loopBlock.getId(), maxBlock.getId(), loopBlock.getId()); + assert loopBlock != minBlock : "loopBlock and minBlock must be different because block boundary is needed between"; + + optimalSplitPos = findOptimalSplitPos(minBlock, loopBlock, allocator.getLastLirInstructionId(loopBlock) + 2); + if (optimalSplitPos == allocator.getLastLirInstructionId(loopBlock) + 2) { + optimalSplitPos = -1; + Debug.log("loop optimization not necessary"); + } else { + Debug.log("loop optimization successful"); + } + } + } + + if (optimalSplitPos == -1) { + // not calculated by loop optimization + optimalSplitPos = findOptimalSplitPos(minBlock, maxBlock, maxSplitPos); + } + } + } + } + Debug.log("optimal split position: %d", optimalSplitPos); + + return optimalSplitPos; + } + + // split an interval at the optimal position between minSplitPos and + // maxSplitPos in two parts: + // 1) the left part has already a location assigned + // 2) the right part is sorted into to the unhandled-list + void splitBeforeUsage(Interval interval, int minSplitPos, int maxSplitPos) { + + try (Indent indent = Debug.logAndIndent("splitting interval %s between %d and %d", interval, minSplitPos, maxSplitPos)) { + + assert interval.from() < minSplitPos : "cannot split at start of interval"; + assert currentPosition < minSplitPos : "cannot split before current position"; + assert minSplitPos <= maxSplitPos : "invalid order"; + assert maxSplitPos <= interval.to() : "cannot split after end of interval"; + + int optimalSplitPos = findOptimalSplitPos(interval, minSplitPos, maxSplitPos, true); + + assert minSplitPos <= optimalSplitPos && optimalSplitPos <= maxSplitPos : "out of range"; + assert optimalSplitPos <= interval.to() : "cannot split after end of interval"; + assert optimalSplitPos > interval.from() : "cannot split at start of interval"; + + if (optimalSplitPos == interval.to() && interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos) == Integer.MAX_VALUE) { + // the split position would be just before the end of the interval + // . 
no split at all necessary + Debug.log("no split necessary because optimal split position is at end of interval"); + return; + } + + // must calculate this before the actual split is performed and before split position is + // moved to odd opId + boolean moveNecessary = !allocator.isBlockBegin(optimalSplitPos) && !interval.hasHoleBetween(optimalSplitPos - 1, optimalSplitPos); + + if (!allocator.isBlockBegin(optimalSplitPos)) { + // move position before actual instruction (odd opId) + optimalSplitPos = (optimalSplitPos - 1) | 1; + } + + Debug.log("splitting at position %d", optimalSplitPos); + + assert allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 1) : "split pos must be odd when not on block boundary"; + assert !allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 0) : "split pos must be even on block boundary"; + + Interval splitPart = interval.split(optimalSplitPos, allocator); + + splitPart.setInsertMoveWhenActivated(moveNecessary); + + assert splitPart.from() >= currentPosition : "cannot append new interval before current walk position"; + unhandledLists.addToListSortedByStartAndUsePositions(RegisterBinding.Any, splitPart); + + if (Debug.isLogEnabled()) { + Debug.log("left interval %s: %s", moveNecessary ? " " : "", interval.logString(allocator)); + Debug.log("right interval %s: %s", moveNecessary ? "(move)" : "", splitPart.logString(allocator)); + } + } + } + + // split an interval at the optimal position between minSplitPos and + // maxSplitPos in two parts: + // 1) the left part has already a location assigned + // 2) the right part is always on the stack and therefore ignored in further processing + + void splitForSpilling(Interval interval) { + // calculate allowed range of splitting position + int maxSplitPos = currentPosition; + int minSplitPos = Math.max(interval.previousUsage(RegisterPriority.ShouldHaveRegister, maxSplitPos) + 1, interval.from()); + + try (Indent indent = Debug.logAndIndent("splitting and spilling interval %s between %d and %d", interval, minSplitPos, maxSplitPos)) { + + assert interval.state == State.Active : "why spill interval that is not active?"; + assert interval.from() <= minSplitPos : "cannot split before start of interval"; + assert minSplitPos <= maxSplitPos : "invalid order"; + assert maxSplitPos < interval.to() : "cannot split at end end of interval"; + assert currentPosition < interval.to() : "interval must not end before current position"; + + if (minSplitPos == interval.from()) { + // the whole interval is never used, so spill it entirely to memory + + try (Indent indent2 = Debug.logAndIndent("spilling entire interval because split pos is at beginning of interval (use positions: %d)", interval.usePosList().size())) { + + assert interval.firstUsage(RegisterPriority.ShouldHaveRegister) > currentPosition : "interval must not have use position before currentPosition"; + + allocator.assignSpillSlot(interval); + handleSpillSlot(interval); + allocator.changeSpillState(interval, minSplitPos); + + // Also kick parent intervals out of register to memory when they have no use + // position. This avoids short interval in register surrounded by intervals in + // memory . 
avoid useless moves from memory to register and back + Interval parent = interval; + while (parent != null && parent.isSplitChild()) { + parent = parent.getSplitChildBeforeOpId(parent.from()); + + if (isRegister(parent.location())) { + if (parent.firstUsage(RegisterPriority.ShouldHaveRegister) == Integer.MAX_VALUE) { + // parent is never used, so kick it out of its assigned register + Debug.log("kicking out interval %d out of its register because it is never used", parent.operandNumber); + allocator.assignSpillSlot(parent); + handleSpillSlot(parent); + } else { + // do not go further back because the register is actually used by + // the interval + parent = null; + } + } + } + } + + } else { + // search optimal split pos, split interval and spill only the right hand part + int optimalSplitPos = findOptimalSplitPos(interval, minSplitPos, maxSplitPos, false); + + assert minSplitPos <= optimalSplitPos && optimalSplitPos <= maxSplitPos : "out of range"; + assert optimalSplitPos < interval.to() : "cannot split at end of interval"; + assert optimalSplitPos >= interval.from() : "cannot split before start of interval"; + + if (!allocator.isBlockBegin(optimalSplitPos)) { + // move position before actual instruction (odd opId) + optimalSplitPos = (optimalSplitPos - 1) | 1; + } + + try (Indent indent2 = Debug.logAndIndent("splitting at position %d", optimalSplitPos)) { + assert allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 1) : "split pos must be odd when not on block boundary"; + assert !allocator.isBlockBegin(optimalSplitPos) || ((optimalSplitPos & 1) == 0) : "split pos must be even on block boundary"; + + Interval spilledPart = interval.split(optimalSplitPos, allocator); + allocator.assignSpillSlot(spilledPart); + handleSpillSlot(spilledPart); + allocator.changeSpillState(spilledPart, optimalSplitPos); + + if (!allocator.isBlockBegin(optimalSplitPos)) { + Debug.log("inserting move from interval %d to %d", interval.operandNumber, spilledPart.operandNumber); + insertMove(optimalSplitPos, interval, spilledPart); + } + + // the currentSplitChild is needed later when moves are inserted for reloading + assert spilledPart.currentSplitChild() == interval : "overwriting wrong currentSplitChild"; + spilledPart.makeCurrentSplitChild(); + + if (Debug.isLogEnabled()) { + Debug.log("left interval: %s", interval.logString(allocator)); + Debug.log("spilled interval : %s", spilledPart.logString(allocator)); + } + } + } + } + } + + /** + * This is called for every interval that is assigned to a stack slot. + */ + protected void handleSpillSlot(Interval interval) { + assert interval.location() != null && (interval.canMaterialize() || isStackSlotValue(interval.location())) : "interval not assigned to a stack slot " + interval; + // Do nothing. Stack slots are not processed in this implementation. 
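/*
 * Sketch of the opId rounding used by the splitting code above when a split position is
 * not at a block boundary: instructions sit at even opIds, so the split position is moved
 * to the odd slot just before the instruction. This is a standalone illustration under
 * that assumption, not the Graal code itself.
 */
final class SplitPosSketch {

    /** Moves a split position that is not a block begin to the odd opId before the instruction. */
    static int beforeInstruction(int splitPos) {
        return (splitPos - 1) | 1;
    }

    public static void main(String[] args) {
        // An even position (an instruction id) moves to the odd slot preceding it.
        assert beforeInstruction(10) == 9;
        // An odd position stays where it is.
        assert beforeInstruction(9) == 9;
    }
}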
+ } + + void splitStackInterval(Interval interval) { + int minSplitPos = currentPosition + 1; + int maxSplitPos = Math.min(interval.firstUsage(RegisterPriority.ShouldHaveRegister), interval.to()); + + splitBeforeUsage(interval, minSplitPos, maxSplitPos); + } + + void splitWhenPartialRegisterAvailable(Interval interval, int registerAvailableUntil) { + int minSplitPos = Math.max(interval.previousUsage(RegisterPriority.ShouldHaveRegister, registerAvailableUntil), interval.from() + 1); + splitBeforeUsage(interval, minSplitPos, registerAvailableUntil); + } + + void splitAndSpillInterval(Interval interval) { + assert interval.state == State.Active || interval.state == State.Inactive : "other states not allowed"; + + int currentPos = currentPosition; + if (interval.state == State.Inactive) { + // the interval is currently inactive, so no spill slot is needed for now. + // when the split part is activated, the interval has a new chance to get a register, + // so in the best case no stack slot is necessary + assert interval.hasHoleBetween(currentPos - 1, currentPos + 1) : "interval can not be inactive otherwise"; + splitBeforeUsage(interval, currentPos + 1, currentPos + 1); + + } else { + // search the position where the interval must have a register and split + // at the optimal position before. + // The new created part is added to the unhandled list and will get a register + // when it is activated + int minSplitPos = currentPos + 1; + int maxSplitPos = Math.min(interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos), interval.to()); + + splitBeforeUsage(interval, minSplitPos, maxSplitPos); + + assert interval.nextUsage(RegisterPriority.MustHaveRegister, currentPos) == Integer.MAX_VALUE : "the remaining part is spilled to stack and therefore has no register"; + splitForSpilling(interval); + } + } + + boolean allocFreeRegister(Interval interval) { + try (Indent indent = Debug.logAndIndent("trying to find free register for %s", interval)) { + + initUseLists(true); + freeExcludeActiveFixed(); + freeExcludeActiveAny(); + freeCollectInactiveFixed(interval); + freeCollectInactiveAny(interval); + // freeCollectUnhandled(fixedKind, cur); + assert unhandledLists.get(RegisterBinding.Fixed) == Interval.EndMarker : "must not have unhandled fixed intervals because all fixed intervals have a use at position 0"; + + // usePos contains the start of the next interval that has this register assigned + // (either as a fixed register or a normal allocated register in the past) + // only intervals overlapping with cur are processed, non-overlapping invervals can be + // ignored safely + if (Debug.isLogEnabled()) { + // Enable this logging to see all register states + try (Indent indent2 = Debug.logAndIndent("state of registers:")) { + for (Register register : availableRegs) { + int i = register.number; + Debug.log("reg %d: usePos: %d", register.number, usePos[i]); + } + } + } + + Register hint = null; + Interval locationHint = interval.locationHint(true); + if (locationHint != null && locationHint.location() != null && isRegister(locationHint.location())) { + hint = asRegister(locationHint.location()); + Debug.log("hint register %d from interval %s", hint.number, locationHint); + } + assert interval.location() == null : "register already assigned to interval"; + + // the register must be free at least until this position + int regNeededUntil = interval.from() + 1; + int intervalTo = interval.to(); + + boolean needSplit = false; + int splitPos = -1; + + Register reg = null; + Register minFullReg = null; + 
Register maxPartialReg = null; + + for (int i = 0; i < availableRegs.length; ++i) { + Register availableReg = availableRegs[i]; + int number = availableReg.number; + if (usePos[number] >= intervalTo) { + // this register is free for the full interval + if (minFullReg == null || availableReg.equals(hint) || (usePos[number] < usePos[minFullReg.number] && !minFullReg.equals(hint))) { + minFullReg = availableReg; + } + } else if (usePos[number] > regNeededUntil) { + // this register is at least free until regNeededUntil + if (maxPartialReg == null || availableReg.equals(hint) || (usePos[number] > usePos[maxPartialReg.number] && !maxPartialReg.equals(hint))) { + maxPartialReg = availableReg; + } + } + } + + if (minFullReg != null) { + reg = minFullReg; + } else if (maxPartialReg != null) { + needSplit = true; + reg = maxPartialReg; + } else { + return false; + } + + splitPos = usePos[reg.number]; + interval.assignLocation(reg.asValue(interval.kind())); + Debug.log("selected register %d", reg.number); + + assert splitPos > 0 : "invalid splitPos"; + if (needSplit) { + // register not available for full interval, so split it + splitWhenPartialRegisterAvailable(interval, splitPos); + } + // only return true if interval is completely assigned + return true; + } + } + + void splitAndSpillIntersectingIntervals(Register reg) { + assert reg != null : "no register assigned"; + + for (int i = 0; i < spillIntervals[reg.number].size(); i++) { + Interval interval = spillIntervals[reg.number].get(i); + removeFromList(interval); + splitAndSpillInterval(interval); + } + } + + // Split an Interval and spill it to memory so that cur can be placed in a register + void allocLockedRegister(Interval interval) { + try (Indent indent = Debug.logAndIndent("alloc locked register: need to split and spill to get register for %s", interval)) { + + // collect current usage of registers + initUseLists(false); + spillExcludeActiveFixed(); + // spillBlockUnhandledFixed(cur); + assert unhandledLists.get(RegisterBinding.Fixed) == Interval.EndMarker : "must not have unhandled fixed intervals because all fixed intervals have a use at position 0"; + spillBlockInactiveFixed(interval); + spillCollectActiveAny(); + spillCollectInactiveAny(interval); + + if (Debug.isLogEnabled()) { + try (Indent indent2 = Debug.logAndIndent("state of registers:")) { + for (Register reg : availableRegs) { + int i = reg.number; + try (Indent indent3 = Debug.logAndIndent("reg %d: usePos: %d, blockPos: %d, intervals: ", i, usePos[i], blockPos[i])) { + for (int j = 0; j < spillIntervals[i].size(); j++) { + Debug.log("%d ", spillIntervals[i].get(j).operandNumber); + } + } + } + } + } + + // the register must be free at least until this position + int firstUsage = interval.firstUsage(RegisterPriority.MustHaveRegister); + int regNeededUntil = Math.min(firstUsage, interval.from() + 1); + int intervalTo = interval.to(); + assert regNeededUntil > 0 && regNeededUntil < Integer.MAX_VALUE : "interval has no use"; + + Register reg = null; + Register ignore = interval.location() != null && isRegister(interval.location()) ? asRegister(interval.location()) : null; + for (Register availableReg : availableRegs) { + int number = availableReg.number; + if (availableReg.equals(ignore)) { + // this register must be ignored + } else if (usePos[number] > regNeededUntil) { + if (reg == null || (usePos[number] > usePos[reg.number])) { + reg = availableReg; + } + } + } + + int regUsePos = (reg == null ? 
0 : usePos[reg.number]); + if (regUsePos <= firstUsage) { + Debug.log("able to spill current interval. firstUsage(register): %d, usePos: %d", firstUsage, regUsePos); + + if (firstUsage <= interval.from() + 1) { + assert false : "cannot spill interval that is used in first instruction (possible reason: no register found) firstUsage=" + firstUsage + ", interval.from()=" + interval.from(); + // assign a reasonable register and do a bailout in product mode to avoid errors + allocator.assignSpillSlot(interval); + throw new BailoutException("LinearScan: no register found"); + } + + splitAndSpillInterval(interval); + return; + } + + boolean needSplit = blockPos[reg.number] <= intervalTo; + + int splitPos = blockPos[reg.number]; + + Debug.log("decided to use register %d", reg.number); + assert splitPos > 0 : "invalid splitPos"; + assert needSplit || splitPos > interval.from() : "splitting interval at from"; + + interval.assignLocation(reg.asValue(interval.kind())); + if (needSplit) { + // register not available for full interval : so split it + splitWhenPartialRegisterAvailable(interval, splitPos); + } + + // perform splitting and spilling for all affected intervals + splitAndSpillIntersectingIntervals(reg); + } + } + + boolean noAllocationPossible(Interval interval) { + if (allocator.callKillsRegisters) { + // fast calculation of intervals that can never get a register because the + // the next instruction is a call that blocks all registers + // Note: this only works if a call kills all registers + + // check if this interval is the result of a split operation + // (an interval got a register until this position) + int pos = interval.from(); + if (isOdd(pos)) { + // the current instruction is a call that blocks all registers + if (pos < allocator.maxOpId() && allocator.hasCall(pos + 1) && interval.to() > pos + 1) { + Debug.log("free register cannot be available because all registers blocked by following call"); + + // safety check that there is really no register available + assert !allocFreeRegister(interval) : "found a register for this interval"; + return true; + } + } + } + return false; + } + + void initVarsForAlloc(Interval interval) { + availableRegs = allocator.frameMapBuilder.getRegisterConfig().getAllocatableRegisters(interval.kind().getPlatformKind()); + } + + static boolean isMove(LIRInstruction op, Interval from, Interval to) { + if (op instanceof MoveOp) { + MoveOp move = (MoveOp) op; + if (isVariable(move.getInput()) && isVariable(move.getResult())) { + return move.getInput() != null && move.getInput().equals(from.operand) && move.getResult() != null && move.getResult().equals(to.operand); + } + } + return false; + } + + // optimization (especially for phi functions of nested loops): + // assign same spill slot to non-intersecting intervals + void combineSpilledIntervals(Interval interval) { + if (interval.isSplitChild()) { + // optimization is only suitable for split parents + return; + } + + Interval registerHint = interval.locationHint(false); + if (registerHint == null) { + // cur is not the target of a move : otherwise registerHint would be set + return; + } + assert registerHint.isSplitParent() : "register hint must be split parent"; + + if (interval.spillState() != SpillState.NoOptimization || registerHint.spillState() != SpillState.NoOptimization) { + // combining the stack slots for intervals where spill move optimization is applied + // is not benefitial and would cause problems + return; + } + + int beginPos = interval.from(); + int endPos = interval.to(); + if (endPos 
> allocator.maxOpId() || isOdd(beginPos) || isOdd(endPos)) { + // safety check that lirOpWithId is allowed + return; + } + + if (!isMove(allocator.instructionForId(beginPos), registerHint, interval) || !isMove(allocator.instructionForId(endPos), interval, registerHint)) { + // cur and registerHint are not connected with two moves + return; + } + + Interval beginHint = registerHint.getSplitChildAtOpId(beginPos, LIRInstruction.OperandMode.USE, allocator); + Interval endHint = registerHint.getSplitChildAtOpId(endPos, LIRInstruction.OperandMode.DEF, allocator); + if (beginHint == endHint || beginHint.to() != beginPos || endHint.from() != endPos) { + // registerHint must be split : otherwise the re-writing of use positions does not work + return; + } + + assert beginHint.location() != null : "must have register assigned"; + assert endHint.location() == null : "must not have register assigned"; + assert interval.firstUsage(RegisterPriority.MustHaveRegister) == beginPos : "must have use position at begin of interval because of move"; + assert endHint.firstUsage(RegisterPriority.MustHaveRegister) == endPos : "must have use position at begin of interval because of move"; + + if (isRegister(beginHint.location())) { + // registerHint is not spilled at beginPos : so it would not be benefitial to + // immediately spill cur + return; + } + assert registerHint.spillSlot() != null : "must be set when part of interval was spilled"; + + // modify intervals such that cur gets the same stack slot as registerHint + // delete use positions to prevent the intervals to get a register at beginning + interval.setSpillSlot(registerHint.spillSlot()); + interval.removeFirstUsePos(); + endHint.removeFirstUsePos(); + } + + // allocate a physical register or memory location to an interval + @Override + protected boolean activateCurrent(Interval interval) { + boolean result = true; + + try (Indent indent = Debug.logAndIndent("activating interval %s, splitParent: %d", interval, interval.splitParent().operandNumber)) { + + final Value operand = interval.operand; + if (interval.location() != null && isStackSlotValue(interval.location())) { + // activating an interval that has a stack slot assigned . split it at first use + // position + // used for method parameters + Debug.log("interval has spill slot assigned (method parameter) . split it before first use"); + splitStackInterval(interval); + result = false; + + } else { + if (interval.location() == null) { + // interval has not assigned register . normal allocation + // (this is the normal case for most intervals) + Debug.log("normal allocation of register"); + + // assign same spill slot to non-intersecting intervals + combineSpilledIntervals(interval); + + initVarsForAlloc(interval); + if (noAllocationPossible(interval) || !allocFreeRegister(interval)) { + // no empty register available. 
+ // split and spill another interval so that this interval gets a register + allocLockedRegister(interval); + } + + // spilled intervals need not be move to active-list + if (!isRegister(interval.location())) { + result = false; + } + } + } + + // load spilled values that become active from stack slot to register + if (interval.insertMoveWhenActivated()) { + assert interval.isSplitChild(); + assert interval.currentSplitChild() != null; + assert !interval.currentSplitChild().operand.equals(operand) : "cannot insert move between same interval"; + Debug.log("Inserting move from interval %d to %d because insertMoveWhenActivated is set", interval.currentSplitChild().operandNumber, interval.operandNumber); + + insertMove(interval.from(), interval.currentSplitChild(), interval); + } + interval.makeCurrentSplitChild(); + + } + + return result; // true = interval is moved to active list + } + + public void finishAllocation() { + // must be called when all intervals are allocated + moveResolver.resolveAndAppendMoves(); + } +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LocationMarker.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/LocationMarker.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,221 @@ +/* + * Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.lir.alloc.lsra; + +import static com.oracle.graal.api.code.ValueUtil.*; + +import java.util.*; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.compiler.common.cfg.*; +import com.oracle.graal.debug.*; +import com.oracle.graal.lir.*; +import com.oracle.graal.lir.LIRInstruction.OperandFlag; +import com.oracle.graal.lir.LIRInstruction.OperandMode; +import com.oracle.graal.lir.framemap.*; +import com.oracle.graal.options.*; + +public final class LocationMarker { + + public static class Options { + // @formatter:off + @Option(help = "Use decoupled pass for location marking (instead of using LSRA marking)", type = OptionType.Debug) + public static final OptionValue UseLocationMarker = new OptionValue<>(true); + // @formatter:on + } + + /** + * Mark all live references for a frame state. The frame state use this information to build the + * OOP maps. 
+ */ + public static void markLocations(LIR lir, FrameMap frameMap) { + new LocationMarker(lir, frameMap).build(); + } + + private final LIR lir; + private final FrameMap frameMap; + private final RegisterAttributes[] registerAttributes; + private final BlockMap liveInMap; + private final BlockMap liveOutMap; + + private LocationMarker(LIR lir, FrameMap frameMap) { + this.lir = lir; + this.frameMap = frameMap; + this.registerAttributes = frameMap.getRegisterConfig().getAttributesMap(); + liveInMap = new BlockMap<>(lir.getControlFlowGraph()); + liveOutMap = new BlockMap<>(lir.getControlFlowGraph()); + } + + private void build() { + Deque> worklist = new ArrayDeque<>(); + for (int i = lir.getControlFlowGraph().getBlocks().size() - 1; i >= 0; i--) { + worklist.add(lir.getControlFlowGraph().getBlocks().get(i)); + } + for (AbstractBlock block : lir.getControlFlowGraph().getBlocks()) { + liveInMap.put(block, frameMap.initReferenceMap(true)); + } + while (!worklist.isEmpty()) { + AbstractBlock block = worklist.poll(); + processBlock(block, worklist); + } + // finish states + for (AbstractBlock block : lir.getControlFlowGraph().getBlocks()) { + List instructions = lir.getLIRforBlock(block); + for (int i = instructions.size() - 1; i >= 0; i--) { + LIRInstruction inst = instructions.get(i); + inst.forEachState((op, info) -> info.finish(op, frameMap)); + } + + } + } + + /** + * Merge outSet with in-set of successors. + */ + private boolean updateOutBlock(AbstractBlock block) { + ReferenceMap union = frameMap.initReferenceMap(true); + block.getSuccessors().forEach(succ -> union.updateUnion(liveInMap.get(succ))); + ReferenceMap outSet = liveOutMap.get(block); + // check if changed + if (outSet == null || !union.equals(outSet)) { + liveOutMap.put(block, union); + return true; + } + return false; + } + + private void processBlock(AbstractBlock block, Deque> worklist) { + if (updateOutBlock(block)) { + try (Indent indent = Debug.logAndIndent("handle block %s", block)) { + BlockClosure closure = new BlockClosure(liveOutMap.get(block).clone()); + List instructions = lir.getLIRforBlock(block); + for (int i = instructions.size() - 1; i >= 0; i--) { + LIRInstruction inst = instructions.get(i); + closure.processInstructionBottomUp(inst); + } + liveInMap.put(block, closure.getCurrentSet()); + worklist.addAll(block.getPredecessors()); + } + } + } + + private static final EnumSet REGISTER_FLAG_SET = EnumSet.of(OperandFlag.REG); + private static final LIRKind REFERENCE_KIND = LIRKind.reference(Kind.Object); + + private void forEachDestroyedCallerSavedRegister(LIRInstruction op, ValueConsumer consumer) { + if (op.destroysCallerSavedRegisters()) { + for (Register reg : frameMap.getRegisterConfig().getCallerSaveRegisters()) { + consumer.visitValue(reg.asValue(REFERENCE_KIND), OperandMode.TEMP, REGISTER_FLAG_SET); + } + } + } + + private final class BlockClosure { + private final ReferenceMap currentSet; + + private BlockClosure(ReferenceMap set) { + currentSet = set; + } + + private ReferenceMap getCurrentSet() { + return currentSet; + } + + /** + * Process all values of an instruction bottom-up, i.e. definitions before usages. Values + * that start or end at the current operation are not included. 
+ */ + private void processInstructionBottomUp(LIRInstruction op) { + try (Indent indent = Debug.logAndIndent("handle op %d, %s", op.id(), op)) { + // kills + op.visitEachTemp(this::defConsumer); + op.visitEachOutput(this::defConsumer); + forEachDestroyedCallerSavedRegister(op, this::defConsumer); + + // gen - values that are considered alive for this state + op.visitEachAlive(this::useConsumer); + op.visitEachState(this::useConsumer); + // mark locations + op.forEachState((inst, info) -> markLocation(inst, info, this.getCurrentSet())); + // gen + op.visitEachInput(this::useConsumer); + } + } + + /** + * @see InstructionValueConsumer + * @param operand + * @param mode + * @param flags + */ + private void useConsumer(Value operand, OperandMode mode, EnumSet flags) { + LIRKind kind = operand.getLIRKind(); + if (shouldProcessValue(operand) && !kind.isValue() && !kind.isDerivedReference()) { + // no need to insert values and derived reference + Debug.log("set operand: %s", operand); + frameMap.setReference(operand, currentSet); + } + } + + /** + * @see InstructionValueConsumer + * @param operand + * @param mode + * @param flags + */ + private void defConsumer(Value operand, OperandMode mode, EnumSet flags) { + if (shouldProcessValue(operand)) { + Debug.log("clear operand: %s", operand); + frameMap.clearReference(operand, currentSet); + } else { + assert isIllegal(operand) || operand.getPlatformKind() != Kind.Illegal || mode == OperandMode.TEMP : String.format("Illegal PlatformKind is only allowed for TEMP mode: %s, %s", + operand, mode); + } + } + + protected boolean shouldProcessValue(Value operand) { + return (isRegister(operand) && attributes(asRegister(operand)).isAllocatable() || isStackSlot(operand)) && operand.getPlatformKind() != Kind.Illegal; + } + } + + /** + * This method does the actual marking. + */ + private void markLocation(LIRInstruction op, LIRFrameState info, ReferenceMap refMap) { + if (!info.hasDebugInfo()) { + info.initDebugInfo(frameMap, !op.destroysCallerSavedRegisters() || !frameMap.getRegisterConfig().areAllAllocatableRegistersCallerSaved()); + } + info.updateUnion(refMap); + } + + /** + * Gets an object describing the attributes of a given register according to this register + * configuration. + * + * @see LinearScan#attributes + */ + private RegisterAttributes attributes(Register reg) { + return registerAttributes[reg.number]; + } +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/MoveResolver.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/MoveResolver.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,364 @@ +/* + * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.lir.alloc.lsra; + +import static com.oracle.graal.api.code.ValueUtil.*; + +import java.util.*; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.debug.*; +import com.oracle.graal.lir.*; + +/** + */ +final class MoveResolver { + + private final LinearScan allocator; + + private int insertIdx; + private LIRInsertionBuffer insertionBuffer; // buffer where moves are inserted + + private final List mappingFrom; + private final List mappingFromOpr; + private final List mappingTo; + private boolean multipleReadsAllowed; + private final int[] registerBlocked; + + private int registerBlocked(int reg) { + return registerBlocked[reg]; + } + + private void setRegisterBlocked(int reg, int direction) { + assert direction == 1 || direction == -1 : "out of bounds"; + registerBlocked[reg] += direction; + } + + void setMultipleReadsAllowed() { + multipleReadsAllowed = true; + } + + boolean hasMappings() { + return mappingFrom.size() > 0; + } + + MoveResolver(LinearScan allocator) { + + this.allocator = allocator; + this.multipleReadsAllowed = false; + this.mappingFrom = new ArrayList<>(8); + this.mappingFromOpr = new ArrayList<>(8); + this.mappingTo = new ArrayList<>(8); + this.insertIdx = -1; + this.insertionBuffer = new LIRInsertionBuffer(); + this.registerBlocked = new int[allocator.registers.length]; + assert checkEmpty(); + } + + boolean checkEmpty() { + assert mappingFrom.size() == 0 && mappingFromOpr.size() == 0 && mappingTo.size() == 0 : "list must be empty before and after processing"; + for (int i = 0; i < allocator.registers.length; i++) { + assert registerBlocked(i) == 0 : "register map must be empty before and after processing"; + } + assert !multipleReadsAllowed : "must have default value"; + return true; + } + + private boolean verifyBeforeResolve() { + assert mappingFrom.size() == mappingFromOpr.size() : "length must be equal"; + assert mappingFrom.size() == mappingTo.size() : "length must be equal"; + assert insertIdx != -1 : "insert position not set"; + + int i; + int j; + if (!multipleReadsAllowed) { + for (i = 0; i < mappingFrom.size(); i++) { + for (j = i + 1; j < mappingFrom.size(); j++) { + assert mappingFrom.get(i) == null || mappingFrom.get(i) != mappingFrom.get(j) : "cannot read from same interval twice"; + } + } + } + + for (i = 0; i < mappingTo.size(); i++) { + for (j = i + 1; j < mappingTo.size(); j++) { + assert mappingTo.get(i) != mappingTo.get(j) : "cannot write to same interval twice"; + } + } + + HashSet usedRegs = new HashSet<>(); + if (!multipleReadsAllowed) { + for (i = 0; i < mappingFrom.size(); i++) { + Interval interval = mappingFrom.get(i); + if (interval != null && !isIllegal(interval.location())) { + boolean unique = usedRegs.add(interval.location()); + assert unique : "cannot read from same register twice"; + } + } + } + + usedRegs.clear(); + for (i = 0; i < mappingTo.size(); i++) { + Interval interval = mappingTo.get(i); + if (isIllegal(interval.location())) { + // After insertion the location may become illegal, so don't check it since multiple + // intervals might be illegal. 
+ continue; + } + boolean unique = usedRegs.add(interval.location()); + assert unique : "cannot write to same register twice"; + } + + usedRegs.clear(); + for (i = 0; i < mappingFrom.size(); i++) { + Interval interval = mappingFrom.get(i); + if (interval != null && !isRegister(interval.location())) { + usedRegs.add(interval.location()); + } + } + for (i = 0; i < mappingTo.size(); i++) { + Interval interval = mappingTo.get(i); + assert !usedRegs.contains(interval.location()) || interval.location().equals(mappingFrom.get(i).location()) : "stack slots used in mappingFrom must be disjoint to mappingTo"; + } + + return true; + } + + // mark assignedReg and assignedRegHi of the interval as blocked + private void blockRegisters(Interval interval) { + Value location = interval.location(); + if (isRegister(location)) { + int reg = asRegister(location).number; + assert multipleReadsAllowed || registerBlocked(reg) == 0 : "register already marked as used"; + setRegisterBlocked(reg, 1); + } + } + + // mark assignedReg and assignedRegHi of the interval as unblocked + private void unblockRegisters(Interval interval) { + Value location = interval.location(); + if (isRegister(location)) { + int reg = asRegister(location).number; + assert registerBlocked(reg) > 0 : "register already marked as unused"; + setRegisterBlocked(reg, -1); + } + } + + /** + * Checks if the {@linkplain Interval#location() location} of {@code to} is not blocked or is + * only blocked by {@code from}. + */ + private boolean safeToProcessMove(Interval from, Interval to) { + Value fromReg = from != null ? from.location() : null; + + Value reg = to.location(); + if (isRegister(reg)) { + if (registerBlocked(asRegister(reg).number) > 1 || (registerBlocked(asRegister(reg).number) == 1 && !reg.equals(fromReg))) { + return false; + } + } + + return true; + } + + private void createInsertionBuffer(List list) { + assert !insertionBuffer.initialized() : "overwriting existing buffer"; + insertionBuffer.init(list); + } + + private void appendInsertionBuffer() { + if (insertionBuffer.initialized()) { + insertionBuffer.finish(); + } + assert !insertionBuffer.initialized() : "must be uninitialized now"; + + insertIdx = -1; + } + + private void insertMove(Interval fromInterval, Interval toInterval) { + assert !fromInterval.operand.equals(toInterval.operand) : "from and to interval equal: " + fromInterval; + assert fromInterval.kind().equals(toInterval.kind()) : "move between different types"; + assert insertIdx != -1 : "must setup insert position first"; + + AllocatableValue fromOpr = fromInterval.operand; + AllocatableValue toOpr = toInterval.operand; + + insertionBuffer.append(insertIdx, allocator.ir.getSpillMoveFactory().createMove(toOpr, fromOpr)); + + Debug.log("insert move from %s to %s at %d", fromInterval, toInterval, insertIdx); + } + + private void insertMove(Value fromOpr, Interval toInterval) { + assert fromOpr.getLIRKind().equals(toInterval.kind()) : "move between different types"; + assert insertIdx != -1 : "must setup insert position first"; + + AllocatableValue toOpr = toInterval.operand; + insertionBuffer.append(insertIdx, allocator.ir.getSpillMoveFactory().createMove(toOpr, fromOpr)); + + Debug.log("insert move from value %s to %s at %d", fromOpr, toInterval, insertIdx); + } + + private void resolveMappings() { + assert verifyBeforeResolve(); + + // Block all registers that are used as input operands of a move. + // When a register is blocked, no move to this register is emitted. + // This is necessary for detecting cycles in moves. 
+ int i; + for (i = mappingFrom.size() - 1; i >= 0; i--) { + Interval fromInterval = mappingFrom.get(i); + if (fromInterval != null) { + blockRegisters(fromInterval); + } + } + + int spillCandidate = -1; + while (mappingFrom.size() > 0) { + boolean processedInterval = false; + + for (i = mappingFrom.size() - 1; i >= 0; i--) { + Interval fromInterval = mappingFrom.get(i); + Interval toInterval = mappingTo.get(i); + + if (safeToProcessMove(fromInterval, toInterval)) { + // this interval can be processed because target is free + if (fromInterval != null) { + insertMove(fromInterval, toInterval); + unblockRegisters(fromInterval); + } else { + insertMove(mappingFromOpr.get(i), toInterval); + } + mappingFrom.remove(i); + mappingFromOpr.remove(i); + mappingTo.remove(i); + + processedInterval = true; + } else if (fromInterval != null && isRegister(fromInterval.location())) { + // this interval cannot be processed now because target is not free + // it starts in a register, so it is a possible candidate for spilling + spillCandidate = i; + } + } + + if (!processedInterval) { + // no move could be processed because there is a cycle in the move list + // (e.g. r1 -> r2, r2 -> r1), so one interval must be spilled to memory + assert spillCandidate != -1 : "no interval in register for spilling found"; + + // create a new spill interval and assign a stack slot to it + Interval fromInterval = mappingFrom.get(spillCandidate); + Interval spillInterval = allocator.createDerivedInterval(fromInterval); + spillInterval.setKind(fromInterval.kind()); + + // add a dummy range because real position is difficult to calculate + // Note: this range is a special case when the integrity of the allocation is + // checked + spillInterval.addRange(1, 2); + + // do not allocate a new spill slot for the temporary interval, but + // use the spill slot assigned to fromInterval. Otherwise moves from + // one stack slot to another can happen (not allowed by LIRAssembler) + StackSlotValue spillSlot = fromInterval.spillSlot(); + if (spillSlot == null) { + spillSlot = allocator.frameMapBuilder.allocateSpillSlot(spillInterval.kind()); + fromInterval.setSpillSlot(spillSlot); + } + spillInterval.assignLocation(spillSlot); + + Debug.log("created new Interval for spilling: %s", spillInterval); + + // insert a move from register to stack and update the mapping + insertMove(fromInterval, spillInterval); + mappingFrom.set(spillCandidate, spillInterval); + unblockRegisters(fromInterval); + } + } + + // reset to default value + multipleReadsAllowed = false; + + // check that all intervals have been processed + assert checkEmpty(); + } + + void setInsertPosition(List insertList, int insertIdx) { + assert this.insertIdx == -1 : "use moveInsertPosition instead of setInsertPosition when data already set"; + + createInsertionBuffer(insertList); + this.insertIdx = insertIdx; + } + + void moveInsertPosition(List newInsertList, int newInsertIdx) { + if (insertionBuffer.lirList() != null && (insertionBuffer.lirList() != newInsertList || this.insertIdx != newInsertIdx)) { + // insert position changed -> resolve current mappings + resolveMappings(); + } + + if (insertionBuffer.lirList() != newInsertList) { + // block changed ->
append insertionBuffer because it is + // bound to a specific block and create a new insertionBuffer + appendInsertionBuffer(); + createInsertionBuffer(newInsertList); + } + + this.insertIdx = newInsertIdx; + } + + void addMapping(Interval fromInterval, Interval toInterval) { + + if (isIllegal(toInterval.location()) && toInterval.canMaterialize()) { + Debug.log("no store to rematerializable interval %s needed", toInterval); + return; + } + if (isIllegal(fromInterval.location()) && fromInterval.canMaterialize()) { + // Instead of a reload, re-materialize the value + Value rematValue = fromInterval.getMaterializedValue(); + addMapping(rematValue, toInterval); + return; + } + Debug.log("add move mapping from %s to %s", fromInterval, toInterval); + + assert !fromInterval.operand.equals(toInterval.operand) : "from and to interval equal: " + fromInterval; + assert fromInterval.kind().equals(toInterval.kind()); + mappingFrom.add(fromInterval); + mappingFromOpr.add(Value.ILLEGAL); + mappingTo.add(toInterval); + } + + void addMapping(Value fromOpr, Interval toInterval) { + Debug.log("add move mapping from %s to %s", fromOpr, toInterval); + + assert isConstant(fromOpr) : "only for constants"; + + mappingFrom.add(null); + mappingFromOpr.add(fromOpr); + mappingTo.add(toInterval); + } + + void resolveAndAppendMoves() { + if (hasMappings()) { + resolveMappings(); + } + appendInsertionBuffer(); + } +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/OptimizingLinearScanWalker.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/OptimizingLinearScanWalker.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,249 @@ +/* + * Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.graal.lir.alloc.lsra; + +import static com.oracle.graal.api.code.ValueUtil.*; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.compiler.common.cfg.*; +import com.oracle.graal.debug.*; +import com.oracle.graal.debug.Debug.Scope; +import com.oracle.graal.lir.alloc.lsra.Interval.*; +import com.oracle.graal.options.*; + +public class OptimizingLinearScanWalker extends LinearScanWalker { + + public static class Options { + // @formatter:off + @Option(help = "Enable LSRA optimization", type = OptionType.Debug) + public static final OptionValue LSRAOptimization = new OptionValue<>(true); + @Option(help = "LSRA optimization: Only split but do not reassign", type = OptionType.Debug) + public static final OptionValue LSRAOptSplitOnly = new OptionValue<>(false); + // @formatter:on + } + + OptimizingLinearScanWalker(LinearScan allocator, Interval unhandledFixedFirst, Interval unhandledAnyFirst) { + super(allocator, unhandledFixedFirst, unhandledAnyFirst); + } + + @Override + protected void handleSpillSlot(Interval interval) { + assert interval.location() != null : "interval not assigned " + interval; + if (interval.canMaterialize()) { + assert !isStackSlotValue(interval.location()) : "interval can materialize but assigned to a stack slot " + interval; + return; + } + assert isStackSlotValue(interval.location()) : "interval not assigned to a stack slot " + interval; + try (Scope s1 = Debug.scope("LSRAOptimization")) { + Debug.log("adding stack to unhandled list %s", interval); + unhandledLists.addToListSortedByStartAndUsePositions(RegisterBinding.Stack, interval); + } + } + + @SuppressWarnings("unused") + private static void printRegisterBindingList(RegisterBindingLists list, RegisterBinding binding) { + for (Interval interval = list.get(binding); interval != Interval.EndMarker; interval = interval.next) { + Debug.log("%s", interval); + } + } + + @Override + void walk() { + try (Scope s = Debug.scope("OptimizingLinearScanWalker")) { + for (AbstractBlock block : allocator.sortedBlocks) { + optimizeBlock(block); + } + } + super.walk(); + } + + private void optimizeBlock(AbstractBlock block) { + if (block.getPredecessorCount() == 1) { + int nextBlock = allocator.getFirstLirInstructionId(block); + try (Scope s1 = Debug.scope("LSRAOptimization")) { + Debug.log("next block: %s (%d)", block, nextBlock); + } + try (Indent indent0 = Debug.indent()) { + walkTo(nextBlock); + + try (Scope s1 = Debug.scope("LSRAOptimization")) { + boolean changed = true; + // we need to do this because the active lists might change + loop: while (changed) { + changed = false; + try (Indent indent1 = Debug.logAndIndent("Active intervals: (block %s [%d])", block, nextBlock)) { + for (Interval active = activeLists.get(RegisterBinding.Any); active != Interval.EndMarker; active = active.next) { + Debug.log("active (any): %s", active); + if (optimize(nextBlock, block, active, RegisterBinding.Any)) { + changed = true; + break loop; + } + } + for (Interval active = activeLists.get(RegisterBinding.Stack); active != Interval.EndMarker; active = active.next) { + Debug.log("active (stack): %s", active); + if (optimize(nextBlock, block, active, RegisterBinding.Stack)) { + changed = true; + break loop; + } + } + } + } + } + } + } + } + + private boolean optimize(int currentPos, AbstractBlock currentBlock, Interval currentInterval, RegisterBinding binding) { + // BEGIN initialize and sanity checks + assert currentBlock != null : "block must not be null"; + assert 
currentInterval != null : "interval must not be null"; + + assert currentBlock.getPredecessorCount() == 1 : "more than one predecessor -> optimization not possible"; + + if (!currentInterval.isSplitChild()) { + // interval is not a split child -> no need for optimization + return false; + } + + if (currentInterval.from() == currentPos) { + // the interval starts at the current position, so no need for splitting + return false; + } + + // get current location + AllocatableValue currentLocation = currentInterval.location(); + assert currentLocation != null : "active intervals must have a location assigned!"; + + // get predecessor stuff + AbstractBlock predecessorBlock = currentBlock.getPredecessors().get(0); + int predEndId = allocator.getLastLirInstructionId(predecessorBlock); + Interval predecessorInterval = currentInterval.getIntervalCoveringOpId(predEndId); + assert predecessorInterval != null : "variable not live at the end of the only predecessor! " + predecessorBlock + " -> " + currentBlock + " interval: " + currentInterval; + AllocatableValue predecessorLocation = predecessorInterval.location(); + assert predecessorLocation != null : "handled intervals must have a location assigned!"; + + // END initialize and sanity checks + + if (currentLocation.equals(predecessorLocation)) { + // locations are already equal -> nothing to optimize + return false; + } + + if (!isStackSlotValue(predecessorLocation) && !isRegister(predecessorLocation)) { + assert predecessorInterval.canMaterialize(); + // value is materialized -> no need for optimization + return false; + } + + assert isStackSlotValue(currentLocation) || isRegister(currentLocation) : "current location not a register or stack slot " + currentLocation; + + try (Indent indent = Debug.logAndIndent("location differs: %s vs.
%s", predecessorLocation, currentLocation)) { + // split current interval at current position + Debug.log("splitting at position %d", currentPos); + + assert allocator.isBlockBegin(currentPos) && ((currentPos & 1) == 0) : "split pos must be even when on block boundary"; + + Interval splitPart = currentInterval.split(currentPos, allocator); + activeLists.remove(binding, currentInterval); + + assert splitPart.from() >= currentPosition : "cannot append new interval before current walk position"; + + // the currentSplitChild is needed later when moves are inserted for reloading + assert splitPart.currentSplitChild() == currentInterval : "overwriting wrong currentSplitChild"; + splitPart.makeCurrentSplitChild(); + + if (Debug.isLogEnabled()) { + Debug.log("left interval : %s", currentInterval.logString(allocator)); + Debug.log("right interval : %s", splitPart.logString(allocator)); + } + + if (Options.LSRAOptSplitOnly.getValue()) { + // just add the split interval to the unhandled list + unhandledLists.addToListSortedByStartAndUsePositions(RegisterBinding.Any, splitPart); + } else { + if (isRegister(predecessorLocation)) { + splitRegisterInterval(splitPart, asRegister(predecessorLocation)); + } else { + assert isStackSlotValue(predecessorLocation); + Debug.log("assigning interval %s to %s", splitPart, predecessorLocation); + splitPart.assignLocation(predecessorLocation); + // activate interval + activeLists.addToListSortedByCurrentFromPositions(RegisterBinding.Stack, splitPart); + splitPart.state = State.Active; + + splitStackInterval(splitPart); + } + } + } + return true; + } + + private void splitRegisterInterval(Interval interval, Register reg) { + // collect current usage of registers + initVarsForAlloc(interval); + initUseLists(false); + spillExcludeActiveFixed(); + // spillBlockUnhandledFixed(cur); + assert unhandledLists.get(RegisterBinding.Fixed) == Interval.EndMarker : "must not have unhandled fixed intervals because all fixed intervals have a use at position 0"; + spillBlockInactiveFixed(interval); + spillCollectActiveAny(); + spillCollectInactiveAny(interval); + + if (Debug.isLogEnabled()) { + try (Indent indent2 = Debug.logAndIndent("state of registers:")) { + for (Register register : availableRegs) { + int i = register.number; + try (Indent indent3 = Debug.logAndIndent("reg %d: usePos: %d, blockPos: %d, intervals: ", i, usePos[i], blockPos[i])) { + for (int j = 0; j < spillIntervals[i].size(); j++) { + Debug.log("%d ", spillIntervals[i].get(j).operandNumber); + } + } + } + } + } + + // the register must be free at least until this position + boolean needSplit = blockPos[reg.number] <= interval.to(); + + int splitPos = blockPos[reg.number]; + + assert splitPos > 0 : "invalid splitPos"; + assert needSplit || splitPos > interval.from() : "splitting interval at from"; + + Debug.log("assigning interval %s to %s", interval, reg); + interval.assignLocation(reg.asValue(interval.kind())); + if (needSplit) { + // register not available for full interval : so split it + splitWhenPartialRegisterAvailable(interval, splitPos); + } + + // perform splitting and spilling for all affected intervals + splitAndSpillIntersectingIntervals(reg); + + // activate interval + activeLists.addToListSortedByCurrentFromPositions(RegisterBinding.Any, interval); + interval.state = State.Active; + + } +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/Range.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ 
b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/Range.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.lir.alloc.lsra; + +/** + * Represents a range of integers from a start (inclusive) to an end (exclusive. + */ +public final class Range { + + public static final Range EndMarker = new Range(Integer.MAX_VALUE, Integer.MAX_VALUE, null); + + /** + * The start of the range, inclusive. + */ + public int from; + + /** + * The end of the range, exclusive. + */ + public int to; + + /** + * A link to allow the range to be put into a singly linked list. + */ + public Range next; + + boolean intersects(Range r) { + return intersectsAt(r) != -1; + } + + /** + * Creates a new range. + * + * @param from the start of the range, inclusive + * @param to the end of the range, exclusive + * @param next link to the next range in a linked list + */ + Range(int from, int to, Range next) { + this.from = from; + this.to = to; + this.next = next; + } + + int intersectsAt(Range other) { + Range r1 = this; + Range r2 = other; + + assert r2 != null : "null ranges not allowed"; + assert r1 != EndMarker && r2 != EndMarker : "empty ranges not allowed"; + + do { + if (r1.from < r2.from) { + if (r1.to <= r2.from) { + r1 = r1.next; + if (r1 == EndMarker) { + return -1; + } + } else { + return r2.from; + } + } else { + if (r2.from < r1.from) { + if (r2.to <= r1.from) { + r2 = r2.next; + if (r2 == EndMarker) { + return -1; + } + } else { + return r1.from; + } + } else { // r1.from() == r2.from() + if (r1.from == r1.to) { + r1 = r1.next; + if (r1 == EndMarker) { + return -1; + } + } else { + if (r2.from == r2.to) { + r2 = r2.next; + if (r2 == EndMarker) { + return -1; + } + } else { + return r1.from; + } + } + } + } + } while (true); + } + + @Override + public String toString() { + return "[" + from + ", " + to + "]"; + } +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/RegisterVerifier.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/alloc/lsra/RegisterVerifier.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,240 @@ +/* + * Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.lir.alloc.lsra; + +import static com.oracle.graal.api.code.ValueUtil.*; + +import java.util.*; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.compiler.common.*; +import com.oracle.graal.compiler.common.cfg.*; +import com.oracle.graal.compiler.common.util.*; +import com.oracle.graal.debug.*; +import com.oracle.graal.lir.*; +import com.oracle.graal.lir.LIRInstruction.OperandFlag; +import com.oracle.graal.lir.LIRInstruction.OperandMode; + +/** + */ +final class RegisterVerifier { + + LinearScan allocator; + List> workList; // all blocks that must be processed + ArrayMap savedStates; // saved information of previous check + + // simplified access to methods of LinearScan + Interval intervalAt(Value operand) { + return allocator.intervalFor(operand); + } + + // currently, only registers are processed + int stateSize() { + return allocator.maxRegisterNumber() + 1; + } + + // accessors + Interval[] stateForBlock(AbstractBlock block) { + return savedStates.get(block.getId()); + } + + void setStateForBlock(AbstractBlock block, Interval[] savedState) { + savedStates.put(block.getId(), savedState); + } + + void addToWorkList(AbstractBlock block) { + if (!workList.contains(block)) { + workList.add(block); + } + } + + RegisterVerifier(LinearScan allocator) { + this.allocator = allocator; + workList = new ArrayList<>(16); + this.savedStates = new ArrayMap<>(); + + } + + void verify(AbstractBlock start) { + // setup input registers (method arguments) for first block + Interval[] inputState = new Interval[stateSize()]; + setStateForBlock(start, inputState); + addToWorkList(start); + + // main loop for verification + do { + AbstractBlock block = workList.get(0); + workList.remove(0); + + processBlock(block); + } while (!workList.isEmpty()); + } + + private void processBlock(AbstractBlock block) { + try (Indent indent = Debug.logAndIndent("processBlock B%d", block.getId())) { + // must copy state because it is modified + Interval[] inputState = copy(stateForBlock(block)); + + try (Indent indent2 = Debug.logAndIndent("Input-State of intervals:")) { + for (int i = 0; i < stateSize(); i++) { + if (inputState[i] != null) { + Debug.log(" %4d", inputState[i].operandNumber); + } else { + Debug.log(" __"); + } + } + } + + // process all operations of the block + processOperations(allocator.ir.getLIRforBlock(block), inputState); + + // iterate all successors + for (AbstractBlock succ : block.getSuccessors()) { + processSuccessor(succ, inputState); + } + } + } + + private void processSuccessor(AbstractBlock block, Interval[] 
inputState) { + Interval[] savedState = stateForBlock(block); + + if (savedState != null) { + // this block was already processed before. + // check if new inputState is consistent with savedState + + boolean savedStateCorrect = true; + for (int i = 0; i < stateSize(); i++) { + if (inputState[i] != savedState[i]) { + // current inputState and previous savedState assume a different + // interval in this register -> assume that this register is invalid + if (savedState[i] != null) { + // invalidate old calculation only if it assumed that + // register was valid. when the register was already invalid, + // then the old calculation was correct. + savedStateCorrect = false; + savedState[i] = null; + + Debug.log("processSuccessor B%d: invalidating slot %d", block.getId(), i); + } + } + } + + if (savedStateCorrect) { + // already processed block with correct inputState + Debug.log("processSuccessor B%d: previous visit already correct", block.getId()); + } else { + // must re-visit this block + Debug.log("processSuccessor B%d: must re-visit because input state changed", block.getId()); + addToWorkList(block); + } + + } else { + // block was not processed before, so set initial inputState + Debug.log("processSuccessor B%d: initial visit", block.getId()); + + setStateForBlock(block, copy(inputState)); + addToWorkList(block); + } + } + + static Interval[] copy(Interval[] inputState) { + return inputState.clone(); + } + + static void statePut(Interval[] inputState, Value location, Interval interval) { + if (location != null && isRegister(location)) { + Register reg = asRegister(location); + int regNum = reg.number; + if (interval != null) { + Debug.log("%s = %s", reg, interval.operand); + } else if (inputState[regNum] != null) { + Debug.log("%s = null", reg); + } + + inputState[regNum] = interval; + } + } + + static boolean checkState(Interval[] inputState, Value reg, Interval interval) { + if (reg != null && isRegister(reg)) { + if (inputState[asRegister(reg).number] != interval) { + throw new GraalInternalError("!!
Error in register allocation: register %s does not contain interval %s but interval %s", reg, interval.operand, inputState[asRegister(reg).number]); + } + } + return true; + } + + void processOperations(List ops, final Interval[] inputState) { + InstructionValueConsumer useConsumer = new InstructionValueConsumer() { + + @Override + public void visitValue(LIRInstruction op, Value operand, OperandMode mode, EnumSet flags) { + // we skip spill moves inserted by the spill position optimization + if (LinearScan.isVariableOrRegister(operand) && allocator.isProcessed(operand) && op.id() != LinearScan.DOMINATOR_SPILL_MOVE_ID) { + Interval interval = intervalAt(operand); + if (op.id() != -1) { + interval = interval.getSplitChildAtOpId(op.id(), mode, allocator); + } + + assert checkState(inputState, interval.location(), interval.splitParent()); + } + } + }; + + InstructionValueConsumer defConsumer = (op, operand, mode, flags) -> { + if (LinearScan.isVariableOrRegister(operand) && allocator.isProcessed(operand)) { + Interval interval = intervalAt(operand); + if (op.id() != -1) { + interval = interval.getSplitChildAtOpId(op.id(), mode, allocator); + } + + statePut(inputState, interval.location(), interval.splitParent()); + } + }; + + // visit all instructions of the block + for (int i = 0; i < ops.size(); i++) { + final LIRInstruction op = ops.get(i); + + if (Debug.isLogEnabled()) { + Debug.log("%s", op.toStringWithIdPrefix()); + } + + // check if input operands are correct + op.visitEachInput(useConsumer); + // invalidate all caller save registers at calls + if (op.destroysCallerSavedRegisters()) { + for (Register r : allocator.frameMapBuilder.getRegisterConfig().getCallerSaveRegisters()) { + statePut(inputState, r.asValue(), null); + } + } + op.visitEachAlive(useConsumer); + // set temp operands (some operations use temp operands also as output operands, so + // can't set them null) + op.visitEachTemp(defConsumer); + // set output operands + op.visitEachOutput(defConsumer); + } + } +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/debug/LIRGenerationDebugContext.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/debug/LIRGenerationDebugContext.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.graal.lir.debug; + +import com.oracle.graal.api.meta.*; +import com.oracle.graal.debug.*; +import com.oracle.graal.lir.*; + +/** + * Provides information about {@link LIR} generation for debugging purposes. + */ +public interface LIRGenerationDebugContext { + + /** + * Gets an object that represents the source of an {@link LIR} {@link Value operand} in a higher + * representation. + */ + Object getSourceForOperand(Value value); + + static LIRGenerationDebugContext getFromDebugContext() { + if (Debug.isEnabled()) { + LIRGenerationDebugContext lirGen = Debug.contextLookup(LIRGenerationDebugContext.class); + assert lirGen != null; + return lirGen; + } + return null; + } + + static Object getSourceForOperandFromDebugContext(Value value) { + LIRGenerationDebugContext gen = getFromDebugContext(); + if (gen != null) { + return gen.getSourceForOperand(value); + } + return null; + } + +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/framemap/FrameMapBuilderImpl.java --- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/framemap/FrameMapBuilderImpl.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/framemap/FrameMapBuilderImpl.java Sat Feb 07 02:47:00 2015 +0100 @@ -22,13 +22,19 @@ */ package com.oracle.graal.lir.framemap; +import static com.oracle.graal.api.code.ValueUtil.*; + import java.util.*; import com.oracle.graal.api.code.*; import com.oracle.graal.api.meta.*; import com.oracle.graal.compiler.common.*; +import com.oracle.graal.compiler.common.cfg.*; import com.oracle.graal.debug.*; import com.oracle.graal.debug.Debug.Scope; +import com.oracle.graal.lir.*; +import com.oracle.graal.lir.LIRInstruction.OperandFlag; +import com.oracle.graal.lir.LIRInstruction.OperandMode; import com.oracle.graal.lir.gen.*; import com.oracle.graal.lir.stackslotalloc.*; @@ -95,6 +101,9 @@ public FrameMap buildFrameMap(LIRGenerationResult res, StackSlotAllocator allocator) { try (Scope s = Debug.scope("StackSlotAllocation")) { allocator.allocateStackSlots(this, res); + if (Debug.isEnabled()) { + verifyStackSlotAllocation(res); + } } for (CallingConvention cc : calls) { frameMap.callsMethod(cc); @@ -103,6 +112,23 @@ return frameMap; } + private static void verifyStackSlotAllocation(LIRGenerationResult res) { + LIR lir = res.getLIR(); + InstructionValueConsumer verifySlots = (LIRInstruction op, Value value, OperandMode mode, EnumSet flags) -> { + assert !isVirtualStackSlot(value) : String.format("Instruction %s contains a virtual stack slot %s", op, value); + }; + for (AbstractBlock block : lir.getControlFlowGraph().getBlocks()) { + lir.getLIRforBlock(block).forEach(op -> { + op.visitEachInput(verifySlots); + op.visitEachAlive(verifySlots); + op.visitEachState(verifySlots); + + op.visitEachTemp(verifySlots); + op.visitEachOutput(verifySlots); + }); + } + } + public List getStackSlots() { return stackSlots; } diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/gen/LIRGenerator.java --- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/gen/LIRGenerator.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/gen/LIRGenerator.java Sat Feb 07 02:47:00 2015 +0100 @@ -119,6 +119,7 @@ @Override public Variable emitMove(Value input) { + assert !(input instanceof Variable) : "Creating a copy of a variable via this method is not supported (and potentially a bug): " + input; Variable result = newVariable(input.getLIRKind()); 
emitMove(result, input); return result; diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/gen/LIRGeneratorTool.java --- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/gen/LIRGeneratorTool.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/gen/LIRGeneratorTool.java Sat Feb 07 02:47:00 2015 +0100 @@ -54,13 +54,13 @@ Value emitLoadConstant(LIRKind kind, Constant constant); - Value emitLoad(LIRKind kind, Value address, LIRFrameState state); + Variable emitLoad(LIRKind kind, Value address, LIRFrameState state); void emitStore(LIRKind kind, Value address, Value input, LIRFrameState state); void emitNullCheck(Value address, LIRFrameState state); - Value emitCompareAndSwap(Value address, Value expectedValue, Value newValue, Value trueValue, Value falseValue); + Variable emitCompareAndSwap(Value address, Value expectedValue, Value newValue, Value trueValue, Value falseValue); /** * Emit an atomic read-and-add instruction. @@ -112,7 +112,7 @@ Value emitAddress(Value base, long displacement, Value index, int scale); - Value emitAddress(StackSlotValue slot); + Variable emitAddress(StackSlotValue slot); void emitMembar(int barriers); @@ -179,15 +179,15 @@ CallingConvention getCallingConvention(); - Value emitBitCount(Value operand); + Variable emitBitCount(Value operand); - Value emitBitScanForward(Value operand); + Variable emitBitScanForward(Value operand); - Value emitBitScanReverse(Value operand); + Variable emitBitScanReverse(Value operand); - Value emitByteSwap(Value operand); + Variable emitByteSwap(Value operand); - Value emitArrayEquals(Kind kind, Value array1, Value array2, Value length); + Variable emitArrayEquals(Kind kind, Value array1, Value array2, Value length); void emitBlackhole(Value operand); diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/stackslotalloc/FixPointIntervalBuilder.java --- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/stackslotalloc/FixPointIntervalBuilder.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/stackslotalloc/FixPointIntervalBuilder.java Sat Feb 07 02:47:00 2015 +0100 @@ -43,6 +43,7 @@ private final LIR lir; private final int maxOpId; private final StackInterval[] stackSlotMap; + private final HashSet usePos; /** * The number of allocated stack slots. @@ -55,9 +56,15 @@ this.maxOpId = maxOpId; liveInMap = new BlockMap<>(lir.getControlFlowGraph()); liveOutMap = new BlockMap<>(lir.getControlFlowGraph()); + this.usePos = new HashSet<>(); } - void build() { + /** + * Builds the lifetime intervals for {@link VirtualStackSlot virtual stack slots}, sets up + * {@link #stackSlotMap} and returns a set of use positions, i.e. instructions that contain + * virtual stack slots. 
+ */ + Set build() { Deque> worklist = new ArrayDeque<>(); for (int i = lir.getControlFlowGraph().getBlocks().size() - 1; i >= 0; i--) { worklist.add(lir.getControlFlowGraph().getBlocks().get(i)); @@ -69,6 +76,7 @@ AbstractBlock block = worklist.poll(); processBlock(block, worklist); } + return usePos; } /** @@ -188,6 +196,7 @@ if (isVirtualStackSlot(operand)) { VirtualStackSlot vslot = asVirtualStackSlot(operand); addUse(vslot, inst, flags); + usePos.add(inst); Debug.log("set operand: %s", operand); currentSet.set(vslot.getId()); } @@ -206,6 +215,7 @@ if (isVirtualStackSlot(operand)) { VirtualStackSlot vslot = asVirtualStackSlot(operand); addDef(vslot, inst); + usePos.add(inst); Debug.log("clear operand: %s", operand); currentSet.clear(vslot.getId()); } diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/stackslotalloc/LSStackSlotAllocator.java --- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/stackslotalloc/LSStackSlotAllocator.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/stackslotalloc/LSStackSlotAllocator.java Sat Feb 07 02:47:00 2015 +0100 @@ -32,6 +32,7 @@ import com.oracle.graal.compiler.common.cfg.*; import com.oracle.graal.debug.*; import com.oracle.graal.debug.Debug.Scope; +import com.oracle.graal.debug.internal.*; import com.oracle.graal.lir.*; import com.oracle.graal.lir.LIRInstruction.OperandFlag; import com.oracle.graal.lir.LIRInstruction.OperandMode; @@ -58,8 +59,17 @@ // @formatter:on } + private static final DebugTimer MainTimer = Debug.timer("LSStackSlotAllocator"); + private static final DebugTimer NumInstTimer = Debug.timer("LSStackSlotAllocator[NumberInstruction]"); + private static final DebugTimer BuildIntervalsTimer = Debug.timer("LSStackSlotAllocator[BuildIntervals]"); + private static final DebugTimer VerifyIntervalsTimer = Debug.timer("LSStackSlotAllocator[VerifyIntervals]"); + private static final DebugTimer AllocateSlotsTimer = Debug.timer("LSStackSlotAllocator[AllocateSlots]"); + private static final DebugTimer AssignSlotsTimer = Debug.timer("LSStackSlotAllocator[AssignSlots]"); + public void allocateStackSlots(FrameMapBuilderTool builder, LIRGenerationResult res) { - new Allocator(res.getLIR(), builder).allocate(); + try (TimerCloseable t = MainTimer.start()) { + new Allocator(res.getLIR(), builder).allocate(); + } } private static final class Allocator { @@ -82,33 +92,42 @@ // insert by to this.active = new PriorityQueue<>((a, b) -> a.to() - b.to()); - // step 1: number instructions - this.maxOpId = numberInstructions(lir, sortedBlocks); + try (TimerCloseable t = NumInstTimer.start()) { + // step 1: number instructions + this.maxOpId = numberInstructions(lir, sortedBlocks); + } } private void allocate() { Debug.dump(lir, "After StackSlot numbering"); long currentFrameSize = Debug.isMeterEnabled() ? 
frameMapBuilder.getFrameMap().currentFrameSize() : 0; + Set usePos; // step 2: build intervals - try (Scope s = Debug.scope("StackSlotAllocationBuildIntervals"); Indent indent = Debug.logAndIndent("BuildIntervals")) { - buildIntervals(); + try (Scope s = Debug.scope("StackSlotAllocationBuildIntervals"); Indent indent = Debug.logAndIndent("BuildIntervals"); TimerCloseable t = BuildIntervalsTimer.start()) { + usePos = buildIntervals(); } // step 3: verify intervals if (Debug.isEnabled()) { - verifyIntervals(); + try (TimerCloseable t = VerifyIntervalsTimer.start()) { + verifyIntervals(); + } } if (Debug.isDumpEnabled()) { dumpIntervals("Before stack slot allocation"); } // step 4: allocate stack slots - allocateStackSlots(); + try (TimerCloseable t = AllocateSlotsTimer.start()) { + allocateStackSlots(); + } if (Debug.isDumpEnabled()) { dumpIntervals("After stack slot allocation"); } // step 5: assign stack slots - assignStackSlots(); + try (TimerCloseable t = AssignSlotsTimer.start()) { + assignStackSlots(usePos); + } Debug.dump(lir, "After StackSlot assignment"); if (Debug.isMeterEnabled()) { StackSlotAllocator.allocatedFramesize.add(frameMapBuilder.getFrameMap().currentFrameSize() - currentFrameSize); @@ -148,8 +167,8 @@ // step 2: build intervals // ==================== - private void buildIntervals() { - new FixPointIntervalBuilder(lir, stackSlotMap, maxOpId()).build(); + private Set buildIntervals() { + return new FixPointIntervalBuilder(lir, stackSlotMap, maxOpId()).build(); } // ==================== @@ -335,16 +354,14 @@ // step 5: assign stack slots // ==================== - private void assignStackSlots() { - for (AbstractBlock block : sortedBlocks) { - lir.getLIRforBlock(block).forEach(op -> { - op.forEachInput(this::assignSlot); - op.forEachAlive(this::assignSlot); - op.forEachState(this::assignSlot); + private void assignStackSlots(Set usePos) { + for (LIRInstruction op : usePos) { + op.forEachInput(this::assignSlot); + op.forEachAlive(this::assignSlot); + op.forEachState(this::assignSlot); - op.forEachTemp(this::assignSlot); - op.forEachOutput(this::assignSlot); - }); + op.forEachTemp(this::assignSlot); + op.forEachOutput(this::assignSlot); } } diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/CallTargetNode.java --- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/CallTargetNode.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/CallTargetNode.java Sat Feb 07 02:47:00 2015 +0100 @@ -55,6 +55,10 @@ public boolean isIndirect() { return !direct; } + + public boolean isInterface() { + return this == InvokeKind.Interface; + } } @Input protected NodeInputList arguments; diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/GuardingPiNode.java --- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/GuardingPiNode.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/GuardingPiNode.java Sat Feb 07 02:47:00 2015 +0100 @@ -22,6 +22,10 @@ */ package com.oracle.graal.nodes; +import static com.oracle.graal.api.meta.DeoptimizationAction.*; +import static com.oracle.graal.api.meta.DeoptimizationReason.*; +import static com.oracle.graal.compiler.common.type.StampFactory.*; + import com.oracle.graal.api.meta.*; import com.oracle.graal.compiler.common.type.*; import com.oracle.graal.graph.*; @@ -65,6 +69,21 @@ return action; } + /** + * Returns a node whose stamp is guaranteed to be {@linkplain 
StampTool#isPointerNonNull(Stamp) + * non-null}. If {@code value} already has such a stamp, then it is returned. Otherwise a fixed + * node guarding {@code value} is returned where the guard performs a null check. + */ + public static ValueNode nullCheckedValue(ValueNode value) { + ObjectStamp receiverStamp = (ObjectStamp) value.stamp(); + if (!StampTool.isPointerNonNull(receiverStamp)) { + IsNullNode condition = value.graph().unique(new IsNullNode(value)); + Stamp stamp = receiverStamp.join(objectNonNull()); + return new GuardingPiNode(value, condition, true, NullCheckException, InvalidateReprofile, stamp); + } + return value; + } + public GuardingPiNode(ValueNode object) { this(object, object.graph().unique(new IsNullNode(object)), true, DeoptimizationReason.NullCheckException, DeoptimizationAction.None, object.stamp().join(StampFactory.objectNonNull())); } diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/AddNode.java --- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/AddNode.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/AddNode.java Sat Feb 07 02:47:00 2015 +0100 @@ -39,6 +39,17 @@ super(ArithmeticOpTable::getAdd, x, y); } + public static ValueNode create(ValueNode x, ValueNode y) { + BinaryOp op = ArithmeticOpTable.forStamp(x.stamp()).getAdd(); + Stamp stamp = op.foldStamp(x.stamp(), y.stamp()); + ConstantNode tryConstantFold = tryConstantFold(op, x, y, stamp); + if (tryConstantFold != null) { + return tryConstantFold; + } else { + return new AddNode(x, y); + } + } + @Override public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) { ValueNode ret = super.canonical(tool, forX, forY); diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/BinaryArithmeticNode.java --- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/BinaryArithmeticNode.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/BinaryArithmeticNode.java Sat Feb 07 02:47:00 2015 +0100 @@ -61,13 +61,21 @@ @Override public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) { - if (forX.isConstant() && forY.isConstant()) { - Constant ret = getOp(forX, forY).foldConstant(forX.asConstant(), forY.asConstant()); - return ConstantNode.forPrimitive(stamp(), ret); + ValueNode result = tryConstantFold(getOp(forX, forY), forX, forY, stamp()); + if (result != null) { + return result; } return this; } + public static ConstantNode tryConstantFold(BinaryOp op, ValueNode forX, ValueNode forY, Stamp stamp) { + if (forX.isConstant() && forY.isConstant()) { + Constant ret = op.foldConstant(forX.asConstant(), forY.asConstant()); + return ConstantNode.forPrimitive(stamp, ret); + } + return null; + } + @Override public boolean inferStamp() { return updateStamp(getOp(getX(), getY()).foldStamp(getX().stamp(), getY().stamp())); diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/CompareNode.java --- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/CompareNode.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/CompareNode.java Sat Feb 07 02:47:00 2015 +0100 @@ -39,14 +39,19 @@ @NodeInfo public abstract class CompareNode extends BinaryOpLogicNode { + protected final Condition condition; + protected final boolean unorderedIsTrue; + /** * Constructs a new Compare 
instruction. * * @param x the instruction producing the first input to the instruction * @param y the instruction that produces the second input to this instruction */ - public CompareNode(ValueNode x, ValueNode y) { + public CompareNode(Condition condition, boolean unorderedIsTrue, ValueNode x, ValueNode y) { super(x, y); + this.condition = condition; + this.unorderedIsTrue = unorderedIsTrue; } /** @@ -54,14 +59,18 @@ * * @return the condition */ - public abstract Condition condition(); + public final Condition condition() { + return condition; + } /** * Checks whether unordered inputs mean true or false (only applies to float operations). * * @return {@code true} if unordered inputs produce true */ - public abstract boolean unorderedIsTrue(); + public final boolean unorderedIsTrue() { + return this.unorderedIsTrue; + } private ValueNode optimizeConditional(Constant constant, ConditionalNode conditionalNode, ConstantReflectionProvider constantReflection, Condition cond) { Constant trueConstant = conditionalNode.trueValue().asConstant(); @@ -93,8 +102,10 @@ @Override public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) { - if (forX.isConstant() && forY.isConstant()) { - return LogicConstantNode.forBoolean(condition().foldCondition(forX.asConstant(), forY.asConstant(), tool.getConstantReflection(), unorderedIsTrue())); + ConstantReflectionProvider constantReflection = tool.getConstantReflection(); + LogicNode constantCondition = tryConstantFold(condition(), forX, forY, constantReflection, unorderedIsTrue()); + if (constantCondition != null) { + return constantCondition; } ValueNode result; if (forX.isConstant()) { @@ -115,7 +126,14 @@ return this; } - protected abstract CompareNode duplicateModified(ValueNode newX, ValueNode newY); + public static LogicNode tryConstantFold(Condition condition, ValueNode forX, ValueNode forY, ConstantReflectionProvider constantReflection, boolean unorderedIsTrue) { + if (forX.isConstant() && forY.isConstant() && constantReflection != null) { + return LogicConstantNode.forBoolean(condition.foldCondition(forX.asConstant(), forY.asConstant(), constantReflection, unorderedIsTrue)); + } + return null; + } + + protected abstract LogicNode duplicateModified(ValueNode newX, ValueNode newY); protected ValueNode canonicalizeSymmetricConstant(CanonicalizerTool tool, Constant constant, ValueNode nonConstant, boolean mirrored) { if (nonConstant instanceof ConditionalNode) { diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/FloatEqualsNode.java --- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/FloatEqualsNode.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/FloatEqualsNode.java Sat Feb 07 02:47:00 2015 +0100 @@ -22,6 +22,7 @@ */ package com.oracle.graal.nodes.calc; +import com.oracle.graal.api.meta.*; import com.oracle.graal.compiler.common.*; import com.oracle.graal.compiler.common.calc.*; import com.oracle.graal.compiler.common.type.*; @@ -31,22 +32,21 @@ import com.oracle.graal.nodes.util.*; @NodeInfo(shortName = "==") -public class FloatEqualsNode extends CompareNode { +public final class FloatEqualsNode extends CompareNode { public FloatEqualsNode(ValueNode x, ValueNode y) { - super(x, y); + super(Condition.EQ, false, x, y); assert x.stamp() instanceof FloatStamp && y.stamp() instanceof FloatStamp : x.stamp() + " " + y.stamp(); assert x.stamp().isCompatible(y.stamp()); } - @Override - public Condition condition() { - 
return Condition.EQ; - } - - @Override - public boolean unorderedIsTrue() { - return false; + public static LogicNode create(ValueNode x, ValueNode y, ConstantReflectionProvider constantReflection) { + LogicNode result = CompareNode.tryConstantFold(Condition.EQ, x, y, constantReflection, false); + if (result != null) { + return result; + } else { + return new FloatEqualsNode(x, y); + } } @Override diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/FloatLessThanNode.java --- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/FloatLessThanNode.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/FloatLessThanNode.java Sat Feb 07 02:47:00 2015 +0100 @@ -33,23 +33,10 @@ @NodeInfo(shortName = "<") public class FloatLessThanNode extends CompareNode { - protected final boolean unorderedIsTrue; - public FloatLessThanNode(ValueNode x, ValueNode y, boolean unorderedIsTrue) { - super(x, y); + super(Condition.LT, unorderedIsTrue, x, y); assert x.stamp() instanceof FloatStamp && y.stamp() instanceof FloatStamp; assert x.stamp().isCompatible(y.stamp()); - this.unorderedIsTrue = unorderedIsTrue; - } - - @Override - public Condition condition() { - return Condition.LT; - } - - @Override - public boolean unorderedIsTrue() { - return unorderedIsTrue; } @Override diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/IntegerBelowNode.java --- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/IntegerBelowNode.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/IntegerBelowNode.java Sat Feb 07 02:47:00 2015 +0100 @@ -22,6 +22,7 @@ */ package com.oracle.graal.nodes.calc; +import com.oracle.graal.api.meta.*; import com.oracle.graal.compiler.common.calc.*; import com.oracle.graal.compiler.common.type.*; import com.oracle.graal.graph.spi.*; @@ -33,19 +34,22 @@ public class IntegerBelowNode extends CompareNode { public IntegerBelowNode(ValueNode x, ValueNode y) { - super(x, y); + super(Condition.BT, false, x, y); assert x.stamp() instanceof IntegerStamp; assert y.stamp() instanceof IntegerStamp; } - @Override - public Condition condition() { - return Condition.BT; - } - - @Override - public boolean unorderedIsTrue() { - return false; + public static LogicNode create(ValueNode x, ValueNode y, ConstantReflectionProvider constantReflection) { + LogicNode result = CompareNode.tryConstantFold(Condition.BT, x, y, constantReflection, false); + if (result != null) { + return result; + } else { + result = findSynonym(x, y); + if (result != null) { + return result; + } + return new IntegerBelowNode(x, y); + } } @Override @@ -54,6 +58,18 @@ if (result != this) { return result; } + LogicNode synonym = findSynonym(forX, forY); + if (synonym != null) { + return synonym; + } + if (forX.isConstant() && forX.asJavaConstant().asLong() == 0) { + // 0 |<| y is the same as 0 != y + return new LogicNegationNode(CompareNode.createCompareNode(Condition.EQ, forX, forY)); + } + return this; + } + + private static LogicNode findSynonym(ValueNode forX, ValueNode forY) { if (GraphUtil.unproxify(forX) == GraphUtil.unproxify(forY)) { return LogicConstantNode.contradiction(); } else if (forX.stamp() instanceof IntegerStamp && forY.stamp() instanceof IntegerStamp) { @@ -67,11 +83,7 @@ } } } - if (forX.isConstant() && forX.asJavaConstant().asLong() == 0) { - // 0 |<| y is the same as 0 != y - return new 
LogicNegationNode(CompareNode.createCompareNode(Condition.EQ, forX, forY)); - } - return this; + return null; } @Override diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/IntegerEqualsNode.java --- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/IntegerEqualsNode.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/IntegerEqualsNode.java Sat Feb 07 02:47:00 2015 +0100 @@ -32,22 +32,21 @@ import com.oracle.graal.nodes.util.*; @NodeInfo(shortName = "==") -public class IntegerEqualsNode extends CompareNode { +public final class IntegerEqualsNode extends CompareNode { public IntegerEqualsNode(ValueNode x, ValueNode y) { - super(x, y); + super(Condition.EQ, false, x, y); assert !x.getKind().isNumericFloat() && x.getKind() != Kind.Object; assert !y.getKind().isNumericFloat() && y.getKind() != Kind.Object; } - @Override - public Condition condition() { - return Condition.EQ; - } - - @Override - public boolean unorderedIsTrue() { - return false; + public static LogicNode create(ValueNode x, ValueNode y, ConstantReflectionProvider constantReflection) { + LogicNode result = CompareNode.tryConstantFold(Condition.EQ, x, y, constantReflection, false); + if (result != null) { + return result; + } else { + return new IntegerEqualsNode(x, y); + } } @Override diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/IntegerLessThanNode.java --- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/IntegerLessThanNode.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/IntegerLessThanNode.java Sat Feb 07 02:47:00 2015 +0100 @@ -35,19 +35,22 @@ public class IntegerLessThanNode extends CompareNode { public IntegerLessThanNode(ValueNode x, ValueNode y) { - super(x, y); + super(Condition.LT, false, x, y); assert !x.getKind().isNumericFloat() && x.getKind() != Kind.Object; assert !y.getKind().isNumericFloat() && y.getKind() != Kind.Object; } - @Override - public Condition condition() { - return Condition.LT; - } - - @Override - public boolean unorderedIsTrue() { - return false; + public static LogicNode create(ValueNode x, ValueNode y, ConstantReflectionProvider constantReflection) { + LogicNode result = CompareNode.tryConstantFold(Condition.LT, x, y, constantReflection, false); + if (result != null) { + return result; + } else { + result = findSynonym(x, y); + if (result != null) { + return result; + } + return new IntegerLessThanNode(x, y); + } } @Override @@ -73,6 +76,19 @@ if (result != this) { return result; } + ValueNode synonym = findSynonym(forX, forY); + if (synonym != null) { + return synonym; + } + if (forX.stamp() instanceof IntegerStamp && forY.stamp() instanceof IntegerStamp) { + if (IntegerStamp.sameSign((IntegerStamp) forX.stamp(), (IntegerStamp) forY.stamp())) { + return new IntegerBelowNode(forX, forY); + } + } + return this; + } + + private static LogicNode findSynonym(ValueNode forX, ValueNode forY) { if (GraphUtil.unproxify(forX) == GraphUtil.unproxify(forY)) { return LogicConstantNode.contradiction(); } else if (forX.stamp() instanceof IntegerStamp && forY.stamp() instanceof IntegerStamp) { @@ -84,12 +100,7 @@ return LogicConstantNode.contradiction(); } } - if (forX.stamp() instanceof IntegerStamp && forY.stamp() instanceof IntegerStamp) { - if (IntegerStamp.sameSign((IntegerStamp) forX.stamp(), (IntegerStamp) forY.stamp())) { - return new IntegerBelowNode(forX, forY); - } - } - 
return this; + return null; } @Override diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/IntegerTestNode.java --- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/IntegerTestNode.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/IntegerTestNode.java Sat Feb 07 02:47:00 2015 +0100 @@ -44,7 +44,7 @@ if (forX.isConstant() && forY.isConstant()) { return LogicConstantNode.forBoolean((forX.asJavaConstant().asLong() & forY.asJavaConstant().asLong()) == 0); } - if (getX().stamp() instanceof IntegerStamp && getY().stamp() instanceof IntegerStamp) { + if (forX.stamp() instanceof IntegerStamp && forY.stamp() instanceof IntegerStamp) { IntegerStamp xStamp = (IntegerStamp) forX.stamp(); IntegerStamp yStamp = (IntegerStamp) forY.stamp(); if ((xStamp.upMask() & yStamp.upMask()) == 0) { diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/NormalizeCompareNode.java --- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/NormalizeCompareNode.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/NormalizeCompareNode.java Sat Feb 07 02:47:00 2015 +0100 @@ -55,7 +55,7 @@ LogicNode equalComp; LogicNode lessComp; if (getX().stamp() instanceof FloatStamp) { - equalComp = graph().unique(new FloatEqualsNode(getX(), getY())); + equalComp = graph().unique(FloatEqualsNode.create(getX(), getY(), tool.getConstantReflection())); lessComp = graph().unique(new FloatLessThanNode(getX(), getY(), isUnorderedLess)); } else { equalComp = graph().unique(new IntegerEqualsNode(getX(), getY())); diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/ObjectEqualsNode.java --- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/ObjectEqualsNode.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/ObjectEqualsNode.java Sat Feb 07 02:47:00 2015 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,13 +24,14 @@ import com.oracle.graal.api.meta.*; import com.oracle.graal.compiler.common.*; +import com.oracle.graal.compiler.common.calc.*; import com.oracle.graal.compiler.common.type.*; import com.oracle.graal.nodeinfo.*; import com.oracle.graal.nodes.*; import com.oracle.graal.nodes.spi.*; @NodeInfo(shortName = "==") -public class ObjectEqualsNode extends PointerEqualsNode implements Virtualizable { +public final class ObjectEqualsNode extends PointerEqualsNode implements Virtualizable { public ObjectEqualsNode(ValueNode x, ValueNode y) { super(x, y); @@ -38,6 +39,19 @@ assert y.stamp() instanceof AbstractObjectStamp; } + public static LogicNode create(ValueNode x, ValueNode y, ConstantReflectionProvider constantReflection) { + LogicNode result = CompareNode.tryConstantFold(Condition.EQ, x, y, constantReflection, false); + if (result != null) { + return result; + } else { + result = findSynonym(x, y); + if (result != null) { + return result; + } + return new ObjectEqualsNode(x, y); + } + } + private void virtualizeNonVirtualComparison(State state, ValueNode other, VirtualizerTool tool) { if (!state.getVirtualObject().hasIdentity() && state.getVirtualObject().entryKind(0) == Kind.Boolean) { if (other.isConstant()) { diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/PointerEqualsNode.java --- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/PointerEqualsNode.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/PointerEqualsNode.java Sat Feb 07 02:47:00 2015 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,23 +33,21 @@ public class PointerEqualsNode extends CompareNode { public PointerEqualsNode(ValueNode x, ValueNode y) { - super(x, y); + super(Condition.EQ, false, x, y); assert x.stamp() instanceof AbstractPointerStamp; assert y.stamp() instanceof AbstractPointerStamp; } @Override - public Condition condition() { - return Condition.EQ; + public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) { + LogicNode result = findSynonym(forX, forY); + if (result != null) { + return result; + } + return super.canonical(tool, forX, forY); } - @Override - public boolean unorderedIsTrue() { - return false; - } - - @Override - public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) { + public static LogicNode findSynonym(ValueNode forX, ValueNode forY) { if (GraphUtil.unproxify(forX) == GraphUtil.unproxify(forY)) { return LogicConstantNode.tautology(); } else if (forX.stamp().alwaysDistinct(forY.stamp())) { @@ -58,8 +56,9 @@ return new IsNullNode(forY); } else if (((AbstractPointerStamp) forY.stamp()).alwaysNull()) { return new IsNullNode(forX); + } else { + return null; } - return super.canonical(tool, forX, forY); } @Override diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/debug/BlackholeNode.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/debug/BlackholeNode.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2015, 2015, Oracle and/or its affiliates. 
All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.nodes.debug; + +import com.oracle.graal.compiler.common.type.*; +import com.oracle.graal.nodeinfo.*; +import com.oracle.graal.nodes.*; +import com.oracle.graal.nodes.spi.*; + +@NodeInfo +public final class BlackholeNode extends FixedWithNextNode implements LIRLowerable { + + @Input ValueNode value; + + public BlackholeNode(ValueNode value) { + super(StampFactory.forVoid()); + this.value = value; + } + + @Override + public void generate(NodeLIRBuilderTool gen) { + gen.getLIRGeneratorTool().emitBlackhole(gen.operand(value)); + } + + @NodeIntrinsic + public static native void consume(boolean v); + + @NodeIntrinsic + public static native void consume(byte v); + + @NodeIntrinsic + public static native void consume(short v); + + @NodeIntrinsic + public static native void consume(char v); + + @NodeIntrinsic + public static native void consume(int v); + + @NodeIntrinsic + public static native void consume(long v); + + @NodeIntrinsic + public static native void consume(float v); + + @NodeIntrinsic + public static native void consume(double v); + + @NodeIntrinsic + public static native void consume(Object v); +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/debug/ControlFlowAnchorNode.java --- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/debug/ControlFlowAnchorNode.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/debug/ControlFlowAnchorNode.java Sat Feb 07 02:47:00 2015 +0100 @@ -40,11 +40,18 @@ protected Unique unique; - public ControlFlowAnchorNode(@SuppressWarnings("unused") Invoke invoke) { + public ControlFlowAnchorNode() { super(StampFactory.forVoid()); this.unique = new Unique(); } + /** + * Used by MacroSubstitution. + */ + public ControlFlowAnchorNode(@SuppressWarnings("unused") Invoke invoke) { + this(); + } + public void generate(NodeLIRBuilderTool generator) { // do nothing } diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/debug/OpaqueNode.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/debug/OpaqueNode.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.nodes.debug; + +import com.oracle.graal.nodeinfo.*; +import com.oracle.graal.nodes.*; +import com.oracle.graal.nodes.calc.*; +import com.oracle.graal.nodes.spi.*; + +@NodeInfo +public class OpaqueNode extends FloatingNode implements LIRLowerable { + + @Input ValueNode value; + + public OpaqueNode(ValueNode value) { + super(value.stamp().unrestricted()); + this.value = value; + } + + @Override + public void generate(NodeLIRBuilderTool gen) { + gen.setResult(this, gen.operand(value)); + } + + @NodeIntrinsic + public static native boolean opaque(boolean v); + + @NodeIntrinsic + public static native byte opaque(byte v); + + @NodeIntrinsic + public static native short opaque(short v); + + @NodeIntrinsic + public static native char opaque(char v); + + @NodeIntrinsic + public static native int opaque(int v); + + @NodeIntrinsic + public static native long opaque(long v); + + @NodeIntrinsic + public static native float opaque(float v); + + @NodeIntrinsic + public static native double opaque(double v); + + @NodeIntrinsic + public static native T opaque(T v); +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/java/ArrayLengthNode.java --- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/java/ArrayLengthNode.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/java/ArrayLengthNode.java Sat Feb 07 02:47:00 2015 +0100 @@ -51,6 +51,14 @@ this.array = array; } + public static ValueNode create(ValueNode forValue, ConstantReflectionProvider constantReflection) { + ValueNode length = readArrayLengthConstant(forValue, constantReflection); + if (length != null) { + return length; + } + return new ArrayLengthNode(forValue); + } + public ValueNode canonical(CanonicalizerTool tool, ValueNode forValue) { ValueNode length = readArrayLength(forValue, tool.getConstantReflection()); if (length != null) { @@ -93,6 +101,10 @@ // Ensure that any proxies on the original value end up on the length value return reproxyValue(originalArray, length); } + return readArrayLengthConstant(originalArray, constantReflection); + } + + private static ValueNode readArrayLengthConstant(ValueNode originalArray, ConstantReflectionProvider constantReflection) { ValueNode array = GraphUtil.unproxify(originalArray); if (constantReflection != null && array.isConstant() && !array.isNullConstant()) { JavaConstant constantValue = array.asJavaConstant(); diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/java/LoadIndexedNode.java --- 
a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/java/LoadIndexedNode.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/java/LoadIndexedNode.java Sat Feb 07 02:47:00 2015 +0100 @@ -48,6 +48,14 @@ this(createStamp(array, elementKind), array, index, elementKind); } + public static ValueNode create(ValueNode array, ValueNode index, Kind elementKind, MetaAccessProvider metaAccess, ConstantReflectionProvider constantReflection) { + ValueNode constant = tryConstantFold(array, index, metaAccess, constantReflection); + if (constant != null) { + return constant; + } + return new LoadIndexedNode(array, index, elementKind); + } + protected LoadIndexedNode(Stamp stamp, ValueNode array, ValueNode index, Kind elementKind) { super(stamp, array, index, elementKind); } @@ -79,15 +87,23 @@ } public Node canonical(CanonicalizerTool tool) { - if (array().isConstant() && !array().isNullConstant() && index().isConstant()) { - JavaConstant arrayConstant = array().asJavaConstant(); + ValueNode constant = tryConstantFold(array(), index(), tool.getMetaAccess(), tool.getConstantReflection()); + if (constant != null) { + return constant; + } + return this; + } + + private static ValueNode tryConstantFold(ValueNode array, ValueNode index, MetaAccessProvider metaAccess, ConstantReflectionProvider constantReflection) { + if (array.isConstant() && !array.isNullConstant() && index.isConstant()) { + JavaConstant arrayConstant = array.asJavaConstant(); if (arrayConstant != null) { - JavaConstant constant = tool.getConstantReflection().readConstantArrayElement(arrayConstant, index().asJavaConstant().asInt()); + JavaConstant constant = constantReflection.readConstantArrayElement(arrayConstant, index.asJavaConstant().asInt()); if (constant != null) { - return ConstantNode.forConstant(constant, tool.getMetaAccess()); + return ConstantNode.forConstant(constant, metaAccess); } } } - return this; + return null; } } diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/java/MethodCallTargetNode.java --- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/java/MethodCallTargetNode.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/java/MethodCallTargetNode.java Sat Feb 07 02:47:00 2015 +0100 @@ -22,6 +22,7 @@ */ package com.oracle.graal.nodes.java; +import com.oracle.graal.api.code.*; import com.oracle.graal.api.meta.*; import com.oracle.graal.compiler.common.type.*; import com.oracle.graal.graph.*; @@ -93,67 +94,79 @@ } } + public static ResolvedJavaMethod findSpecialCallTarget(InvokeKind invokeKind, ValueNode receiver, ResolvedJavaMethod targetMethod, Assumptions assumptions, ResolvedJavaType contextType) { + if (invokeKind.isDirect()) { + return null; + } + + // check for trivial cases (e.g. final methods, nonvirtual methods) + if (targetMethod.canBeStaticallyBound()) { + return targetMethod; + } + + ResolvedJavaType type = StampTool.typeOrNull(receiver); + if (type == null && invokeKind == InvokeKind.Virtual) { + // For virtual calls, we are guaranteed to receive a correct receiver type. 
+ type = targetMethod.getDeclaringClass(); + } + + if (type != null) { + /* + * either the holder class is exact, or the receiver object has an exact type, or it's + * an array type + */ + ResolvedJavaMethod resolvedMethod = type.resolveConcreteMethod(targetMethod, contextType); + if (resolvedMethod != null && (resolvedMethod.canBeStaticallyBound() || StampTool.isExactType(receiver) || type.isArray())) { + return resolvedMethod; + } + if (assumptions != null && assumptions.useOptimisticAssumptions()) { + ResolvedJavaType uniqueConcreteType = type.findUniqueConcreteSubtype(); + if (uniqueConcreteType != null) { + ResolvedJavaMethod methodFromUniqueType = uniqueConcreteType.resolveConcreteMethod(targetMethod, contextType); + if (methodFromUniqueType != null) { + assumptions.recordConcreteSubtype(type, uniqueConcreteType); + return methodFromUniqueType; + } + } + + ResolvedJavaMethod uniqueConcreteMethod = type.findUniqueConcreteMethod(targetMethod); + if (uniqueConcreteMethod != null) { + assumptions.recordConcreteMethod(targetMethod, type, uniqueConcreteMethod); + return uniqueConcreteMethod; + } + } + } + + return null; + } + @Override public void simplify(SimplifierTool tool) { - if (invokeKind().isIndirect()) { - // attempt to devirtualize the call + // attempt to devirtualize the call + ResolvedJavaType contextType = (invoke().stateAfter() == null && invoke().stateDuring() == null) ? null : invoke().getContextType(); + ResolvedJavaMethod specialCallTarget = findSpecialCallTarget(invokeKind, receiver(), targetMethod, tool.assumptions(), contextType); + if (specialCallTarget != null) { + this.setTargetMethod(specialCallTarget); + setInvokeKind(InvokeKind.Special); + return; + } - // check for trivial cases (e.g. final methods, nonvirtual methods) - if (targetMethod().canBeStaticallyBound()) { - setInvokeKind(InvokeKind.Special); - return; - } + if (invokeKind().isIndirect() && invokeKind().isInterface()) { // check if the type of the receiver can narrow the result ValueNode receiver = receiver(); - ResolvedJavaType type = StampTool.typeOrNull(receiver); - if (type == null && invokeKind == InvokeKind.Virtual) { - // For virtual calls, we are guaranteed to receive a correct receiver type. 
- type = targetMethod.getDeclaringClass(); - } - if (type != null && (invoke().stateAfter() != null || invoke().stateDuring() != null)) { - /* - * either the holder class is exact, or the receiver object has an exact type, or - * it's an array type - */ - ResolvedJavaMethod resolvedMethod = type.resolveConcreteMethod(targetMethod(), invoke().getContextType()); - if (resolvedMethod != null && (resolvedMethod.canBeStaticallyBound() || StampTool.isExactType(receiver) || type.isArray())) { - setInvokeKind(InvokeKind.Special); - setTargetMethod(resolvedMethod); - return; - } - if (tool.assumptions() != null && tool.assumptions().useOptimisticAssumptions()) { - ResolvedJavaType uniqueConcreteType = type.findUniqueConcreteSubtype(); - if (uniqueConcreteType != null) { - ResolvedJavaMethod methodFromUniqueType = uniqueConcreteType.resolveConcreteMethod(targetMethod(), invoke().getContextType()); - if (methodFromUniqueType != null) { - tool.assumptions().recordConcreteSubtype(type, uniqueConcreteType); - setInvokeKind(InvokeKind.Special); - setTargetMethod(methodFromUniqueType); - return; - } - } - ResolvedJavaMethod uniqueConcreteMethod = type.findUniqueConcreteMethod(targetMethod()); - if (uniqueConcreteMethod != null) { - tool.assumptions().recordConcreteMethod(targetMethod(), type, uniqueConcreteMethod); - setInvokeKind(InvokeKind.Special); - setTargetMethod(uniqueConcreteMethod); - return; - } - } - } // try to turn a interface call into a virtual call ResolvedJavaType declaredReceiverType = targetMethod().getDeclaringClass(); /* * We need to check the invoke kind to avoid recursive simplification for virtual * interface methods calls. */ - if (declaredReceiverType.isInterface() && !invokeKind().equals(InvokeKind.Virtual)) { + if (declaredReceiverType.isInterface()) { tryCheckCastSingleImplementor(receiver, declaredReceiverType); } - if (invokeKind().equals(InvokeKind.Interface) && receiver instanceof UncheckedInterfaceProvider) { + if (receiver instanceof UncheckedInterfaceProvider) { UncheckedInterfaceProvider uncheckedInterfaceProvider = (UncheckedInterfaceProvider) receiver; Stamp uncheckedStamp = uncheckedInterfaceProvider.uncheckedStamp(); if (uncheckedStamp != null) { diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.phases/src/com/oracle/graal/phases/util/ArrayMap.java --- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/util/ArrayMap.java Sat Feb 07 02:34:43 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,118 +0,0 @@ -/* - * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.phases.util; - -/** - * The {@code ArrayMap} class implements an efficient one-level map which is implemented as an - * array. Note that because of the one-level array inside, this data structure performs best when - * the range of integer keys is small and densely used. Note that the implementation can handle - * arbitrary intervals, including negative numbers, up to intervals of size 2^31 - 1. - */ -public class ArrayMap { - - private static final int INITIAL_SIZE = 5; // how big the initial array should be - private static final int EXTRA = 2; // how far on the left or right of a new element to grow - - Object[] map; - int low; - - /** - * Constructs a new {@code ArrayMap} with no initial assumptions. - */ - public ArrayMap() { - } - - /** - * Constructs a new {@code ArrayMap} that initially covers the specified interval. Note that - * this map will automatically expand if necessary later. - * - * @param low the low index, inclusive - * @param high the high index, exclusive - */ - public ArrayMap(int low, int high) { - this.low = low; - this.map = new Object[high - low + 1]; - } - - /** - * Puts a new value in the map at the specified index. - * - * @param i the index at which to store the value - * @param value the value to store at the specified index - */ - public void put(int i, T value) { - int index = i - low; - if (map == null) { - // no map yet - map = new Object[INITIAL_SIZE]; - low = index - 2; - map[INITIAL_SIZE / 2] = value; - } else if (index < 0) { - // grow backwards - growBackward(i, value); - } else if (index >= map.length) { - // grow forwards - growForward(i, value); - } else { - // no growth necessary - map[index] = value; - } - } - - /** - * Gets the value at the specified index in the map. - * - * @param i the index - * @return the value at the specified index; {@code null} if there is no value at the specified - * index, or if the index is out of the currently stored range - */ - public T get(int i) { - int index = i - low; - if (map == null || index < 0 || index >= map.length) { - return null; - } - Class type = null; - return Util.uncheckedCast(type, map[index]); - } - - public int length() { - return map.length; - } - - private void growBackward(int i, T value) { - int nlow = i - EXTRA; - Object[] nmap = new Object[low - nlow + map.length]; - System.arraycopy(map, 0, nmap, low - nlow, map.length); - map = nmap; - low = nlow; - map[i - low] = value; - } - - private void growForward(int i, T value) { - int nlen = i - low + 1 + EXTRA; - Object[] nmap = new Object[nlen]; - System.arraycopy(map, 0, nmap, 0, map.length); - map = nmap; - map[i - low] = value; - } -} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.phases/src/com/oracle/graal/phases/util/ArraySet.java --- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/util/ArraySet.java Sat Feb 07 02:34:43 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.phases.util; - -import java.util.*; - -/** - * Mimic a set implementation with an ArrayList. Beneficial for small sets (compared to - * {@link HashSet}). - */ -public class ArraySet extends ArrayList implements Set { - private static final long serialVersionUID = 4476957522387436654L; - - public ArraySet() { - super(); - } - - public ArraySet(int i) { - super(i); - } - - public ArraySet(Collection c) { - super(c); - } - - @Override - public boolean add(E e) { - // avoid duplicated entries - if (contains(e)) { - return false; - } - return super.add(e); - } -} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.phases/src/com/oracle/graal/phases/util/BitMap2D.java --- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/util/BitMap2D.java Sat Feb 07 02:34:43 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,99 +0,0 @@ -/* - * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.phases.util; - -import java.util.*; - -/** - * This class implements a two-dimensional bitmap. 
- */ -public final class BitMap2D { - - private BitSet map; - private final int bitsPerSlot; - - private int bitIndex(int slotIndex, int bitWithinSlotIndex) { - return slotIndex * bitsPerSlot + bitWithinSlotIndex; - } - - private boolean verifyBitWithinSlotIndex(int index) { - assert index < bitsPerSlot : "index " + index + " is out of bounds " + bitsPerSlot; - return true; - } - - public BitMap2D(int sizeInSlots, int bitsPerSlot) { - map = new BitSet(sizeInSlots * bitsPerSlot); - this.bitsPerSlot = bitsPerSlot; - } - - public int sizeInBits() { - return map.size(); - } - - // Returns number of full slots that have been allocated - public int sizeInSlots() { - return map.size() / bitsPerSlot; - } - - public boolean isValidIndex(int slotIndex, int bitWithinSlotIndex) { - assert verifyBitWithinSlotIndex(bitWithinSlotIndex); - return (bitIndex(slotIndex, bitWithinSlotIndex) < sizeInBits()); - } - - public boolean at(int slotIndex, int bitWithinSlotIndex) { - assert verifyBitWithinSlotIndex(bitWithinSlotIndex); - return map.get(bitIndex(slotIndex, bitWithinSlotIndex)); - } - - public void setBit(int slotIndex, int bitWithinSlotIndex) { - assert verifyBitWithinSlotIndex(bitWithinSlotIndex); - map.set(bitIndex(slotIndex, bitWithinSlotIndex)); - } - - public void clearBit(int slotIndex, int bitWithinSlotIndex) { - assert verifyBitWithinSlotIndex(bitWithinSlotIndex); - map.clear(bitIndex(slotIndex, bitWithinSlotIndex)); - } - - public void atPutGrow(int slotIndex, int bitWithinSlotIndex, boolean value) { - int size = sizeInSlots(); - if (size <= slotIndex) { - while (size <= slotIndex) { - size *= 2; - } - BitSet newBitMap = new BitSet(size * bitsPerSlot); - newBitMap.or(map); - map = newBitMap; - } - - if (value) { - setBit(slotIndex, bitWithinSlotIndex); - } else { - clearBit(slotIndex, bitWithinSlotIndex); - } - } - - public void clear() { - map.clear(); - } -} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.phases/src/com/oracle/graal/phases/util/IntList.java --- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/util/IntList.java Sat Feb 07 02:34:43 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,163 +0,0 @@ -/* - * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.phases.util; - -import java.util.*; - -/** - * An expandable and indexable list of {@code int}s. - * - * This class avoids the boxing/unboxing incurred by {@code ArrayList}. 
- */ -public final class IntList { - - private int[] array; - private int size; - - /** - * Creates an int list with a specified initial capacity. - * - * @param initialCapacity - */ - public IntList(int initialCapacity) { - array = new int[initialCapacity]; - } - - /** - * Creates an int list with a specified initial array. - * - * @param array the initial array used for the list (no copy is made) - * @param initialSize the initial {@linkplain #size() size} of the list (must be less than or - * equal to {@code array.length} - */ - public IntList(int[] array, int initialSize) { - assert initialSize <= array.length; - this.array = array; - this.size = initialSize; - } - - /** - * Makes a new int list by copying a range from a given int list. - * - * @param other the list from which a range of values is to be copied into the new list - * @param startIndex the index in {@code other} at which to start copying - * @param length the number of values to copy from {@code other} - * @return a new int list whose {@linkplain #size() size} and capacity is {@code length} - */ - public static IntList copy(IntList other, int startIndex, int length) { - return copy(other, startIndex, length, length); - } - - /** - * Makes a new int list by copying a range from a given int list. - * - * @param other the list from which a range of values is to be copied into the new list - * @param startIndex the index in {@code other} at which to start copying - * @param length the number of values to copy from {@code other} - * @param initialCapacity the initial capacity of the new int list (must be greater or equal to - * {@code length}) - * @return a new int list whose {@linkplain #size() size} is {@code length} - */ - public static IntList copy(IntList other, int startIndex, int length, int initialCapacity) { - assert initialCapacity >= length : "initialCapacity < length"; - int[] array = new int[initialCapacity]; - System.arraycopy(other.array, startIndex, array, 0, length); - return new IntList(array, length); - } - - public int size() { - return size; - } - - /** - * Appends a value to the end of this list, increasing its {@linkplain #size() size} by 1. - * - * @param value the value to append - */ - public void add(int value) { - if (size == array.length) { - int newSize = (size * 3) / 2 + 1; - array = Arrays.copyOf(array, newSize); - } - array[size++] = value; - } - - /** - * Gets the value in this list at a given index. - * - * @param index the index of the element to return - * @throws IndexOutOfBoundsException if {@code index < 0 || index >= size()} - */ - public int get(int index) { - if (index >= size) { - throw new IndexOutOfBoundsException("Index: " + index + ", Size: " + size); - } - return array[index]; - } - - /** - * Sets the size of this list to 0. - */ - public void clear() { - size = 0; - } - - /** - * Sets a value at a given index in this list. - * - * @param index the index of the element to update - * @param value the new value of the element - * @throws IndexOutOfBoundsException if {@code index < 0 || index >= size()} - */ - public void set(int index, int value) { - if (index >= size) { - throw new IndexOutOfBoundsException("Index: " + index + ", Size: " + size); - } - array[index] = value; - } - - /** - * Adjusts the {@linkplain #size() size} of this int list. - * - * If {@code newSize < size()}, the size is changed to {@code newSize}. If - * {@code newSize > size()}, sufficient 0 elements are {@linkplain #add(int) added} until - * {@code size() == newSize}. 
- * - * @param newSize the new size of this int list - */ - public void setSize(int newSize) { - if (newSize < size) { - size = newSize; - } else if (newSize > size) { - array = Arrays.copyOf(array, newSize); - } - } - - @Override - public String toString() { - if (array.length == size) { - return Arrays.toString(array); - } - return Arrays.toString(Arrays.copyOf(array, size)); - } -} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.phases/src/com/oracle/graal/phases/util/Util.java --- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/util/Util.java Sat Feb 07 02:34:43 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,337 +0,0 @@ -/* - * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.phases.util; - -import java.util.*; - -import com.oracle.graal.api.code.*; -import com.oracle.graal.debug.*; - -/** - * The {@code Util} class contains a motley collection of utility methods used throughout the - * compiler. - */ -public class Util { - - public static final int PRINTING_LINE_WIDTH = 40; - public static final char SECTION_CHARACTER = '*'; - public static final char SUB_SECTION_CHARACTER = '='; - public static final char SEPERATOR_CHARACTER = '-'; - - public static boolean replaceInList(T a, T b, List list) { - final int max = list.size(); - for (int i = 0; i < max; i++) { - if (list.get(i) == a) { - list.set(i, b); - return true; - } - } - return false; - } - - /** - * Statically cast an object to an arbitrary Object type. Dynamically checked. - */ - @SuppressWarnings("unchecked") - public static T uncheckedCast(@SuppressWarnings("unused") Class type, Object object) { - return (T) object; - } - - /** - * Statically cast an object to an arbitrary Object type. Dynamically checked. - */ - @SuppressWarnings("unchecked") - public static T uncheckedCast(Object object) { - return (T) object; - } - - /** - * Utility method to combine a base hash with the identity hash of one or more objects. - * - * @param hash the base hash - * @param x the object to add to the hash - * @return the combined hash - */ - public static int hash1(int hash, Object x) { - // always set at least one bit in case the hash wraps to zero - return 0x10000000 | (hash + 7 * System.identityHashCode(x)); - } - - /** - * Utility method to combine a base hash with the identity hash of one or more objects. 
- * - * @param hash the base hash - * @param x the first object to add to the hash - * @param y the second object to add to the hash - * @return the combined hash - */ - public static int hash2(int hash, Object x, Object y) { - // always set at least one bit in case the hash wraps to zero - return 0x20000000 | (hash + 7 * System.identityHashCode(x) + 11 * System.identityHashCode(y)); - } - - /** - * Utility method to combine a base hash with the identity hash of one or more objects. - * - * @param hash the base hash - * @param x the first object to add to the hash - * @param y the second object to add to the hash - * @param z the third object to add to the hash - * @return the combined hash - */ - public static int hash3(int hash, Object x, Object y, Object z) { - // always set at least one bit in case the hash wraps to zero - return 0x30000000 | (hash + 7 * System.identityHashCode(x) + 11 * System.identityHashCode(y) + 13 * System.identityHashCode(z)); - } - - /** - * Utility method to combine a base hash with the identity hash of one or more objects. - * - * @param hash the base hash - * @param x the first object to add to the hash - * @param y the second object to add to the hash - * @param z the third object to add to the hash - * @param w the fourth object to add to the hash - * @return the combined hash - */ - public static int hash4(int hash, Object x, Object y, Object z, Object w) { - // always set at least one bit in case the hash wraps to zero - return 0x40000000 | (hash + 7 * System.identityHashCode(x) + 11 * System.identityHashCode(y) + 13 * System.identityHashCode(z) + 17 * System.identityHashCode(w)); - } - - static { - assert CodeUtil.log2(2) == 1; - assert CodeUtil.log2(4) == 2; - assert CodeUtil.log2(8) == 3; - assert CodeUtil.log2(16) == 4; - assert CodeUtil.log2(32) == 5; - assert CodeUtil.log2(0x40000000) == 30; - - assert CodeUtil.log2(2L) == 1; - assert CodeUtil.log2(4L) == 2; - assert CodeUtil.log2(8L) == 3; - assert CodeUtil.log2(16L) == 4; - assert CodeUtil.log2(32L) == 5; - assert CodeUtil.log2(0x4000000000000000L) == 62; - - assert !CodeUtil.isPowerOf2(3); - assert !CodeUtil.isPowerOf2(5); - assert !CodeUtil.isPowerOf2(7); - assert !CodeUtil.isPowerOf2(-1); - - assert CodeUtil.isPowerOf2(2); - assert CodeUtil.isPowerOf2(4); - assert CodeUtil.isPowerOf2(8); - assert CodeUtil.isPowerOf2(16); - assert CodeUtil.isPowerOf2(32); - assert CodeUtil.isPowerOf2(64); - } - - /** - * Sets the element at a given position of a list and ensures that this position exists. If the - * list is current shorter than the position, intermediate positions are filled with a given - * value. - * - * @param list the list to put the element into - * @param pos the position at which to insert the element - * @param x the element that should be inserted - * @param filler the filler element that is used for the intermediate positions in case the list - * is shorter than pos - */ - public static void atPutGrow(List list, int pos, T x, T filler) { - if (list.size() < pos + 1) { - while (list.size() < pos + 1) { - list.add(filler); - } - assert list.size() == pos + 1; - } - - assert list.size() >= pos + 1; - list.set(pos, x); - } - - public static void breakpoint() { - // do nothing. 
- } - - public static void guarantee(boolean b, String string) { - if (!b) { - throw new BailoutException(string); - } - } - - public static void warning(String string) { - TTY.println("WARNING: " + string); - } - - public static int safeToInt(long l) { - assert (int) l == l; - return (int) l; - } - - public static int roundUp(int number, int mod) { - return ((number + mod - 1) / mod) * mod; - } - - public static void printSection(String name, char sectionCharacter) { - - String header = " " + name + " "; - int remainingCharacters = PRINTING_LINE_WIDTH - header.length(); - int leftPart = remainingCharacters / 2; - int rightPart = remainingCharacters - leftPart; - for (int i = 0; i < leftPart; i++) { - TTY.print(sectionCharacter); - } - - TTY.print(header); - - for (int i = 0; i < rightPart; i++) { - TTY.print(sectionCharacter); - } - - TTY.println(); - } - - /** - * Prints entries in a byte array as space separated hex values to {@link TTY}. - * - * @param address an address at which the bytes are located. This is used to print an address - * prefix per line of output. - * @param array the array containing all the bytes to print - * @param bytesPerLine the number of values to print per line of output - */ - public static void printBytes(long address, byte[] array, int bytesPerLine) { - printBytes(address, array, 0, array.length, bytesPerLine); - } - - /** - * Prints entries in a byte array as space separated hex values to {@link TTY}. - * - * @param address an address at which the bytes are located. This is used to print an address - * prefix per line of output. - * @param array the array containing the bytes to print - * @param offset the offset in {@code array} of the values to print - * @param length the number of values from {@code array} print - * @param bytesPerLine the number of values to print per line of output - */ - public static void printBytes(long address, byte[] array, int offset, int length, int bytesPerLine) { - assert bytesPerLine > 0; - boolean newLine = true; - for (int i = 0; i < length; i++) { - if (newLine) { - TTY.print("%08x: ", address + i); - newLine = false; - } - TTY.print("%02x ", array[i]); - if (i % bytesPerLine == bytesPerLine - 1) { - TTY.println(); - newLine = true; - } - } - - if (length % bytesPerLine != bytesPerLine) { - TTY.println(); - } - } - - public static boolean isShiftCount(int x) { - return 0 <= x && x < 32; - } - - /** - * Determines if a given {@code int} value is the range of unsigned byte values. - */ - public static boolean isUByte(int x) { - return (x & 0xff) == x; - } - - /** - * Determines if a given {@code int} value is the range of signed byte values. - */ - public static boolean isByte(int x) { - return (byte) x == x; - } - - /** - * Determines if a given {@code long} value is the range of unsigned byte values. - */ - public static boolean isUByte(long x) { - return (x & 0xffL) == x; - } - - /** - * Determines if a given {@code long} value is the range of signed byte values. - */ - public static boolean isByte(long l) { - return (byte) l == l; - } - - /** - * Determines if a given {@code long} value is the range of unsigned int values. - */ - public static boolean isUInt(long x) { - return (x & 0xffffffffL) == x; - } - - /** - * Determines if a given {@code long} value is the range of signed int values. - */ - public static boolean isInt(long l) { - return (int) l == l; - } - - /** - * Determines if a given {@code int} value is the range of signed short values. 
- */ - public static boolean isShort(int x) { - return (short) x == x; - } - - public static boolean is32bit(long x) { - return -0x80000000L <= x && x < 0x80000000L; - } - - public static short safeToShort(int v) { - assert isShort(v); - return (short) v; - } - - /** - * Creates an array of integers of length "size", in which each number from 0 to (size - 1) - * occurs exactly once. The integers are sorted using the given comparator. This can be used to - * create a sorting for arrays that cannot be modified directly. - * - * @param size The size of the range to be sorted. - * @param comparator A comparator that is used to compare indexes. - * @return An array of integers that contains each number from 0 to (size - 1) exactly once, - * sorted using the comparator. - */ - public static Integer[] createSortedPermutation(int size, Comparator comparator) { - Integer[] indexes = new Integer[size]; - for (int i = 0; i < size; i++) { - indexes[i] = i; - } - Arrays.sort(indexes, comparator); - return indexes; - } -} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.printer/src/com/oracle/graal/printer/CFGPrinter.java --- a/graal/com.oracle.graal.printer/src/com/oracle/graal/printer/CFGPrinter.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.printer/src/com/oracle/graal/printer/CFGPrinter.java Sat Feb 07 02:47:00 2015 +0100 @@ -29,14 +29,14 @@ import com.oracle.graal.api.code.*; import com.oracle.graal.api.meta.*; -import com.oracle.graal.compiler.alloc.*; -import com.oracle.graal.compiler.alloc.Interval.UsePosList; import com.oracle.graal.compiler.common.cfg.*; import com.oracle.graal.compiler.gen.*; import com.oracle.graal.graph.*; import com.oracle.graal.java.*; import com.oracle.graal.java.BciBlockMapping.BciBlock; import com.oracle.graal.lir.*; +import com.oracle.graal.lir.alloc.lsra.*; +import com.oracle.graal.lir.alloc.lsra.Interval.*; import com.oracle.graal.lir.stackslotalloc.*; import com.oracle.graal.nodeinfo.*; import com.oracle.graal.nodes.*; diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.printer/src/com/oracle/graal/printer/CFGPrinterObserver.java --- a/graal/com.oracle.graal.printer/src/com/oracle/graal/printer/CFGPrinterObserver.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.printer/src/com/oracle/graal/printer/CFGPrinterObserver.java Sat Feb 07 02:47:00 2015 +0100 @@ -28,13 +28,13 @@ import com.oracle.graal.api.code.*; import com.oracle.graal.api.meta.*; -import com.oracle.graal.compiler.alloc.*; import com.oracle.graal.compiler.common.*; import com.oracle.graal.compiler.gen.*; import com.oracle.graal.debug.*; import com.oracle.graal.graph.*; import com.oracle.graal.java.*; import com.oracle.graal.lir.*; +import com.oracle.graal.lir.alloc.lsra.*; import com.oracle.graal.lir.stackslotalloc.*; import com.oracle.graal.nodes.*; import com.oracle.graal.nodes.cfg.*; diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/BlackholeSubstitutions.java --- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/BlackholeSubstitutions.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/BlackholeSubstitutions.java Sat Feb 07 02:47:00 2015 +0100 @@ -23,7 +23,7 @@ package com.oracle.graal.replacements; import com.oracle.graal.api.replacements.*; -import com.oracle.graal.replacements.nodes.*; +import com.oracle.graal.nodes.debug.*; /** * Substitutions for both the old and new versions of JMH Blackhole helper 
classes. diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/GraalDirectivesSubstitutions.java --- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/GraalDirectivesSubstitutions.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/GraalDirectivesSubstitutions.java Sat Feb 07 02:47:00 2015 +0100 @@ -29,7 +29,6 @@ import com.oracle.graal.nodes.debug.*; import com.oracle.graal.nodes.extended.*; import com.oracle.graal.nodes.spi.*; -import com.oracle.graal.replacements.nodes.*; @ClassSubstitution(GraalDirectives.class) public class GraalDirectivesSubstitutions { @@ -49,6 +48,11 @@ return true; } + /* + * This needs to be a @MacroSubstitution, not a @MethodSubstitution, because we want to get a + * unique ControlFlowAnchorNode for each occurrence of this call. With @MethodSubstitution, we + * would get a clone of a single cached node. + */ @MacroSubstitution(forced = true, macro = ControlFlowAnchorNode.class) public static native void controlFlowAnchor(); diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/ReplacementsImpl.java --- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/ReplacementsImpl.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/ReplacementsImpl.java Sat Feb 07 02:47:00 2015 +0100 @@ -619,8 +619,8 @@ if (MethodsElidedInSnippets != null && methodToParse.getSignature().getReturnKind() == Kind.Void && MethodFilter.matches(MethodsElidedInSnippets, methodToParse)) { graph.addAfterFixed(graph.start(), graph.add(new ReturnNode(null))); } else { - createGraphBuilder(metaAccess, replacements.providers.getStampProvider(), replacements.assumptions, GraphBuilderConfiguration.getSnippetDefault(), OptimisticOptimizations.NONE).apply( - graph); + createGraphBuilder(metaAccess, replacements.providers.getStampProvider(), replacements.assumptions, replacements.providers.getConstantReflection(), + GraphBuilderConfiguration.getSnippetDefault(), OptimisticOptimizations.NONE).apply(graph); } afterParsing(graph); @@ -633,9 +633,9 @@ return graph; } - protected Instance createGraphBuilder(MetaAccessProvider metaAccess, StampProvider stampProvider, Assumptions assumptions, GraphBuilderConfiguration graphBuilderConfig, - OptimisticOptimizations optimisticOpts) { - return new GraphBuilderPhase.Instance(metaAccess, stampProvider, assumptions, graphBuilderConfig, optimisticOpts); + protected Instance createGraphBuilder(MetaAccessProvider metaAccess, StampProvider stampProvider, Assumptions assumptions, ConstantReflectionProvider constantReflection, + GraphBuilderConfiguration graphBuilderConfig, OptimisticOptimizations optimisticOpts) { + return new GraphBuilderPhase.Instance(metaAccess, stampProvider, assumptions, constantReflection, graphBuilderConfig, optimisticOpts); } protected void afterParsing(StructuredGraph graph) { diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/BlackholeNode.java --- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/BlackholeNode.java Sat Feb 07 02:34:43 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.replacements.nodes; - -import com.oracle.graal.compiler.common.type.*; -import com.oracle.graal.nodeinfo.*; -import com.oracle.graal.nodes.*; -import com.oracle.graal.nodes.spi.*; - -@NodeInfo -public final class BlackholeNode extends FixedWithNextNode implements LIRLowerable { - - @Input ValueNode value; - - public BlackholeNode(ValueNode value) { - super(StampFactory.forVoid()); - this.value = value; - } - - @Override - public void generate(NodeLIRBuilderTool gen) { - gen.getLIRGeneratorTool().emitBlackhole(gen.operand(value)); - } - - @NodeIntrinsic - public static native void consume(boolean v); - - @NodeIntrinsic - public static native void consume(byte v); - - @NodeIntrinsic - public static native void consume(short v); - - @NodeIntrinsic - public static native void consume(char v); - - @NodeIntrinsic - public static native void consume(int v); - - @NodeIntrinsic - public static native void consume(long v); - - @NodeIntrinsic - public static native void consume(float v); - - @NodeIntrinsic - public static native void consume(double v); - - @NodeIntrinsic - public static native void consume(Object v); -} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/OpaqueNode.java --- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/OpaqueNode.java Sat Feb 07 02:34:43 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ -package com.oracle.graal.replacements.nodes; - -import com.oracle.graal.nodeinfo.*; -import com.oracle.graal.nodes.*; -import com.oracle.graal.nodes.calc.*; -import com.oracle.graal.nodes.spi.*; - -@NodeInfo -public class OpaqueNode extends FloatingNode implements LIRLowerable { - - @Input ValueNode value; - - public OpaqueNode(ValueNode value) { - super(value.stamp().unrestricted()); - this.value = value; - } - - @Override - public void generate(NodeLIRBuilderTool gen) { - gen.setResult(this, gen.operand(value)); - } - - @NodeIntrinsic - public static native boolean opaque(boolean v); - - @NodeIntrinsic - public static native byte opaque(byte v); - - @NodeIntrinsic - public static native short opaque(short v); - - @NodeIntrinsic - public static native char opaque(char v); - - @NodeIntrinsic - public static native int opaque(int v); - - @NodeIntrinsic - public static native long opaque(long v); - - @NodeIntrinsic - public static native float opaque(float v); - - @NodeIntrinsic - public static native double opaque(double v); - - @NodeIntrinsic - public static native T opaque(T v); -} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.truffle.hotspot/src/com/oracle/graal/truffle/hotspot/HotSpotTruffleRuntime.java --- a/graal/com.oracle.graal.truffle.hotspot/src/com/oracle/graal/truffle/hotspot/HotSpotTruffleRuntime.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.truffle.hotspot/src/com/oracle/graal/truffle/hotspot/HotSpotTruffleRuntime.java Sat Feb 07 02:47:00 2015 +0100 @@ -177,7 +177,8 @@ Suites suites = suitesProvider.createSuites(); removeInliningPhase(suites); StructuredGraph graph = new StructuredGraph(javaMethod); - new GraphBuilderPhase.Instance(metaAccess, providers.getStampProvider(), new Assumptions(false), GraphBuilderConfiguration.getEagerDefault(), OptimisticOptimizations.ALL).apply(graph); + new GraphBuilderPhase.Instance(metaAccess, providers.getStampProvider(), new Assumptions(false), providers.getConstantReflection(), GraphBuilderConfiguration.getEagerDefault(), + OptimisticOptimizations.ALL).apply(graph); PhaseSuite graphBuilderSuite = getGraphBuilderSuite(suitesProvider); CallingConvention cc = getCallingConvention(providers.getCodeCache(), Type.JavaCallee, graph.method(), false); Backend backend = Graal.getRequiredCapability(RuntimeProvider.class).getHostBackend(); diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/FrameWithoutBoxing.java --- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/FrameWithoutBoxing.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/FrameWithoutBoxing.java Sat Feb 07 02:47:00 2015 +0100 @@ -49,7 +49,10 @@ this.arguments = arguments; int size = descriptor.getSize(); this.locals = new Object[size]; - Arrays.fill(locals, descriptor.getDefaultValue()); + Object defaultValue = descriptor.getDefaultValue(); + if (defaultValue != null) { + Arrays.fill(locals, defaultValue); + } this.primitiveLocals = new long[size]; this.tags = new byte[size]; } diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/PartialEvaluator.java --- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/PartialEvaluator.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/PartialEvaluator.java Sat Feb 07 02:47:00 2015 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,15 +30,18 @@ import com.oracle.graal.api.code.*; import com.oracle.graal.api.meta.*; import com.oracle.graal.api.replacements.*; +import com.oracle.graal.api.runtime.*; import com.oracle.graal.debug.*; import com.oracle.graal.debug.Debug.Scope; import com.oracle.graal.debug.internal.*; import com.oracle.graal.graph.Graph.Mark; import com.oracle.graal.graph.*; import com.oracle.graal.graph.Node; +import com.oracle.graal.java.*; import com.oracle.graal.loop.*; import com.oracle.graal.nodes.CallTargetNode.InvokeKind; import com.oracle.graal.nodes.*; +import com.oracle.graal.nodes.calc.*; import com.oracle.graal.nodes.java.*; import com.oracle.graal.nodes.spi.*; import com.oracle.graal.nodes.util.*; @@ -55,8 +58,10 @@ import com.oracle.graal.truffle.nodes.frame.*; import com.oracle.graal.truffle.nodes.frame.NewFrameNode.VirtualOnlyInstanceNode; import com.oracle.graal.truffle.phases.*; +import com.oracle.graal.truffle.substitutions.*; import com.oracle.graal.virtual.phases.ea.*; import com.oracle.truffle.api.*; +import com.oracle.truffle.api.CompilerDirectives.TruffleBoundary; import com.oracle.truffle.api.nodes.*; /** @@ -70,15 +75,26 @@ private final TruffleCache truffleCache; private final SnippetReflectionProvider snippetReflection; private final ResolvedJavaMethod callDirectMethod; + private final ResolvedJavaMethod callInlinedMethod; private final ResolvedJavaMethod callSiteProxyMethod; + protected final ResolvedJavaMethod callRootMethod; + private final GraphBuilderConfiguration configForRoot; - public PartialEvaluator(Providers providers, TruffleCache truffleCache, SnippetReflectionProvider snippetReflection) { + public PartialEvaluator(Providers providers, GraphBuilderConfiguration configForRoot, TruffleCache truffleCache, SnippetReflectionProvider snippetReflection) { this.providers = providers; this.canonicalizer = new CanonicalizerPhase(!ImmutableCode.getValue()); this.snippetReflection = snippetReflection; this.truffleCache = truffleCache; this.callDirectMethod = providers.getMetaAccess().lookupJavaMethod(OptimizedCallTarget.getCallDirectMethod()); + this.callInlinedMethod = providers.getMetaAccess().lookupJavaMethod(OptimizedCallTarget.getCallInlinedMethod()); this.callSiteProxyMethod = providers.getMetaAccess().lookupJavaMethod(GraalFrameInstance.CallNodeFrame.METHOD); + this.configForRoot = configForRoot; + + try { + callRootMethod = providers.getMetaAccess().lookupJavaMethod(OptimizedCallTarget.class.getDeclaredMethod("callRoot", Object[].class)); + } catch (NoSuchMethodException ex) { + throw new RuntimeException(ex); + } } public StructuredGraph createGraph(final OptimizedCallTarget callTarget, final Assumptions assumptions) { @@ -91,65 +107,34 @@ } catch (Throwable e) { throw Debug.handle(e); } - final StructuredGraph graph = truffleCache.createRootGraph(callTarget.toString()); + + final StructuredGraph graph = new StructuredGraph(callTarget.toString(), callRootMethod); assert graph != null : "no graph for root method"; try (Scope s = Debug.scope("CreateGraph", graph); Indent indent = Debug.logAndIndent("createGraph %s", graph)) { - // Canonicalize / constant propagate. 
- PhaseContext baseContext = new PhaseContext(providers, assumptions); - - injectConstantCallTarget(graph, callTarget, baseContext); - - Debug.dump(graph, "Before expansion"); - - TruffleExpansionLogger expansionLogger = null; - if (TraceTruffleExpansion.getValue()) { - expansionLogger = new TruffleExpansionLogger(providers, graph); - } - expandTree(graph, assumptions, expansionLogger); + Map graphCache = null; + if (CacheGraphs.getValue()) { + graphCache = new HashMap<>(); + } + PhaseContext baseContext = new PhaseContext(providers, assumptions); + HighTierContext tierContext = new HighTierContext(providers, assumptions, graphCache, new PhaseSuite(), OptimisticOptimizations.NONE); - TruffleInliningCache inliningCache = null; - if (TruffleFunctionInlining.getValue()) { - callTarget.setInlining(new TruffleInlining(callTarget, new DefaultInliningPolicy())); - if (TruffleFunctionInliningCache.getValue()) { - inliningCache = new TruffleInliningCache(); - } + if (TruffleCompilerOptions.FastPE.getValue()) { + fastPartialEvaluation(callTarget, assumptions, graph, baseContext, tierContext); + } else { + createRootGraph(graph); + partialEvaluation(callTarget, assumptions, graph, baseContext, tierContext); } - expandDirectCalls(graph, assumptions, expansionLogger, callTarget.getInlining(), inliningCache); - if (Thread.currentThread().isInterrupted()) { return null; } new VerifyFrameDoesNotEscapePhase().apply(graph, false); - if (TraceTruffleCompilationHistogram.getValue() && constantReceivers != null) { createHistogram(); } - - canonicalizer.apply(graph, baseContext); - Map graphCache = null; - if (CacheGraphs.getValue()) { - graphCache = new HashMap<>(); - } - HighTierContext tierContext = new HighTierContext(providers, assumptions, graphCache, new PhaseSuite(), OptimisticOptimizations.NONE); - - // EA frame and clean up. 
- do { - try (Scope pe = Debug.scope("TrufflePartialEscape", graph)) { - new PartialEscapePhase(true, canonicalizer).apply(graph, tierContext); - new IncrementalCanonicalizerPhase<>(canonicalizer, new ConditionalEliminationPhase()).apply(graph, tierContext); - } catch (Throwable t) { - Debug.handle(t); - } - } while (expandTree(graph, assumptions, expansionLogger)); - - if (expansionLogger != null) { - expansionLogger.print(callTarget); - } - postPartialEvaluation(graph); } catch (Throwable e) { @@ -159,6 +144,149 @@ return graph; } + private class InterceptLoadFieldPlugin implements GraphBuilderPlugins.LoadFieldPlugin { + + public boolean apply(GraphBuilderContext builder, ValueNode receiver, ResolvedJavaField field) { + if (receiver.isConstant()) { + JavaConstant asJavaConstant = receiver.asJavaConstant(); + return tryConstantFold(builder, field, asJavaConstant); + } + return false; + } + + private boolean tryConstantFold(GraphBuilderContext builder, ResolvedJavaField field, JavaConstant asJavaConstant) { + JavaConstant result = providers.getConstantReflection().readConstantFieldValue(field, asJavaConstant); + if (result != null) { + ConstantNode constantNode = builder.append(ConstantNode.forConstant(result, providers.getMetaAccess())); + builder.push(constantNode.getKind().getStackKind(), constantNode); + return true; + } + return false; + } + + public boolean apply(GraphBuilderContext builder, ResolvedJavaField staticField) { + if (TruffleCompilerOptions.TruffleExcludeAssertions.getValue() && staticField.getName().equals("$assertionsDisabled")) { + ConstantNode trueNode = builder.append(ConstantNode.forBoolean(true)); + builder.push(trueNode.getKind().getStackKind(), trueNode); + return true; + } + return tryConstantFold(builder, staticField, null); + } + } + + private class InterceptReceiverPlugin implements GraphBuilderPlugins.ParameterPlugin { + + private final Object receiver; + + public InterceptReceiverPlugin(Object receiver) { + this.receiver = receiver; + } + + public FloatingNode interceptParameter(int index) { + if (index == 0) { + return ConstantNode.forConstant(snippetReflection.forObject(receiver), providers.getMetaAccess()); + } + return null; + } + } + + private class InlineInvokePlugin implements GraphBuilderPlugins.InlineInvokePlugin { + + public boolean shouldInlineInvoke(ResolvedJavaMethod method, int depth) { + return method.getAnnotation(TruffleBoundary.class) == null; + } + + } + + private class LoopExplosionPlugin implements GraphBuilderPlugins.LoopExplosionPlugin { + + public boolean shouldExplodeLoops(ResolvedJavaMethod method) { + return method.getAnnotation(ExplodeLoop.class) != null; + } + + } + + @SuppressWarnings("unused") + private void fastPartialEvaluation(OptimizedCallTarget callTarget, Assumptions assumptions, StructuredGraph graph, PhaseContext baseContext, HighTierContext tierContext) { + GraphBuilderConfiguration newConfig = configForRoot.copy(); + newConfig.setLoadFieldPlugin(new InterceptLoadFieldPlugin()); + newConfig.setParameterPlugin(new InterceptReceiverPlugin(callTarget)); + newConfig.setInlineInvokePlugin(new InlineInvokePlugin()); + newConfig.setLoopExplosionPlugin(new LoopExplosionPlugin()); + DefaultGraphBuilderPlugins plugins = new DefaultGraphBuilderPlugins(); + Iterable sl = Services.load(GraphBuilderPluginsProvider.class); + for (GraphBuilderPluginsProvider p : sl) { + p.registerPlugins(providers.getMetaAccess(), plugins); + } + TruffleGraphBuilderPlugins.registerPlugins(providers.getMetaAccess(), plugins); + new 
GraphBuilderPhase.Instance(providers.getMetaAccess(), providers.getStampProvider(), new Assumptions(false), providers.getConstantReflection(), newConfig, plugins, + TruffleCompilerImpl.Optimizations).apply(graph); + Debug.dump(graph, "After FastPE"); + + // Do single partial escape and canonicalization pass. + try (Scope pe = Debug.scope("TrufflePartialEscape", graph)) { + new PartialEscapePhase(true, canonicalizer).apply(graph, tierContext); + new IncrementalCanonicalizerPhase<>(canonicalizer, new ConditionalEliminationPhase()).apply(graph, tierContext); + } catch (Throwable t) { + Debug.handle(t); + } + } + + private void partialEvaluation(final OptimizedCallTarget callTarget, final Assumptions assumptions, final StructuredGraph graph, PhaseContext baseContext, HighTierContext tierContext) { + injectConstantCallTarget(graph, callTarget, baseContext); + + Debug.dump(graph, "Before expansion"); + + TruffleExpansionLogger expansionLogger = null; + if (TraceTruffleExpansion.getValue()) { + expansionLogger = new TruffleExpansionLogger(providers, graph); + } + + expandTree(graph, assumptions, expansionLogger); + + TruffleInliningCache inliningCache = null; + if (TruffleFunctionInlining.getValue()) { + callTarget.setInlining(new TruffleInlining(callTarget, new DefaultInliningPolicy())); + if (TruffleFunctionInliningCache.getValue()) { + inliningCache = new TruffleInliningCache(); + } + } + + expandDirectCalls(graph, assumptions, expansionLogger, callTarget.getInlining(), inliningCache); + + if (Thread.currentThread().isInterrupted()) { + return; + } + + canonicalizer.apply(graph, baseContext); + // EA frame and clean up. + do { + try (Scope pe = Debug.scope("TrufflePartialEscape", graph)) { + new PartialEscapePhase(true, canonicalizer).apply(graph, tierContext); + new IncrementalCanonicalizerPhase<>(canonicalizer, new ConditionalEliminationPhase()).apply(graph, tierContext); + } catch (Throwable t) { + Debug.handle(t); + } + } while (expandTree(graph, assumptions, expansionLogger)); + + if (expansionLogger != null) { + expansionLogger.print(callTarget); + } + } + + public StructuredGraph createRootGraph(StructuredGraph graph) { + new GraphBuilderPhase.Instance(providers.getMetaAccess(), providers.getStampProvider(), new Assumptions(false), providers.getConstantReflection(), configForRoot, + TruffleCompilerImpl.Optimizations).apply(graph); + return graph; + } + + public StructuredGraph createInlineGraph(String name) { + StructuredGraph graph = new StructuredGraph(name, callInlinedMethod); + new GraphBuilderPhase.Instance(providers.getMetaAccess(), providers.getStampProvider(), new Assumptions(false), providers.getConstantReflection(), configForRoot, + TruffleCompilerImpl.Optimizations).apply(graph); + return graph; + } + private static void postPartialEvaluation(final StructuredGraph graph) { NeverPartOfCompilationNode.verifyNotFoundIn(graph); for (MaterializeFrameNode materializeNode : graph.getNodes(MaterializeFrameNode.class).snapshot()) { @@ -464,7 +592,7 @@ private StructuredGraph createInlineGraph(PhaseContext phaseContext, Assumptions assumptions, TruffleInliningCache cache, TruffleInliningDecision decision) { try (Scope s = Debug.scope("GuestLanguageInlinedGraph", new DebugDumpScope(decision.getTarget().toString()))) { OptimizedCallTarget target = decision.getTarget(); - StructuredGraph inlineGraph = truffleCache.createInlineGraph(target.toString()); + StructuredGraph inlineGraph = createInlineGraph(target.toString()); injectConstantCallTarget(inlineGraph, decision.getTarget(), 
phaseContext); TruffleExpansionLogger expansionLogger = null; if (TraceTruffleExpansion.getValue()) { diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/TruffleCache.java --- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/TruffleCache.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/TruffleCache.java Sat Feb 07 02:47:00 2015 +0100 @@ -30,13 +30,6 @@ public interface TruffleCache { /** - * Creates the graph for the root method, i.e. {@link OptimizedCallTarget#callBoundary}. - */ - StructuredGraph createRootGraph(String name); - - StructuredGraph createInlineGraph(String name); - - /** * Returns a cached graph for a method with given arguments. */ StructuredGraph lookup(final ResolvedJavaMethod method, final NodeInputList arguments, final CanonicalizerPhase finalCanonicalizer); diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/TruffleCacheImpl.java --- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/TruffleCacheImpl.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/TruffleCacheImpl.java Sat Feb 07 02:47:00 2015 +0100 @@ -56,7 +56,6 @@ private final Providers providers; private final GraphBuilderConfiguration config; - private final GraphBuilderConfiguration configForRoot; private final OptimisticOptimizations optimisticOptimizations; private final HashMap, StructuredGraph> cache = new HashMap<>(); @@ -68,40 +67,17 @@ private final ResolvedJavaType errorClass; private final ResolvedJavaType controlFlowExceptionClass; - protected final ResolvedJavaMethod callRootMethod; - protected final ResolvedJavaMethod callInlinedMethod; - private long counter; - public TruffleCacheImpl(Providers providers, GraphBuilderConfiguration config, GraphBuilderConfiguration configForRoot, OptimisticOptimizations optimisticOptimizations) { + public TruffleCacheImpl(Providers providers, GraphBuilderConfiguration config, OptimisticOptimizations optimisticOptimizations) { this.providers = providers; this.config = config; - this.configForRoot = configForRoot; this.optimisticOptimizations = optimisticOptimizations; this.stringBuilderClass = providers.getMetaAccess().lookupJavaType(StringBuilder.class); this.runtimeExceptionClass = providers.getMetaAccess().lookupJavaType(RuntimeException.class); this.errorClass = providers.getMetaAccess().lookupJavaType(Error.class); this.controlFlowExceptionClass = providers.getMetaAccess().lookupJavaType(ControlFlowException.class); - - try { - callRootMethod = providers.getMetaAccess().lookupJavaMethod(OptimizedCallTarget.class.getDeclaredMethod("callRoot", Object[].class)); - } catch (NoSuchMethodException ex) { - throw new RuntimeException(ex); - } - this.callInlinedMethod = providers.getMetaAccess().lookupJavaMethod(OptimizedCallTarget.getCallInlinedMethod()); - } - - public StructuredGraph createInlineGraph(String name) { - StructuredGraph graph = new StructuredGraph(name, callInlinedMethod); - new GraphBuilderPhase.Instance(providers.getMetaAccess(), providers.getStampProvider(), new Assumptions(false), config, TruffleCompilerImpl.Optimizations).apply(graph); - return graph; - } - - public StructuredGraph createRootGraph(String name) { - StructuredGraph graph = new StructuredGraph(name, callRootMethod); - new GraphBuilderPhase.Instance(providers.getMetaAccess(), providers.getStampProvider(), new Assumptions(false), configForRoot, 
TruffleCompilerImpl.Optimizations).apply(graph); - return graph; } private static List computeCacheKey(ResolvedJavaMethod method, NodeInputList arguments) { @@ -294,7 +270,7 @@ protected StructuredGraph parseGraph(final ResolvedJavaMethod method, final PhaseContext phaseContext) { final StructuredGraph graph = new StructuredGraph(method); - new GraphBuilderPhase.Instance(phaseContext.getMetaAccess(), phaseContext.getStampProvider(), phaseContext.getAssumptions(), config, optimisticOptimizations).apply(graph); + new GraphBuilderPhase.Instance(phaseContext.getMetaAccess(), phaseContext.getStampProvider(), phaseContext.getAssumptions(), null, config, optimisticOptimizations).apply(graph); return graph; } diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/TruffleCompilerImpl.java --- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/TruffleCompilerImpl.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/TruffleCompilerImpl.java Sat Feb 07 02:47:00 2015 +0100 @@ -84,9 +84,9 @@ ResolvedJavaType[] skippedExceptionTypes = getSkippedExceptionTypes(providers.getMetaAccess()); GraphBuilderConfiguration eagerConfig = GraphBuilderConfiguration.getEagerDefault().withSkippedExceptionTypes(skippedExceptionTypes); this.config = GraphBuilderConfiguration.getDefault().withSkippedExceptionTypes(skippedExceptionTypes); - this.truffleCache = new TruffleCacheImpl(providers, eagerConfig, config, TruffleCompilerImpl.Optimizations); + this.truffleCache = new TruffleCacheImpl(providers, eagerConfig, TruffleCompilerImpl.Optimizations); - this.partialEvaluator = new PartialEvaluator(providers, truffleCache, Graal.getRequiredCapability(SnippetReflectionProvider.class)); + this.partialEvaluator = new PartialEvaluator(providers, config, truffleCache, Graal.getRequiredCapability(SnippetReflectionProvider.class)); if (Debug.isEnabled()) { DebugEnvironment.initialize(System.out); @@ -197,8 +197,9 @@ private PhaseSuite createGraphBuilderSuite() { PhaseSuite suite = backend.getSuites().getDefaultGraphBuilderSuite().copy(); ListIterator> iterator = suite.findPhase(GraphBuilderPhase.class); + GraphBuilderPhase graphBuilderPhase = (GraphBuilderPhase) iterator.previous(); iterator.remove(); - iterator.add(new GraphBuilderPhase(config)); + iterator.add(new GraphBuilderPhase(config, graphBuilderPhase.getGraphBuilderPlugins())); return suite; } diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/TruffleCompilerOptions.java --- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/TruffleCompilerOptions.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/TruffleCompilerOptions.java Sat Feb 07 02:47:00 2015 +0100 @@ -44,6 +44,9 @@ @Option(help = "Restrict compilation to comma-separated list of includes (or excludes prefixed with tilde)", type = OptionType.Debug) public static final OptionValue TruffleCompileOnly = new OptionValue<>(null); + @Option(help = "Exclude assertion code from Truffle compilations", type = OptionType.Debug) + public static final StableOptionValue TruffleExcludeAssertions = new StableOptionValue<>(true); + @Option(help = "Compile call target when call count exceeds this threshold", type = OptionType.User) public static final OptionValue TruffleCompilationThreshold = new OptionValue<>(1000); diff -r 3059a52d9614 -r 98967b613c88 
graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/substitutions/TruffleGraphBuilderPlugins.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/substitutions/TruffleGraphBuilderPlugins.java Sat Feb 07 02:47:00 2015 +0100 @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.truffle.substitutions; + +import static java.lang.Character.*; + +import java.util.concurrent.*; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.compiler.common.calc.*; +import com.oracle.graal.compiler.common.type.*; +import com.oracle.graal.java.*; +import com.oracle.graal.java.GraphBuilderPlugins.InvocationPlugin; +import com.oracle.graal.java.GraphBuilderPlugins.Registration; +import com.oracle.graal.java.GraphBuilderPlugins.Registration.Receiver; +import com.oracle.graal.nodes.*; +import com.oracle.graal.nodes.calc.*; +import com.oracle.graal.nodes.extended.*; +import com.oracle.graal.truffle.*; +import com.oracle.graal.truffle.nodes.*; +import com.oracle.graal.truffle.nodes.frame.*; +import com.oracle.truffle.api.*; +import com.oracle.truffle.api.frame.*; + +/** + * Provider of {@link GraphBuilderPlugin}s for Truffle classes. 
+ */ +public class TruffleGraphBuilderPlugins { + public static void registerPlugins(MetaAccessProvider metaAccess, GraphBuilderPlugins plugins) { + + // CompilerDirectives.class + Registration r = new Registration(plugins, metaAccess, CompilerDirectives.class); + r.register0("inInterpreter", new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder) { + builder.push(Kind.Boolean.getStackKind(), builder.append(ConstantNode.forBoolean(false))); + return true; + } + }); + r.register0("inCompiledCode", new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder) { + builder.push(Kind.Boolean.getStackKind(), builder.append(ConstantNode.forBoolean(true))); + return true; + } + }); + r.register0("transferToInterpreter", new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder) { + builder.append(new DeoptimizeNode(DeoptimizationAction.None, DeoptimizationReason.TransferToInterpreter)); + return true; + } + }); + r.register0("transferToInterpreterAndInvalidate", new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder) { + builder.append(new DeoptimizeNode(DeoptimizationAction.InvalidateReprofile, DeoptimizationReason.TransferToInterpreter)); + return true; + } + }); + r.register1("interpreterOnly", Runnable.class, new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder, ValueNode arg) { + return true; + } + }); + r.register1("interpreterOnly", Callable.class, new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder, ValueNode arg) { + return true; + } + }); + r.register2("injectBranchProbability", double.class, boolean.class, new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder, ValueNode probability, ValueNode condition) { + builder.append(new BranchProbabilityNode(probability, condition)); + return true; + } + }); + r.register1("bailout", String.class, new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder, ValueNode message) { + if (message.isConstant()) { + throw new BailoutException(message.asConstant().toValueString()); + } + throw new BailoutException("bailout (message is not compile-time constant, so no additional information is available)"); + } + }); + r.register1("isCompilationConstant", Object.class, new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder, ValueNode value) { + if ((value instanceof BoxNode ? 
((BoxNode) value).getValue() : value).isConstant()) { + builder.push(Kind.Boolean.getStackKind(), builder.append(ConstantNode.forBoolean(true))); + return true; + } + return false; + } + }); + r.register1("materialize", Object.class, new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder, ValueNode value) { + builder.append(new ForceMaterializeNode(value)); + return true; + } + }); + + // OptimizedCallTarget.class + r = new Registration(plugins, metaAccess, OptimizedCallTarget.class); + r.register2("createFrame", FrameDescriptor.class, Object[].class, new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder, ValueNode arg1, ValueNode arg2) { + builder.push(Kind.Object, builder.append(new NewFrameNode(StampFactory.exactNonNull(metaAccess.lookupJavaType(FrameWithoutBoxing.class)), arg1, arg2))); + return true; + } + }); + + // FrameWithoutBoxing.class + r = new Registration(plugins, metaAccess, FrameWithoutBoxing.class); + r.register1("materialize", Receiver.class, new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder, ValueNode frame) { + builder.append(new MaterializeFrameNode(frame)); + return true; + } + }); + r.register4("unsafeCast", Object.class, Class.class, boolean.class, boolean.class, new InvocationPlugin() { + public boolean apply(GraphBuilderContext builder, ValueNode object, ValueNode clazz, ValueNode condition, ValueNode nonNull) { + if (clazz.isConstant() && nonNull.isConstant()) { + ConstantReflectionProvider constantReflection = builder.getConstantReflection(); + ResolvedJavaType javaType = constantReflection.asJavaType(clazz.asConstant()); + if (javaType == null) { + builder.push(Kind.Object, object); + } else { + Stamp piStamp = StampFactory.declaredTrusted(javaType, nonNull.asJavaConstant().asInt() != 0); + ConditionAnchorNode valueAnchorNode = builder.append(new ConditionAnchorNode(CompareNode.createCompareNode(object.graph(), Condition.EQ, condition, + ConstantNode.forBoolean(true, object.graph())))); + PiNode piCast = builder.append(new PiNode(object, piStamp, valueAnchorNode)); + builder.push(Kind.Object, piCast); + } + return true; + } + // TODO: should we throw GraalInternalError.shouldNotReachHere() here? + return false; + } + }); + for (Kind kind : new Kind[]{Kind.Boolean, Kind.Byte, Kind.Int, Kind.Long, Kind.Float, Kind.Double, Kind.Object}) { + String kindName = kind.getJavaName(); + kindName = toUpperCase(kindName.charAt(0)) + kindName.substring(1); + String getName = "unsafeGet" + kindName; + String putName = "unsafePut" + kindName; + r.register4(getName, Object.class, long.class, boolean.class, Object.class, new CustomizedUnsafeLoadPlugin(kind)); + r.register4(putName, Object.class, long.class, kind == Kind.Object ? 
Object.class : kind.toJavaClass(), Object.class, new CustomizedUnsafeStorePlugin(kind)); + } + } + + static class CustomizedUnsafeLoadPlugin implements InvocationPlugin { + + private final Kind returnKind; + + public CustomizedUnsafeLoadPlugin(Kind returnKind) { + this.returnKind = returnKind; + } + + public boolean apply(GraphBuilderContext builder, ValueNode object, ValueNode offset, ValueNode condition, ValueNode location) { + if (location.isConstant()) { + LocationIdentity locationIdentity; + if (location.isNullConstant()) { + locationIdentity = LocationIdentity.ANY_LOCATION; + } else { + locationIdentity = ObjectLocationIdentity.create(location.asJavaConstant()); + } + CompareNode compare = builder.append(CompareNode.createCompareNode(Condition.EQ, condition, ConstantNode.forBoolean(true, object.graph()))); + builder.push(returnKind.getStackKind(), builder.append(new UnsafeLoadNode(object, offset, returnKind, locationIdentity, compare))); + return true; + } + // TODO: should we throw GraalInternalError.shouldNotReachHere() here? + return false; + } + } + + static class CustomizedUnsafeStorePlugin implements InvocationPlugin { + + private final Kind kind; + + public CustomizedUnsafeStorePlugin(Kind kind) { + this.kind = kind; + } + + public boolean apply(GraphBuilderContext builder, ValueNode object, ValueNode offset, ValueNode value, ValueNode location) { + ValueNode locationArgument = location; + if (locationArgument.isConstant()) { + LocationIdentity locationIdentity; + if (locationArgument.isNullConstant()) { + locationIdentity = LocationIdentity.ANY_LOCATION; + } else { + locationIdentity = ObjectLocationIdentity.create(locationArgument.asJavaConstant()); + } + + builder.append(new UnsafeStoreNode(object, offset, value, kind, locationIdentity, null)); + return true; + } + // TODO: should we throw GraalInternalError.shouldNotReachHere() here? + return false; + } + } +} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/substitutions/TruffleGraphBuilderPluginsProvider.java --- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/substitutions/TruffleGraphBuilderPluginsProvider.java Sat Feb 07 02:34:43 2015 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,134 +0,0 @@ -/* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ -package com.oracle.graal.truffle.substitutions; - -import java.util.concurrent.*; - -import com.oracle.graal.api.code.*; -import com.oracle.graal.api.meta.*; -import com.oracle.graal.api.runtime.*; -import com.oracle.graal.java.*; -import com.oracle.graal.nodes.*; -import com.oracle.graal.nodes.extended.*; -import com.oracle.graal.truffle.nodes.frame.*; -import com.oracle.truffle.api.*; - -/** - * Provider of {@link GraphBuilderPlugin}s for Truffle classes. - */ -@ServiceProvider(GraphBuilderPluginsProvider.class) -public class TruffleGraphBuilderPluginsProvider implements GraphBuilderPluginsProvider { - public void registerPlugins(MetaAccessProvider metaAccess, GraphBuilderPlugins plugins) { - plugins.register(metaAccess, CompilerDirectivesPlugin.class); - } - - /** - * Plugins for {@link CompilerDirectives}. - */ - enum CompilerDirectivesPlugin implements GraphBuilderPlugin { - inInterpreter() { - public boolean handleInvocation(GraphBuilderContext builder, ValueNode[] args) { - builder.append(ConstantNode.forBoolean(false)); - return true; - } - }, - inCompiledCode() { - public boolean handleInvocation(GraphBuilderContext builder, ValueNode[] args) { - builder.append(ConstantNode.forBoolean(true)); - return true; - } - }, - transferToInterpreter() { - public boolean handleInvocation(GraphBuilderContext builder, ValueNode[] args) { - builder.append(new DeoptimizeNode(DeoptimizationAction.None, DeoptimizationReason.TransferToInterpreter)); - return true; - } - }, - transferToInterpreterAndInvalidate() { - public boolean handleInvocation(GraphBuilderContext builder, ValueNode[] args) { - builder.append(new DeoptimizeNode(DeoptimizationAction.InvalidateReprofile, DeoptimizationReason.TransferToInterpreter)); - return true; - } - }, - interpreterOnly(Runnable.class) { - public boolean handleInvocation(GraphBuilderContext builder, ValueNode[] args) { - return true; - } - }, - interpreterOnly$(Callable.class) { - public boolean handleInvocation(GraphBuilderContext builder, ValueNode[] args) { - return true; - } - }, - injectBranchProbability(double.class, boolean.class) { - public boolean handleInvocation(GraphBuilderContext builder, ValueNode[] args) { - ValueNode probability = args[0]; - ValueNode condition = args[1]; - builder.append(new BranchProbabilityNode(probability, condition)); - return true; - } - }, - bailout(String.class) { - public boolean handleInvocation(GraphBuilderContext builder, ValueNode[] args) { - // TODO: is this too eager? Should a BailoutNode be created instead? - ValueNode message = args[0]; - if (message.isConstant()) { - throw new BailoutException(message.asConstant().toValueString()); - } - throw new BailoutException("bailout (message is not compile-time constant, so no additional information is available)"); - } - }, - - isCompilationConstant(Object.class) { - public boolean handleInvocation(GraphBuilderContext builder, ValueNode[] args) { - ValueNode arg0 = args[0]; - if (arg0 instanceof BoxNode) { - arg0 = ((BoxNode) arg0).getValue(); - } - if (arg0.isConstant()) { - builder.push(Kind.Boolean, builder.append(ConstantNode.forBoolean(true))); - return true; - } - - // Cannot create MacroNodes in a plugin (yet) - return false; - } - }, - materialize(Object.class) { - public boolean handleInvocation(GraphBuilderContext builder, ValueNode[] args) { - builder.append(new ForceMaterializeNode(args[0])); - return true; - } - }; - - CompilerDirectivesPlugin(Class... 
parameterTypes) { - this.parameterTypes = parameterTypes; - } - - private final Class[] parameterTypes; - - public ResolvedJavaMethod getInvocationTarget(MetaAccessProvider metaAccess) { - return GraphBuilderPlugin.resolveTarget(metaAccess, CompilerDirectives.class, name(), parameterTypes); - } - } -} diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/PartialEscapeClosure.java --- a/graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/PartialEscapeClosure.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/PartialEscapeClosure.java Sat Feb 07 02:47:00 2015 +0100 @@ -28,6 +28,7 @@ import com.oracle.graal.api.meta.*; import com.oracle.graal.compiler.common.*; import com.oracle.graal.compiler.common.type.*; +import com.oracle.graal.compiler.common.util.*; import com.oracle.graal.debug.*; import com.oracle.graal.graph.*; import com.oracle.graal.nodes.*; @@ -38,7 +39,6 @@ import com.oracle.graal.nodes.spi.Virtualizable.EscapeState; import com.oracle.graal.nodes.virtual.*; import com.oracle.graal.phases.schedule.*; -import com.oracle.graal.phases.util.*; import com.oracle.graal.virtual.nodes.*; public abstract class PartialEscapeClosure> extends EffectsClosure { diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.truffle.api.test/src/com/oracle/truffle/api/test/nodes/SafeReplaceTest.java --- a/graal/com.oracle.truffle.api.test/src/com/oracle/truffle/api/test/nodes/SafeReplaceTest.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.truffle.api.test/src/com/oracle/truffle/api/test/nodes/SafeReplaceTest.java Sat Feb 07 02:47:00 2015 +0100 @@ -39,12 +39,11 @@ public void testCorrectReplacement() { TestRootNode root = new TestRootNode(); final TestNode oldChild = new TestNode(); + final TestNode newChild = new TestNode(); root.child = oldChild; - assertFalse(oldChild.isReplaceable()); // No parent node + assertFalse(oldChild.isSafelyReplaceableBy(newChild)); // No parent node root.adoptChildren(); - assertTrue(oldChild.isReplaceable()); // Now adopted by parent - final TestNode newChild = new TestNode(); - assertTrue(oldChild.isSafelyReplaceableBy(newChild)); // Parent field type is assignable by + assertTrue(oldChild.isSafelyReplaceableBy(newChild)); // Now adopted by parent // new node oldChild.replace(newChild); root.execute(null); @@ -59,10 +58,11 @@ final TestNode oldChild = new TestNode(); root.child = oldChild; root.adoptChildren(); - final WrongTestNode newChild = new WrongTestNode(); - assertFalse(oldChild.isSafelyReplaceableBy(newChild)); - // Can't test: oldChild.replace(newChild); - // Fails if assertions checked; else unsafe assignment will eventually crash the VM + final TestNode newChild = new TestNode(); + final TestNode strayChild = new TestNode(); + assertFalse(strayChild.isSafelyReplaceableBy(newChild)); // Stray not a child of parent + final WrongTestNode wrongTypeNewChild = new WrongTestNode(); + assertFalse(oldChild.isSafelyReplaceableBy(wrongTypeNewChild)); } private static class TestNode extends Node { diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.truffle.api/src/com/oracle/truffle/api/nodes/Node.java --- a/graal/com.oracle.truffle.api/src/com/oracle/truffle/api/nodes/Node.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.truffle.api/src/com/oracle/truffle/api/nodes/Node.java Sat Feb 07 02:47:00 2015 +0100 @@ -290,26 +290,10 @@ } /** - * Checks if this node is properly adopted by its parent. 
- * - * @return {@code true} if it is structurally safe to replace this node. - */ - public final boolean isReplaceable() { - if (getParent() != null) { - for (Node sibling : getParent().getChildren()) { - if (sibling == this) { - return true; - } - } - } - return false; - } - - /** - * Checks if this node can be replaced by another node, both structurally and with type safety. + * Checks if this node can be replaced by another node, considering both tree structure and type safety. */ public final boolean isSafelyReplaceableBy(Node newNode) { - return isReplaceable() && NodeUtil.isReplacementSafe(getParent(), this, newNode); + return NodeUtil.isReplacementSafe(getParent(), this, newNode); } private void reportReplace(Node oldNode, Node newNode, CharSequence reason) { diff -r 3059a52d9614 -r 98967b613c88 graal/com.oracle.truffle.api/src/com/oracle/truffle/api/nodes/NodeUtil.java --- a/graal/com.oracle.truffle.api/src/com/oracle/truffle/api/nodes/NodeUtil.java Sat Feb 07 02:34:43 2015 +0100 +++ b/graal/com.oracle.truffle.api/src/com/oracle/truffle/api/nodes/NodeUtil.java Sat Feb 07 02:47:00 2015 +0100 @@ -676,26 +676,24 @@ } /** - * Determines whether a proposed child replacement would be type safe. - * - * @param parent non-null node - * @param oldChild non-null existing child of parent - * @param newChild non-null proposed replacement for existing child + * Determines whether a proposed child replacement would be safe, both structurally and with respect to type. */ public static boolean isReplacementSafe(Node parent, Node oldChild, Node newChild) { assert newChild != null; - final NodeField field = findChildField(parent, oldChild); - if (field == null) { - throw new IllegalArgumentException(); + if (parent != null) { + final NodeField field = findChildField(parent, oldChild); + if (field != null) { + switch (field.getKind()) { + case CHILD: + return field.getType().isAssignableFrom(newChild.getClass()); + case CHILDREN: + return field.getType().getComponentType().isAssignableFrom(newChild.getClass()); + default: + throw new IllegalStateException(); + } + } } - switch (field.getKind()) { - case CHILD: - return field.getType().isAssignableFrom(newChild.getClass()); - case CHILDREN: - return field.getType().getComponentType().isAssignableFrom(newChild.getClass()); - default: - throw new IllegalArgumentException(); - } + return false; } /** Returns all declared fields in the class hierarchy. 
*/ diff -r 3059a52d9614 -r 98967b613c88 mx/mx_graal.py --- a/mx/mx_graal.py Sat Feb 07 02:34:43 2015 +0100 +++ b/mx/mx_graal.py Sat Feb 07 02:47:00 2015 +0100 @@ -1481,7 +1481,22 @@ vm(vmargs) def _basic_gate_body(args, tasks): - with Task('BuildHotSpotGraal: fastdebug,product', tasks): + # Build server-hosted-graal now so we can run the unit tests + with Task('BuildHotSpotGraalHosted: product', tasks): + buildvms(['--vms', 'server', '--builds', 'product']) + + # Run unit tests on server-hosted-graal + with VM('server', 'product'): + with Task('UnitTests:hosted-product', tasks): + unittest(['--enable-timing', '--verbose', '--fail-fast']) + + # Run baseline unit tests on server-hosted-graal + with VM('server', 'product'): + with Task('UnitTests-BaselineCompiler:hosted-product', tasks): + unittest(['--enable-timing', '--verbose', '--whitelist', 'test/whitelist_baseline.txt', '-G:+UseBaselineCompiler']) + + # Build the other VM flavors + with Task('BuildHotSpotGraalOthers: fastdebug,product', tasks): buildvms(['--vms', 'graal,server', '--builds', 'fastdebug,product']) with VM('graal', 'fastdebug'): @@ -1510,14 +1525,6 @@ with Task('BootstrapWithImmutableCode:product', tasks): vm(['-XX:-TieredCompilation', '-G:+ImmutableCode', '-G:+VerifyPhases', '-esa', '-version']) - with VM('server', 'product'): # hosted mode - with Task('UnitTests:hosted-product', tasks): - unittest(['--enable-timing', '--verbose', '--fail-fast']) - - with VM('server', 'product'): # hosted mode - with Task('UnitTests-BaselineCompiler:hosted-product', tasks): - unittest(['--enable-timing', '--verbose', '--whitelist', 'test/whitelist_baseline.txt', '-G:+UseBaselineCompiler']) - for vmbuild in ['fastdebug', 'product']: for test in sanitycheck.getDacapos(level=sanitycheck.SanityCheckLevel.Gate, gateBuildLevel=vmbuild) + sanitycheck.getScalaDacapos(level=sanitycheck.SanityCheckLevel.Gate, gateBuildLevel=vmbuild): with Task(str(test) + ':' + vmbuild, tasks) as t: @@ -2273,7 +2280,7 @@ complt += '\t(args)\n' # TODO: improve matcher: if mx args are given, this doesn't work complt += '\t\tcase $line[1] in\n' - complt += '\t\t\t(vm)\n' + complt += '\t\t\t(vm | vmg | vmfg | unittest | jmh | dacapo | scaladacapo | specjvm2008 | specjbb2013 | specjbb2005)\n' complt += '\t\t\t\tnoglob \\\n' complt += '\t\t\t\t\t_arguments -s -S \\\n' complt += _appendOptions("graal", r"G\:") @@ -2340,7 +2347,7 @@ # gather graal options output = StringIO.StringIO() - vm(['-XX:-BootstrapGraal', '-G:+PrintFlags' if optionType == "graal" else '-XX:+PrintFlagsWithComments'], + vm(['-XX:-BootstrapGraal', '-XX:+UnlockDiagnosticVMOptions', '-G:+PrintFlags' if optionType == "graal" else '-XX:+PrintFlagsWithComments'], vm="graal", vmbuild="optimized", nonZeroIsFatal=False, diff -r 3059a52d9614 -r 98967b613c88 mx/suite.py --- a/mx/suite.py Sat Feb 07 02:34:43 2015 +0100 +++ b/mx/suite.py Sat Feb 07 02:47:00 2015 +0100 @@ -490,7 +490,6 @@ "sourceDirs" : ["src"], "dependencies" : [ "com.oracle.graal.nodeinfo", - "com.oracle.graal.debug", "com.oracle.graal.compiler.common", "com.oracle.graal.api.collections", "com.oracle.graal.api.runtime", @@ -539,7 +538,6 @@ "dependencies" : [ "com.oracle.graal.compiler.common", "com.oracle.graal.asm", - "com.oracle.graal.debug", ], "checkstyle" : "com.oracle.graal.graph", "javaCompliance" : "1.8", @@ -579,15 +577,6 @@ "workingSets" : "Graal,LIR,SPARC", }, - "com.oracle.graal.alloc" : { - "subDir" : "graal", - "sourceDirs" : ["src"], - "dependencies" : ["com.oracle.graal.compiler.common"], - "checkstyle" : "com.oracle.graal.graph", 
- "javaCompliance" : "1.8", - "workingSets" : "Graal", - }, - "com.oracle.graal.word" : { "subDir" : "graal", "sourceDirs" : ["src"], @@ -601,7 +590,6 @@ "subDir" : "graal", "sourceDirs" : ["src"], "dependencies" : [ - "com.oracle.graal.api.directives", "com.oracle.graal.compiler", "com.oracle.graal.java", "com.oracle.graal.word", @@ -741,7 +729,6 @@ "dependencies" : [ "com.oracle.graal.virtual", "com.oracle.graal.loop", - "com.oracle.graal.alloc", ], "checkstyle" : "com.oracle.graal.graph", "javaCompliance" : "1.8", @@ -815,6 +802,7 @@ "subDir" : "graal", "sourceDirs" : ["src"], "dependencies" : [ + "com.oracle.graal.api.directives", "com.oracle.graal.phases", ], "checkstyle" : "com.oracle.graal.graph", @@ -829,6 +817,7 @@ "dependencies" : [ "com.oracle.graal.api.code", "com.oracle.graal.options", + "com.oracle.graal.debug", ], "checkstyle" : "com.oracle.graal.graph", "javaCompliance" : "1.8", @@ -875,7 +864,6 @@ "subDir" : "graal", "sourceDirs" : ["src"], "dependencies" : [ - "com.oracle.graal.api.directives", "com.oracle.graal.test", "com.oracle.graal.printer", "com.oracle.graal.runtime", diff -r 3059a52d9614 -r 98967b613c88 src/share/vm/runtime/arguments.cpp --- a/src/share/vm/runtime/arguments.cpp Sat Feb 07 02:34:43 2015 +0100 +++ b/src/share/vm/runtime/arguments.cpp Sat Feb 07 02:47:00 2015 +0100 @@ -3585,6 +3585,12 @@ if (match_option(option, "-XX:-IgnoreUnrecognizedVMOptions", &tail)) { IgnoreUnrecognizedVMOptions = false; } + if (match_option(option, "-XX:+UnlockDiagnosticVMOptions", &tail)) { + UnlockDiagnosticVMOptions = true; + } + if (match_option(option, "-XX:-UnlockDiagnosticVMOptions", &tail)) { + UnlockDiagnosticVMOptions = false; + } if (match_option(option, "-XX:+PrintFlagsInitial", &tail)) { CommandLineFlags::printFlags(tty, false); vm_exit(0);