# HG changeset patch
# User Erik Eckstein
# Date 1386920332 -3600
# Node ID 9423a38d64377b191b817840e8011431197b1187
# Parent 3603fab248a67484921d6b4fc017dbf68d795950
added rematerialization of constants in LinearScan, but still disabled

diff -r 3603fab248a6 -r 9423a38d6437 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/Interval.java
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/Interval.java	Fri Dec 13 07:56:25 2013 +0100
+++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/Interval.java	Fri Dec 13 08:38:52 2013 +0100
@@ -418,7 +418,8 @@
     /**
      * The {@linkplain RegisterValue register} or {@linkplain StackSlot spill slot} assigned to this
-     * interval.
+     * interval. In case of a spilled interval which is re-materialized this is
+     * {@link Value#ILLEGAL}.
      */
     private AllocatableValue location;
@@ -498,6 +499,17 @@
      */
     private Interval locationHint;
 
+    /**
+     * The value with which a spilled child interval can be re-materialized. Currently this must be
+     * a Constant.
+     */
+    private Constant materializedValue;
+
+    /**
+     * The number of times {@link #addMaterializationValue(Constant)} is called.
+     */
+    private int numMaterializationValuesAdded;
+
     void assignLocation(AllocatableValue newLocation) {
         if (isRegister(newLocation)) {
             assert this.location == null : "cannot re-assign location for " + this;
@@ -505,6 +517,8 @@
                 this.location = asRegister(newLocation).asValue(kind);
                 return;
             }
+        } else if (isIllegal(newLocation)) {
+            assert canMaterialize();
         } else {
             assert this.location == null || isRegister(this.location) : "cannot re-assign location for " + this;
             assert isStackSlot(newLocation);
@@ -621,7 +635,7 @@
 
     // returns true if this interval has a shadow copy on the stack that is always correct
     boolean alwaysInMemory() {
-        return splitParent().spillState == SpillState.StoreAtDefinition || splitParent().spillState == SpillState.StartInMemory;
+        return (splitParent().spillState == SpillState.StoreAtDefinition || splitParent().spillState == SpillState.StartInMemory) && !canMaterialize();
     }
 
     void removeFirstUsePos() {
@@ -693,6 +707,34 @@
         currentSplitChild = this;
     }
 
+    /**
+     * Sets the value which is used for re-materialization.
+     */
+    void addMaterializationValue(Constant value) {
+        if (numMaterializationValuesAdded == 0) {
+            materializedValue = value;
+        } else {
+            // Interval is defined in multiple places -> no materialization is possible.
+            materializedValue = null;
+        }
+        numMaterializationValuesAdded++;
+    }
+
+    /**
+     * Returns true if this interval can be re-materialized when spilled. This means that no
+     * spill moves are needed; instead of a restore move, the materialized value is moved into
+     * the register.
+     */
+    public boolean canMaterialize() {
+        return getMaterializedValue() != null;
+    }
+
+    /**
+     * Returns the value which is moved into a register instead of being restored from a stack slot.
+     */
+    public Constant getMaterializedValue() {
+        return splitParent().materializedValue;
+    }
+
     int calcTo() {
         assert first != Range.EndMarker : "interval has no range";
@@ -864,12 +906,24 @@
         }
     }
 
+    private RegisterPriority adaptPriority(RegisterPriority priority) {
+        /*
+         * In case of re-materialized values we require that use-operands are registers, because we
+         * don't have the value at a stack location. (Note that ShouldHaveRegister means that the
+         * operand can also be a StackSlot).
+ */ + if (priority == RegisterPriority.ShouldHaveRegister && canMaterialize()) { + return RegisterPriority.MustHaveRegister; + } + return priority; + } + // Note: use positions are sorted descending . first use has highest index int firstUsage(RegisterPriority minRegisterPriority) { assert isVariable(operand) : "cannot access use positions for fixed intervals"; for (int i = usePosList.size() - 1; i >= 0; --i) { - RegisterPriority registerPriority = usePosList.registerPriority(i); + RegisterPriority registerPriority = adaptPriority(usePosList.registerPriority(i)); if (registerPriority.greaterEqual(minRegisterPriority)) { return usePosList.usePos(i); } @@ -882,7 +936,7 @@ for (int i = usePosList.size() - 1; i >= 0; --i) { int usePos = usePosList.usePos(i); - if (usePos >= from && usePosList.registerPriority(i).greaterEqual(minRegisterPriority)) { + if (usePos >= from && adaptPriority(usePosList.registerPriority(i)).greaterEqual(minRegisterPriority)) { return usePos; } } @@ -894,7 +948,7 @@ for (int i = usePosList.size() - 1; i >= 0; --i) { int usePos = usePosList.usePos(i); - if (usePos >= from && usePosList.registerPriority(i) == exactRegisterPriority) { + if (usePos >= from && adaptPriority(usePosList.registerPriority(i)) == exactRegisterPriority) { return usePos; } } @@ -910,7 +964,7 @@ if (usePos > from) { return prev; } - if (usePosList.registerPriority(i).greaterEqual(minRegisterPriority)) { + if (adaptPriority(usePosList.registerPriority(i)).greaterEqual(minRegisterPriority)) { prev = usePos; } } @@ -1181,6 +1235,10 @@ buf.append(usePosList.usePos(i)).append(':').append(usePosList.registerPriority(i)); prev = usePosList.usePos(i); } - return buf.append("} spill-state{").append(spillState()).append("}").toString(); + buf.append("} spill-state{").append(spillState()).append("}"); + if (canMaterialize()) { + buf.append(" (remat:").append(getMaterializedValue().toString()).append(")"); + } + return buf.toString(); } } diff -r 3603fab248a6 -r 9423a38d6437 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScan.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScan.java Fri Dec 13 07:56:25 2013 +0100 +++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScan.java Fri Dec 13 08:38:52 2013 +0100 @@ -263,11 +263,11 @@ } }; - static final IntervalPredicate IS_OOP_INTERVAL = new IntervalPredicate() { + static final IntervalPredicate IS_STACK_INTERVAL = new IntervalPredicate() { @Override public boolean apply(Interval i) { - return !isRegister(i.operand) && i.kind() == Kind.Object; + return !isRegister(i.operand); } }; @@ -282,7 +282,9 @@ void assignSpillSlot(Interval interval) { // assign the canonical spill slot of the parent (if a part of the interval // is already spilled) or allocate a new spill slot - if (interval.spillSlot() != null) { + if (interval.canMaterialize()) { + interval.assignLocation(Value.ILLEGAL); + } else if (interval.spillSlot() != null) { interval.assignLocation(interval.spillSlot()); } else { StackSlot slot = frameMap.allocateSpillSlot(interval.kind()); @@ -519,9 +521,7 @@ // called once before assignment of register numbers void eliminateSpillMoves() { - if (getTraceLevel() >= 3) { - TTY.println(" Eliminating unnecessary spill moves"); - } + Indent indent = Debug.logAndIndent("Eliminating unnecessary spill moves"); // collect all intervals that must be stored after their definition. 
// the list is sorted by Interval.spillDefinitionPos @@ -553,9 +553,7 @@ if (!isRegister(curInterval.location()) && curInterval.alwaysInMemory()) { // move target is a stack slot that is always correct, so eliminate // instruction - if (getTraceLevel() >= 4) { - TTY.println("eliminating move from interval %d to %d", operandNumber(move.getInput()), operandNumber(move.getResult())); - } + indent.log("eliminating move from interval %d to %d", operandNumber(move.getInput()), operandNumber(move.getResult())); instructions.set(j, null); // null-instructions are deleted by assignRegNum } @@ -565,25 +563,23 @@ assert interval == Interval.EndMarker || (interval.isSplitParent() && interval.spillState() == SpillState.StoreAtDefinition) : "invalid interval"; while (interval != Interval.EndMarker && interval.spillDefinitionPos() == opId) { - if (!insertionBuffer.initialized()) { - // prepare insertion buffer (appended when all instructions of the block - // are processed) - insertionBuffer.init(instructions); - } - - AllocatableValue fromLocation = interval.location(); - AllocatableValue toLocation = canonicalSpillOpr(interval); + if (!interval.canMaterialize()) { + if (!insertionBuffer.initialized()) { + // prepare insertion buffer (appended when all instructions of the + // block are processed) + insertionBuffer.init(instructions); + } - assert isRegister(fromLocation) : "from operand must be a register but is: " + fromLocation + " toLocation=" + toLocation + " spillState=" + interval.spillState(); - assert isStackSlot(toLocation) : "to operand must be a stack slot"; - - insertionBuffer.append(j + 1, ir.spillMoveFactory.createMove(toLocation, fromLocation)); + AllocatableValue fromLocation = interval.location(); + AllocatableValue toLocation = canonicalSpillOpr(interval); - if (getTraceLevel() >= 4) { - StackSlot slot = interval.spillSlot(); - TTY.println("inserting move after definition of interval %d to stack slot %s at opId %d", interval.operandNumber, slot, opId); + assert isRegister(fromLocation) : "from operand must be a register but is: " + fromLocation + " toLocation=" + toLocation + " spillState=" + interval.spillState(); + assert isStackSlot(toLocation) : "to operand must be a stack slot"; + + insertionBuffer.append(j + 1, ir.spillMoveFactory.createMove(toLocation, fromLocation)); + + indent.log("inserting move after definition of interval %d to stack slot %s at opId %d", interval.operandNumber, interval.spillSlot(), opId); } - interval = interval.next; } } @@ -595,9 +591,10 @@ } // end of block iteration assert interval == Interval.EndMarker : "missed an interval"; + indent.outdent(); } - private void checkIntervals(Interval interval) { + private static void checkIntervals(Interval interval) { Interval prev = null; Interval temp = interval; while (temp != Interval.EndMarker) { @@ -607,13 +604,11 @@ assert temp.spillDefinitionPos() >= prev.spillDefinitionPos() : "when intervals are sorted by from : then they must also be sorted by spillDefinitionPos"; } - assert temp.spillSlot() != null : "interval has no spill slot assigned"; + assert temp.spillSlot() != null || temp.canMaterialize() : "interval has no spill slot assigned"; assert temp.spillDefinitionPos() >= temp.from() : "invalid order"; assert temp.spillDefinitionPos() <= temp.from() + 2 : "only intervals defined once at their start-pos can be optimized"; - if (traceLevel >= 4) { - TTY.println("interval %d (from %d to %d) must be stored at %d", temp.operandNumber, temp.from(), temp.to(), temp.spillDefinitionPos()); - } + 
Debug.log("interval %d (from %d to %d) must be stored at %d", temp.operandNumber, temp.from(), temp.to(), temp.spillDefinitionPos()); prev = temp; temp = temp.next; @@ -695,6 +690,8 @@ // iterate all blocks for (final Block block : sortedBlocks) { + Indent indent = Debug.logAndIndent("compute local live sets for block %d", block.getId()); + final BitSet liveGen = new BitSet(liveSize); final BitSet liveKill = new BitSet(liveSize); @@ -713,9 +710,7 @@ int operandNum = operandNumber(operand); if (!liveKill.get(operandNum)) { liveGen.set(operandNum); - if (getTraceLevel() >= 4) { - TTY.println(" Setting liveGen for operand %d at instruction %d", operandNum, op.id()); - } + Debug.log("liveGen for operand %d", operandNum); } if (block.getLoop() != null) { intervalInLoop.setBit(operandNum, block.getLoop().index); @@ -735,9 +730,7 @@ int operandNum = operandNumber(operand); if (!liveKill.get(operandNum)) { liveGen.set(operandNum); - if (getTraceLevel() >= 4) { - TTY.println(" Setting liveGen for LIR opId %d, operand %d because of state for %s", op.id(), operandNum, op); - } + Debug.log("liveGen in state for operand %d", operandNum); } return operand; } @@ -749,6 +742,7 @@ if (isVariable(operand)) { int varNum = operandNumber(operand); liveKill.set(varNum); + Debug.log("liveKill for operand %d", varNum); if (block.getLoop() != null) { intervalInLoop.setBit(varNum, block.getLoop().index); } @@ -764,6 +758,7 @@ } }; + Indent indent2 = indent.logAndIndent("handle op %d", op.id()); op.forEachInput(useProc); op.forEachAlive(useProc); // Add uses of live locals from interpreter's point of view for proper debug @@ -771,17 +766,19 @@ op.forEachState(stateProc); op.forEachTemp(defProc); op.forEachOutput(defProc); + indent2.outdent(); } // end of instruction iteration - blockData.get(block).liveGen = liveGen; - blockData.get(block).liveKill = liveKill; - blockData.get(block).liveIn = new BitSet(liveSize); - blockData.get(block).liveOut = new BitSet(liveSize); + BlockData blockSets = blockData.get(block); + blockSets.liveGen = liveGen; + blockSets.liveKill = liveKill; + blockSets.liveIn = new BitSet(liveSize); + blockSets.liveOut = new BitSet(liveSize); - if (getTraceLevel() >= 4) { - TTY.println("liveGen B%d %s", block.getId(), blockData.get(block).liveGen); - TTY.println("liveKill B%d %s", block.getId(), blockData.get(block).liveKill); - } + indent.log("liveGen B%d %s", block.getId(), blockSets.liveGen); + indent.log("liveKill B%d %s", block.getId(), blockSets.liveKill); + + indent.outdent(); } // end of block iteration } @@ -814,6 +811,7 @@ * {@link BlockData#liveIn} and {@link BlockData#liveOut}) for each block. */ void computeGlobalLiveSets() { + Indent indent = Debug.logAndIndent("compute global live sets"); int numBlocks = blockCount(); boolean changeOccurred; boolean changeOccurredInBlock; @@ -825,9 +823,12 @@ do { changeOccurred = false; + Indent indent2 = indent.logAndIndent("new iteration %d", iterationCount); + // iterate all blocks in reverse order for (int i = numBlocks - 1; i >= 0; i--) { Block block = blockAt(i); + BlockData blockSets = blockData.get(block); changeOccurredInBlock = false; @@ -844,10 +845,10 @@ liveOut.clear(); } - if (!blockData.get(block).liveOut.equals(liveOut)) { + if (!blockSets.liveOut.equals(liveOut)) { // A change occurred. Swap the old and new live out sets to avoid copying. 
- BitSet temp = blockData.get(block).liveOut; - blockData.get(block).liveOut = liveOut; + BitSet temp = blockSets.liveOut; + blockSets.liveOut = liveOut; liveOut = temp; changeOccurred = true; @@ -860,15 +861,13 @@ // !liveKill(block)) // note: liveIn has to be computed only in first iteration or if liveOut has // changed! - BitSet liveIn = blockData.get(block).liveIn; + BitSet liveIn = blockSets.liveIn; liveIn.clear(); - liveIn.or(blockData.get(block).liveOut); - liveIn.andNot(blockData.get(block).liveKill); - liveIn.or(blockData.get(block).liveGen); - } + liveIn.or(blockSets.liveOut); + liveIn.andNot(blockSets.liveKill); + liveIn.or(blockSets.liveGen); - if (getTraceLevel() >= 4) { - traceLiveness(changeOccurredInBlock, iterationCount, block); + indent2.log("block %d: livein = %s, liveout = %s", block.getId(), liveIn, blockSets.liveOut); } } iterationCount++; @@ -876,6 +875,7 @@ if (changeOccurred && iterationCount > 50) { throw new BailoutException("too many iterations in computeGlobalLiveSets"); } + indent2.outdent(); } while (changeOccurred); if (DetailedAsserts.getValue()) { @@ -888,55 +888,53 @@ if (DetailedAsserts.getValue()) { reportFailure(numBlocks); } - - TTY.println("preds=" + startBlock.getPredecessorCount() + ", succs=" + startBlock.getSuccessorCount()); - TTY.println("startBlock-ID: " + startBlock.getId()); - // bailout if this occurs in product mode. throw new GraalInternalError("liveIn set of first block must be empty: " + blockData.get(startBlock).liveIn); } + indent.outdent(); } private void reportFailure(int numBlocks) { - TTY.println(gen.getGraph().toString()); - TTY.println("Error: liveIn set of first block must be empty (when this fails, variables are used before they are defined):"); + Indent indent = Debug.logAndIndent("report failure"); + indent.log("graph: %s", gen.getGraph()); + indent.log("Error: liveIn set of first block must be empty (when this fails, variables are used before they are defined):"); BitSet startBlockLiveIn = blockData.get(ir.cfg.getStartBlock()).liveIn; for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) { Value operand = operandFor(operandNum); - TTY.println(" var %d; operand=%s; node=%s", operandNum, operand.toString(), gen.valueForOperand(operand)); + indent.log(" var %d; operand=%s; node=%s", operandNum, operand, gen.valueForOperand(operand)); } // print some additional information to simplify debugging for (int operandNum = startBlockLiveIn.nextSetBit(0); operandNum >= 0; operandNum = startBlockLiveIn.nextSetBit(operandNum + 1)) { Value operand = operandFor(operandNum); - TTY.println("---- Detailed information for var %d; operand=%s; node=%s ----", operandNum, operand.toString(), gen.valueForOperand(operand)); + final Indent indent2 = indent.logAndIndent("---- Detailed information for var %d; operand=%s; node=%s ----", operandNum, operand, gen.valueForOperand(operand)); Deque definedIn = new ArrayDeque<>(); HashSet usedIn = new HashSet<>(); for (Block block : sortedBlocks) { if (blockData.get(block).liveGen.get(operandNum)) { usedIn.add(block); - TTY.println("used in block B%d {", block.getId()); + indent2.log("used in block B%d {", block.getId()); for (LIRInstruction ins : ir.lir(block)) { - TTY.println(" " + ins.id() + ": " + ins.toString()); + indent2.log(" %d: %s", ins.id(), ins); ins.forEachState(new ValueProcedure() { @Override public Value doValue(Value liveStateOperand) { - TTY.println(" operand=" + liveStateOperand); + indent2.log(" operand=" + 
liveStateOperand); return liveStateOperand; } }); } - TTY.println("}"); + indent2.log("}"); } if (blockData.get(block).liveKill.get(operandNum)) { definedIn.add(block); - TTY.println("defined in block B%d {", block.getId()); + indent2.log("defined in block B%d {", block.getId()); for (LIRInstruction ins : ir.lir(block)) { - TTY.println(" " + ins.id() + ": " + ins.toString()); + indent2.log(" %d: %s", ins.id(), ins); } - TTY.println("}"); + indent2.log("}"); } } @@ -957,12 +955,13 @@ } } } - TTY.print("**** offending usages are in: "); + indent.log("**** offending usages are in: "); for (Block block : usedIn) { - TTY.print("B%d ", block.getId()); + indent2.log("B%d ", block.getId()); } - TTY.println(); + indent2.outdent(); } + indent.outdent(); } private void verifyLiveness() { @@ -977,21 +976,10 @@ } } - private void traceLiveness(boolean changeOccurredInBlock, int iterationCount, Block block) { - char c = iterationCount == 0 || changeOccurredInBlock ? '*' : ' '; - TTY.print("(%d) liveIn%c B%d ", iterationCount, c, block.getId()); - TTY.println(blockData.get(block).liveIn.toString()); - TTY.print("(%d) liveOut%c B%d ", iterationCount, c, block.getId()); - TTY.println(blockData.get(block).liveOut.toString()); - } - void addUse(AllocatableValue operand, int from, int to, RegisterPriority registerPriority, PlatformKind kind) { if (!isProcessed(operand)) { return; } - if (getTraceLevel() >= 2 && kind == null) { - TTY.println(" use %s from %d to %d (%s)", operand, from, to, registerPriority.name()); - } Interval interval = getOrCreateInterval(operand); if (kind != Kind.Illegal) { @@ -1002,15 +990,14 @@ // Register use position at even instruction id. interval.addUsePos(to & ~1, registerPriority); + + Debug.log("add use: %s, from %d to %d (%s)", interval, from, to, registerPriority.name()); } void addTemp(AllocatableValue operand, int tempPos, RegisterPriority registerPriority, PlatformKind kind) { if (!isProcessed(operand)) { return; } - if (getTraceLevel() >= 2) { - TTY.println(" temp %s tempPos %d (%s)", operand, tempPos, RegisterPriority.MustHaveRegister.name()); - } Interval interval = getOrCreateInterval(operand); if (kind != Kind.Illegal) { @@ -1019,19 +1006,20 @@ interval.addRange(tempPos, tempPos + 1); interval.addUsePos(tempPos, registerPriority); + interval.addMaterializationValue(null); + + Debug.log("add temp: %s tempPos %d (%s)", interval, tempPos, RegisterPriority.MustHaveRegister.name()); } boolean isProcessed(Value operand) { return !isRegister(operand) || attributes(asRegister(operand)).isAllocatable(); } - void addDef(AllocatableValue operand, int defPos, RegisterPriority registerPriority, PlatformKind kind) { + void addDef(AllocatableValue operand, LIRInstruction op, RegisterPriority registerPriority, PlatformKind kind) { if (!isProcessed(operand)) { return; } - if (getTraceLevel() >= 2) { - TTY.println(" def %s defPos %d (%s)", operand, defPos, registerPriority.name()); - } + int defPos = op.id(); Interval interval = getOrCreateInterval(operand); if (kind != Kind.Illegal) { @@ -1050,9 +1038,7 @@ // also add register priority for dead intervals interval.addRange(defPos, defPos + 1); interval.addUsePos(defPos, registerPriority); - if (getTraceLevel() >= 2) { - TTY.println("Warning: def of operand %s at %d occurs without use", operand, defPos); - } + Debug.log("Warning: def of operand %s at %d occurs without use", operand, defPos); } changeSpillDefinitionPos(interval, defPos); @@ -1060,6 +1046,9 @@ // detection of method-parameters and roundfp-results 
interval.setSpillState(SpillState.StartInMemory); } + interval.addMaterializationValue(gen.getMaterializedValue(op, operand)); + + Debug.log("add def: %s defPos %d (%s)", interval, defPos, registerPriority.name()); } /** @@ -1153,6 +1142,9 @@ } void buildIntervals() { + + Indent indent = Debug.logAndIndent("build intervals"); + intervalsSize = operandSize(); intervals = new Interval[intervalsSize + INITIAL_SPLIT_INTERVALS_CAPACITY]; @@ -1161,7 +1153,10 @@ // iterate all blocks in reverse order for (int i = blockCount() - 1; i >= 0; i--) { + Block block = blockAt(i); + Indent indent2 = indent.logAndIndent("handle block %d", block.getId()); + List instructions = ir.lir(block); final int blockFrom = getFirstLirInstructionId(block); int blockTo = getLastLirInstructionId(block); @@ -1174,9 +1169,7 @@ for (int operandNum = live.nextSetBit(0); operandNum >= 0; operandNum = live.nextSetBit(operandNum + 1)) { assert live.get(operandNum) : "should not stop here otherwise"; AllocatableValue operand = operandFor(operandNum); - if (getTraceLevel() >= 2) { - TTY.println("live in %s to %d", operand, blockTo + 2); - } + indent.log("live in %d: %s", operandNum, operand); addUse(operand, blockFrom, blockTo + 2, RegisterPriority.None, Kind.Illegal); @@ -1195,6 +1188,8 @@ final LIRInstruction op = instructions.get(j); final int opId = op.id(); + Indent indent3 = indent2.logAndIndent("handle inst %d: %s", opId, op); + // add a temp range for each register if operation destroys caller-save registers if (op.destroysCallerSavedRegisters()) { for (Register r : callerSaveRegs) { @@ -1202,9 +1197,7 @@ addTemp(r.asValue(), opId, RegisterPriority.None, Kind.Illegal); } } - if (getTraceLevel() >= 4) { - TTY.println("operation destroys all caller-save registers"); - } + indent.log("operation destroys all caller-save registers"); } op.forEachOutput(new ValueProcedure() { @@ -1212,7 +1205,7 @@ @Override public Value doValue(Value operand, OperandMode mode, EnumSet flags) { if (isVariableOrRegister(operand)) { - addDef((AllocatableValue) operand, opId, registerPriorityOfOutputOperand(op), operand.getPlatformKind()); + addDef((AllocatableValue) operand, op, registerPriorityOfOutputOperand(op), operand.getPlatformKind()); addRegisterHint(op, operand, mode, flags, true); } return operand; @@ -1270,7 +1263,10 @@ // special steps for some instructions (especially moves) handleMethodArguments(op); + indent3.outdent(); + } // end of instruction iteration + indent2.outdent(); } // end of block iteration // add the range [0, 1] to all fixed intervals. @@ -1280,6 +1276,7 @@ interval.addRange(0, 1); } } + indent.outdent(); } // * Phase 5: actual register allocation @@ -1431,6 +1428,7 @@ }; public void allocateRegisters() { + Indent indent = Debug.logAndIndent("allocate registers"); Interval precoloredIntervals; Interval notPrecoloredIntervals; @@ -1442,6 +1440,7 @@ LinearScanWalker lsw = new LinearScanWalker(this, precoloredIntervals, notPrecoloredIntervals); lsw.walk(); lsw.finishAllocation(); + indent.outdent(); } // * Phase 6: resolve data flow @@ -1546,6 +1545,8 @@ * have been split. 
*/ void resolveDataFlow() { + Indent indent = Debug.logAndIndent("resolve data flow"); + int numBlocks = blockCount(); MoveResolver moveResolver = new MoveResolver(this); BitSet blockCompleted = new BitSet(numBlocks); @@ -1608,6 +1609,7 @@ } } } + indent.outdent(); } // * Phase 7: assign register numbers back to LIR @@ -1652,15 +1654,18 @@ interval = splitChildAtOpId(interval, opId, mode); } + if (isIllegal(interval.location()) && interval.canMaterialize()) { + return interval.getMaterializedValue(); + } return interval.location(); } - IntervalWalker initComputeOopMaps() { + protected IntervalWalker initIntervalWalker(IntervalPredicate predicate) { // setup lists of potential oops for walking Interval oopIntervals; Interval nonOopIntervals; - oopIntervals = createUnhandledLists(IS_OOP_INTERVAL, null).first; + oopIntervals = createUnhandledLists(predicate, null).first; // intervals that have no oops inside need not to be processed. // to ensure a walking until the last instruction id, add a dummy interval @@ -1671,7 +1676,11 @@ return new IntervalWalker(this, oopIntervals, nonOopIntervals); } - void computeOopMap(IntervalWalker iw, LIRInstruction op, BitSet registerRefMap, BitSet frameRefMap) { + /** + * Visits all intervals for a frame state. The frame state use this information to build the OOP + * maps. + */ + void markFrameLocations(IntervalWalker iw, LIRInstruction op, LIRFrameState info) { if (getTraceLevel() >= 3) { TTY.println("creating oop map at opId %d", op.id()); } @@ -1694,11 +1703,11 @@ // moves, any intervals which end at this instruction are included // in the oop map since we may safepoint while doing the patch // before we've consumed the inputs. - if (op.id() < interval.currentTo()) { + if (op.id() < interval.currentTo() && !isIllegal(interval.location())) { // caller-save registers must not be included into oop-maps at calls assert !op.destroysCallerSavedRegisters() || !isRegister(operand) || !isCallerSave(operand) : "interval is in a caller-save register at a call . register will be overwritten"; - frameMap.setReference(interval.location(), registerRefMap, frameRefMap); + info.markLocation(interval.location(), frameMap); // Spill optimization: when the stack value is guaranteed to be always correct, // then it must be added to the oop map even if the interval is currently in a @@ -1707,7 +1716,7 @@ assert interval.spillDefinitionPos() > 0 : "position not set correctly"; assert interval.spillSlot() != null : "no spill slot assigned"; assert !isRegister(interval.operand) : "interval is on stack : so stack slot is registered twice"; - frameMap.setReference(interval.spillSlot(), registerRefMap, frameRefMap); + info.markLocation(interval.spillSlot(), frameMap); } } } @@ -1718,9 +1727,8 @@ } private void computeDebugInfo(IntervalWalker iw, final LIRInstruction op, LIRFrameState info) { - BitSet registerRefMap = op.destroysCallerSavedRegisters() && callKillsRegisters ? 
null : frameMap.initRegisterRefMap(); - BitSet frameRefMap = frameMap.initFrameRefMap(); - computeOopMap(iw, op, registerRefMap, frameRefMap); + info.initDebugInfo(frameMap, !op.destroysCallerSavedRegisters() || !callKillsRegisters); + markFrameLocations(iw, op, info); info.forEachState(new ValueProcedure() { @@ -1750,12 +1758,11 @@ // the intervals // if the interval is not live, colorLirOperand will cause an assert on failure Value result = colorLirOperand((Variable) operand, tempOpId, mode); - assert !hasCall(tempOpId) || isStackSlot(result) || !isCallerSave(result) : "cannot have caller-save register operands at calls"; + assert !hasCall(tempOpId) || isStackSlot(result) || isConstant(result) || !isCallerSave(result) : "cannot have caller-save register operands at calls"; return result; } }); - - info.finish(registerRefMap, frameRefMap); + info.finish(op, frameMap); } private void assignLocations(List instructions, final IntervalWalker iw) { @@ -1811,7 +1818,7 @@ } private void assignLocations() { - IntervalWalker iw = initComputeOopMaps(); + IntervalWalker iw = initIntervalWalker(IS_STACK_INTERVAL); for (Block block : sortedBlocks) { assignLocations(ir.lir(block), iw); } @@ -1819,6 +1826,8 @@ public void allocate() { + Indent indent = Debug.logAndIndent(false, "allocate %s", gen.getGraph().method()); + try (Scope s = Debug.scope("LifetimeAnalysis")) { numberInstructions(); printLir("Before register allocation", true); @@ -1875,6 +1884,8 @@ } catch (Throwable e) { throw Debug.handle(e); } + + indent.outdent(); } void printIntervals(String label) { @@ -1999,7 +2010,7 @@ } Value l1 = i1.location(); Value l2 = i2.location(); - if (i1.intersects(i2) && (l1.equals(l2))) { + if (i1.intersects(i2) && !isIllegal(l1) && (l1.equals(l2))) { if (DetailedAsserts.getValue()) { TTY.println("Intervals %d and %d overlap and have the same register assigned", i1.operandNumber, i2.operandNumber); TTY.println(i1.logString(this)); diff -r 3603fab248a6 -r 9423a38d6437 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScanWalker.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScanWalker.java Fri Dec 13 07:56:25 2013 +0100 +++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScanWalker.java Fri Dec 13 08:38:52 2013 +0100 @@ -399,66 +399,52 @@ // 1) the left part has already a location assigned // 2) the right part is sorted into to the unhandled-list void splitBeforeUsage(Interval interval, int minSplitPos, int maxSplitPos) { - if (getTraceLevel() >= 2) { - TTY.println("----- splitting interval: "); - } - if (getTraceLevel() >= 4) { - TTY.println(interval.logString(allocator)); - } - if (getTraceLevel() >= 2) { - TTY.println(" between %d and %d", minSplitPos, maxSplitPos); - } + + try (Indent indent = Debug.logAndIndent("splitting interval %s between %d and %d", interval, minSplitPos, maxSplitPos)) { - assert interval.from() < minSplitPos : "cannot split at start of interval"; - assert currentPosition < minSplitPos : "cannot split before current position"; - assert minSplitPos <= maxSplitPos : "invalid order"; - assert maxSplitPos <= interval.to() : "cannot split after end of interval"; + assert interval.from() < minSplitPos : "cannot split at start of interval"; + assert currentPosition < minSplitPos : "cannot split before current position"; + assert minSplitPos <= maxSplitPos : "invalid order"; + assert maxSplitPos <= interval.to() : "cannot split after end of interval"; + + int optimalSplitPos = 
findOptimalSplitPos(interval, minSplitPos, maxSplitPos, true); - int optimalSplitPos = findOptimalSplitPos(interval, minSplitPos, maxSplitPos, true); - - assert minSplitPos <= optimalSplitPos && optimalSplitPos <= maxSplitPos : "out of range"; - assert optimalSplitPos <= interval.to() : "cannot split after end of interval"; - assert optimalSplitPos > interval.from() : "cannot split at start of interval"; + assert minSplitPos <= optimalSplitPos && optimalSplitPos <= maxSplitPos : "out of range"; + assert optimalSplitPos <= interval.to() : "cannot split after end of interval"; + assert optimalSplitPos > interval.from() : "cannot split at start of interval"; - if (optimalSplitPos == interval.to() && interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos) == Integer.MAX_VALUE) { - // the split position would be just before the end of the interval - // . no split at all necessary - if (getTraceLevel() >= 4) { - TTY.println(" no split necessary because optimal split position is at end of interval"); + if (optimalSplitPos == interval.to() && interval.nextUsage(RegisterPriority.MustHaveRegister, minSplitPos) == Integer.MAX_VALUE) { + // the split position would be just before the end of the interval + // . no split at all necessary + indent.log("no split necessary because optimal split position is at end of interval"); + return; } - return; - } - // must calculate this before the actual split is performed and before split position is - // moved to odd opId - boolean moveNecessary = !allocator.isBlockBegin(optimalSplitPos) && !interval.hasHoleBetween(optimalSplitPos - 1, optimalSplitPos); + // must calculate this before the actual split is performed and before split position is + // moved to odd opId + boolean moveNecessary = !allocator.isBlockBegin(optimalSplitPos) && !interval.hasHoleBetween(optimalSplitPos - 1, optimalSplitPos); - if (!allocator.isBlockBegin(optimalSplitPos)) { - // move position before actual instruction (odd opId) - optimalSplitPos = (optimalSplitPos - 1) | 1; - } + if (!allocator.isBlockBegin(optimalSplitPos)) { + // move position before actual instruction (odd opId) + optimalSplitPos = (optimalSplitPos - 1) | 1; + } - if (getTraceLevel() >= 4) { - TTY.println(" splitting at position %d", optimalSplitPos); - } - assert allocator.isBlockBegin(optimalSplitPos) || (optimalSplitPos % 2 == 1) : "split pos must be odd when not on block boundary"; - assert !allocator.isBlockBegin(optimalSplitPos) || (optimalSplitPos % 2 == 0) : "split pos must be even on block boundary"; + indent.log("splitting at position %d", optimalSplitPos); - Interval splitPart = interval.split(optimalSplitPos, allocator); + assert allocator.isBlockBegin(optimalSplitPos) || (optimalSplitPos % 2 == 1) : "split pos must be odd when not on block boundary"; + assert !allocator.isBlockBegin(optimalSplitPos) || (optimalSplitPos % 2 == 0) : "split pos must be even on block boundary"; - splitPart.setInsertMoveWhenActivated(moveNecessary); + Interval splitPart = interval.split(optimalSplitPos, allocator); - assert splitPart.from() >= currentInterval.currentFrom() : "cannot append new interval before current walk position"; - unhandledLists.addToListSortedByStartAndUsePositions(RegisterBinding.Any, splitPart); + splitPart.setInsertMoveWhenActivated(moveNecessary); - if (getTraceLevel() >= 2) { - TTY.println(" split interval in two parts (insertMoveWhenActivated: %b)", moveNecessary); - } - if (getTraceLevel() >= 2) { - TTY.print(" "); - TTY.println(interval.logString(allocator)); - TTY.print(" "); - 
TTY.println(splitPart.logString(allocator)); + assert splitPart.from() >= currentInterval.currentFrom() : "cannot append new interval before current walk position"; + unhandledLists.addToListSortedByStartAndUsePositions(RegisterBinding.Any, splitPart); + + if (Debug.isLogEnabled()) { + indent.log("left interval %s: %s", moveNecessary ? " " : "", interval.logString(allocator)); + indent.log("right interval %s: %s", moveNecessary ? "(move)" : "", splitPart.logString(allocator)); + } } } @@ -472,11 +458,7 @@ int maxSplitPos = currentPosition; int minSplitPos = Math.max(interval.previousUsage(RegisterPriority.ShouldHaveRegister, maxSplitPos) + 1, interval.from()); - if (getTraceLevel() >= 2) { - TTY.print("----- splitting and spilling interval: "); - TTY.println(interval.logString(allocator)); - TTY.println(" between %d and %d", minSplitPos, maxSplitPos); - } + Indent indent = Debug.logAndIndent("splitting and spilling interval %s between %d and %d", interval, minSplitPos, maxSplitPos); assert interval.state == State.Active : "why spill interval that is not active?"; assert interval.from() <= minSplitPos : "cannot split before start of interval"; @@ -486,33 +468,32 @@ if (minSplitPos == interval.from()) { // the whole interval is never used, so spill it entirely to memory - if (getTraceLevel() >= 2) { - TTY.println(" spilling entire interval because split pos is at beginning of interval"); - TTY.println(" use positions: " + interval.usePosList().size()); - } - assert interval.firstUsage(RegisterPriority.ShouldHaveRegister) > currentPosition : "interval must not have use position before currentPosition"; + + try (Indent indent2 = indent.logAndIndent("spilling entire interval because split pos is at beginning of interval (use positions: %d)", interval.usePosList().size())) { - allocator.assignSpillSlot(interval); - allocator.changeSpillState(interval, minSplitPos); + assert interval.firstUsage(RegisterPriority.ShouldHaveRegister) > currentPosition : "interval must not have use position before currentPosition"; + + allocator.assignSpillSlot(interval); + allocator.changeSpillState(interval, minSplitPos); - // Also kick parent intervals out of register to memory when they have no use - // position. This avoids short interval in register surrounded by intervals in - // memory . avoid useless moves from memory to register and back - Interval parent = interval; - while (parent != null && parent.isSplitChild()) { - parent = parent.getSplitChildBeforeOpId(parent.from()); + // Also kick parent intervals out of register to memory when they have no use + // position. This avoids short interval in register surrounded by intervals in + // memory . 
avoid useless moves from memory to register and back + Interval parent = interval; + while (parent != null && parent.isSplitChild()) { + parent = parent.getSplitChildBeforeOpId(parent.from()); - if (isRegister(parent.location())) { - if (parent.firstUsage(RegisterPriority.ShouldHaveRegister) == Integer.MAX_VALUE) { - // parent is never used, so kick it out of its assigned register - if (getTraceLevel() >= 4) { - TTY.println(" kicking out interval %d out of its register because it is never used", parent.operandNumber); + if (isRegister(parent.location())) { + if (parent.firstUsage(RegisterPriority.ShouldHaveRegister) == Integer.MAX_VALUE) { + // parent is never used, so kick it out of its assigned register + indent2.log("kicking out interval %d out of its register because it is never used", parent.operandNumber); + allocator.assignSpillSlot(parent); + } else { + // do not go further back because the register is actually used by +// the + // interval + parent = null; } - allocator.assignSpillSlot(parent); - } else { - // do not go further back because the register is actually used by the - // interval - parent = null; } } } @@ -530,35 +511,30 @@ optimalSplitPos = (optimalSplitPos - 1) | 1; } - if (getTraceLevel() >= 4) { - TTY.println(" splitting at position %d", optimalSplitPos); - } - assert allocator.isBlockBegin(optimalSplitPos) || (optimalSplitPos % 2 == 1) : "split pos must be odd when not on block boundary"; - assert !allocator.isBlockBegin(optimalSplitPos) || (optimalSplitPos % 2 == 0) : "split pos must be even on block boundary"; + try (Indent indent2 = indent.logAndIndent("splitting at position %d", optimalSplitPos)) { + assert allocator.isBlockBegin(optimalSplitPos) || (optimalSplitPos % 2 == 1) : "split pos must be odd when not on block boundary"; + assert !allocator.isBlockBegin(optimalSplitPos) || (optimalSplitPos % 2 == 0) : "split pos must be even on block boundary"; - Interval spilledPart = interval.split(optimalSplitPos, allocator); - allocator.assignSpillSlot(spilledPart); - allocator.changeSpillState(spilledPart, optimalSplitPos); + Interval spilledPart = interval.split(optimalSplitPos, allocator); + allocator.assignSpillSlot(spilledPart); + allocator.changeSpillState(spilledPart, optimalSplitPos); - if (!allocator.isBlockBegin(optimalSplitPos)) { - if (getTraceLevel() >= 4) { - TTY.println(" inserting move from interval %d to %d", interval.operandNumber, spilledPart.operandNumber); + if (!allocator.isBlockBegin(optimalSplitPos)) { + indent2.log("inserting move from interval %d to %d", interval.operandNumber, spilledPart.operandNumber); + insertMove(optimalSplitPos, interval, spilledPart); } - insertMove(optimalSplitPos, interval, spilledPart); - } - // the currentSplitChild is needed later when moves are inserted for reloading - assert spilledPart.currentSplitChild() == interval : "overwriting wrong currentSplitChild"; - spilledPart.makeCurrentSplitChild(); + // the currentSplitChild is needed later when moves are inserted for reloading + assert spilledPart.currentSplitChild() == interval : "overwriting wrong currentSplitChild"; + spilledPart.makeCurrentSplitChild(); - if (getTraceLevel() >= 2) { - TTY.println(" split interval in two parts"); - TTY.print(" "); - TTY.println(interval.logString(allocator)); - TTY.print(" "); - TTY.println(spilledPart.logString(allocator)); + if (Debug.isLogEnabled()) { + indent2.log("left interval: %s", interval.logString(allocator)); + indent2.log("spilled interval : %s", spilledPart.logString(allocator)); + } } } + indent.outdent(); } 
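Aside from the rematerialization checks, the LinearScan and LinearScanWalker hunks above replace the old getTraceLevel()/TTY tracing with the scoped Debug logging API. The following is an illustrative sketch of that idiom, not part of the patch; it only uses the Debug, Indent, logAndIndent, log and isLogEnabled names that already appear in the hunks, and the surrounding method and its computation are made up for the example.

    // Illustrative sketch: the scoped-logging idiom this patch converts the TTY tracing to.
    void traceSplitExample(Interval interval, int minSplitPos, int maxSplitPos) {
        // logAndIndent emits the message and opens an indented scope for nested output.
        try (Indent indent = Debug.logAndIndent("splitting interval %s between %d and %d", interval, minSplitPos, maxSplitPos)) {
            int optimalSplitPos = (minSplitPos + maxSplitPos) / 2; // placeholder computation
            // isLogEnabled() replaces the old getTraceLevel() >= n guards around expensive messages.
            if (Debug.isLogEnabled()) {
                indent.log("splitting at position %d", optimalSplitPos);
            }
        } // leaving the try block closes the Indent and restores the previous indentation
    }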
void splitStackInterval(Interval interval) { @@ -883,13 +859,8 @@ Interval interval = currentInterval; boolean result = true; - if (getTraceLevel() >= 2) { - TTY.println("+++++ activating interval " + interval.logString(allocator)); - } - - if (getTraceLevel() >= 4) { - TTY.println(" splitParent: %s, insertMoveWhenActivated: %b", interval.splitParent().operandNumber, interval.insertMoveWhenActivated()); - } + Indent indent = Debug.logAndIndent("activating interval %s, splitParent: %s, insertMoveWhenActivated: %b", interval.logString(allocator), interval.splitParent().operandNumber, + interval.insertMoveWhenActivated()); final Value operand = interval.operand; if (interval.location() != null && isStackSlot(interval.location())) { @@ -940,6 +911,8 @@ } interval.makeCurrentSplitChild(); + indent.outdent(); + return result; // true = interval is moved to active list } diff -r 3603fab248a6 -r 9423a38d6437 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/MoveResolver.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/MoveResolver.java Fri Dec 13 07:56:25 2013 +0100 +++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/MoveResolver.java Fri Dec 13 08:38:52 2013 +0100 @@ -199,9 +199,7 @@ insertionBuffer.append(insertIdx, allocator.ir.spillMoveFactory.createMove(toOpr, fromOpr)); - if (allocator.getTraceLevel() >= 4) { - TTY.println("MoveResolver: inserted move from %d (%s) to %d (%s)", fromInterval.operandNumber, fromInterval.location(), toInterval.operandNumber, toInterval.location()); - } + Debug.log("insert move from %s to %s at %d", fromInterval, toInterval, insertIdx); } private void insertMove(Value fromOpr, Interval toInterval) { @@ -211,9 +209,7 @@ AllocatableValue toOpr = toInterval.operand; insertionBuffer.append(insertIdx, allocator.ir.spillMoveFactory.createMove(toOpr, fromOpr)); - if (allocator.getTraceLevel() >= 4) { - TTY.print("MoveResolver: inserted move from constant %s to %d (%s)", fromOpr, toInterval.operandNumber, toInterval.location()); - } + Debug.log("insert move from value %s to %s at %d", fromOpr, toInterval, insertIdx); } private void resolveMappings() { @@ -283,9 +279,7 @@ } spillInterval.assignLocation(spillSlot); - if (allocator.getTraceLevel() >= 4) { - TTY.println("created new Interval %s for spilling", spillInterval.operand); - } + Debug.log("created new Interval for spilling: %s", spillInterval); // insert a move from register to stack and update the mapping insertMove(fromInterval, spillInterval); @@ -325,9 +319,18 @@ } void addMapping(Interval fromInterval, Interval toInterval) { - if (allocator.getTraceLevel() >= 4) { - TTY.println("MoveResolver: adding mapping from interval %d (%s) to interval %d (%s)", fromInterval.operandNumber, fromInterval.location(), toInterval.operandNumber, toInterval.location()); + + if (isIllegal(toInterval.location()) && toInterval.canMaterialize()) { + Debug.log("no store to rematerializable interval %s needed", toInterval); + return; } + if (isIllegal(fromInterval.location()) && fromInterval.canMaterialize()) { + // Instead of a reload, re-materialize the value + Value rematValue = fromInterval.getMaterializedValue(); + addMapping(rematValue, toInterval); + return; + } + Debug.log("add move mapping from %s to %s", fromInterval, toInterval); assert !fromInterval.operand.equals(toInterval.operand) : "from and to interval equal: " + fromInterval; assert fromInterval.kind() == toInterval.kind(); @@ -337,9 +340,8 @@ } void addMapping(Value fromOpr, Interval toInterval) { 
-        if (allocator.getTraceLevel() >= 4) {
-            TTY.println("MoveResolver: adding mapping from %s to %d (%s)", fromOpr, toInterval.operandNumber, toInterval.location());
-        }
+        Debug.log("add move mapping from %s to %s", fromOpr, toInterval);
+
         assert isConstant(fromOpr) : "only for constants";
 
         mappingFrom.add(null);
diff -r 3603fab248a6 -r 9423a38d6437 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/gen/LIRGenerator.java
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/gen/LIRGenerator.java	Fri Dec 13 07:56:25 2013 +0100
+++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/gen/LIRGenerator.java	Fri Dec 13 08:38:52 2013 +0100
@@ -123,6 +123,19 @@
         this.printIRWithLIR = Options.PrintIRWithLIR.getValue();
     }
 
+    /**
+     * Returns a value for an interval definition which can be used for re-materialization.
+     *
+     * @param op An instruction which defines a value
+     * @param operand The destination operand of the instruction
+     * @return The value which is moved by the instruction and which can be reused at all
+     *         reload locations in case the interval of this instruction is spilled. Currently this
+     *         can only be a {@link Constant}.
+     */
+    public Constant getMaterializedValue(LIRInstruction op, Value operand) {
+        return null;
+    }
+
     @SuppressWarnings("hiding")
     protected DebugInfoBuilder createDebugInfoBuilder(NodeMap<Value> nodeOperands) {
         return new DebugInfoBuilder(nodeOperands);
diff -r 3603fab248a6 -r 9423a38d6437 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/LIRFrameState.java
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/LIRFrameState.java	Fri Dec 13 07:56:25 2013 +0100
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/LIRFrameState.java	Fri Dec 13 08:38:52 2013 +0100
@@ -41,7 +41,7 @@
     public final BytecodeFrame topFrame;
     private final VirtualObject[] virtualObjects;
    public final LabelRef exceptionEdge;
-    private DebugInfo debugInfo;
+    protected DebugInfo debugInfo;
 
     public LIRFrameState(BytecodeFrame topFrame, VirtualObject[] virtualObjects, LabelRef exceptionEdge) {
         this.topFrame = topFrame;
@@ -109,8 +109,45 @@
         }
     }
 
-    public void finish(BitSet registerRefMap, BitSet frameRefMap) {
-        debugInfo = new DebugInfo(topFrame, registerRefMap, frameRefMap);
+    /**
+     * Called by the register allocator before {@link #markLocation} to initialize the frame state.
+     *
+     * @param frameMap The frame map.
+     * @param canHaveRegisters True if there can be any register map entries.
+     */
+    public void initDebugInfo(FrameMap frameMap, boolean canHaveRegisters) {
+        BitSet registerRefMap = (canHaveRegisters ? frameMap.initRegisterRefMap() : null);
+        debugInfo = new DebugInfo(topFrame, registerRefMap, frameMap.initFrameRefMap());
+    }
+
+    /**
+     * Called by the register allocator to mark the specified location as a reference in the
+     * reference map of the debug information. The tracked location can be a {@link RegisterValue}
+     * or a {@link StackSlot}. Note that a {@link Constant} is automatically tracked.
+     *
+     * @param location The location to be added to the reference map.
+     * @param frameMap The frame map.
+     */
+    public void markLocation(Value location, FrameMap frameMap) {
+        if (location.getKind() == Kind.Object) {
+            if (isRegister(location)) {
+                debugInfo.getRegisterRefMap().set(asRegister(location).number);
+            } else if (isStackSlot(location)) {
+                int index = frameMap.indexForStackSlot(asStackSlot(location));
+                debugInfo.getFrameRefMap().set(index);
+            } else {
+                assert isConstant(location);
+            }
+        }
+    }
+
+    /**
+     * Called by the register allocator after all locations are marked.
+     *
+     * @param op The instruction to which this frame state belongs.
+     * @param frameMap The frame map.
+     */
+    public void finish(LIRInstruction op, FrameMap frameMap) {
+    }
 
     @Override
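The hook that ties this together, LIRGenerator.getMaterializedValue, still returns null above, which is why the commit message says the feature is still disabled. Below is a sketch, not part of the patch, of what a backend override enabling constant rematerialization might look like. It assumes the MoveOp interface that LIR move instructions implement (the move.getInput()/getResult() accessors appear in eliminateSpillMoves above) and the static isConstant helper already used by the allocator code in this diff; the notion of a "cheap" constant is a hypothetical placeholder for a target-specific test.

    // Sketch only: report the constant defined by a plain constant-load move so that
    // LinearScan can re-materialize it instead of spilling and reloading it.
    @Override
    public Constant getMaterializedValue(LIRInstruction op, Value operand) {
        if (op instanceof MoveOp) {
            MoveOp move = (MoveOp) op;
            if (isConstant(move.getInput()) && operand.equals(move.getResult())) {
                // A real implementation would restrict this to constants the target can
                // re-create in a register at least as cheaply as a reload from the stack.
                return (Constant) move.getInput();
            }
        }
        return null; // this definition cannot be re-materialized
    }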