# HG changeset patch # User Christos Kotselidis # Date 1375692286 -7200 # Node ID 19648527ec725576a78244894073b0f017f075a1 # Parent 050eba23554e730749b27c4e73f2465c4508d51f# Parent 9024b2eb84345515b2f3413614c393e8f6d17d81 Merge diff -r 050eba23554e -r 19648527ec72 graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/CodeUtil.java --- a/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/CodeUtil.java Sun Aug 04 02:36:40 2013 +0200 +++ b/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/CodeUtil.java Mon Aug 05 10:44:46 2013 +0200 @@ -75,7 +75,7 @@ */ public static int log2(int val) { assert val > 0; - return 31 - Integer.numberOfLeadingZeros(val); + return (Integer.SIZE - 1) - Integer.numberOfLeadingZeros(val); } /** @@ -87,7 +87,7 @@ */ public static int log2(long val) { assert val > 0; - return 63 - Long.numberOfLeadingZeros(val); + return (Long.SIZE - 1) - Long.numberOfLeadingZeros(val); } /** diff -r 050eba23554e -r 19648527ec72 graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/TargetDescription.java --- a/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/TargetDescription.java Sun Aug 04 02:36:40 2013 +0200 +++ b/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/TargetDescription.java Mon Aug 05 10:44:46 2013 +0200 @@ -81,16 +81,4 @@ this.implicitNullCheckLimit = implicitNullCheckLimit; this.inlineObjects = inlineObjects; } - - /** - * Aligns the given frame size (without return instruction pointer) to the stack alignment size - * and return the aligned size (without return instruction pointer). - * - * @param frameSize the initial frame size to be aligned - * @return the aligned frame size - */ - public int alignFrameSize(int frameSize) { - int x = frameSize + arch.getReturnAddressSize() + (stackAlignment - 1); - return (x / stackAlignment) * stackAlignment - arch.getReturnAddressSize(); - } } diff -r 050eba23554e -r 19648527ec72 graal/com.oracle.graal.compiler.hsail/src/com/oracle/graal/compiler/hsail/HSAILBackend.java --- a/graal/com.oracle.graal.compiler.hsail/src/com/oracle/graal/compiler/hsail/HSAILBackend.java Sun Aug 04 02:36:40 2013 +0200 +++ b/graal/com.oracle.graal.compiler.hsail/src/com/oracle/graal/compiler/hsail/HSAILBackend.java Mon Aug 05 10:44:46 2013 +0200 @@ -23,7 +23,6 @@ package com.oracle.graal.compiler.hsail; import static com.oracle.graal.api.code.CallingConvention.Type.*; - import static com.oracle.graal.api.code.ValueUtil.*; import com.oracle.graal.api.code.*; @@ -35,6 +34,7 @@ import com.oracle.graal.debug.*; import com.oracle.graal.lir.*; import com.oracle.graal.lir.asm.*; +import com.oracle.graal.lir.hsail.*; import com.oracle.graal.nodes.*; import com.oracle.graal.hsail.*; @@ -60,6 +60,11 @@ } @Override + public FrameMap newFrameMap() { + return new HSAILFrameMap(runtime(), target, runtime().lookupRegisterConfig()); + } + + @Override public LIRGenerator newLIRGenerator(StructuredGraph graph, FrameMap frameMap, CallingConvention cc, LIR lir) { return new HSAILLIRGenerator(graph, runtime(), target, frameMap, cc, lir); } diff -r 050eba23554e -r 19648527ec72 graal/com.oracle.graal.compiler.ptx/src/com/oracle/graal/compiler/ptx/PTXBackend.java --- a/graal/com.oracle.graal.compiler.ptx/src/com/oracle/graal/compiler/ptx/PTXBackend.java Sun Aug 04 02:36:40 2013 +0200 +++ b/graal/com.oracle.graal.compiler.ptx/src/com/oracle/graal/compiler/ptx/PTXBackend.java Mon Aug 05 10:44:46 2013 +0200 @@ -30,6 +30,7 @@ import com.oracle.graal.compiler.target.*; import com.oracle.graal.lir.*; import 
com.oracle.graal.lir.asm.*; +import com.oracle.graal.lir.ptx.*; import com.oracle.graal.nodes.*; /** @@ -42,6 +43,11 @@ } @Override + public FrameMap newFrameMap() { + return new PTXFrameMap(runtime(), target, runtime().lookupRegisterConfig()); + } + + @Override public LIRGenerator newLIRGenerator(StructuredGraph graph, FrameMap frameMap, CallingConvention cc, LIR lir) { return new PTXLIRGenerator(graph, runtime(), target, frameMap, cc, lir); } diff -r 050eba23554e -r 19648527ec72 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScan.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScan.java Sun Aug 04 02:36:40 2013 +0200 +++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScan.java Mon Aug 05 10:44:46 2013 +0200 @@ -63,6 +63,8 @@ final RegisterAttributes[] registerAttributes; final Register[] registers; + boolean callKillsRegisters; + private static final int INITIAL_SPLIT_INTERVALS_CAPACITY = 32; public static class BlockData { @@ -1702,7 +1704,7 @@ } private void computeDebugInfo(IntervalWalker iw, final LIRInstruction op, LIRFrameState info) { - BitSet registerRefMap = op.destroysCallerSavedRegisters() ? null : frameMap.initRegisterRefMap(); + BitSet registerRefMap = op.destroysCallerSavedRegisters() && callKillsRegisters ? null : frameMap.initRegisterRefMap(); BitSet frameRefMap = frameMap.initFrameRefMap(); computeOopMap(iw, op, registerRefMap, frameRefMap); diff -r 050eba23554e -r 19648527ec72 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScanWalker.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScanWalker.java Sun Aug 04 02:36:40 2013 +0200 +++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScanWalker.java Mon Aug 05 10:44:46 2013 +0200 @@ -45,8 +45,6 @@ */ final class LinearScanWalker extends IntervalWalker { - private final boolean callKillsRegisters; - private Register[] availableRegs; private final int[] usePos; @@ -77,7 +75,7 @@ // The register allocator can save time not trying to find a register at a call site. 
HashSet registers = new HashSet<>(Arrays.asList(allocator.frameMap.registerConfig.getAllocatableRegisters())); registers.removeAll(Arrays.asList(allocator.frameMap.registerConfig.getCallerSaveRegisters())); - callKillsRegisters = registers.size() == 0; + allocator.callKillsRegisters = registers.size() == 0; moveResolver = new MoveResolver(allocator); spillIntervals = Util.uncheckedCast(new List[allocator.registers.length]); @@ -784,8 +782,7 @@ } boolean noAllocationPossible(Interval interval) { - - if (callKillsRegisters) { + if (allocator.callKillsRegisters) { // fast calculation of intervals that can never get a register because the // the next instruction is a call that blocks all registers // Note: this only works if a call kills all registers diff -r 050eba23554e -r 19648527ec72 graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/target/Backend.java --- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/target/Backend.java Sun Aug 04 02:36:40 2013 +0200 +++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/target/Backend.java Mon Aug 05 10:44:46 2013 +0200 @@ -47,9 +47,7 @@ return runtime; } - public FrameMap newFrameMap() { - return new FrameMap(runtime, target, runtime.lookupRegisterConfig()); - } + public abstract FrameMap newFrameMap(); public abstract LIRGenerator newLIRGenerator(StructuredGraph graph, FrameMap frameMap, CallingConvention cc, LIR lir); diff -r 050eba23554e -r 19648527ec72 graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java --- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java Sun Aug 04 02:36:40 2013 +0200 +++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java Mon Aug 05 10:44:46 2013 +0200 @@ -63,6 +63,11 @@ } @Override + public FrameMap newFrameMap() { + return new AMD64FrameMap(runtime(), target, runtime().lookupRegisterConfig()); + } + + @Override public LIRGenerator newLIRGenerator(StructuredGraph graph, FrameMap frameMap, CallingConvention cc, LIR lir) { return new AMD64HotSpotLIRGenerator(graph, runtime(), target, frameMap, cc, lir); } diff -r 050eba23554e -r 19648527ec72 graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotGraalRuntime.java --- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotGraalRuntime.java Sun Aug 04 02:36:40 2013 +0200 +++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotGraalRuntime.java Mon Aug 05 10:44:46 2013 +0200 @@ -58,7 +58,8 @@ protected TargetDescription createTarget() { final int stackFrameAlignment = 16; final int implicitNullCheckLimit = 4096; - return new TargetDescription(createArchitecture(), true, stackFrameAlignment, implicitNullCheckLimit, true); + final boolean inlineObjects = true; + return new TargetDescription(createArchitecture(), true, stackFrameAlignment, implicitNullCheckLimit, inlineObjects); } @Override diff -r 050eba23554e -r 19648527ec72 graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotBackend.java --- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotBackend.java Sun Aug 04 02:36:40 2013 +0200 +++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotBackend.java Mon Aug 05 10:44:46 2013 +0200 @@ -60,6 +60,11 @@ } @Override + public FrameMap newFrameMap() { + return new SPARCFrameMap(runtime(), target, 
runtime().lookupRegisterConfig()); + } + + @Override public LIRGenerator newLIRGenerator(StructuredGraph graph, FrameMap frameMap, CallingConvention cc, LIR lir) { return new SPARCHotSpotLIRGenerator(graph, runtime(), target, frameMap, cc, lir); } @@ -73,7 +78,7 @@ protected static void emitStackOverflowCheck(TargetMethodAssembler tasm, boolean afterFrameInit) { if (StackShadowPages.getValue() > 0) { SPARCMacroAssembler masm = (SPARCMacroAssembler) tasm.asm; - final int frameSize = tasm.frameMap.frameSize(); + final int frameSize = tasm.frameMap.totalFrameSize(); if (frameSize > 0) { int lastFramePage = frameSize / unsafe.pageSize(); // emit multiple stack bangs for methods with frames larger than a page @@ -106,9 +111,7 @@ @Override public void enter(TargetMethodAssembler tasm) { - final int alignment = target.wordSize * 2; - final int frameSize = (tasm.frameMap.frameSize() + (alignment - 1)) & ~(alignment - 1); - assert frameSize % alignment == 0 : "must preserve 2*wordSize alignment"; + final int frameSize = tasm.frameMap.totalFrameSize(); SPARCMacroAssembler masm = (SPARCMacroAssembler) tasm.asm; if (!isStub) { diff -r 050eba23554e -r 19648527ec72 graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Arithmetic.java --- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Arithmetic.java Sun Aug 04 02:36:40 2013 +0200 +++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Arithmetic.java Mon Aug 05 10:44:46 2013 +0200 @@ -34,8 +34,10 @@ import com.oracle.graal.lir.*; import com.oracle.graal.lir.asm.*; -// @formatter:off public enum AMD64Arithmetic { + + // @formatter:off + IADD, ISUB, IMUL, IDIV, IDIVREM, IREM, IUDIV, IUREM, IAND, IOR, IXOR, ISHL, ISHR, IUSHR, LADD, LSUB, LMUL, LDIV, LDIVREM, LREM, LUDIV, LUREM, LAND, LOR, LXOR, LSHL, LSHR, LUSHR, FADD, FSUB, FMUL, FDIV, FREM, FAND, FOR, FXOR, @@ -53,10 +55,13 @@ */ F2I, D2I, F2L, D2L; + // @formatter:on + /** - * Unary operation with separate source and destination operand. + * Unary operation with separate source and destination operand. */ public static class Unary2Op extends AMD64LIRInstruction { + @Opcode private final AMD64Arithmetic opcode; @Def({REG}) protected AllocatableValue result; @Use({REG, STACK}) protected AllocatableValue x; @@ -74,9 +79,10 @@ } /** - * Unary operation with single operand for source and destination. + * Unary operation with single operand for source and destination. */ public static class Unary1Op extends AMD64LIRInstruction { + @Opcode private final AMD64Arithmetic opcode; @Def({REG, HINT}) protected AllocatableValue result; @Use({REG, STACK}) protected AllocatableValue x; @@ -95,10 +101,11 @@ } /** - * Binary operation with two operands. The first source operand is combined with the destination. - * The second source operand may be a stack slot. + * Binary operation with two operands. The first source operand is combined with the + * destination. The second source operand may be a stack slot. */ public static class BinaryRegStack extends AMD64LIRInstruction { + @Opcode private final AMD64Arithmetic opcode; @Def({REG, HINT}) protected AllocatableValue result; @Use({REG, STACK}) protected AllocatableValue x; @@ -126,10 +133,11 @@ } /** - * Binary operation with two operands. The first source operand is combined with the destination. - * The second source operand must be a register. + * Binary operation with two operands. The first source operand is combined with the + * destination. The second source operand must be a register. 
*/ public static class BinaryRegReg extends AMD64LIRInstruction { + @Opcode private final AMD64Arithmetic opcode; @Def({REG, HINT}) protected AllocatableValue result; @Use({REG, STACK}) protected AllocatableValue x; @@ -160,6 +168,7 @@ * Binary operation with single source/destination operand and one constant. */ public static class BinaryRegConst extends AMD64LIRInstruction { + @Opcode private final AMD64Arithmetic opcode; @Def({REG, HINT}) protected AllocatableValue result; @Use({REG, STACK}) protected AllocatableValue x; @@ -186,9 +195,11 @@ } /** - * Commutative binary operation with two operands. One of the operands is combined with the result. + * Commutative binary operation with two operands. One of the operands is combined with the + * result. */ public static class BinaryCommutative extends AMD64LIRInstruction { + @Opcode private final AMD64Arithmetic opcode; @Def({REG, HINT}) protected AllocatableValue result; @Use({REG, STACK}) protected AllocatableValue x; @@ -222,6 +233,7 @@ * Binary operation with separate source and destination and one constant operand. */ public static class BinaryRegStackConst extends AMD64LIRInstruction { + @Opcode private final AMD64Arithmetic opcode; @Def({REG}) protected AllocatableValue result; @Use({REG, STACK}) protected AllocatableValue x; @@ -248,6 +260,7 @@ } public static class DivRemOp extends AMD64LIRInstruction { + @Opcode private final AMD64Arithmetic opcode; @Def protected AllocatableValue divResult; @Def protected AllocatableValue remResult; @@ -272,7 +285,8 @@ @Override protected void verify() { super.verify(); - // left input in rax, right input in any register but rax and rdx, result quotient in rax, result remainder in rdx + // left input in rax, right input in any register but rax and rdx, result quotient in + // rax, result remainder in rdx assert asRegister(x).equals(AMD64.rax); assert differentRegisters(y, AMD64.rax.asValue(), AMD64.rdx.asValue()); verifyKind(opcode, divResult, x, y); @@ -281,6 +295,7 @@ } public static class FPDivRemOp extends AMD64LIRInstruction { + @Opcode private final AMD64Arithmetic opcode; @Def protected AllocatableValue result; @Use protected AllocatableValue x; @@ -342,11 +357,20 @@ @SuppressWarnings("unused") protected static void emit(TargetMethodAssembler tasm, AMD64MacroAssembler masm, AMD64Arithmetic opcode, AllocatableValue result) { switch (opcode) { - case INEG: masm.negl(asIntReg(result)); break; - case LNEG: masm.negq(asLongReg(result)); break; - case L2I: masm.andl(asIntReg(result), 0xFFFFFFFF); break; - case I2C: masm.andl(asIntReg(result), 0xFFFF); break; - default: throw GraalInternalError.shouldNotReachHere(); + case INEG: + masm.negl(asIntReg(result)); + break; + case LNEG: + masm.negq(asLongReg(result)); + break; + case L2I: + masm.andl(asIntReg(result), 0xFFFFFFFF); + break; + case I2C: + masm.andl(asIntReg(result), 0xFFFF); + break; + default: + throw GraalInternalError.shouldNotReachHere(); } } @@ -354,51 +378,139 @@ int exceptionOffset = -1; if (isRegister(src)) { switch (opcode) { - case IADD: masm.addl(asIntReg(dst), asIntReg(src)); break; - case ISUB: masm.subl(asIntReg(dst), asIntReg(src)); break; - case IAND: masm.andl(asIntReg(dst), asIntReg(src)); break; - case IMUL: masm.imull(asIntReg(dst), asIntReg(src)); break; - case IOR: masm.orl(asIntReg(dst), asIntReg(src)); break; - case IXOR: masm.xorl(asIntReg(dst), asIntReg(src)); break; - case ISHL: assert asIntReg(src).equals(AMD64.rcx); masm.shll(asIntReg(dst)); break; - case ISHR: assert asIntReg(src).equals(AMD64.rcx); 
masm.sarl(asIntReg(dst)); break; - case IUSHR: assert asIntReg(src).equals(AMD64.rcx); masm.shrl(asIntReg(dst)); break; + case IADD: + masm.addl(asIntReg(dst), asIntReg(src)); + break; + case ISUB: + masm.subl(asIntReg(dst), asIntReg(src)); + break; + case IAND: + masm.andl(asIntReg(dst), asIntReg(src)); + break; + case IMUL: + masm.imull(asIntReg(dst), asIntReg(src)); + break; + case IOR: + masm.orl(asIntReg(dst), asIntReg(src)); + break; + case IXOR: + masm.xorl(asIntReg(dst), asIntReg(src)); + break; + case ISHL: + assert asIntReg(src).equals(AMD64.rcx); + masm.shll(asIntReg(dst)); + break; + case ISHR: + assert asIntReg(src).equals(AMD64.rcx); + masm.sarl(asIntReg(dst)); + break; + case IUSHR: + assert asIntReg(src).equals(AMD64.rcx); + masm.shrl(asIntReg(dst)); + break; - case LADD: masm.addq(asLongReg(dst), asLongReg(src)); break; - case LSUB: masm.subq(asLongReg(dst), asLongReg(src)); break; - case LMUL: masm.imulq(asLongReg(dst), asLongReg(src)); break; - case LAND: masm.andq(asLongReg(dst), asLongReg(src)); break; - case LOR: masm.orq(asLongReg(dst), asLongReg(src)); break; - case LXOR: masm.xorq(asLongReg(dst), asLongReg(src)); break; - case LSHL: assert asIntReg(src).equals(AMD64.rcx); masm.shlq(asLongReg(dst)); break; - case LSHR: assert asIntReg(src).equals(AMD64.rcx); masm.sarq(asLongReg(dst)); break; - case LUSHR: assert asIntReg(src).equals(AMD64.rcx); masm.shrq(asLongReg(dst)); break; + case LADD: + masm.addq(asLongReg(dst), asLongReg(src)); + break; + case LSUB: + masm.subq(asLongReg(dst), asLongReg(src)); + break; + case LMUL: + masm.imulq(asLongReg(dst), asLongReg(src)); + break; + case LAND: + masm.andq(asLongReg(dst), asLongReg(src)); + break; + case LOR: + masm.orq(asLongReg(dst), asLongReg(src)); + break; + case LXOR: + masm.xorq(asLongReg(dst), asLongReg(src)); + break; + case LSHL: + assert asIntReg(src).equals(AMD64.rcx); + masm.shlq(asLongReg(dst)); + break; + case LSHR: + assert asIntReg(src).equals(AMD64.rcx); + masm.sarq(asLongReg(dst)); + break; + case LUSHR: + assert asIntReg(src).equals(AMD64.rcx); + masm.shrq(asLongReg(dst)); + break; - case FADD: masm.addss(asFloatReg(dst), asFloatReg(src)); break; - case FSUB: masm.subss(asFloatReg(dst), asFloatReg(src)); break; - case FMUL: masm.mulss(asFloatReg(dst), asFloatReg(src)); break; - case FDIV: masm.divss(asFloatReg(dst), asFloatReg(src)); break; - case FAND: masm.andps(asFloatReg(dst), asFloatReg(src)); break; - case FOR: masm.orps(asFloatReg(dst), asFloatReg(src)); break; - case FXOR: masm.xorps(asFloatReg(dst), asFloatReg(src)); break; + case FADD: + masm.addss(asFloatReg(dst), asFloatReg(src)); + break; + case FSUB: + masm.subss(asFloatReg(dst), asFloatReg(src)); + break; + case FMUL: + masm.mulss(asFloatReg(dst), asFloatReg(src)); + break; + case FDIV: + masm.divss(asFloatReg(dst), asFloatReg(src)); + break; + case FAND: + masm.andps(asFloatReg(dst), asFloatReg(src)); + break; + case FOR: + masm.orps(asFloatReg(dst), asFloatReg(src)); + break; + case FXOR: + masm.xorps(asFloatReg(dst), asFloatReg(src)); + break; - case DADD: masm.addsd(asDoubleReg(dst), asDoubleReg(src)); break; - case DSUB: masm.subsd(asDoubleReg(dst), asDoubleReg(src)); break; - case DMUL: masm.mulsd(asDoubleReg(dst), asDoubleReg(src)); break; - case DDIV: masm.divsd(asDoubleReg(dst), asDoubleReg(src)); break; - case DAND: masm.andpd(asDoubleReg(dst), asDoubleReg(src)); break; - case DOR: masm.orpd(asDoubleReg(dst), asDoubleReg(src)); break; - case DXOR: masm.xorpd(asDoubleReg(dst), asDoubleReg(src)); break; + case DADD: + 
masm.addsd(asDoubleReg(dst), asDoubleReg(src)); + break; + case DSUB: + masm.subsd(asDoubleReg(dst), asDoubleReg(src)); + break; + case DMUL: + masm.mulsd(asDoubleReg(dst), asDoubleReg(src)); + break; + case DDIV: + masm.divsd(asDoubleReg(dst), asDoubleReg(src)); + break; + case DAND: + masm.andpd(asDoubleReg(dst), asDoubleReg(src)); + break; + case DOR: + masm.orpd(asDoubleReg(dst), asDoubleReg(src)); + break; + case DXOR: + masm.xorpd(asDoubleReg(dst), asDoubleReg(src)); + break; - case I2B: masm.movsxb(asIntReg(dst), asIntReg(src)); break; - case I2S: masm.movsxw(asIntReg(dst), asIntReg(src)); break; - case I2L: masm.movslq(asLongReg(dst), asIntReg(src)); break; - case F2D: masm.cvtss2sd(asDoubleReg(dst), asFloatReg(src)); break; - case D2F: masm.cvtsd2ss(asFloatReg(dst), asDoubleReg(src)); break; - case I2F: masm.cvtsi2ssl(asFloatReg(dst), asIntReg(src)); break; - case I2D: masm.cvtsi2sdl(asDoubleReg(dst), asIntReg(src)); break; - case L2F: masm.cvtsi2ssq(asFloatReg(dst), asLongReg(src)); break; - case L2D: masm.cvtsi2sdq(asDoubleReg(dst), asLongReg(src)); break; + case I2B: + masm.movsxb(asIntReg(dst), asIntReg(src)); + break; + case I2S: + masm.movsxw(asIntReg(dst), asIntReg(src)); + break; + case I2L: + masm.movslq(asLongReg(dst), asIntReg(src)); + break; + case F2D: + masm.cvtss2sd(asDoubleReg(dst), asFloatReg(src)); + break; + case D2F: + masm.cvtsd2ss(asFloatReg(dst), asDoubleReg(src)); + break; + case I2F: + masm.cvtsi2ssl(asFloatReg(dst), asIntReg(src)); + break; + case I2D: + masm.cvtsi2sdl(asDoubleReg(dst), asIntReg(src)); + break; + case L2F: + masm.cvtsi2ssq(asFloatReg(dst), asLongReg(src)); + break; + case L2D: + masm.cvtsi2sdq(asDoubleReg(dst), asLongReg(src)); + break; case F2I: masm.cvttss2sil(asIntReg(dst), asFloatReg(src)); break; @@ -411,10 +523,18 @@ case D2L: masm.cvttsd2siq(asLongReg(dst), asDoubleReg(src)); break; - case MOV_I2F: masm.movdl(asFloatReg(dst), asIntReg(src)); break; - case MOV_L2D: masm.movdq(asDoubleReg(dst), asLongReg(src)); break; - case MOV_F2I: masm.movdl(asIntReg(dst), asFloatReg(src)); break; - case MOV_D2L: masm.movdq(asLongReg(dst), asDoubleReg(src)); break; + case MOV_I2F: + masm.movdl(asFloatReg(dst), asIntReg(src)); + break; + case MOV_L2D: + masm.movdq(asDoubleReg(dst), asLongReg(src)); + break; + case MOV_F2I: + masm.movdl(asIntReg(dst), asFloatReg(src)); + break; + case MOV_D2L: + masm.movdq(asLongReg(dst), asDoubleReg(src)); + break; case IDIVREM: case IDIV: @@ -452,78 +572,201 @@ } } else if (isConstant(src)) { switch (opcode) { - case IADD: masm.incrementl(asIntReg(dst), tasm.asIntConst(src)); break; - case ISUB: masm.decrementl(asIntReg(dst), tasm.asIntConst(src)); break; - case IMUL: masm.imull(asIntReg(dst), asIntReg(dst), tasm.asIntConst(src)); break; - case IAND: masm.andl(asIntReg(dst), tasm.asIntConst(src)); break; - case IOR: masm.orl(asIntReg(dst), tasm.asIntConst(src)); break; - case IXOR: masm.xorl(asIntReg(dst), tasm.asIntConst(src)); break; - case ISHL: masm.shll(asIntReg(dst), tasm.asIntConst(src) & 31); break; - case ISHR: masm.sarl(asIntReg(dst), tasm.asIntConst(src) & 31); break; - case IUSHR:masm.shrl(asIntReg(dst), tasm.asIntConst(src) & 31); break; + case IADD: + masm.incrementl(asIntReg(dst), tasm.asIntConst(src)); + break; + case ISUB: + masm.decrementl(asIntReg(dst), tasm.asIntConst(src)); + break; + case IMUL: + masm.imull(asIntReg(dst), asIntReg(dst), tasm.asIntConst(src)); + break; + case IAND: + masm.andl(asIntReg(dst), tasm.asIntConst(src)); + break; + case IOR: + masm.orl(asIntReg(dst), 
tasm.asIntConst(src)); + break; + case IXOR: + masm.xorl(asIntReg(dst), tasm.asIntConst(src)); + break; + case ISHL: + masm.shll(asIntReg(dst), tasm.asIntConst(src) & 31); + break; + case ISHR: + masm.sarl(asIntReg(dst), tasm.asIntConst(src) & 31); + break; + case IUSHR: + masm.shrl(asIntReg(dst), tasm.asIntConst(src) & 31); + break; - case LADD: masm.addq(asLongReg(dst), tasm.asIntConst(src)); break; - case LSUB: masm.subq(asLongReg(dst), tasm.asIntConst(src)); break; - case LMUL: masm.imulq(asLongReg(dst), asLongReg(dst), tasm.asIntConst(src)); break; - case LAND: masm.andq(asLongReg(dst), tasm.asIntConst(src)); break; - case LOR: masm.orq(asLongReg(dst), tasm.asIntConst(src)); break; - case LXOR: masm.xorq(asLongReg(dst), tasm.asIntConst(src)); break; - case LSHL: masm.shlq(asLongReg(dst), tasm.asIntConst(src) & 63); break; - case LSHR: masm.sarq(asLongReg(dst), tasm.asIntConst(src) & 63); break; - case LUSHR:masm.shrq(asLongReg(dst), tasm.asIntConst(src) & 63); break; + case LADD: + masm.addq(asLongReg(dst), tasm.asIntConst(src)); + break; + case LSUB: + masm.subq(asLongReg(dst), tasm.asIntConst(src)); + break; + case LMUL: + masm.imulq(asLongReg(dst), asLongReg(dst), tasm.asIntConst(src)); + break; + case LAND: + masm.andq(asLongReg(dst), tasm.asIntConst(src)); + break; + case LOR: + masm.orq(asLongReg(dst), tasm.asIntConst(src)); + break; + case LXOR: + masm.xorq(asLongReg(dst), tasm.asIntConst(src)); + break; + case LSHL: + masm.shlq(asLongReg(dst), tasm.asIntConst(src) & 63); + break; + case LSHR: + masm.sarq(asLongReg(dst), tasm.asIntConst(src) & 63); + break; + case LUSHR: + masm.shrq(asLongReg(dst), tasm.asIntConst(src) & 63); + break; - case FADD: masm.addss(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src)); break; - case FSUB: masm.subss(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src)); break; - case FMUL: masm.mulss(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src)); break; - case FAND: masm.andps(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src, 16)); break; - case FOR: masm.orps(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src, 16)); break; - case FXOR: masm.xorps(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src, 16)); break; - case FDIV: masm.divss(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src)); break; + case FADD: + masm.addss(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src)); + break; + case FSUB: + masm.subss(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src)); + break; + case FMUL: + masm.mulss(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src)); + break; + case FAND: + masm.andps(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src, 16)); + break; + case FOR: + masm.orps(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src, 16)); + break; + case FXOR: + masm.xorps(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src, 16)); + break; + case FDIV: + masm.divss(asFloatReg(dst), (AMD64Address) tasm.asFloatConstRef(src)); + break; - case DADD: masm.addsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src)); break; - case DSUB: masm.subsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src)); break; - case DMUL: masm.mulsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src)); break; - case DDIV: masm.divsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src)); break; - case DAND: masm.andpd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src, 16)); break; - case DOR: masm.orpd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src, 16)); 
break; - case DXOR: masm.xorpd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src, 16)); break; - default: throw GraalInternalError.shouldNotReachHere(); + case DADD: + masm.addsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src)); + break; + case DSUB: + masm.subsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src)); + break; + case DMUL: + masm.mulsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src)); + break; + case DDIV: + masm.divsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src)); + break; + case DAND: + masm.andpd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src, 16)); + break; + case DOR: + masm.orpd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src, 16)); + break; + case DXOR: + masm.xorpd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleConstRef(src, 16)); + break; + default: + throw GraalInternalError.shouldNotReachHere(); } } else { switch (opcode) { - case IADD: masm.addl(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); break; - case ISUB: masm.subl(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); break; - case IAND: masm.andl(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); break; - case IMUL: masm.imull(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); break; - case IOR: masm.orl(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); break; - case IXOR: masm.xorl(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); break; + case IADD: + masm.addl(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); + break; + case ISUB: + masm.subl(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); + break; + case IAND: + masm.andl(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); + break; + case IMUL: + masm.imull(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); + break; + case IOR: + masm.orl(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); + break; + case IXOR: + masm.xorl(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); + break; - case LADD: masm.addq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src)); break; - case LSUB: masm.subq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src)); break; - case LMUL: masm.imulq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src)); break; - case LAND: masm.andq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src)); break; - case LOR: masm.orq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src)); break; - case LXOR: masm.xorq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src)); break; + case LADD: + masm.addq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src)); + break; + case LSUB: + masm.subq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src)); + break; + case LMUL: + masm.imulq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src)); + break; + case LAND: + masm.andq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src)); + break; + case LOR: + masm.orq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src)); + break; + case LXOR: + masm.xorq(asLongReg(dst), (AMD64Address) tasm.asLongAddr(src)); + break; - case FADD: masm.addss(asFloatReg(dst), (AMD64Address) tasm.asFloatAddr(src)); break; - case FSUB: masm.subss(asFloatReg(dst), (AMD64Address) tasm.asFloatAddr(src)); break; - case FMUL: masm.mulss(asFloatReg(dst), (AMD64Address) tasm.asFloatAddr(src)); break; - case FDIV: masm.divss(asFloatReg(dst), (AMD64Address) tasm.asFloatAddr(src)); break; + case FADD: + masm.addss(asFloatReg(dst), (AMD64Address) tasm.asFloatAddr(src)); + break; + case FSUB: + masm.subss(asFloatReg(dst), (AMD64Address) tasm.asFloatAddr(src)); + break; + case FMUL: + masm.mulss(asFloatReg(dst), 
(AMD64Address) tasm.asFloatAddr(src)); + break; + case FDIV: + masm.divss(asFloatReg(dst), (AMD64Address) tasm.asFloatAddr(src)); + break; - case DADD: masm.addsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleAddr(src)); break; - case DSUB: masm.subsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleAddr(src)); break; - case DMUL: masm.mulsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleAddr(src)); break; - case DDIV: masm.divsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleAddr(src)); break; + case DADD: + masm.addsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleAddr(src)); + break; + case DSUB: + masm.subsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleAddr(src)); + break; + case DMUL: + masm.mulsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleAddr(src)); + break; + case DDIV: + masm.divsd(asDoubleReg(dst), (AMD64Address) tasm.asDoubleAddr(src)); + break; - case I2B: masm.movsxb(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); break; - case I2S: masm.movsxw(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); break; - case I2L: masm.movslq(asLongReg(dst), (AMD64Address) tasm.asIntAddr(src)); break; - case F2D: masm.cvtss2sd(asDoubleReg(dst), (AMD64Address) tasm.asFloatAddr(src)); break; - case D2F: masm.cvtsd2ss(asFloatReg(dst), (AMD64Address) tasm.asDoubleAddr(src)); break; - case I2F: masm.cvtsi2ssl(asFloatReg(dst), (AMD64Address) tasm.asIntAddr(src)); break; - case I2D: masm.cvtsi2sdl(asDoubleReg(dst), (AMD64Address) tasm.asIntAddr(src)); break; - case L2F: masm.cvtsi2ssq(asFloatReg(dst), (AMD64Address) tasm.asLongAddr(src)); break; - case L2D: masm.cvtsi2sdq(asDoubleReg(dst), (AMD64Address) tasm.asLongAddr(src)); break; + case I2B: + masm.movsxb(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); + break; + case I2S: + masm.movsxw(asIntReg(dst), (AMD64Address) tasm.asIntAddr(src)); + break; + case I2L: + masm.movslq(asLongReg(dst), (AMD64Address) tasm.asIntAddr(src)); + break; + case F2D: + masm.cvtss2sd(asDoubleReg(dst), (AMD64Address) tasm.asFloatAddr(src)); + break; + case D2F: + masm.cvtsd2ss(asFloatReg(dst), (AMD64Address) tasm.asDoubleAddr(src)); + break; + case I2F: + masm.cvtsi2ssl(asFloatReg(dst), (AMD64Address) tasm.asIntAddr(src)); + break; + case I2D: + masm.cvtsi2sdl(asDoubleReg(dst), (AMD64Address) tasm.asIntAddr(src)); + break; + case L2F: + masm.cvtsi2ssq(asFloatReg(dst), (AMD64Address) tasm.asLongAddr(src)); + break; + case L2D: + masm.cvtsi2sdq(asDoubleReg(dst), (AMD64Address) tasm.asLongAddr(src)); + break; case F2I: masm.cvttss2sil(asIntReg(dst), (AMD64Address) tasm.asFloatAddr(src)); break; @@ -536,12 +779,21 @@ case D2L: masm.cvttsd2siq(asLongReg(dst), (AMD64Address) tasm.asDoubleAddr(src)); break; - case MOV_I2F: masm.movss(asFloatReg(dst), (AMD64Address) tasm.asIntAddr(src)); break; - case MOV_L2D: masm.movsd(asDoubleReg(dst), (AMD64Address) tasm.asLongAddr(src)); break; - case MOV_F2I: masm.movl(asIntReg(dst), (AMD64Address) tasm.asFloatAddr(src)); break; - case MOV_D2L: masm.movq(asLongReg(dst), (AMD64Address) tasm.asDoubleAddr(src)); break; + case MOV_I2F: + masm.movss(asFloatReg(dst), (AMD64Address) tasm.asIntAddr(src)); + break; + case MOV_L2D: + masm.movsd(asDoubleReg(dst), (AMD64Address) tasm.asLongAddr(src)); + break; + case MOV_F2I: + masm.movl(asIntReg(dst), (AMD64Address) tasm.asFloatAddr(src)); + break; + case MOV_D2L: + masm.movq(asLongReg(dst), (AMD64Address) tasm.asDoubleAddr(src)); + break; - default: throw GraalInternalError.shouldNotReachHere(); + default: + throw GraalInternalError.shouldNotReachHere(); } } @@ -552,10 +804,10 @@ } private 
static void verifyKind(AMD64Arithmetic opcode, Value result, Value x, Value y) { - assert (opcode.name().startsWith("I") && result.getKind() == Kind.Int && x.getKind().getStackKind() == Kind.Int && y.getKind().getStackKind() == Kind.Int) - || (opcode.name().startsWith("L") && result.getKind() == Kind.Long && x.getKind() == Kind.Long && y.getKind() == Kind.Long) - || (opcode.name().startsWith("F") && result.getKind() == Kind.Float && x.getKind() == Kind.Float && y.getKind() == Kind.Float) - || (opcode.name().startsWith("D") && result.getKind() == Kind.Double && x.getKind() == Kind.Double && y.getKind() == Kind.Double) - || (opcode.name().matches(".U?SH.") && result.getKind() == x.getKind() && y.getKind() == Kind.Int && (isConstant(y) || asRegister(y).equals(AMD64.rcx))); + assert (opcode.name().startsWith("I") && result.getKind() == Kind.Int && x.getKind().getStackKind() == Kind.Int && y.getKind().getStackKind() == Kind.Int) || + (opcode.name().startsWith("L") && result.getKind() == Kind.Long && x.getKind() == Kind.Long && y.getKind() == Kind.Long) || + (opcode.name().startsWith("F") && result.getKind() == Kind.Float && x.getKind() == Kind.Float && y.getKind() == Kind.Float) || + (opcode.name().startsWith("D") && result.getKind() == Kind.Double && x.getKind() == Kind.Double && y.getKind() == Kind.Double) || + (opcode.name().matches(".U?SH.") && result.getKind() == x.getKind() && y.getKind() == Kind.Int && (isConstant(y) || asRegister(y).equals(AMD64.rcx))); } } diff -r 050eba23554e -r 19648527ec72 graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ControlFlow.java --- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ControlFlow.java Sun Aug 04 02:36:40 2013 +0200 +++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ControlFlow.java Mon Aug 05 10:44:46 2013 +0200 @@ -168,11 +168,11 @@ masm.jcc(ConditionFlag.Equal, keyTargets[i].label()); } } else if (key.getKind() == Kind.Object) { - Register intKey = asObjectReg(key); + Register objectKey = asObjectReg(key); Register temp = asObjectReg(scratch); for (int i = 0; i < keyConstants.length; i++) { AMD64Move.move(tasm, masm, temp.asValue(Kind.Object), keyConstants[i]); - masm.cmpptr(intKey, temp); + masm.cmpptr(objectKey, temp); masm.jcc(ConditionFlag.Equal, keyTargets[i].label()); } } else { diff -r 050eba23554e -r 19648527ec72 graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64FrameMap.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64FrameMap.java Mon Aug 05 10:44:46 2013 +0200 @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.lir.amd64; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.lir.*; + +/** + * AMD64 specific frame map. + * + * This is the format of an AMD64 stack frame: + * + *
+ *   Base       Contents
+ * 
+ *            :                                :  -----
+ *   caller   | incoming overflow argument n   |    ^
+ *   frame    :     ...                        :    | positive
+ *            | incoming overflow argument 0   |    | offsets
+ *   ---------+--------------------------------+---------------------
+ *            | return address                 |    |            ^
+ *   current  +--------------------------------+    |            |    -----
+ *   frame    |                                |    |            |      ^
+ *            : callee save area               :    |            |      |
+ *            |                                |    |            |      |
+ *            +--------------------------------+    |            |      |
+ *            | spill slot 0                   |    | negative   |      |
+ *            :     ...                        :    v offsets    |      |
+ *            | spill slot n                   |  -----        total  frame
+ *            +--------------------------------+               frame  size
+ *            | alignment padding              |               size     |
+ *            +--------------------------------+  -----          |      |
+ *            | outgoing overflow argument n   |    ^            |      |
+ *            :     ...                        :    | positive   |      |
+ *            | outgoing overflow argument 0   |    | offsets    v      v
+ *    %sp-->  +--------------------------------+---------------------------
+ * 
+ * 
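For reference, the arithmetic behind this layout is the alignFrameSize()/totalFrameSize() pair that AMD64FrameMap introduces below. A minimal illustrative sketch (not from the changeset), assuming the 16-byte stackFrameAlignment configured in AMD64HotSpotGraalRuntime and an 8-byte return address slot:

    // Illustrative only, not part of the patch: mirrors AMD64FrameMap.alignFrameSize and
    // totalFrameSize, assuming stackAlignment = 16 and an 8-byte return address slot.
    final class AMD64FrameSizeExample {

        static int alignFrameSize(int size, int stackAlignment, int returnAddressSize) {
            // Round (size + return address) up to the alignment, then subtract the
            // return address again so that frameSize() still excludes it.
            int x = size + returnAddressSize + (stackAlignment - 1);
            return (x / stackAlignment) * stackAlignment - returnAddressSize;
        }

        public static void main(String[] args) {
            int aligned = alignFrameSize(20, 16, 8); // 20 + 8 rounds up to 32, minus 8 -> 24
            int total = aligned + 8;                 // totalFrameSize(): 32, a multiple of 16
            System.out.println(aligned + " / " + total);
        }
    }

Rounding size plus the return address (rather than size alone) keeps totalFrameSize() a multiple of the stack alignment, so the alignment required at call sites is preserved across nested calls even though frameSize() itself excludes the return address slot.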
+ *
+ * The spill slot area also includes stack allocated memory blocks (ALLOCA blocks). The size of such
+ * a block may be greater than the size of a normal spill slot or the word size.
+ *

+ * A runtime can reserve space at the beginning of the overflow argument area. The calling + * convention can specify that the first overflow stack argument is not at offset 0, but at a + * specified offset. Use {@link CodeCacheProvider#getMinimumOutgoingSize()} to make sure that + * call-free methods also have this space reserved. Then the VM can use the memory at offset 0 + * relative to the stack pointer. + */ +public final class AMD64FrameMap extends FrameMap { + + public AMD64FrameMap(CodeCacheProvider runtime, TargetDescription target, RegisterConfig registerConfig) { + super(runtime, target, registerConfig); + // (negative) offset relative to sp + total frame size + initialSpillSize = returnAddressSize() + calleeSaveAreaSize(); + spillSize = initialSpillSize; + } + + @Override + public int totalFrameSize() { + return frameSize() + returnAddressSize(); + } + + @Override + public int currentFrameSize() { + return alignFrameSize(outgoingSize + spillSize - returnAddressSize()); + } + + @Override + protected int alignFrameSize(int size) { + int x = size + returnAddressSize() + (target.stackAlignment - 1); + return (x / target.stackAlignment) * target.stackAlignment - returnAddressSize(); + } + + @Override + public int offsetToCalleeSaveArea() { + return frameSize() - calleeSaveAreaSize(); + } + + @Override + protected StackSlot allocateNewSpillSlot(PlatformKind kind, int additionalOffset) { + return StackSlot.get(kind, -spillSize + additionalOffset, true); + } +} diff -r 050eba23554e -r 19648527ec72 graal/com.oracle.graal.lir.hsail/src/com/oracle/graal/lir/hsail/HSAILFrameMap.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir.hsail/src/com/oracle/graal/lir/hsail/HSAILFrameMap.java Mon Aug 05 10:44:46 2013 +0200 @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.lir.hsail; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.lir.*; + +/** + * HSAIL specific frame map. + * + * This is the format of a HSAIL stack frame: + * + *

+ * TODO stack frame layout
+ * 
+ */ +public final class HSAILFrameMap extends FrameMap { + + public HSAILFrameMap(CodeCacheProvider runtime, TargetDescription target, RegisterConfig registerConfig) { + super(runtime, target, registerConfig); + } + + @Override + public int totalFrameSize() { + // FIXME return some sane values + return frameSize(); + } + + @Override + public int currentFrameSize() { + // FIXME return some sane values + return alignFrameSize(outgoingSize + spillSize); + } + + @Override + protected int alignFrameSize(int size) { + // FIXME return some sane values + int x = size + (target.stackAlignment - 1); + return (x / target.stackAlignment) * target.stackAlignment; + } + + @Override + public int offsetToCalleeSaveArea() { + return frameSize() - calleeSaveAreaSize(); + } + + @Override + protected StackSlot allocateNewSpillSlot(PlatformKind kind, int additionalOffset) { + return StackSlot.get(kind, -spillSize + additionalOffset, true); + } +} diff -r 050eba23554e -r 19648527ec72 graal/com.oracle.graal.lir.ptx/src/com/oracle/graal/lir/ptx/PTXFrameMap.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir.ptx/src/com/oracle/graal/lir/ptx/PTXFrameMap.java Mon Aug 05 10:44:46 2013 +0200 @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.lir.ptx; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.lir.*; + +/** + * PTX specific frame map. + * + * This is the format of a PTX stack frame: + * + *
+ * TODO stack frame layout
+ * 
+ */ +public final class PTXFrameMap extends FrameMap { + + public PTXFrameMap(CodeCacheProvider runtime, TargetDescription target, RegisterConfig registerConfig) { + super(runtime, target, registerConfig); + } + + @Override + public int totalFrameSize() { + // FIXME return some sane values + return frameSize(); + } + + @Override + public int currentFrameSize() { + // FIXME return some sane values + return alignFrameSize(outgoingSize + spillSize); + } + + @Override + protected int alignFrameSize(int size) { + // FIXME return some sane values + int x = size + (target.stackAlignment - 1); + return (x / target.stackAlignment) * target.stackAlignment; + } + + @Override + public int offsetToCalleeSaveArea() { + return frameSize() - calleeSaveAreaSize(); + } + + @Override + protected StackSlot allocateNewSpillSlot(PlatformKind kind, int additionalOffset) { + return StackSlot.get(kind, -spillSize + additionalOffset, true); + } +} diff -r 050eba23554e -r 19648527ec72 graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCFrameMap.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCFrameMap.java Mon Aug 05 10:44:46 2013 +0200 @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.lir.sparc; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.lir.*; + +/** + * SPARC specific frame map. + * + * This is the format of a SPARC stack frame: + * + *
+ *   Base       Contents
+ * 
+ *            :                                :  -----
+ *   caller   | incoming overflow argument n   |    ^
+ *   frame    :     ...                        :    | positive
+ *            | incoming overflow argument 0   |    | offsets
+ *   ---------+--------------------------------+---------------------------
+ *            | spill slot 0                   |    | negative   ^      ^
+ *            :     ...                        :    v offsets    |      |
+ *            | spill slot n                   |  -----        total    |
+ *            +--------------------------------+               frame    |
+ *   current  | alignment padding              |               size     |
+ *   frame    +--------------------------------+  -----          |      |
+ *            | outgoing overflow argument n   |    ^            |    frame
+ *            :     ...                        :    | positive   |    size
+ *            | outgoing overflow argument 0   |    | offsets    |      |
+ *            +--------------------------------+    |            |      |
+ *            | return address                 |    |            |      |
+ *            +--------------------------------+    |            |      |
+ *            |                                |    |            |      |
+ *            : callee save area               :    |            |      |
+ *            |                                |    |            v      v
+ *    %sp-->  +--------------------------------+---------------------------
+ * 
+ * 
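The SPARC variant makes the contrast with AMD64 explicit: as the layout above shows, the return address slot and the callee save area sit inside the frame next to %sp, so SPARCFrameMap rounds the whole frame to the stack alignment and totalFrameSize() is simply frameSize(). A minimal sketch of that arithmetic (illustrative, not from the changeset; the 16-byte alignment is assumed for the example):

    // Illustrative only, not part of the patch: mirrors SPARCFrameMap.alignFrameSize, where
    // the callee save area, return address, outgoing arguments and spill slots are rounded
    // up together and no return-address correction is needed.
    final class SPARCFrameSizeExample {

        static int alignFrameSize(int size, int stackAlignment) {
            int x = size + (stackAlignment - 1);
            return (x / stackAlignment) * stackAlignment;
        }

        public static void main(String[] args) {
            // e.g. an 8-byte return address slot plus 36 bytes of spill/outgoing data
            System.out.println(alignFrameSize(8 + 36, 16)); // -> 48
        }
    }

This is also why the SPARCHotSpotBackend changes earlier in the patch can drop the hand-rolled 2 * wordSize alignment in enter() and let both enter() and emitStackOverflowCheck() query tasm.frameMap.totalFrameSize() directly.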
+ *
+ * The spill slot area also includes stack allocated memory blocks (ALLOCA blocks). The size of such
+ * a block may be greater than the size of a normal spill slot or the word size.
+ *

+ * A runtime can reserve space at the beginning of the overflow argument area. The calling + * convention can specify that the first overflow stack argument is not at offset 0, but at a + * specified offset. Use {@link CodeCacheProvider#getMinimumOutgoingSize()} to make sure that + * call-free methods also have this space reserved. Then the VM can use the memory at offset 0 + * relative to the stack pointer. + */ +public final class SPARCFrameMap extends FrameMap { + + public SPARCFrameMap(CodeCacheProvider runtime, TargetDescription target, RegisterConfig registerConfig) { + super(runtime, target, registerConfig); + // offset relative to sp + total frame size + initialSpillSize = 0; + spillSize = initialSpillSize; + } + + @Override + public int totalFrameSize() { + return frameSize(); + } + + @Override + public int currentFrameSize() { + return alignFrameSize(calleeSaveAreaSize() + returnAddressSize() + outgoingSize + spillSize); + } + + @Override + protected int alignFrameSize(int size) { + int x = size + (target.stackAlignment - 1); + return (x / target.stackAlignment) * target.stackAlignment; + } + + @Override + public int offsetToCalleeSaveArea() { + return 0; + } + + @Override + protected StackSlot allocateNewSpillSlot(PlatformKind kind, int additionalOffset) { + return StackSlot.get(kind, -spillSize + additionalOffset, true); + } +} diff -r 050eba23554e -r 19648527ec72 graal/com.oracle.graal.lir/src/com/oracle/graal/lir/FrameMap.java --- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/FrameMap.java Sun Aug 04 02:36:40 2013 +0200 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/FrameMap.java Mon Aug 05 10:44:46 2013 +0200 @@ -37,46 +37,8 @@ * area and the spill are can grow until then. Therefore, outgoing arguments are indexed from the * stack pointer, while spill slots are indexed from the beginning of the frame (and the total frame * size has to be added to get the actual offset from the stack pointer). - *

- * This is the format of a stack frame: - * - *

- *   Base       Contents
- * 
- *            :                                :  -----
- *   caller   | incoming overflow argument n   |    ^
- *   frame    :     ...                        :    | positive
- *            | incoming overflow argument 0   |    | offsets
- *   ---------+--------------------------------+---------------------
- *            | return address                 |    |            ^
- *   current  +--------------------------------+    |            |    -----
- *   frame    |                                |    |            |      ^
- *            : callee save area               :    |            |      |
- *            |                                |    |            |      |
- *            +--------------------------------+    |            |      |
- *            | spill slot 0                   |    | negative   |      |
- *            :     ...                        :    v offsets    |      |
- *            | spill slot n                   |  -----        total  frame
- *            +--------------------------------+               frame  size
- *            | alignment padding              |               size     |
- *            +--------------------------------+  -----          |      |
- *            | outgoing overflow argument n   |    ^            |      |
- *            :     ...                        :    | positive   |      |
- *            | outgoing overflow argument 0   |    | offsets    v      v
- *    %sp-->  +--------------------------------+---------------------------
- * 
- * 
- * - * The spill slot area also includes stack allocated memory blocks (ALLOCA blocks). The size of such - * a block may be greater than the size of a normal spill slot or the word size. - *

- * A runtime can reserve space at the beginning of the overflow argument area. The calling - * convention can specify that the first overflow stack argument is not at offset 0, but at a - * specified offset. Use {@link CodeCacheProvider#getMinimumOutgoingSize()} to make sure that - * call-free methods also have this space reserved. Then the VM can use the memory at offset 0 - * relative to the stack pointer. */ -public final class FrameMap { +public abstract class FrameMap { public final CodeCacheProvider runtime; public final TargetDescription target; @@ -90,16 +52,21 @@ private int frameSize; /** + * Initial size of the area occupied by spill slots and other stack-allocated memory blocks. + */ + protected int initialSpillSize; + + /** * Size of the area occupied by spill slots and other stack-allocated memory blocks. */ - private int spillSize; + protected int spillSize; /** * Size of the area occupied by outgoing overflow arguments. This value is adjusted as calling * conventions for outgoing calls are retrieved. On some platforms, there is a minimum outgoing * size even if no overflow arguments are on the stack. */ - private int outgoingSize; + protected int outgoingSize; /** * Determines if this frame has values on the stack for outgoing calls. @@ -125,16 +92,15 @@ this.target = target; this.registerConfig = registerConfig; this.frameSize = -1; - this.spillSize = returnAddressSize() + calleeSaveAreaSize(); this.outgoingSize = runtime.getMinimumOutgoingSize(); this.objectStackBlocks = new ArrayList<>(); } - private int returnAddressSize() { + protected int returnAddressSize() { return target.arch.getReturnAddressSize(); } - private int calleeSaveAreaSize() { + protected int calleeSaveAreaSize() { CalleeSaveLayout csl = registerConfig.getCalleeSaveLayout(); return csl != null ? csl.size : 0; } @@ -173,17 +139,21 @@ * * @return The total size of the frame (in bytes). */ - public int totalFrameSize() { - return frameSize() + returnAddressSize(); - } + public abstract int totalFrameSize(); /** * Gets the current size of this frame. This is the size that would be returned by * {@link #frameSize()} if {@link #finish()} were called now. */ - public int currentFrameSize() { - return target.alignFrameSize(outgoingSize + spillSize - returnAddressSize()); - } + public abstract int currentFrameSize(); + + /** + * Aligns the given frame size to the stack alignment size and return the aligned size. + * + * @param size the initial frame size to be aligned + * @return the aligned frame size + */ + protected abstract int alignFrameSize(int size); /** * Computes the final size of this frame. After this method has been called, methods that change @@ -200,7 +170,6 @@ for (StackSlot s : freedSlots) { total += target.arch.getSizeInBytes(s.getKind()); } - int initialSpillSize = returnAddressSize() + calleeSaveAreaSize(); if (total == spillSize - initialSpillSize) { // reset spill area size spillSize = initialSpillSize; @@ -238,13 +207,12 @@ } /** - * Gets the offset to the stack area where callee-saved registers are stored. + * Gets the offset from the stack pointer to the stack area where callee-saved registers are + * stored. * * @return The offset to the callee save area (in bytes). 
*/ - public int offsetToCalleeSaveArea() { - return frameSize() - calleeSaveAreaSize(); - } + public abstract int offsetToCalleeSaveArea(); /** * Informs the frame map that the compiled code calls a particular method, which may need stack @@ -267,9 +235,16 @@ hasOutgoingStackArguments = hasOutgoingStackArguments || argsSize > 0; } - private StackSlot getSlot(PlatformKind kind, int additionalOffset) { - return StackSlot.get(kind, -spillSize + additionalOffset, true); - } + /** + * Reserves a new spill slot in the frame of the method being compiled. The returned slot is + * aligned on its natural alignment, i.e., an 8-byte spill slot is aligned at an 8-byte + * boundary. + * + * @param kind The kind of the spill slot to be reserved. + * @param additionalOffset + * @return A spill slot denoting the reserved memory area. + */ + protected abstract StackSlot allocateNewSpillSlot(PlatformKind kind, int additionalOffset); /** * Reserves a spill slot in the frame of the method being compiled. The returned slot is aligned @@ -294,7 +269,7 @@ } int size = target.arch.getSizeInBytes(kind); spillSize = NumUtil.roundUp(spillSize + size, size); - return getSlot(kind, 0); + return allocateNewSpillSlot(kind, 0); } private Set freedSlots; @@ -329,15 +304,15 @@ if (refs) { assert size % target.wordSize == 0; - StackSlot result = getSlot(Kind.Object, 0); + StackSlot result = allocateNewSpillSlot(Kind.Object, 0); objectStackBlocks.add(result); for (int i = target.wordSize; i < size; i += target.wordSize) { - objectStackBlocks.add(getSlot(Kind.Object, i)); + objectStackBlocks.add(allocateNewSpillSlot(Kind.Object, i)); } return result; } else { - return getSlot(target.wordKind, 0); + return allocateNewSpillSlot(target.wordKind, 0); } }
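Taken together, the FrameMap refactoring turns frame layout into a per-backend concern: FrameMap keeps the slot bookkeeping (spillSize, outgoingSize, freed slots) while each architecture supplies its layout policy through the new abstract methods, and Backend.newFrameMap() selects the concrete subclass. A hypothetical new port would therefore look roughly like the sketch below; the class and method bodies are illustrative only, modelled on the AMD64 versions in this changeset, and MyFrameMap/MyBackend do not exist in the sources.

    import com.oracle.graal.api.code.*;
    import com.oracle.graal.api.meta.*;
    import com.oracle.graal.lir.*;

    // Hypothetical frame map for a new target; every method shown is one of the abstract
    // hooks introduced by this changeset, with bodies modelled on AMD64FrameMap.
    public final class MyFrameMap extends FrameMap {

        public MyFrameMap(CodeCacheProvider runtime, TargetDescription target, RegisterConfig registerConfig) {
            super(runtime, target, registerConfig);
            // The frame initially holds the return address and the callee save area.
            initialSpillSize = returnAddressSize() + calleeSaveAreaSize();
            spillSize = initialSpillSize;
        }

        @Override
        public int totalFrameSize() {
            return frameSize() + returnAddressSize();
        }

        @Override
        public int currentFrameSize() {
            return alignFrameSize(outgoingSize + spillSize - returnAddressSize());
        }

        @Override
        protected int alignFrameSize(int size) {
            int x = size + returnAddressSize() + (target.stackAlignment - 1);
            return (x / target.stackAlignment) * target.stackAlignment - returnAddressSize();
        }

        @Override
        public int offsetToCalleeSaveArea() {
            return frameSize() - calleeSaveAreaSize();
        }

        @Override
        protected StackSlot allocateNewSpillSlot(PlatformKind kind, int additionalOffset) {
            // Spill slots grow downwards from the frame origin, hence the negative offset.
            return StackSlot.get(kind, -spillSize + additionalOffset, true);
        }
    }

The matching backend hook mirrors AMD64HotSpotBackend.newFrameMap() from this changeset:

    // In the hypothetical MyBackend subclass of Backend:
    @Override
    public FrameMap newFrameMap() {
        return new MyFrameMap(runtime(), target, runtime().lookupRegisterConfig());
    }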