# HG changeset patch # User Christos Kotselidis # Date 1370362017 -7200 # Node ID ed86945795d562db7d55201abc61bac76bf6718b # Parent cecd40916b0670e801938f6e6b5a2e2225f36a7b Add Compressed Oops support in LIR diff -r cecd40916b06 -r ed86945795d5 graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java --- a/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java Tue Jun 04 17:14:51 2013 +0200 +++ b/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java Tue Jun 04 18:06:57 2013 +0200 @@ -215,31 +215,6 @@ } @Override - public Variable emitLoad(Kind kind, Value address, DeoptimizingNode deopting) { - AMD64AddressValue loadAddress = asAddressValue(address); - Variable result = newVariable(kind); - append(new LoadOp(kind, result, loadAddress, deopting != null ? state(deopting) : null)); - return result; - } - - @Override - public void emitStore(Kind kind, Value address, Value inputVal, DeoptimizingNode deopting) { - AMD64AddressValue storeAddress = asAddressValue(address); - LIRFrameState state = deopting != null ? 
state(deopting) : null; - - if (isConstant(inputVal)) { - Constant c = asConstant(inputVal); - if (canStoreConstant(c)) { - append(new StoreConstantOp(kind, storeAddress, c, state)); - return; - } - } - - Variable input = load(inputVal); - append(new StoreOp(kind, storeAddress, input, state)); - } - - @Override public Variable emitAddress(StackSlot address) { Variable result = newVariable(target().wordKind); append(new StackLeaOp(result, address)); @@ -882,34 +857,6 @@ } @Override - public void visitCompareAndSwap(CompareAndSwapNode node) { - Kind kind = node.newValue().kind(); - assert kind == node.expected().kind(); - - Value expected = loadNonConst(operand(node.expected())); - Variable newValue = load(operand(node.newValue())); - - AMD64AddressValue address; - int displacement = node.displacement(); - Value index = operand(node.offset()); - if (isConstant(index) && NumUtil.isInt(asConstant(index).asLong() + displacement)) { - assert !runtime.needsDataPatch(asConstant(index)); - displacement += (int) asConstant(index).asLong(); - address = new AMD64AddressValue(kind, load(operand(node.object())), displacement); - } else { - address = new AMD64AddressValue(kind, load(operand(node.object())), load(index), Scale.Times1, displacement); - } - - RegisterValue rax = AMD64.rax.asValue(kind); - emitMove(rax, expected); - append(new CompareAndSwapOp(rax, address, rax, newValue)); - - Variable result = newVariable(node.kind()); - append(new CondMoveOp(result, Condition.EQ, load(Constant.TRUE), Constant.FALSE)); - setResult(node, result); - } - - @Override public void visitBreakpointNode(BreakpointNode node) { JavaType[] sig = new JavaType[node.arguments().size()]; for (int i = 0; i < sig.length; i++) { diff -r cecd40916b06 -r ed86945795d5 graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java --- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java Tue Jun 04 17:14:51 2013 
+0200 +++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java Tue Jun 04 18:06:57 2013 +0200 @@ -45,9 +45,18 @@ import com.oracle.graal.lir.StandardOp.ParametersOp; import com.oracle.graal.lir.StandardOp.PlaceholderOp; import com.oracle.graal.lir.amd64.*; +import com.oracle.graal.lir.amd64.AMD64ControlFlow.CondMoveOp; +import com.oracle.graal.lir.amd64.AMD64Move.CompareAndSwapCompressedOp; import com.oracle.graal.lir.amd64.AMD64Move.CompareAndSwapOp; +import com.oracle.graal.lir.amd64.AMD64Move.LoadCompressedOop; +import com.oracle.graal.lir.amd64.AMD64Move.LoadOp; import com.oracle.graal.lir.amd64.AMD64Move.MoveFromRegOp; +import com.oracle.graal.lir.amd64.AMD64Move.StoreCompressedOop; +import com.oracle.graal.lir.amd64.AMD64Move.StoreConstantOp; +import com.oracle.graal.lir.amd64.AMD64Move.StoreOp; import com.oracle.graal.nodes.*; +import com.oracle.graal.nodes.calc.*; +import com.oracle.graal.nodes.java.*; import com.oracle.graal.nodes.java.MethodCallTargetNode.InvokeKind; /** @@ -391,4 +400,78 @@ op.savedRbp = savedRbp; } } + + private static boolean isCompressCandidate(DeoptimizingNode access) { + return access != null && ((HeapAccess) access).compress(); + } + + @Override + public Variable emitLoad(Kind kind, Value address, DeoptimizingNode access) { + AMD64AddressValue loadAddress = asAddressValue(address); + Variable result = newVariable(kind); + assert access == null || access instanceof HeapAccess; + if (runtime().config.useCompressedOops && isCompressCandidate(access)) { + assert kind == Kind.Object; + Variable scratch = newVariable(Kind.Long); + append(new LoadCompressedOop(kind, result, scratch, loadAddress, access != null ? state(access) : null, runtime().config.narrowOopBase, runtime().config.narrowOopShift, + runtime().config.logMinObjAlignment)); + } else { + append(new LoadOp(kind, result, loadAddress, access != null ? 
state(access) : null)); + } + return result; + } + + @Override + public void emitStore(Kind kind, Value address, Value inputVal, DeoptimizingNode access) { + AMD64AddressValue storeAddress = asAddressValue(address); + LIRFrameState state = access != null ? state(access) : null; + if (isConstant(inputVal)) { + Constant c = asConstant(inputVal); + if (canStoreConstant(c)) { + append(new StoreConstantOp(kind, storeAddress, c, state, runtime().config.useCompressedOops && isCompressCandidate(access))); + return; + } + } + Variable input = load(inputVal); + if (runtime().config.useCompressedOops && isCompressCandidate(access)) { + assert kind == Kind.Object; + Variable scratch = newVariable(Kind.Long); + append(new StoreCompressedOop(kind, storeAddress, input, scratch, state, runtime().config.narrowOopBase, runtime().config.narrowOopShift, runtime().config.logMinObjAlignment)); + } else { + append(new StoreOp(kind, storeAddress, input, state)); + } + } + + @Override + public void visitCompareAndSwap(CompareAndSwapNode node) { + Kind kind = node.newValue().kind(); + assert kind == node.expected().kind(); + + Value expected = loadNonConst(operand(node.expected())); + Variable newValue = load(operand(node.newValue())); + + AMD64AddressValue address; + int displacement = node.displacement(); + Value index = operand(node.offset()); + if (isConstant(index) && NumUtil.isInt(asConstant(index).asLong() + displacement)) { + assert !runtime.needsDataPatch(asConstant(index)); + displacement += (int) asConstant(index).asLong(); + address = new AMD64AddressValue(kind, load(operand(node.object())), displacement); + } else { + address = new AMD64AddressValue(kind, load(operand(node.object())), load(index), Scale.Times1, displacement); + } + + RegisterValue raxRes = AMD64.rax.asValue(kind); + emitMove(raxRes, expected); + if (runtime().config.useCompressedOops && node.compress()) { + assert kind == Kind.Object; + Variable scratch = newVariable(Kind.Long); + append(new 
CompareAndSwapCompressedOp(raxRes, address, raxRes, newValue, scratch, runtime().config.narrowOopBase, runtime().config.narrowOopShift, runtime().config.logMinObjAlignment)); + } else { + append(new CompareAndSwapOp(raxRes, address, raxRes, newValue)); + } + Variable result = newVariable(node.kind()); + append(new CondMoveOp(result, Condition.EQ, load(Constant.TRUE), Constant.FALSE)); + setResult(node, result); + } } diff -r cecd40916b06 -r ed86945795d5 graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java --- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java Tue Jun 04 17:14:51 2013 +0200 +++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java Tue Jun 04 18:06:57 2013 +0200 @@ -118,6 +118,36 @@ } } + public static class LoadCompressedOop extends LoadOp { + + private long narrowOopBase; + private int narrowOopShift; + private int logMinObjAlignment; + @Temp({REG}) private AllocatableValue scratch; + + public LoadCompressedOop(Kind kind, AllocatableValue result, AllocatableValue scratch, AMD64AddressValue address, LIRFrameState state, long narrowOopBase, int narrowOopShift, + int logMinObjAlignment) { + super(kind, result, address, state); + this.narrowOopBase = narrowOopBase; + this.narrowOopShift = narrowOopShift; + this.logMinObjAlignment = logMinObjAlignment; + this.scratch = scratch; + } + + @Override + public void emitMemAccess(AMD64MacroAssembler masm) { + switch (kind) { + case Object: + Register resRegister = asRegister(result); + masm.movl(resRegister, address.toAddress()); + decodeOop(masm, resRegister, narrowOopBase, narrowOopShift, logMinObjAlignment); + break; + default: + throw GraalInternalError.shouldNotReachHere(); + } + } + } + public static class LoadOp extends MemOp { @Def({REG}) protected AllocatableValue result; @@ -161,6 +191,51 @@ } } + public static class StoreCompressedOop extends AMD64LIRInstruction { + + protected final Kind kind; + private long 
narrowOopBase; + private int narrowOopShift; + private int logMinObjAlignment; + @Temp({REG}) private AllocatableValue scratch; + @Alive({REG}) protected AllocatableValue input; + @Alive({COMPOSITE}) protected AMD64AddressValue address; + @State protected LIRFrameState state; + + public StoreCompressedOop(Kind kind, AMD64AddressValue address, AllocatableValue input, AllocatableValue scratch, LIRFrameState state, long narrowOopBase, int narrowOopShift, + int logMinObjAlignment) { + this.narrowOopBase = narrowOopBase; + this.narrowOopShift = narrowOopShift; + this.logMinObjAlignment = logMinObjAlignment; + this.scratch = scratch; + this.kind = kind; + this.address = address; + this.state = state; + this.input = input; + } + + @Override + public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler masm) { + emitMemAccess(tasm, masm); + } + + public void emitMemAccess(TargetMethodAssembler tasm, AMD64MacroAssembler masm) { + switch (kind) { + case Object: + masm.movq(asRegister(scratch), asRegister(input)); + encodeOop(masm, asRegister(scratch), narrowOopBase, narrowOopShift, logMinObjAlignment); + if (state != null) { + tasm.recordImplicitException(masm.codeBuffer.position(), state); + } + masm.movl(address.toAddress(), asRegister(scratch)); + // masm.movq(asRegister(scratch), 0xDEADBEEF); + break; + default: + throw GraalInternalError.shouldNotReachHere(); + } + } + } + public static class StoreOp extends MemOp { @Use({REG}) protected AllocatableValue input; @@ -206,10 +281,12 @@ public static class StoreConstantOp extends MemOp { protected final Constant input; + private final boolean compress; - public StoreConstantOp(Kind kind, AMD64AddressValue address, Constant input, LIRFrameState state) { + public StoreConstantOp(Kind kind, AMD64AddressValue address, Constant input, LIRFrameState state, boolean compress) { super(kind, address, state); this.input = input; + this.compress = compress; } @Override @@ -240,7 +317,11 @@ throw 
GraalInternalError.shouldNotReachHere("Cannot store 64-bit constants to memory"); case Object: if (input.isNull()) { - masm.movptr(address.toAddress(), 0); + if (compress) { + masm.movl(address.toAddress(), 0); + } else { + masm.movptr(address.toAddress(), 0); + } } else { throw GraalInternalError.shouldNotReachHere("Cannot store 64-bit constants to memory"); } @@ -335,6 +416,37 @@ } } + @Opcode("CAS") + public static class CompareAndSwapCompressedOp extends AMD64LIRInstruction { + + @Def protected AllocatableValue result; + @Alive({COMPOSITE}) protected AMD64AddressValue address; + @Alive protected AllocatableValue cmpValue; + @Alive protected AllocatableValue newValue; + @Temp({REG}) protected AllocatableValue scratch; + + private long narrowOopBase; + private int narrowOopShift; + private int logMinObjAlignment; + + public CompareAndSwapCompressedOp(AllocatableValue result, AMD64AddressValue address, AllocatableValue cmpValue, AllocatableValue newValue, AllocatableValue scratch, long narrowOopBase, + int narrowOopShift, int logMinObjAlignment) { + this.narrowOopBase = narrowOopBase; + this.narrowOopShift = narrowOopShift; + this.logMinObjAlignment = logMinObjAlignment; + this.scratch = scratch; + this.result = result; + this.address = address; + this.cmpValue = cmpValue; + this.newValue = newValue; + } + + @Override + public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler masm) { + compareAndSwapCompressed(tasm, masm, result, address, cmpValue, newValue, scratch, narrowOopBase, narrowOopShift, logMinObjAlignment); + } + } + public static void move(TargetMethodAssembler tasm, AMD64MacroAssembler masm, Value result, Value input) { if (isRegister(input)) { if (isRegister(result)) { @@ -543,4 +655,51 @@ throw GraalInternalError.shouldNotReachHere(); } } + + protected static void compareAndSwapCompressed(TargetMethodAssembler tasm, AMD64MacroAssembler masm, AllocatableValue result, AMD64AddressValue address, AllocatableValue cmpValue, + AllocatableValue 
newValue, AllocatableValue scratch, long narrowOopBase, int narrowOopShift, int logMinObjAlignment) { + assert asRegister(cmpValue) == AMD64.rax && asRegister(result) == AMD64.rax; + + switch (cmpValue.getKind()) { + case Object: + final Register scratchRegister = asRegister(scratch); + final Register cmpRegister = asRegister(cmpValue); + final Register newRegister = asRegister(newValue); + encodeOop(masm, cmpRegister, narrowOopBase, narrowOopShift, logMinObjAlignment); + masm.movq(scratchRegister, newRegister); + encodeOop(masm, scratchRegister, narrowOopBase, narrowOopShift, logMinObjAlignment); + if (tasm.target.isMP) { + masm.lock(); + } + masm.cmpxchgl(scratchRegister, address.toAddress()); + break; + default: + throw GraalInternalError.shouldNotReachHere(); + } + } + + private static void encodeOop(AMD64MacroAssembler masm, Register scratchRegister, long narrowOopBase, int narrowOopShift, int logMinObjAlignment) { + if (narrowOopBase == 0) { + if (narrowOopShift != 0) { + assert logMinObjAlignment == narrowOopShift : "Encode algorithm is wrong"; + masm.shrq(scratchRegister, logMinObjAlignment); + } + } else { + masm.subq(scratchRegister, AMD64.r12); + masm.shrq(scratchRegister, logMinObjAlignment); + } + } + + private static void decodeOop(AMD64MacroAssembler masm, Register resRegister, long narrowOopBase, int narrowOopShift, int logMinObjAlignment) { + if (narrowOopBase == 0) { + if (narrowOopShift != 0) { + assert logMinObjAlignment == narrowOopShift : "Decode algorithm is wrong"; + masm.shlq(resRegister, logMinObjAlignment); + } + } else { + masm.shlq(resRegister, logMinObjAlignment); + masm.addq(resRegister, AMD64.r12); + } + } + }