# HG changeset patch
# User Christos Kotselidis
# Date 1377789455 -7200
# Node ID 906d0cdf9c515af4be250964e211d51e6f849798
# Parent  fd1383d4542029d6ab3dc8bf0b4cfb1d2adcc205
# Parent  bd1a12a78a5176aae8a1e29aadf0c5f0d79eced2
Merge

diff -r bd1a12a78a51 -r 906d0cdf9c51 graal/com.oracle.graal.alloc/src/com/oracle/graal/alloc/ComputeBlockOrder.java
--- a/graal/com.oracle.graal.alloc/src/com/oracle/graal/alloc/ComputeBlockOrder.java	Thu Aug 29 15:35:17 2013 +0200
+++ b/graal/com.oracle.graal.alloc/src/com/oracle/graal/alloc/ComputeBlockOrder.java	Thu Aug 29 17:17:35 2013 +0200
@@ -152,39 +152,40 @@
     /**
      * Add a linear path to the code emission order greedily following the most likely successor.
      */
-    private static void addPathToCodeEmittingOrder(Block block, List<Block> order, PriorityQueue<Block> worklist, BitSet visitedBlocks, NodesToDoubles nodeProbabilities) {
-
-        // Skip loop headers if there is only a single loop end block to make the backward jump be a
-        // conditional jump.
-        if (!skipLoopHeader(block)) {
+    private static void addPathToCodeEmittingOrder(Block initialBlock, List<Block> order, PriorityQueue<Block> worklist, BitSet visitedBlocks, NodesToDoubles nodeProbabilities) {
+        Block block = initialBlock;
+        while (block != null) {
+            // Skip loop headers if there is only a single loop end block to
+            // make the backward jump be a conditional jump.
+            if (!skipLoopHeader(block)) {
 
-            // Align unskipped loop headers as they are the target of the backward jump.
-            if (block.isLoopHeader()) {
-                block.setAlign(true);
+                // Align unskipped loop headers as they are the target of the backward jump.
+                if (block.isLoopHeader()) {
+                    block.setAlign(true);
+                }
+                addBlock(block, order);
             }
-            addBlock(block, order);
-        }
 
-        Loop loop = block.getLoop();
-        if (block.isLoopEnd() && skipLoopHeader(loop.header)) {
+            Loop loop = block.getLoop();
+            if (block.isLoopEnd() && skipLoopHeader(loop.header)) {
+
+                // This is the only loop end of a skipped loop header.
+                // Add the header immediately afterwards.
+                addBlock(loop.header, order);
 
-            // This is the only loop end of a skipped loop header. Add the header immediately
-            // afterwards.
-            addBlock(loop.header, order);
-
-            // Make sure the loop successors of the loop header are aligned as they are the target
-            // of the backward jump.
-            for (Block successor : loop.header.getSuccessors()) {
-                if (successor.getLoopDepth() == block.getLoopDepth()) {
-                    successor.setAlign(true);
+                // Make sure the loop successors of the loop header are aligned
+                // as they are the target of the backward jump.
+                for (Block successor : loop.header.getSuccessors()) {
+                    if (successor.getLoopDepth() == block.getLoopDepth()) {
+                        successor.setAlign(true);
+                    }
                 }
             }
-        }
 
-        Block mostLikelySuccessor = findAndMarkMostLikelySuccessor(block, visitedBlocks, nodeProbabilities);
-        enqueueSuccessors(block, worklist, visitedBlocks);
-        if (mostLikelySuccessor != null) {
-            addPathToCodeEmittingOrder(mostLikelySuccessor, order, worklist, visitedBlocks, nodeProbabilities);
+            Block mostLikelySuccessor = findAndMarkMostLikelySuccessor(block, visitedBlocks, nodeProbabilities);
+            enqueueSuccessors(block, worklist, visitedBlocks);
+            block = mostLikelySuccessor;
         }
     }
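The ComputeBlockOrder change above rewrites addPathToCodeEmittingOrder from a tail-recursive walk into a loop: instead of recursing on the most likely successor, the successor becomes the next iteration's block. Since the JVM performs no tail-call elimination, the recursive form grows the Java stack with the length of the emitted trace, while the loop form bounds it. A minimal self-contained sketch of the same transformation, where Block, visit() and likelySuccessor are hypothetical stand-ins for the Graal-specific work:

    /** Sketch of the tail-recursion-to-loop rewrite; all names are illustrative stand-ins. */
    class TraceSketch {
        static final class Block {
            Block likelySuccessor;
            int id;
        }

        // Before: the last action is the self-call, so stack depth grows with trace length.
        static void emitTraceRecursive(Block block) {
            visit(block);
            Block next = block.likelySuccessor;
            if (next != null) {
                emitTraceRecursive(next);
            }
        }

        // After, as in the patch: the tail call becomes a loop-variable update.
        static void emitTrace(Block initialBlock) {
            Block block = initialBlock;
            while (block != null) {
                visit(block);
                block = block.likelySuccessor;
            }
        }

        static void visit(Block b) {
            System.out.println("emit block " + b.id);
        }
    }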
diff -r bd1a12a78a51 -r 906d0cdf9c51 graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/CodeCacheProvider.java
--- a/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/CodeCacheProvider.java	Thu Aug 29 15:35:17 2013 +0200
+++ b/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/CodeCacheProvider.java	Thu Aug 29 17:17:35 2013 +0200
@@ -89,4 +89,12 @@
      * Gets a description of the target architecture.
      */
     TargetDescription getTarget();
+
+    /**
+     * Returns the register the runtime uses for maintaining the heap base address. This is mainly
+     * utilized by runtimes which support compressed pointers.
+     *
+     * @return the register that keeps the heap base address
+     */
+    Register heapBaseRegister();
 }
diff -r bd1a12a78a51 -r 906d0cdf9c51 graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java	Thu Aug 29 15:35:17 2013 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java	Thu Aug 29 17:17:35 2013 +0200
@@ -247,7 +247,7 @@
         HotSpotRuntime hr = ((HotSpotRuntime) gen.getRuntime());
         if (hr.config.useCompressedKlassPointers) {
             Register register = r10;
-            AMD64Move.decodeKlassPointer(asm, register, src, hr.config.narrowKlassBase, hr.config.narrowKlassShift, hr.config.logKlassAlignment);
+            AMD64Move.decodeKlassPointer(asm, register, hr.heapBaseRegister(), src, hr.config.narrowKlassBase, hr.config.narrowKlassShift, hr.config.logKlassAlignment);
             asm.cmpq(inlineCacheKlass, register);
         } else {
             asm.cmpq(inlineCacheKlass, src);
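The new heapBaseRegister() method lifts what was a hard-coded AMD64.r12 into the CodeCacheProvider interface, so code like the inline-cache check above can ask the runtime which register holds the heap base instead of naming an architecture-specific register. A minimal sketch of the pattern, with hypothetical types standing in for Register and the provider:

    /** Hypothetical mirror of the provider pattern; these names are not the Graal API. */
    interface HeapBaseProvider {
        String heapBaseRegister(); // the real interface returns a Register
    }

    class Amd64ProviderSketch implements HeapBaseProvider {
        @Override
        public String heapBaseRegister() {
            return "r12"; // mirrors the AMD64HotSpotRuntime override in this patch
        }
    }

    class DecodeEmitterSketch {
        // Emission code threads the provider through instead of referencing AMD64.r12.
        static String decodeInstruction(HeapBaseProvider runtime) {
            return "addq result, " + runtime.heapBaseRegister();
        }
    }

Centralizing the choice means a runtime that reserves a different register, or none at all when the heap base is zero, only has to change one override.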
diff -r bd1a12a78a51 -r 906d0cdf9c51 graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java	Thu Aug 29 15:35:17 2013 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java	Thu Aug 29 17:17:35 2013 +0200
@@ -404,6 +404,9 @@
         }
     }
 
+    /**
+     * Returns whether or not the input access is a (de)compression candidate.
+     */
    private static boolean isCompressCandidate(DeoptimizingNode access) {
         return access != null && ((HeapAccess) access).isCompressible();
     }
@@ -413,13 +416,23 @@
         AMD64AddressValue loadAddress = asAddressValue(address);
         Variable result = newVariable(kind);
         assert access == null || access instanceof HeapAccess;
+        /*
+         * Currently, the (de)compression of pointers applies conditionally to some objects
+         * (oops, kind==Object) and some addresses (klass pointers, kind==Long). First, the
+         * input operation is checked to see whether it has been tagged as a potential
+         * "compression" candidate; then, depending on the kind, the matching (de)compression
+         * function is emitted. Although the compression and decompression algorithms for
+         * oops and klass pointers are currently identical, HotSpot implements them as
+         * separate methods, which means that in the future the two algorithms may come
+         * to differ.
+         */
         if (isCompressCandidate(access)) {
             if (runtime().config.useCompressedOops && kind == Kind.Object) {
-                append(new LoadCompressedPointer(kind, result, loadAddress, access != null ? state(access) : null, runtime().config.narrowOopBase, runtime().config.narrowOopShift,
-                                runtime().config.logMinObjAlignment));
+                append(new LoadCompressedPointer(kind, result, runtime().heapBaseRegister().asValue(), loadAddress, access != null ? state(access) : null, runtime().config.narrowOopBase,
+                                runtime().config.narrowOopShift, runtime().config.logMinObjAlignment));
             } else if (runtime().config.useCompressedKlassPointers && kind == Kind.Long) {
-                append(new LoadCompressedPointer(kind, result, loadAddress, access != null ? state(access) : null, runtime().config.narrowKlassBase, runtime().config.narrowKlassShift,
-                                runtime().config.logKlassAlignment));
+                append(new LoadCompressedPointer(kind, result, runtime().heapBaseRegister().asValue(), loadAddress, access != null ? state(access) : null, runtime().config.narrowKlassBase,
+                                runtime().config.narrowKlassShift, runtime().config.logKlassAlignment));
             } else {
                 append(new LoadOp(kind, result, loadAddress, access != null ? state(access) : null));
             }
diff -r bd1a12a78a51 -r 906d0cdf9c51 graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotRuntime.java
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotRuntime.java	Thu Aug 29 15:35:17 2013 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotRuntime.java	Thu Aug 29 17:17:35 2013 +0200
@@ -96,6 +96,11 @@
     }
 
     @Override
+    public Register heapBaseRegister() {
+        return r12;
+    }
+
+    @Override
     protected RegisterConfig createRegisterConfig() {
         return new AMD64HotSpotRegisterConfig(graalRuntime.getTarget().arch, config);
     }
diff -r bd1a12a78a51 -r 906d0cdf9c51 graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotRuntime.java
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotRuntime.java	Thu Aug 29 15:35:17 2013 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotRuntime.java	Thu Aug 29 17:17:35 2013 +0200
@@ -83,6 +83,11 @@
     }
 
     @Override
+    public Register heapBaseRegister() {
+        return r12;
+    }
+
+    @Override
     protected RegisterConfig createRegisterConfig() {
         return new SPARCHotSpotRegisterConfig(graalRuntime.getTarget().arch, config);
     }
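The comment added to emitLoad describes a two-step dispatch: the access must be tagged as a compression candidate, and the value kind then selects between the oop and klass constants. A condensed sketch of that decision with the Graal config and node types flattened into plain values (all names here are illustrative):

    /** Sketch of the emitLoad dispatch; the string results merely label the chosen LIR op. */
    class LoadDispatchSketch {
        enum Kind { OBJECT, LONG, OTHER }

        static String selectLoadOp(boolean compressCandidate, boolean useCompressedOops,
                        boolean useCompressedKlassPointers, Kind kind) {
            if (compressCandidate && useCompressedOops && kind == Kind.OBJECT) {
                return "LoadCompressedPointer(narrowOopBase, narrowOopShift)";     // oop decode
            } else if (compressCandidate && useCompressedKlassPointers && kind == Kind.LONG) {
                return "LoadCompressedPointer(narrowKlassBase, narrowKlassShift)"; // klass decode
            }
            return "LoadOp"; // plain load, no decode emitted
        }
    }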
diff -r bd1a12a78a51 -r 906d0cdf9c51 graal/com.oracle.graal.hotspot.test/src/com/oracle/graal/hotspot/test/WriteBarrierAdditionTest.java
--- a/graal/com.oracle.graal.hotspot.test/src/com/oracle/graal/hotspot/test/WriteBarrierAdditionTest.java	Thu Aug 29 15:35:17 2013 +0200
+++ b/graal/com.oracle.graal.hotspot.test/src/com/oracle/graal/hotspot/test/WriteBarrierAdditionTest.java	Thu Aug 29 17:17:35 2013 +0200
@@ -195,7 +195,7 @@
      */
     @Test
     public void test10() throws Exception {
-        test2("testUnsafeLoad", wr, new Long(8), new Integer(8));
+        test2("testUnsafeLoad", wr, new Long(useCompressedOops() ? 6 : 8), new Integer(useCompressedOops() ? 6 : 8));
     }
 
     /**
diff -r bd1a12a78a51 -r 906d0cdf9c51 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java	Thu Aug 29 15:35:17 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java	Thu Aug 29 17:17:35 2013 +0200
@@ -113,7 +113,7 @@
     private InstanceOfSnippets.Templates instanceofSnippets;
     private NewObjectSnippets.Templates newObjectSnippets;
     private MonitorSnippets.Templates monitorSnippets;
-    private WriteBarrierSnippets.Templates writeBarrierSnippets;
+    protected WriteBarrierSnippets.Templates writeBarrierSnippets;
     private BoxingSnippets.Templates boxingSnippets;
     private LoadExceptionObjectSnippets.Templates exceptionObjectSnippets;
     private UnsafeLoadSnippets.Templates unsafeLoadSnippets;
@@ -643,7 +643,7 @@
             UnsafeLoadNode load = (UnsafeLoadNode) n;
             assert load.kind() != Kind.Illegal;
             boolean compressible = (!load.object().isNullConstant() && load.accessKind() == Kind.Object);
-            if (addReadBarrier(load)) {
+            if (addReadBarrier(load, tool)) {
                 unsafeLoadSnippets.lower(load, tool);
             } else {
                 IndexedLocationNode location = IndexedLocationNode.create(ANY_LOCATION, load.accessKind(), load.displacement(), load.offset(), graph, 1);
@@ -852,13 +852,12 @@
         }
     }
 
-    private static boolean addReadBarrier(UnsafeLoadNode load) {
-        if (useG1GC()) {
-            if (load.object().kind() == Kind.Object && load.accessKind() == Kind.Object && !ObjectStamp.isObjectAlwaysNull(load.object())) {
-                ResolvedJavaType type = ObjectStamp.typeOrNull(load.object());
-                if (type != null && !type.isArray()) {
-                    return true;
-                }
+    private static boolean addReadBarrier(UnsafeLoadNode load, LoweringTool tool) {
+        if (useG1GC() && tool.getLoweringType() == LoweringType.AFTER_GUARDS && load.object().kind() == Kind.Object && load.accessKind() == Kind.Object &&
+                        !ObjectStamp.isObjectAlwaysNull(load.object())) {
+            ResolvedJavaType type = ObjectStamp.typeOrNull(load.object());
+            if (type != null && !type.isArray()) {
+                return true;
             }
         }
         return false;
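The reworked addReadBarrier collapses the nested checks into a single condition and adds a lowering-phase guard, so the G1 read-barrier snippet for unsafe loads is attached exactly once, during AFTER_GUARDS lowering. Restated as a flat predicate over hypothetical booleans, one per query the real method makes:

    /** Sketch of the G1 unsafe-load read-barrier condition; parameters are stand-ins. */
    class ReadBarrierSketch {
        static boolean needsG1ReadBarrier(boolean useG1GC, boolean afterGuardsLowering,
                        boolean baseIsObject, boolean loadsObject, boolean baseMayBeNonNull,
                        boolean typeKnownNonArray) {
            return useG1GC
                            && afterGuardsLowering // new in this patch: fire in one phase only
                            && baseIsObject && loadsObject
                            && baseMayBeNonNull    // !ObjectStamp.isObjectAlwaysNull(...)
                            && typeKnownNonArray;  // type != null && !type.isArray()
        }
    }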
diff -r bd1a12a78a51 -r 906d0cdf9c51 graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java	Thu Aug 29 15:35:17 2013 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java	Thu Aug 29 17:17:35 2013 +0200
@@ -24,7 +24,6 @@
 
 import static com.oracle.graal.api.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
-
 import static java.lang.Double.*;
 import static java.lang.Float.*;
 
@@ -123,12 +122,14 @@
         private long base;
         private int shift;
         private int alignment;
+        @Alive({REG}) protected AllocatableValue heapBaseRegister;
 
-        public LoadCompressedPointer(Kind kind, AllocatableValue result, AMD64AddressValue address, LIRFrameState state, long base, int shift, int alignment) {
+        public LoadCompressedPointer(Kind kind, AllocatableValue result, AllocatableValue heapBaseRegister, AMD64AddressValue address, LIRFrameState state, long base, int shift, int alignment) {
             super(kind, result, address, state);
             this.base = base;
             this.shift = shift;
             this.alignment = alignment;
+            this.heapBaseRegister = heapBaseRegister;
             assert kind == Kind.Object || kind == Kind.Long;
         }
 
@@ -137,9 +138,9 @@
             Register resRegister = asRegister(result);
             masm.movl(resRegister, address.toAddress());
             if (kind == Kind.Object) {
-                decodePointer(masm, resRegister, base, shift, alignment);
+                decodePointer(masm, resRegister, asRegister(heapBaseRegister), base, shift, alignment);
             } else {
-                decodeKlassPointer(masm, resRegister, base, shift, alignment);
+                decodeKlassPointer(masm, resRegister, asRegister(heapBaseRegister), base, shift, alignment);
             }
         }
     }
 
@@ -214,9 +215,9 @@
         public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler masm) {
             masm.movq(asRegister(scratch), asRegister(input));
             if (kind == Kind.Object) {
-                encodePointer(masm, asRegister(scratch), base, shift, alignment);
+                encodePointer(masm, asRegister(scratch), tasm.runtime.heapBaseRegister(), base, shift, alignment);
             } else {
-                encodeKlassPointer(masm, asRegister(scratch), base, shift, alignment);
+                encodeKlassPointer(masm, asRegister(scratch), tasm.runtime.heapBaseRegister(), base, shift, alignment);
             }
             if (state != null) {
                 tasm.recordImplicitException(masm.codeBuffer.position(), state);
@@ -656,16 +657,16 @@
             final Register scratchRegister = asRegister(scratch);
             final Register cmpRegister = asRegister(cmpValue);
             final Register newRegister = asRegister(newValue);
-            encodePointer(masm, cmpRegister, base, shift, alignment);
+            encodePointer(masm, cmpRegister, tasm.runtime.heapBaseRegister(), base, shift, alignment);
             masm.movq(scratchRegister, newRegister);
-            encodePointer(masm, scratchRegister, base, shift, alignment);
+            encodePointer(masm, scratchRegister, tasm.runtime.heapBaseRegister(), base, shift, alignment);
             if (tasm.target.isMP) {
                 masm.lock();
             }
             masm.cmpxchgl(scratchRegister, address.toAddress());
         }
 
-    private static void encodePointer(AMD64MacroAssembler masm, Register scratchRegister, long base, int shift, int alignment) {
+    private static void encodePointer(AMD64MacroAssembler masm, Register scratchRegister, Register heapBaseRegister, long base, int shift, int alignment) {
         // If the base is zero, the uncompressed address has to be shifted right
         // in order to be compressed.
         if (base == 0) {
@@ -679,13 +680,13 @@
             masm.testq(scratchRegister, scratchRegister);
             // If the stored reference is null, move the heap base into the scratch
            // register and then calculate the compressed oop value.
-            masm.cmovq(ConditionFlag.Equal, scratchRegister, AMD64.r12);
-            masm.subq(scratchRegister, AMD64.r12);
+            masm.cmovq(ConditionFlag.Equal, scratchRegister, heapBaseRegister);
+            masm.subq(scratchRegister, heapBaseRegister);
             masm.shrq(scratchRegister, alignment);
         }
     }
 
-    private static void decodePointer(AMD64MacroAssembler masm, Register resRegister, long base, int shift, int alignment) {
+    private static void decodePointer(AMD64MacroAssembler masm, Register resRegister, Register heapBaseRegister, long base, int shift, int alignment) {
         // If the base is zero, the compressed address has to be shifted left
         // in order to be uncompressed.
         if (base == 0) {
@@ -698,14 +699,14 @@
             masm.shlq(resRegister, alignment);
             masm.jccb(ConditionFlag.Equal, done);
             // Otherwise the heap base is added to the shifted address.
-            masm.addq(resRegister, AMD64.r12);
+            masm.addq(resRegister, heapBaseRegister);
             masm.bind(done);
         }
     }
 
-    private static void encodeKlassPointer(AMD64MacroAssembler masm, Register scratchRegister, long base, int shift, int alignment) {
+    private static void encodeKlassPointer(AMD64MacroAssembler masm, Register scratchRegister, Register heapBaseRegister, long base, int shift, int alignment) {
         if (base != 0) {
-            masm.subq(scratchRegister, AMD64.r12);
+            masm.subq(scratchRegister, heapBaseRegister);
         }
         if (shift != 0) {
             assert alignment == shift : "Encode algorithm is wrong";
@@ -713,20 +714,21 @@
         }
     }
 
-    private static void decodeKlassPointer(AMD64MacroAssembler masm, Register resRegister, long base, int shift, int alignment) {
+    private static void decodeKlassPointer(AMD64MacroAssembler masm, Register resRegister, Register heapBaseRegister, long base, int shift, int alignment) {
         if (shift != 0) {
             assert alignment == shift : "Decode algorithm is wrong";
             masm.shlq(resRegister, alignment);
             if (base != 0) {
-                masm.addq(resRegister, AMD64.r12);
+                masm.addq(resRegister, heapBaseRegister);
             }
         } else {
             assert base == 0 : "Sanity";
         }
     }
 
-    public static void decodeKlassPointer(AMD64MacroAssembler masm, Register register, AMD64Address address, long narrowKlassBase, int narrowKlassShift, int logKlassAlignment) {
+    public static void decodeKlassPointer(AMD64MacroAssembler masm, Register register, Register heapBaseRegister, AMD64Address address, long narrowKlassBase, int narrowKlassShift,
+                    int logKlassAlignment) {
         masm.movl(register, address);
-        decodeKlassPointer(masm, register, narrowKlassBase, narrowKlassShift, logKlassAlignment);
+        decodeKlassPointer(masm, register, heapBaseRegister, narrowKlassBase, narrowKlassShift, logKlassAlignment);
     }
 }
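Both code paths above are careful to preserve null across (de)compression: encodePointer cmovs the heap base into the register when the oop is null, so the subtract-and-shift yields exactly zero, and decodePointer jumps over the addq when the shifted narrow value is zero. A Java model of that arithmetic for the base != 0 case (the class and method names are illustrative; >>> plays the role of shrq):

    /** Models the compressed-oop encode/decode emitted above when the heap base is non-zero. */
    class CompressedOopMath {
        // encodePointer: testq/cmovq make a null oop compress to exactly 0.
        static long encode(long oop, long heapBase, int shift) {
            long adjusted = (oop == 0) ? heapBase : oop; // cmovq Equal, scratch, heapBase
            return (adjusted - heapBase) >>> shift;      // subq + shrq
        }

        // decodePointer: the jccb over the addq keeps a narrow 0 decoding to null.
        static long decode(long narrow, long heapBase, int shift) {
            long shifted = narrow << shift;                // shlq (sets ZF read by jccb)
            return (narrow == 0) ? 0 : shifted + heapBase; // addq skipped when zero
        }
    }

For any heap address p above the base and aligned to 2^shift, decode(encode(p, b, s), b, s) == p, and null round-trips as 0 on both sides.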
diff -r bd1a12a78a51 -r 906d0cdf9c51 src/share/vm/runtime/arguments.cpp
--- a/src/share/vm/runtime/arguments.cpp	Thu Aug 29 15:35:17 2013 +0200
+++ b/src/share/vm/runtime/arguments.cpp	Thu Aug 29 17:17:35 2013 +0200
@@ -2203,10 +2203,8 @@
 #ifdef GRAAL
   if (UseG1GC) {
     if (IgnoreUnrecognizedVMOptions) {
-      warning("UseG1GC is still experimental in Graal, use SerialGC instead ");
       FLAG_SET_CMDLINE(bool, UseG1GC, true);
     } else {
-      warning("UseG1GC is still experimental in Graal, use SerialGC instead ");
       status = true;
     }
   } else {