changeset 13520:fb7b39f07232

Embed compressed constants when possible and use more efficient patterns for encoding
author Tom Rodriguez <tom.rodriguez@oracle.com>
date Mon, 06 Jan 2014 17:19:18 -0800
parents 1ceb90be7bac
children 56452e07874f
files graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64Assembler.java graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java graal/com.oracle.graal.compiler.hsail/src/com/oracle/graal/compiler/hsail/HSAILLIRGenerator.java graal/com.oracle.graal.compiler.ptx/src/com/oracle/graal/compiler/ptx/PTXLIRGenerator.java graal/com.oracle.graal.compiler.sparc/src/com/oracle/graal/compiler/sparc/SPARCLIRGenerator.java graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/gen/LIRGenerator.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotMove.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotspotDirectStaticCallOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotspotDirectVirtualCallOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLIRGenerator.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotVMConfig.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/HotSpotReplacementsUtil.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/WriteNode.java graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/spi/LIRGeneratorTool.java graal/com.oracle.graal.truffle.hotspot.amd64/src/com/oracle/graal/truffle/hotspot/amd64/AMD64OptimizedCallTargetInstrumentationFactory.java src/cpu/x86/vm/graalCodeInstaller_x86.hpp
diffstat 19 files changed, 218 insertions(+), 155 deletions(-)
--- a/graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64Assembler.java	Mon Jan 06 14:30:23 2014 -0800
+++ b/graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64Assembler.java	Mon Jan 06 17:19:18 2014 -0800
@@ -2178,6 +2178,13 @@
         emitLong(imm64);
     }
 
+    public final void movslq(Register dst, int imm32) {
+        int encode = prefixqAndEncode(dst.encoding);
+        emitByte(0xC7);
+        emitByte(0xC0 | encode);
+        emitInt(imm32);
+    }
+
     public final void movdq(Register dst, Register src) {
 
         // table D-1 says MMX/SSE2
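
Despite its name, the method added above emits the REX.W-prefixed C7 /0 form, i.e. a 7-byte mov r64, imm32 whose immediate is sign-extended to 64 bits, versus 10 bytes for movq with a full 64-bit immediate. A minimal sketch of picking the shortest materialization (the helper name is hypothetical; it mirrors the const2reg change in AMD64Move.java further down):

    static void emitLongConstant(AMD64Assembler asm, Register dst, long value) {
        if (value == (int) value) {
            asm.movslq(dst, (int) value);  // 7 bytes: REX.W C7 /0 imm32, sign-extended
        } else if ((value & 0xFFFFFFFFL) == value) {
            asm.movl(dst, (int) value);    // 5 bytes: B8+rd imm32, zero-extends in 64-bit mode
        } else {
            asm.movq(dst, value);          // 10 bytes: REX.W B8+rd imm64
        }
    }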
--- a/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java	Mon Jan 06 14:30:23 2014 -0800
+++ b/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java	Mon Jan 06 17:19:18 2014 -0800
@@ -91,14 +91,20 @@
     }
 
     @Override
-    public boolean canStoreConstant(Constant c) {
+    public boolean canStoreConstant(Constant c, boolean isCompressed) {
         // there is no immediate move of 64-bit constants on Intel
         switch (c.getKind()) {
             case Long:
+                if (isCompressed) {
+                    return true;
+                }
                 return Util.isInt(c.asLong()) && !getCodeCache().needsDataPatch(c);
             case Double:
                 return false;
             case Object:
+                if (isCompressed) {
+                    return true;
+                }
                 return c.isNull();
             default:
                 return true;
--- a/graal/com.oracle.graal.compiler.hsail/src/com/oracle/graal/compiler/hsail/HSAILLIRGenerator.java	Mon Jan 06 14:30:23 2014 -0800
+++ b/graal/com.oracle.graal.compiler.hsail/src/com/oracle/graal/compiler/hsail/HSAILLIRGenerator.java	Mon Jan 06 17:19:18 2014 -0800
@@ -80,7 +80,7 @@
     }
 
     @Override
-    public boolean canStoreConstant(Constant c) {
+    public boolean canStoreConstant(Constant c, boolean isCompressed) {
         // Operand b must be in the .reg state space.
         return false;
     }
--- a/graal/com.oracle.graal.compiler.ptx/src/com/oracle/graal/compiler/ptx/PTXLIRGenerator.java	Mon Jan 06 14:30:23 2014 -0800
+++ b/graal/com.oracle.graal.compiler.ptx/src/com/oracle/graal/compiler/ptx/PTXLIRGenerator.java	Mon Jan 06 17:19:18 2014 -0800
@@ -100,7 +100,7 @@
     }
 
     @Override
-    public boolean canStoreConstant(Constant c) {
+    public boolean canStoreConstant(Constant c, boolean isCompressed) {
         // Operand b must be in the .reg state space.
         return false;
     }
--- a/graal/com.oracle.graal.compiler.sparc/src/com/oracle/graal/compiler/sparc/SPARCLIRGenerator.java	Mon Jan 06 14:30:23 2014 -0800
+++ b/graal/com.oracle.graal.compiler.sparc/src/com/oracle/graal/compiler/sparc/SPARCLIRGenerator.java	Mon Jan 06 17:19:18 2014 -0800
@@ -82,7 +82,7 @@
     }
 
     @Override
-    public boolean canStoreConstant(Constant c) {
+    public boolean canStoreConstant(Constant c, boolean isCompressed) {
         // SPARC can only store integer null constants (via g0)
         switch (c.getKind()) {
             case Float:
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/gen/LIRGenerator.java	Mon Jan 06 14:30:23 2014 -0800
+++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/gen/LIRGenerator.java	Mon Jan 06 17:19:18 2014 -0800
@@ -167,7 +167,7 @@
      * @return True if the constant can be used directly, false if the constant needs to be in a
      *         register.
      */
-    public abstract boolean canStoreConstant(Constant c);
+    public abstract boolean canStoreConstant(Constant c, boolean isCompressed);
 
     public LIRGenerator(StructuredGraph graph, Providers providers, FrameMap frameMap, CallingConvention cc, LIR lir) {
         this.graph = graph;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java	Mon Jan 06 14:30:23 2014 -0800
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java	Mon Jan 06 17:19:18 2014 -0800
@@ -257,8 +257,11 @@
 
             if (config.useCompressedClassPointers) {
                 Register register = r10;
-                AMD64HotSpotMove.decodeKlassPointer(asm, register, providers.getRegisters().getHeapBaseRegister(), src, config.narrowKlassBase, config.narrowOopBase, config.narrowKlassShift,
-                                config.logKlassAlignment);
+                AMD64HotSpotMove.decodeKlassPointer(asm, register, providers.getRegisters().getHeapBaseRegister(), src, config.getKlassEncoding());
+                if (config.narrowKlassBase != 0) {
+                    // The heap base register was destroyed above, so restore it
+                    asm.movq(providers.getRegisters().getHeapBaseRegister(), config.narrowOopBase);
+                }
                 asm.cmpq(inlineCacheKlass, register);
             } else {
                 asm.cmpq(inlineCacheKlass, src);
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java	Mon Jan 06 14:30:23 2014 -0800
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java	Mon Jan 06 17:19:18 2014 -0800
@@ -40,6 +40,7 @@
 import com.oracle.graal.debug.*;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.hotspot.*;
+import com.oracle.graal.hotspot.HotSpotVMConfig.CompressEncoding;
 import com.oracle.graal.hotspot.amd64.AMD64HotSpotMove.CompareAndSwapCompressedOp;
 import com.oracle.graal.hotspot.amd64.AMD64HotSpotMove.LoadCompressedPointer;
 import com.oracle.graal.hotspot.amd64.AMD64HotSpotMove.StoreCompressedConstantOp;
@@ -446,10 +447,23 @@
     }
 
     /**
-     * Returns whether or not the input access is a (de)compression candidate.
+     * Returns whether or not the input access should be (de)compressed.
+     */
+    private boolean isCompressedOperation(Kind kind, Access access) {
+        return access != null && access.isCompressible() && ((kind == Kind.Long && config.useCompressedClassPointers) || (kind == Kind.Object && config.useCompressedOops));
+    }
+
+    /**
+     * @return a compressed version of the incoming constant
      */
-    private static boolean isCompressCandidate(Access access) {
-        return access != null && access.isCompressible();
+    protected static Constant compress(Constant c, CompressEncoding encoding) {
+        if (c.getKind() == Kind.Long) {
+            return Constant.forIntegerKind(Kind.Int, (int) (((c.asLong() - encoding.base) >> encoding.shift) & 0xffffffffL), c.getPrimitiveAnnotation());
+        } else if (c.getKind() == Kind.Object) {
+            return Constant.forIntegerKind(Kind.Int, 0xdeaddead, c.asObject());
+        } else {
+            throw GraalInternalError.shouldNotReachHere();
+        }
     }
 
     @Override
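
The arithmetic in compress() for the Kind.Long (klass pointer) case is plain re-basing plus shifting; a self-contained model with its inverse makes the round trip concrete (a sketch, not Graal API). For Kind.Object the method instead emits the 0xdeaddead placeholder carrying the object, because the real narrow oop is only known when the code is installed.

    // Model of the narrow-klass arithmetic used by compress() above.
    static int compressKlass(long klass, long base, int shift) {
        return (int) (((klass - base) >> shift) & 0xffffffffL);
    }

    static long decompressKlass(int narrow, long base, int shift) {
        return ((narrow & 0xffffffffL) << shift) + base;
    }

    // e.g. with base = 0x7c0000000L and shift = 3:
    //   compressKlass(0x7c0001230L, base, 3) == 0x246
    //   decompressKlass(0x246, base, 3)      == 0x7c0001230L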
@@ -465,20 +479,16 @@
          * kind==Object) and some addresses (klass pointers, kind==Long). Initially, the input
          * operation is checked to discover if it has been tagged as a potential "compression"
          * candidate. Consequently, depending on the appropriate kind, the specific (de)compression
-         * functions are being called. Although, currently, the compression and decompression
-         * algorithms of oops and klass pointers are identical, in hotspot, they are implemented as
-         * separate methods. That means that in the future there might be the case where the
-         * algorithms may differ.
+         * functions are called.
          */
-        if (isCompressCandidate(access)) {
-            if (config.useCompressedOops && kind == Kind.Object) {
-                append(new LoadCompressedPointer(kind, result, getProviders().getRegisters().getHeapBaseRegister().asValue(), loadAddress, state, getNarrowKlassBase(), getNarrowOopBase(),
-                                getNarrowOopShift(), getLogMinObjectAlignment()));
-            } else if (config.useCompressedClassPointers && kind == Kind.Long) {
-                append(new LoadCompressedPointer(kind, result, getProviders().getRegisters().getHeapBaseRegister().asValue(), loadAddress, state, getNarrowKlassBase(), getNarrowOopBase(),
-                                getNarrowKlassShift(), getLogKlassAlignment()));
+        if (isCompressedOperation(kind, access)) {
+            if (kind == Kind.Object) {
+                append(new LoadCompressedPointer(kind, result, getProviders().getRegisters().getHeapBaseRegister().asValue(), loadAddress, state, config.getOopEncoding()));
+            } else if (kind == Kind.Long) {
+                Variable scratch = config.getKlassEncoding().base != 0 ? newVariable(Kind.Long) : null;
+                append(new LoadCompressedPointer(kind, result, scratch, loadAddress, state, config.getKlassEncoding()));
             } else {
-                append(new LoadOp(kind, result, loadAddress, state));
+                throw GraalInternalError.shouldNotReachHere("can't handle: " + access);
             }
         } else {
             append(new LoadOp(kind, result, loadAddress, state));
@@ -493,34 +503,39 @@
         if (access instanceof DeoptimizingNode) {
             state = state((DeoptimizingNode) access);
         }
+        boolean isCompressed = isCompressedOperation(kind, access);
         if (isConstant(inputVal)) {
             Constant c = asConstant(inputVal);
-            if (canStoreConstant(c)) {
-                if (inputVal.getKind() == Kind.Object && config.useCompressedOops && isCompressCandidate(access)) {
-                    append(new StoreCompressedConstantOp(kind, storeAddress, c, state));
-                } else if (inputVal.getKind() == Kind.Long && config.useCompressedClassPointers && isCompressCandidate(access)) {
-                    append(new StoreCompressedConstantOp(kind, storeAddress, c, state));
+            if (isCompressed && canStoreConstant(c, isCompressed)) {
+                if (c.getKind() == Kind.Object) {
+                    Constant value = c.isNull() ? c : compress(c, config.getOopEncoding());
+                    append(new StoreCompressedConstantOp(kind, storeAddress, value, state));
+                } else if (c.getKind() == Kind.Long) {
+                // Storing a compressed klass constant directly is always worthwhile,
+                // since otherwise it must be materialized as a full 64-bit value and encoded.
+                    Constant value = compress(c, config.getKlassEncoding());
+                    append(new StoreCompressedConstantOp(kind, storeAddress, value, state));
                 } else {
-                    append(new StoreConstantOp(kind, storeAddress, c, state));
+                    throw GraalInternalError.shouldNotReachHere("can't handle: " + access);
                 }
                 return;
             }
         }
         Variable input = load(inputVal);
-        if (isCompressCandidate(access)) {
-            if (config.useCompressedOops && kind == Kind.Object) {
+        if (isCompressed) {
+            if (kind == Kind.Object) {
                 if (input.getKind() == Kind.Object) {
                     Variable scratch = newVariable(Kind.Long);
                     Register heapBaseReg = getProviders().getRegisters().getHeapBaseRegister();
-                    append(new StoreCompressedPointer(kind, storeAddress, input, scratch, state, getNarrowKlassBase(), getNarrowOopBase(), getNarrowOopShift(), getLogMinObjectAlignment(), heapBaseReg));
+                    append(new StoreCompressedPointer(kind, storeAddress, input, scratch, state, config.getOopEncoding(), heapBaseReg));
                 } else {
                     // the input oop is already compressed
                     append(new StoreOp(input.getKind(), storeAddress, input, state));
                 }
-            } else if (config.useCompressedClassPointers && kind == Kind.Long) {
+            } else if (kind == Kind.Long) {
                 Variable scratch = newVariable(Kind.Long);
                 Register heapBaseReg = getProviders().getRegisters().getHeapBaseRegister();
-                append(new StoreCompressedPointer(kind, storeAddress, input, scratch, state, getNarrowKlassBase(), getNarrowOopBase(), getNarrowKlassShift(), getLogKlassAlignment(), heapBaseReg));
+                append(new StoreCompressedPointer(kind, storeAddress, input, scratch, state, config.getKlassEncoding(), heapBaseReg));
             } else {
                 append(new StoreOp(kind, storeAddress, input, state));
             }
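
The payoff of the constant fast path above: a compressible constant store collapses to a single movl with a 32-bit immediate, while the register path needs a materialization plus the encode sequence. The emitted shapes, roughly (a sketch, not exact listings):

    // Constant path — narrow value known at compile time (or patched at install time):
    //   movl [addr], imm32
    // Register path — compress at run time, then store:
    //   movq scratch, input
    //   subq/shrq ...            (encodePointer / encodeKlassPointer)
    //   movl [addr], scratch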
@@ -529,30 +544,6 @@
         }
     }
 
-    private int getLogMinObjectAlignment() {
-        return config.logMinObjAlignment();
-    }
-
-    private int getNarrowOopShift() {
-        return config.narrowOopShift;
-    }
-
-    private long getNarrowOopBase() {
-        return config.narrowOopBase;
-    }
-
-    private int getLogKlassAlignment() {
-        return config.logKlassAlignment;
-    }
-
-    private int getNarrowKlassShift() {
-        return config.narrowKlassShift;
-    }
-
-    private long getNarrowKlassBase() {
-        return config.narrowKlassBase;
-    }
-
     @Override
     public void visitCompareAndSwap(LoweredCompareAndSwapNode node, Value address) {
         Kind kind = node.getNewValue().kind();
@@ -565,7 +556,7 @@
         if (config.useCompressedOops && node.isCompressible()) {
             Variable scratch = newVariable(Kind.Long);
             Register heapBaseReg = getProviders().getRegisters().getHeapBaseRegister();
-            append(new CompareAndSwapCompressedOp(raxRes, addressValue, raxRes, newValue, scratch, getNarrowOopBase(), getNarrowOopShift(), getLogMinObjectAlignment(), heapBaseReg));
+            append(new CompareAndSwapCompressedOp(raxRes, addressValue, raxRes, newValue, scratch, config.getOopEncoding(), heapBaseReg));
         } else {
             append(new CompareAndSwapOp(raxRes, addressValue, raxRes, newValue));
         }
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotMove.java	Mon Jan 06 14:30:23 2014 -0800
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotMove.java	Mon Jan 06 17:19:18 2014 -0800
@@ -32,6 +32,7 @@
 import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.asm.amd64.AMD64Assembler.ConditionFlag;
 import com.oracle.graal.graph.*;
+import com.oracle.graal.hotspot.HotSpotVMConfig.CompressEncoding;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.amd64.AMD64Move.LoadOp;
@@ -47,7 +48,7 @@
         }
 
         @Override
-        public void emitMemAccess(AMD64MacroAssembler masm) {
+        public void emitMemAccess(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
             if (kind == Kind.Long) {
                 if (NumUtil.isInt(input.asLong())) {
                     masm.movl(address.toAddress(), (int) input.asLong());
@@ -57,6 +58,9 @@
             } else if (kind == Kind.Object) {
                 if (input.isNull()) {
                     masm.movl(address.toAddress(), 0);
+                } else if (crb.target.inlineObjects) {
+                    crb.recordDataReferenceInCode(input, 0, true);
+                    masm.movl(address.toAddress(), 0xDEADDEAD);
                 } else {
                     throw GraalInternalError.shouldNotReachHere("Cannot store 64-bit constants to memory");
                 }
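
The 0xDEADDEAD immediate above is only a placeholder: recordDataReferenceInCode marks the movl's 32-bit operand as an inlined-oop data patch, and the code installer rewrites it with the real narrow oop (see the graalCodeInstaller_x86.hpp hunk at the end of this changeset). The protocol, in brief:

    crb.recordDataReferenceInCode(input, 0, true);  // register the oop for patching
    masm.movl(address.toAddress(), 0xDEADDEAD);     // imm32 slot the installer rewrites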
@@ -68,31 +72,25 @@
 
     public static class LoadCompressedPointer extends LoadOp {
 
-        private final long klassBase;
-        private final long heapBase;
-        private final int shift;
-        private final int alignment;
-        @Alive({REG}) protected AllocatableValue heapBaseRegister;
+        private final CompressEncoding encoding;
+        @Temp({REG, ILLEGAL}) protected AllocatableValue scratch;
 
-        public LoadCompressedPointer(Kind kind, AllocatableValue result, AllocatableValue heapBaseRegister, AMD64AddressValue address, LIRFrameState state, long klassBase, long heapBase, int shift,
-                        int alignment) {
+        public LoadCompressedPointer(Kind kind, AllocatableValue result, AllocatableValue scratch, AMD64AddressValue address, LIRFrameState state, CompressEncoding encoding) {
             super(kind, result, address, state);
-            this.klassBase = klassBase;
-            this.heapBase = heapBase;
-            this.shift = shift;
-            this.alignment = alignment;
-            this.heapBaseRegister = heapBaseRegister;
+            this.encoding = encoding;
+            this.scratch = scratch != null ? scratch : Value.ILLEGAL;
             assert kind == Kind.Object || kind == Kind.Long;
         }
 
         @Override
-        public void emitMemAccess(AMD64MacroAssembler masm) {
+        public void emitMemAccess(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
             Register resRegister = asRegister(result);
-            masm.movl(resRegister, address.toAddress());
             if (kind == Kind.Object) {
-                decodePointer(masm, resRegister, asRegister(heapBaseRegister), heapBase, shift, alignment);
+                masm.movl(resRegister, address.toAddress());
+                decodePointer(masm, resRegister, asRegister(scratch), encoding);
             } else {
-                decodeKlassPointer(masm, resRegister, asRegister(heapBaseRegister), klassBase, heapBase, shift, alignment);
+                Register base = scratch.equals(Value.ILLEGAL) ? null : asRegister(scratch);
+                decodeKlassPointer(masm, resRegister, base, address.toAddress(), encoding);
             }
         }
     }
@@ -100,23 +98,16 @@
     public static class StoreCompressedPointer extends AMD64LIRInstruction {
 
         protected final Kind kind;
-        private final long klassBase;
-        private final long heapBase;
         private final Register heapBaseReg;
-        private final int shift;
-        private final int alignment;
+        private final CompressEncoding encoding;
         @Temp({REG}) private AllocatableValue scratch;
-        @Alive({REG}) protected AllocatableValue input;
+        @Alive({REG}) protected Value input;
         @Alive({COMPOSITE}) protected AMD64AddressValue address;
         @State protected LIRFrameState state;
 
-        public StoreCompressedPointer(Kind kind, AMD64AddressValue address, AllocatableValue input, AllocatableValue scratch, LIRFrameState state, long klassBase, long heapBase, int shift,
-                        int alignment, Register heapBaseReg) {
-            this.klassBase = klassBase;
-            this.heapBase = heapBase;
+        public StoreCompressedPointer(Kind kind, AMD64AddressValue address, AllocatableValue input, AllocatableValue scratch, LIRFrameState state, CompressEncoding encoding, Register heapBaseReg) {
+            this.encoding = encoding;
             this.heapBaseReg = heapBaseReg;
-            this.shift = shift;
-            this.alignment = alignment;
             this.scratch = scratch;
             this.kind = kind;
             this.address = address;
@@ -128,13 +119,19 @@
         @Override
         public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
-            masm.movq(asRegister(scratch), asRegister(input));
-            if (kind == Kind.Object) {
-                encodePointer(masm, asRegister(scratch), heapBaseReg, heapBase, shift, alignment);
+            if (kind == Kind.Long && (encoding.base & 0xffffffffL) == 0 && encoding.shift == 0) {
+                // Compressing the pointer won't change the low 32 bits, so just store it
+                masm.movl(address.toAddress(), asRegister(input));
             } else {
-                encodeKlassPointer(masm, asRegister(scratch), heapBaseReg, klassBase, heapBase, shift, alignment);
-            }
-            if (state != null) {
-                crb.recordImplicitException(masm.codeBuffer.position(), state);
+                masm.movq(asRegister(scratch), asRegister(input));
+                if (kind == Kind.Object) {
+                    encodePointer(masm, asRegister(scratch), heapBaseReg, encoding);
+                } else {
+                    assert !asRegister(scratch).equals(heapBaseReg) : "need to restore value otherwise";
+                    encodeKlassPointer(masm, asRegister(scratch), heapBaseReg, encoding);
+                }
+                if (state != null) {
+                    crb.recordImplicitException(masm.codeBuffer.position(), state);
+                }
+                masm.movl(address.toAddress(), asRegister(scratch));
             }
-            masm.movl(address.toAddress(), asRegister(scratch));
         }
@@ -149,18 +153,14 @@
         @Alive protected AllocatableValue newValue;
         @Temp({REG}) protected AllocatableValue scratch;
 
-        private long base;
-        private int shift;
-        private int alignment;
+        private CompressEncoding encoding;
         private final Register heapBaseReg;
 
-        public CompareAndSwapCompressedOp(AllocatableValue result, AMD64AddressValue address, AllocatableValue cmpValue, AllocatableValue newValue, AllocatableValue scratch, long base, int shift,
-                        int alignment, Register heapBaseReg) {
-            this.base = base;
+        public CompareAndSwapCompressedOp(AllocatableValue result, AMD64AddressValue address, AllocatableValue cmpValue, AllocatableValue newValue, AllocatableValue scratch,
+                        CompressEncoding encoding, Register heapBaseReg) {
             this.heapBaseReg = heapBaseReg;
-            this.shift = shift;
-            this.alignment = alignment;
             this.scratch = scratch;
+            this.encoding = encoding;
             this.result = result;
             this.address = address;
             this.cmpValue = cmpValue;
@@ -170,33 +170,33 @@
 
         @Override
         public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
-            compareAndSwapCompressed(crb, masm, result, address, cmpValue, newValue, scratch, base, shift, alignment, heapBaseReg);
+            compareAndSwapCompressed(crb, masm, result, address, cmpValue, newValue, scratch, encoding, heapBaseReg);
         }
     }
 
     protected static void compareAndSwapCompressed(CompilationResultBuilder crb, AMD64MacroAssembler masm, AllocatableValue result, AMD64AddressValue address, AllocatableValue cmpValue,
-                    AllocatableValue newValue, AllocatableValue scratch, long base, int shift, int alignment, Register heapBaseReg) {
+                    AllocatableValue newValue, AllocatableValue scratch, CompressEncoding encoding, Register heapBaseReg) {
         assert AMD64.rax.equals(asRegister(cmpValue)) && AMD64.rax.equals(asRegister(result));
         final Register scratchRegister = asRegister(scratch);
         final Register cmpRegister = asRegister(cmpValue);
         final Register newRegister = asRegister(newValue);
         Register heapBase = heapBaseReg;
-        encodePointer(masm, cmpRegister, heapBase, base, shift, alignment);
+        encodePointer(masm, cmpRegister, heapBase, encoding);
         masm.movq(scratchRegister, newRegister);
-        encodePointer(masm, scratchRegister, heapBase, base, shift, alignment);
+        encodePointer(masm, scratchRegister, heapBase, encoding);
         if (crb.target.isMP) {
             masm.lock();
         }
         masm.cmpxchgl(scratchRegister, address.toAddress());
     }
 
-    private static void encodePointer(AMD64MacroAssembler masm, Register scratchRegister, Register heapBaseRegister, long base, int shift, int alignment) {
+    private static void encodePointer(AMD64MacroAssembler masm, Register scratchRegister, Register heapBaseRegister, CompressEncoding encoding) {
         // If the base is zero, the uncompressed address has to be shifted right
         // in order to be compressed.
-        if (base == 0) {
-            if (shift != 0) {
-                assert alignment == shift : "Encode algorithm is wrong";
-                masm.shrq(scratchRegister, alignment);
+        if (encoding.base == 0) {
+            if (encoding.shift != 0) {
+                assert encoding.alignment == encoding.shift : "Encode algorithm is wrong";
+                masm.shrq(scratchRegister, encoding.alignment);
             }
         } else {
             // Otherwise the heap base, which resides always in register 12, is subtracted
@@ -206,21 +206,21 @@
             // register and then calculate the compressed oop value.
             masm.cmovq(ConditionFlag.Equal, scratchRegister, heapBaseRegister);
             masm.subq(scratchRegister, heapBaseRegister);
-            masm.shrq(scratchRegister, alignment);
+            masm.shrq(scratchRegister, encoding.alignment);
         }
     }
 
-    public static void decodePointer(AMD64MacroAssembler masm, Register resRegister, Register heapBaseRegister, long base, int shift, int alignment) {
+    public static void decodePointer(AMD64MacroAssembler masm, Register resRegister, Register heapBaseRegister, CompressEncoding encoding) {
         // If the base is zero, the compressed address has to be shifted left
         // in order to be uncompressed.
-        if (base == 0) {
-            if (shift != 0) {
-                assert alignment == shift : "Decode algorithm is wrong";
-                masm.shlq(resRegister, alignment);
+        if (encoding.base == 0) {
+            if (encoding.shift != 0) {
+                assert encoding.alignment == encoding.shift : "Decode algorithm is wrong";
+                masm.shlq(resRegister, encoding.alignment);
             }
         } else {
             Label done = new Label();
-            masm.shlq(resRegister, alignment);
+            masm.shlq(resRegister, encoding.alignment);
             masm.jccb(ConditionFlag.Equal, done);
             // Otherwise the heap base is added to the shifted address.
             masm.addq(resRegister, heapBaseRegister);
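
A pure-Java model of the encode/decode arithmetic above, including the null handling that the cmovq/jccb instructions implement (a sketch; null is modeled as 0):

    // encodePointer: null stays null because the heap base is cmov'ed over a
    // null oop before the subtraction.
    static long encodeOop(long oop, long base, int shift) {
        if (base == 0) {
            return oop >>> shift;
        }
        long biased = (oop == 0) ? base : oop;     // models the cmovq on Equal
        return (biased - base) >>> shift;
    }

    // decodePointer: the addq is skipped when the shifted value is zero.
    static long decodeOop(long narrow, long base, int shift) {
        long shifted = narrow << shift;
        if (base == 0) {
            return shifted;
        }
        return shifted == 0 ? 0 : shifted + base;  // models the jccb(Equal, done)
    }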
@@ -228,41 +228,27 @@
         }
     }
 
-    private static void encodeKlassPointer(AMD64MacroAssembler masm, Register scratchRegister, Register heapBaseRegister, long klassBase, long heapBase, int shift, int alignment) {
-        if (klassBase != 0) {
-            masm.movq(heapBaseRegister, klassBase);
+    private static void encodeKlassPointer(AMD64MacroAssembler masm, Register scratchRegister, Register heapBaseRegister, CompressEncoding encoding) {
+        if (encoding.base != 0) {
+            masm.movq(heapBaseRegister, encoding.base);
             masm.subq(scratchRegister, heapBaseRegister);
-            restoreHeapBase(masm, heapBaseRegister, heapBase);
         }
-        if (shift != 0) {
-            assert alignment == shift : "Encode algorithm is wrong";
-            masm.shrq(scratchRegister, alignment);
+        if (encoding.shift != 0) {
+            assert encoding.alignment == encoding.shift : "Encode algorithm is wrong";
+            masm.shrq(scratchRegister, encoding.alignment);
         }
     }
 
-    private static void decodeKlassPointer(AMD64MacroAssembler masm, Register resRegister, Register heapBaseRegister, long klassBase, long heapBase, int shift, int alignment) {
-        if (shift != 0) {
-            assert alignment == shift : "Decode algorithm is wrong";
-            masm.shlq(resRegister, alignment);
+    public static void decodeKlassPointer(AMD64MacroAssembler masm, Register register, Register scratch, AMD64Address address, CompressEncoding encoding) {
+        masm.movl(register, address);
+        if (encoding.shift != 0) {
+            assert encoding.alignment == encoding.shift : "Decode algorithm is wrong";
+            masm.shlq(register, encoding.alignment);
         }
-        if (klassBase != 0) {
-            masm.movq(heapBaseRegister, klassBase);
-            masm.addq(resRegister, heapBaseRegister);
-            restoreHeapBase(masm, heapBaseRegister, heapBase);
+        if (encoding.base != 0) {
+            masm.movq(scratch, encoding.base);
+            masm.addq(register, scratch);
         }
     }
 
-    private static void restoreHeapBase(AMD64MacroAssembler masm, Register heapBaseRegister, long heapBase) {
-        if (heapBase == 0) {
-            masm.xorq(heapBaseRegister, heapBaseRegister);
-        } else {
-            masm.movq(heapBaseRegister, heapBase);
-        }
-    }
-
-    public static void decodeKlassPointer(AMD64MacroAssembler masm, Register register, Register heapBaseRegister, AMD64Address address, long narrowKlassBase, long narrowOopBase, int narrowKlassShift,
-                    int logKlassAlignment) {
-        masm.movl(register, address);
-        decodeKlassPointer(masm, register, heapBaseRegister, narrowKlassBase, narrowOopBase, narrowKlassShift, logKlassAlignment);
-    }
 }
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotspotDirectStaticCallOp.java	Mon Jan 06 14:30:23 2014 -0800
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotspotDirectStaticCallOp.java	Mon Jan 06 17:19:18 2014 -0800
@@ -56,7 +56,8 @@
         // instruction that loads the Klass from the inline cache.
         AMD64Move.move(crb, masm, AMD64.rbx.asValue(Kind.Long), metaspaceMethod);
         crb.recordMark(invokeKind == InvokeKind.Static ? Marks.MARK_INVOKESTATIC : Marks.MARK_INVOKESPECIAL);
-        AMD64Move.move(crb, masm, AMD64.rax.asValue(Kind.Long), Constant.forLong(HotSpotGraalRuntime.runtime().getConfig().nonOopBits));
+        // This must always be emitted as the full 64-bit immediate move so it stays patchable
+        masm.movq(AMD64.rax, HotSpotGraalRuntime.runtime().getConfig().nonOopBits);
         super.emitCode(crb, masm);
     }
 }
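
The reason AMD64Move.move can no longer be used here is the const2reg change in AMD64Move.java below: a small 64-bit constant may now be shrunk to a 5-byte movl or 7-byte movslq, but the VM patches this site in place (presumably through its NativeMovConstReg accessor) and expects the fixed 10-byte form:

    // Always REX.W B8+rd imm64, 10 bytes, so the imm64 can be rewritten in place.
    masm.movq(AMD64.rax, HotSpotGraalRuntime.runtime().getConfig().nonOopBits);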
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotspotDirectVirtualCallOp.java	Mon Jan 06 14:30:23 2014 -0800
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotspotDirectVirtualCallOp.java	Mon Jan 06 17:19:18 2014 -0800
@@ -30,7 +30,6 @@
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.hotspot.bridge.*;
 import com.oracle.graal.lir.*;
-import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.amd64.AMD64Call.DirectCallOp;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.nodes.java.MethodCallTargetNode.InvokeKind;
@@ -55,7 +54,8 @@
         // The mark for an invocation that uses an inline cache must be placed at the
         // instruction that loads the Klass from the inline cache.
         crb.recordMark(invokeKind == Virtual ? Marks.MARK_INVOKEVIRTUAL : Marks.MARK_INVOKEINTERFACE);
-        AMD64Move.move(crb, masm, AMD64.rax.asValue(Kind.Long), Constant.forLong(HotSpotGraalRuntime.runtime().getConfig().nonOopBits));
+        // This must always be emitted as the full 64-bit immediate move so it stays patchable
+        masm.movq(AMD64.rax, HotSpotGraalRuntime.runtime().getConfig().nonOopBits);
         super.emitCode(crb, masm);
     }
 }
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLIRGenerator.java	Mon Jan 06 14:30:23 2014 -0800
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLIRGenerator.java	Mon Jan 06 17:19:18 2014 -0800
@@ -275,7 +275,7 @@
         }
         if (isConstant(inputVal)) {
             Constant c = asConstant(inputVal);
-            if (canStoreConstant(c)) {
+            if (canStoreConstant(c, isCompressCandidate(access))) {
                 if (inputVal.getKind() == Kind.Object) {
                     append(new StoreConstantOp(kind, storeAddress, c, state, config.useCompressedOops && isCompressCandidate(access)));
                 } else if (inputVal.getKind() == Kind.Long) {
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotVMConfig.java	Mon Jan 06 14:30:23 2014 -0800
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotVMConfig.java	Mon Jan 06 17:19:18 2014 -0800
@@ -157,9 +157,23 @@
             }
         }
 
+        oopEncoding = new CompressEncoding(narrowOopBase, narrowOopShift, logMinObjAlignment());
+        klassEncoding = new CompressEncoding(narrowKlassBase, narrowKlassShift, logKlassAlignment);
+
         assert check();
     }
 
+    private final CompressEncoding oopEncoding;
+    private final CompressEncoding klassEncoding;
+
+    public CompressEncoding getOopEncoding() {
+        return oopEncoding;
+    }
+
+    public CompressEncoding getKlassEncoding() {
+        return klassEncoding;
+    }
+
     private void setField(Field field, Object value) {
         try {
             Class<?> fieldType = field.getType();
@@ -1309,4 +1323,24 @@
 
         return true;
     }
+
+    /**
+     * A compact representation of the different encoding strategies for Objects and metadata.
+     */
+    public static class CompressEncoding {
+        public final long base;
+        public final int shift;
+        public final int alignment;
+
+        CompressEncoding(long base, int shift, int alignment) {
+            this.base = base;
+            this.shift = shift;
+            this.alignment = alignment;
+        }
+
+        @Override
+        public String toString() {
+            return "base: " + base + " shift: " + shift + " alignment: " + alignment;
+        }
+    }
 }
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/HotSpotReplacementsUtil.java	Mon Jan 06 14:30:23 2014 -0800
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/HotSpotReplacementsUtil.java	Mon Jan 06 17:19:18 2014 -0800
@@ -268,7 +268,7 @@
 
     public static void initializeObjectHeader(Word memory, Word markWord, Word hub) {
         memory.writeWord(markOffset(), markWord, MARK_WORD_LOCATION);
-        StoreHubNode.write(memory.toObject(), hub);
+        StoreHubNode.write(memory, hub);
     }
 
     @Fold
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java	Mon Jan 06 14:30:23 2014 -0800
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java	Mon Jan 06 17:19:18 2014 -0800
@@ -107,14 +107,14 @@
             this.state = state;
         }
 
-        protected abstract void emitMemAccess(AMD64MacroAssembler masm);
+        protected abstract void emitMemAccess(CompilationResultBuilder crb, AMD64MacroAssembler masm);
 
         @Override
         public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
             if (state != null) {
                 crb.recordImplicitException(masm.codeBuffer.position(), state);
             }
-            emitMemAccess(masm);
+            emitMemAccess(crb, masm);
         }
 
         public boolean makeNullCheckFor(Value value, LIRFrameState nullCheckState, int implicitNullCheckLimit) {
@@ -136,7 +136,7 @@
         }
 
         @Override
-        public void emitMemAccess(AMD64MacroAssembler masm) {
+        public void emitMemAccess(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
             switch (kind) {
                 case Boolean:
                 case Byte:
@@ -179,7 +179,7 @@
         }
 
         @Override
-        public void emitMemAccess(AMD64MacroAssembler masm) {
+        public void emitMemAccess(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
             assert isRegister(input);
             switch (kind) {
                 case Boolean:
@@ -221,7 +221,7 @@
         }
 
         @Override
-        public void emitMemAccess(AMD64MacroAssembler masm) {
+        public void emitMemAccess(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
             switch (kind) {
                 case Boolean:
                 case Byte:
@@ -469,13 +469,27 @@
 
                 break;
             case Long:
+                boolean patch = false;
                 if (crb.codeCache.needsDataPatch(input)) {
+                    patch = true;
                     crb.recordDataReferenceInCode(input, 0, true);
                 }
                 // Do not optimize with an XOR as this instruction may be between
                 // a CMP and a Jcc in which case the XOR will modify the condition
                 // flags and interfere with the Jcc.
-                masm.movq(asRegister(result), input.asLong());
+                if (patch) {
+                    masm.movq(asRegister(result), input.asLong());
+                } else {
+                    if (input.asLong() == (int) input.asLong()) {
+                        // Sign extended to long
+                        masm.movslq(asRegister(result), (int) input.asLong());
+                    } else if ((input.asLong() & 0xFFFFFFFFL) == input.asLong()) {
+                        // Zero extended to long
+                        masm.movl(asRegister(result), (int) input.asLong());
+                    } else {
+                        masm.movq(asRegister(result), input.asLong());
+                    }
+                }
                 break;
             case Float:
                 // This is *not* the same as 'constant == 0.0f' in the case where constant is -0.0f
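
A few illustrative values and the branch each takes in the Long case above (a sketch):

    long a = 0x12345678L;    // a == (int) a                       -> movslq (7 bytes)
    long b = -1L;            // b == (int) b                       -> movslq
    long c = 0x80000000L;    // fits in 32 bits but fails the sign
                             // test ((int) c is negative)         -> movl (5 bytes)
    long d = 0x123456789AL;  // needs more than 32 bits            -> movq (10 bytes)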
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/WriteNode.java	Mon Jan 06 14:30:23 2014 -0800
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/WriteNode.java	Mon Jan 06 17:19:18 2014 -0800
@@ -91,7 +91,15 @@
     @Override
     public void generate(LIRGeneratorTool gen) {
         Value address = location().generateAddress(gen, gen.operand(object()));
-        gen.emitStore(location().getValueKind(), address, gen.operand(value()), this);
+        // The constant may have been forced into a register for other usages, so inspect
+        // the value directly and use the constant form if it can be stored directly.
+        Value v;
+        if (value().isConstant() && gen.canStoreConstant(value().asConstant(), isCompressible())) {
+            v = value().asConstant();
+        } else {
+            v = gen.operand(value());
+        }
+        gen.emitStore(location().getValueKind(), address, v, this);
     }
 
     @NodeIntrinsic
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/spi/LIRGeneratorTool.java	Mon Jan 06 14:30:23 2014 -0800
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/spi/LIRGeneratorTool.java	Mon Jan 06 17:19:18 2014 -0800
@@ -49,6 +49,8 @@
      */
     boolean canInlineConstant(Constant c);
 
+    boolean canStoreConstant(Constant c, boolean isCompressed);
+
     RegisterAttributes attributes(Register register);
 
     AllocatableValue newVariable(PlatformKind kind);
--- a/graal/com.oracle.graal.truffle.hotspot.amd64/src/com/oracle/graal/truffle/hotspot/amd64/AMD64OptimizedCallTargetInstrumentationFactory.java	Mon Jan 06 14:30:23 2014 -0800
+++ b/graal/com.oracle.graal.truffle.hotspot.amd64/src/com/oracle/graal/truffle/hotspot/amd64/AMD64OptimizedCallTargetInstrumentationFactory.java	Mon Jan 06 17:19:18 2014 -0800
@@ -55,7 +55,7 @@
                 if (config.useCompressedOops) {
                     asm.movl(spillRegister, nMethodAddress);
                     asm.nop(AMD64HotSpotBackend.PATCHED_VERIFIED_ENTRY_POINT_INSTRUCTION_SIZE - (asm.codeBuffer.position() - verifiedEntryPoint));
-                    AMD64HotSpotMove.decodePointer(asm, spillRegister, registers.getHeapBaseRegister(), config.narrowOopBase, config.narrowOopShift, config.logMinObjAlignment());
+                    AMD64HotSpotMove.decodePointer(asm, spillRegister, registers.getHeapBaseRegister(), config.getOopEncoding());
                 } else {
                     asm.movq(spillRegister, nMethodAddress);
                     asm.nop(AMD64HotSpotBackend.PATCHED_VERIFIED_ENTRY_POINT_INSTRUCTION_SIZE - (asm.codeBuffer.position() - verifiedEntryPoint));
--- a/src/cpu/x86/vm/graalCodeInstaller_x86.hpp	Mon Jan 06 14:30:23 2014 -0800
+++ b/src/cpu/x86/vm/graalCodeInstaller_x86.hpp	Mon Jan 06 17:19:18 2014 -0800
@@ -80,9 +80,20 @@
     case 'b':
     case 's':
     case 'c':
-    case 'i':
       fatal("int-sized values not expected in DataPatch");
       break;
+
+    case 'i': {
+      address operand = Assembler::locate_operand(pc, Assembler::narrow_oop_operand);
+      Handle obj = Constant::object(constant);
+
+      jobject value = JNIHandles::make_local(obj());
+      int oop_index = _oop_recorder->find_index(value);
+      _instructions->relocate(pc, oop_Relocation::spec(oop_index), Assembler::narrow_oop_operand);
+      TRACE_graal_3("relocating (narrow oop constant) at %p/%p", pc, operand);
+      break;
+    }
+
     case 'f':
     case 'j':
     case 'd':