changeset 11474:df18a4214c7c

Move compressed pointers' logic to HotSpot-specific move
author Christos Kotselidis <christos.kotselidis@oracle.com>
date Fri, 30 Aug 2013 13:51:22 +0200
parents 906d0cdf9c51
children c121402a62d8
files graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/CodeCacheProvider.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotMove.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java
diffstat 6 files changed, 319 insertions(+), 228 deletions(-)
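
For context: with compressed pointers, 64-bit HotSpot stores object and klass references as 32-bit values that decode as pointer = base + (narrow << shift). This changeset moves the LIR instructions implementing that encoding out of the generic AMD64Move into a new HotSpot-specific AMD64HotSpotMove. A minimal sketch of the arithmetic, with hypothetical base/shift constants not taken from the commit:

    // Illustrative compressed-pointer arithmetic (hypothetical constants).
    class CompressedOopDemo {
        public static void main(String[] args) {
            long base = 0x800000000L; // assumed narrowOopBase
            int shift = 3;            // assumed narrowOopShift (8-byte alignment)
            long oop = base + 0x12340;                      // uncompressed reference
            int narrow = (int) ((oop - base) >>> shift);    // encode: 0x2468
            long decoded = base + ((long) narrow << shift); // decode
            assert decoded == oop;
        }
    }
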
--- a/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/CodeCacheProvider.java	Thu Aug 29 17:17:35 2013 +0200
+++ b/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/CodeCacheProvider.java	Fri Aug 30 13:51:22 2013 +0200
@@ -89,12 +89,4 @@
      * Gets a description of the target architecture.
      */
     TargetDescription getTarget();
-
-    /**
-     * Returns the register the runtime uses for maintaining the heap base address. This is mainly
-     * utilized by runtimes which support compressed pointers.
-     * 
-     * @return the register that keeps the heap base address
-     */
-    Register heapBaseRegister();
 }
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java	Thu Aug 29 17:17:35 2013 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java	Fri Aug 30 13:51:22 2013 +0200
@@ -244,10 +244,10 @@
             AMD64Address src = new AMD64Address(receiver, config.hubOffset);
 
             AMD64HotSpotLIRGenerator gen = (AMD64HotSpotLIRGenerator) lirGen;
-            HotSpotRuntime hr = ((HotSpotRuntime) gen.getRuntime());
-            if (hr.config.useCompressedKlassPointers) {
+            AMD64HotSpotRuntime hr = ((AMD64HotSpotRuntime) gen.getRuntime());
+            if (hr.useCompressedKlassPointers()) {
                 Register register = r10;
-                AMD64Move.decodeKlassPointer(asm, register, hr.heapBaseRegister(), src, hr.config.narrowKlassBase, hr.config.narrowKlassShift, hr.config.logKlassAlignment);
+                AMD64HotSpotMove.decodeKlassPointer(asm, register, hr.heapBaseRegister(), src, config.narrowKlassBase, config.narrowKlassShift, config.logKlassAlignment);
                 asm.cmpq(inlineCacheKlass, register);
             } else {
                 asm.cmpq(inlineCacheKlass, src);
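
For reference, the verified-entry inline cache check above now routes through AMD64HotSpotMove.decodeKlassPointer. A sketch of the sequence it emits, assuming a non-zero narrowKlassBase held in the heap base register (with a zero base and zero shift the shlq/addq steps are skipped); this is illustrative, not the commit's exact code:

    asm.movl(register, src);               // load 32-bit narrow klass from hub offset
    asm.shlq(register, logKlassAlignment); // undo the encoding shift
    asm.addq(register, heapBaseRegister);  // add back the klass base
    asm.cmpq(inlineCacheKlass, register);  // compare against the cached klass
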
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java	Thu Aug 29 17:17:35 2013 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java	Fri Aug 30 13:51:22 2013 +0200
@@ -38,6 +38,10 @@
 import com.oracle.graal.compiler.gen.*;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.hotspot.*;
+import com.oracle.graal.hotspot.amd64.AMD64HotSpotMove.CompareAndSwapCompressedOp;
+import com.oracle.graal.hotspot.amd64.AMD64HotSpotMove.LoadCompressedPointer;
+import com.oracle.graal.hotspot.amd64.AMD64HotSpotMove.StoreCompressedConstantOp;
+import com.oracle.graal.hotspot.amd64.AMD64HotSpotMove.StoreCompressedPointer;
 import com.oracle.graal.hotspot.meta.*;
 import com.oracle.graal.hotspot.nodes.*;
 import com.oracle.graal.hotspot.stubs.*;
@@ -46,12 +50,9 @@
 import com.oracle.graal.lir.StandardOp.PlaceholderOp;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.amd64.AMD64ControlFlow.CondMoveOp;
-import com.oracle.graal.lir.amd64.AMD64Move.CompareAndSwapCompressedOp;
 import com.oracle.graal.lir.amd64.AMD64Move.CompareAndSwapOp;
-import com.oracle.graal.lir.amd64.AMD64Move.LoadCompressedPointer;
 import com.oracle.graal.lir.amd64.AMD64Move.LoadOp;
 import com.oracle.graal.lir.amd64.AMD64Move.MoveFromRegOp;
-import com.oracle.graal.lir.amd64.AMD64Move.StoreCompressedPointer;
 import com.oracle.graal.lir.amd64.AMD64Move.StoreConstantOp;
 import com.oracle.graal.lir.amd64.AMD64Move.StoreOp;
 import com.oracle.graal.nodes.*;
@@ -427,12 +428,12 @@
          * algorithms may differ.
          */
         if (isCompressCandidate(access)) {
-            if (runtime().config.useCompressedOops && kind == Kind.Object) {
-                append(new LoadCompressedPointer(kind, result, runtime().heapBaseRegister().asValue(), loadAddress, access != null ? state(access) : null, runtime().config.narrowOopBase,
-                                runtime().config.narrowOopShift, runtime().config.logMinObjAlignment));
-            } else if (runtime().config.useCompressedKlassPointers && kind == Kind.Long) {
-                append(new LoadCompressedPointer(kind, result, runtime().heapBaseRegister().asValue(), loadAddress, access != null ? state(access) : null, runtime().config.narrowKlassBase,
-                                runtime().config.narrowKlassShift, runtime().config.logKlassAlignment));
+            if (runtime().useCompressedOops() && kind == Kind.Object) {
+                append(new LoadCompressedPointer(kind, result, runtime().heapBaseRegister().asValue(), loadAddress, access != null ? state(access) : null, getNarrowOopBase(), getNarrowOopShift(),
+                                getLogMinObjectAlignment()));
+            } else if (runtime().useCompressedKlassPointers() && kind == Kind.Long) {
+                append(new LoadCompressedPointer(kind, result, runtime().heapBaseRegister().asValue(), loadAddress, access != null ? state(access) : null, getNarrowKlassBase(), getNarrowKlassShift(),
+                                getLogKlassAlignment()));
             } else {
                 append(new LoadOp(kind, result, loadAddress, access != null ? state(access) : null));
             }
@@ -449,29 +450,29 @@
         if (isConstant(inputVal)) {
             Constant c = asConstant(inputVal);
             if (canStoreConstant(c)) {
-                if (inputVal.getKind() == Kind.Object) {
-                    append(new StoreConstantOp(kind, storeAddress, c, state, runtime().config.useCompressedOops && isCompressCandidate(access)));
-                } else if (inputVal.getKind() == Kind.Long) {
-                    append(new StoreConstantOp(kind, storeAddress, c, state, runtime().config.useCompressedKlassPointers && isCompressCandidate(access)));
+                if (inputVal.getKind() == Kind.Object && runtime().useCompressedOops() && isCompressCandidate(access)) {
+                    append(new StoreCompressedConstantOp(kind, storeAddress, c, state));
+                } else if (inputVal.getKind() == Kind.Long && runtime().useCompressedKlassPointers() && isCompressCandidate(access)) {
+                    append(new StoreCompressedConstantOp(kind, storeAddress, c, state));
                 } else {
-                    append(new StoreConstantOp(kind, storeAddress, c, state, false));
+                    append(new StoreConstantOp(kind, storeAddress, c, state));
                 }
                 return;
             }
         }
         Variable input = load(inputVal);
         if (isCompressCandidate(access)) {
-            if (runtime().config.useCompressedOops && kind == Kind.Object) {
+            if (runtime().useCompressedOops() && kind == Kind.Object) {
                 if (input.getKind() == Kind.Object) {
                     Variable scratch = newVariable(Kind.Long);
-                    append(new StoreCompressedPointer(kind, storeAddress, input, scratch, state, runtime().config.narrowOopBase, runtime().config.narrowOopShift, runtime().config.logMinObjAlignment));
+                    append(new StoreCompressedPointer(kind, storeAddress, input, scratch, state, getNarrowOopBase(), getNarrowOopShift(), getLogMinObjectAlignment()));
                 } else {
                     // the input oop is already compressed
                     append(new StoreOp(input.getKind(), storeAddress, input, state));
                 }
-            } else if (runtime().config.useCompressedKlassPointers && kind == Kind.Long) {
+            } else if (runtime().useCompressedKlassPointers() && kind == Kind.Long) {
                 Variable scratch = newVariable(Kind.Long);
-                append(new StoreCompressedPointer(kind, storeAddress, input, scratch, state, runtime().config.narrowKlassBase, runtime().config.narrowKlassShift, runtime().config.logKlassAlignment));
+                append(new StoreCompressedPointer(kind, storeAddress, input, scratch, state, getNarrowKlassBase(), getNarrowKlassShift(), getLogKlassAlignment()));
             } else {
                 append(new StoreOp(kind, storeAddress, input, state));
             }
@@ -480,6 +481,30 @@
         }
     }
 
+    private int getLogMinObjectAlignment() {
+        return runtime().config.logMinObjAlignment;
+    }
+
+    private int getNarrowOopShift() {
+        return runtime().config.narrowOopShift;
+    }
+
+    private long getNarrowOopBase() {
+        return runtime().config.narrowOopBase;
+    }
+
+    private int getLogKlassAlignment() {
+        return runtime().config.logKlassAlignment;
+    }
+
+    private int getNarrowKlassShift() {
+        return runtime().config.narrowKlassShift;
+    }
+
+    private long getNarrowKlassBase() {
+        return runtime().config.narrowKlassBase;
+    }
+
     @Override
     public void visitCompareAndSwap(LoweredCompareAndSwapNode node, Value address) {
         Kind kind = node.getNewValue().kind();
@@ -489,9 +514,9 @@
         AMD64AddressValue addressValue = asAddressValue(address);
         RegisterValue raxRes = AMD64.rax.asValue(kind);
         emitMove(raxRes, expected);
-        if (runtime().config.useCompressedOops && node.isCompressible()) {
+        if (runtime().useCompressedOops() && node.isCompressible()) {
             Variable scratch = newVariable(Kind.Long);
-            append(new CompareAndSwapCompressedOp(raxRes, addressValue, raxRes, newValue, scratch, runtime().config.narrowOopBase, runtime().config.narrowOopShift, runtime().config.logMinObjAlignment));
+            append(new CompareAndSwapCompressedOp(raxRes, addressValue, raxRes, newValue, scratch, getNarrowOopBase(), getNarrowOopShift(), getLogMinObjectAlignment()));
         } else {
             append(new CompareAndSwapOp(raxRes, addressValue, raxRes, newValue));
         }
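
The compressed compare-and-swap above must encode both the expected and the new reference before the 32-bit cmpxchgl. A self-contained model of that protocol with hypothetical constants (the real op additionally special-cases null via cmovq, as shown in AMD64HotSpotMove below):

    import java.util.concurrent.atomic.AtomicInteger;

    // Model of a CAS on a 32-bit narrow-oop slot (illustration only).
    class NarrowOopSlot {
        static final long BASE = 0x800000000L; // assumed narrowOopBase
        static final int SHIFT = 3;            // assumed narrowOopShift
        final AtomicInteger slot = new AtomicInteger();

        static int encode(long oop) {          // mirrors encodePointer's math
            return (int) ((oop - BASE) >>> SHIFT);
        }

        boolean cas(long expectedOop, long newOop) {
            // Both values are compressed first, then compared atomically,
            // mirroring what CompareAndSwapCompressedOp emits.
            return slot.compareAndSet(encode(expectedOop), encode(newOop));
        }
    }
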
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotMove.java	Fri Aug 30 13:51:22 2013 +0200
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.amd64.*;
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.*;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.ConditionFlag;
+import com.oracle.graal.graph.*;
+import com.oracle.graal.hotspot.meta.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.amd64.*;
+import com.oracle.graal.lir.amd64.AMD64Move.LoadOp;
+import com.oracle.graal.lir.amd64.AMD64Move.StoreConstantOp;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64HotSpotMove {
+
+    public static class StoreCompressedConstantOp extends StoreConstantOp {
+
+        public StoreCompressedConstantOp(Kind kind, AMD64AddressValue address, Constant input, LIRFrameState state) {
+            super(kind, address, input, state);
+        }
+
+        @Override
+        public void emitMemAccess(AMD64MacroAssembler masm) {
+            if (kind == Kind.Long) {
+                if (NumUtil.isInt(input.asLong())) {
+                    masm.movl(address.toAddress(), (int) input.asLong());
+                } else {
+                    throw GraalInternalError.shouldNotReachHere("Cannot store 64-bit constants to memory");
+                }
+            } else if (kind == Kind.Object) {
+                if (input.isNull()) {
+                    masm.movl(address.toAddress(), 0);
+                } else {
+                    throw GraalInternalError.shouldNotReachHere("Cannot store 64-bit constants to memory");
+                }
+            } else {
+                throw GraalInternalError.shouldNotReachHere("Attempt to store compressed constant of wrong type.");
+            }
+        }
+    }
+
+    public static class LoadCompressedPointer extends LoadOp {
+
+        private long base;
+        private int shift;
+        private int alignment;
+        @Alive({REG}) protected AllocatableValue heapBaseRegister;
+
+        public LoadCompressedPointer(Kind kind, AllocatableValue result, AllocatableValue heapBaseRegister, AMD64AddressValue address, LIRFrameState state, long base, int shift, int alignment) {
+            super(kind, result, address, state);
+            this.base = base;
+            this.shift = shift;
+            this.alignment = alignment;
+            this.heapBaseRegister = heapBaseRegister;
+            assert kind == Kind.Object || kind == Kind.Long;
+        }
+
+        @Override
+        public void emitMemAccess(AMD64MacroAssembler masm) {
+            Register resRegister = asRegister(result);
+            masm.movl(resRegister, address.toAddress());
+            if (kind == Kind.Object) {
+                decodePointer(masm, resRegister, asRegister(heapBaseRegister), base, shift, alignment);
+            } else {
+                decodeKlassPointer(masm, resRegister, asRegister(heapBaseRegister), base, shift, alignment);
+            }
+        }
+    }
+
+    public static class StoreCompressedPointer extends AMD64LIRInstruction {
+
+        protected final Kind kind;
+        private long base;
+        private int shift;
+        private int alignment;
+        @Temp({REG}) private AllocatableValue scratch;
+        @Alive({REG}) protected AllocatableValue input;
+        @Alive({COMPOSITE}) protected AMD64AddressValue address;
+        @State protected LIRFrameState state;
+
+        public StoreCompressedPointer(Kind kind, AMD64AddressValue address, AllocatableValue input, AllocatableValue scratch, LIRFrameState state, long base, int shift, int alignment) {
+            this.base = base;
+            this.shift = shift;
+            this.alignment = alignment;
+            this.scratch = scratch;
+            this.kind = kind;
+            this.address = address;
+            this.state = state;
+            this.input = input;
+            assert kind == Kind.Object || kind == Kind.Long;
+        }
+
+        @Override
+        public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler masm) {
+            Register heapBase = ((HotSpotRuntime) tasm.runtime).heapBaseRegister();
+            masm.movq(asRegister(scratch), asRegister(input));
+            if (kind == Kind.Object) {
+                encodePointer(masm, asRegister(scratch), heapBase, base, shift, alignment);
+            } else {
+                encodeKlassPointer(masm, asRegister(scratch), heapBase, base, shift, alignment);
+            }
+            if (state != null) {
+                tasm.recordImplicitException(masm.codeBuffer.position(), state);
+            }
+            masm.movl(address.toAddress(), asRegister(scratch));
+        }
+    }
+
+    @Opcode("CAS")
+    public static class CompareAndSwapCompressedOp extends AMD64LIRInstruction {
+
+        @Def protected AllocatableValue result;
+        @Alive({COMPOSITE}) protected AMD64AddressValue address;
+        @Alive protected AllocatableValue cmpValue;
+        @Alive protected AllocatableValue newValue;
+        @Temp({REG}) protected AllocatableValue scratch;
+
+        private long base;
+        private int shift;
+        private int alignment;
+
+        public CompareAndSwapCompressedOp(AllocatableValue result, AMD64AddressValue address, AllocatableValue cmpValue, AllocatableValue newValue, AllocatableValue scratch, long base, int shift,
+                        int alignment) {
+            this.base = base;
+            this.shift = shift;
+            this.alignment = alignment;
+            this.scratch = scratch;
+            this.result = result;
+            this.address = address;
+            this.cmpValue = cmpValue;
+            this.newValue = newValue;
+            assert cmpValue.getKind() == Kind.Object;
+        }
+
+        @Override
+        public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler masm) {
+            compareAndSwapCompressed(tasm, masm, result, address, cmpValue, newValue, scratch, base, shift, alignment);
+        }
+    }
+
+    protected static void compareAndSwapCompressed(TargetMethodAssembler tasm, AMD64MacroAssembler masm, AllocatableValue result, AMD64AddressValue address, AllocatableValue cmpValue,
+                    AllocatableValue newValue, AllocatableValue scratch, long base, int shift, int alignment) {
+        assert AMD64.rax.equals(asRegister(cmpValue)) && AMD64.rax.equals(asRegister(result));
+        final Register scratchRegister = asRegister(scratch);
+        final Register cmpRegister = asRegister(cmpValue);
+        final Register newRegister = asRegister(newValue);
+        Register heapBase = ((HotSpotRuntime) tasm.runtime).heapBaseRegister();
+        encodePointer(masm, cmpRegister, heapBase, base, shift, alignment);
+        masm.movq(scratchRegister, newRegister);
+        encodePointer(masm, scratchRegister, heapBase, base, shift, alignment);
+        if (tasm.target.isMP) {
+            masm.lock();
+        }
+        masm.cmpxchgl(scratchRegister, address.toAddress());
+    }
+
+    private static void encodePointer(AMD64MacroAssembler masm, Register scratchRegister, Register heapBaseRegister, long base, int shift, int alignment) {
+        // If the base is zero, the uncompressed address has to be shifted right
+        // in order to be compressed.
+        if (base == 0) {
+            if (shift != 0) {
+                assert alignment == shift : "Encode algorithm is wrong";
+                masm.shrq(scratchRegister, alignment);
+            }
+        } else {
+            // Otherwise the heap base, which always resides in register 12,
+            // is subtracted, followed by a right shift.
+            masm.testq(scratchRegister, scratchRegister);
+            // If the stored reference is null, move the heap base into the
+            // scratch register and then calculate the compressed oop value.
+            masm.cmovq(ConditionFlag.Equal, scratchRegister, heapBaseRegister);
+            masm.subq(scratchRegister, heapBaseRegister);
+            masm.shrq(scratchRegister, alignment);
+        }
+    }
+
+    private static void decodePointer(AMD64MacroAssembler masm, Register resRegister, Register heapBaseRegister, long base, int shift, int alignment) {
+        // If the base is zero, the compressed address has to be shifted left
+        // in order to be uncompressed.
+        if (base == 0) {
+            if (shift != 0) {
+                assert alignment == shift : "Decode algorithm is wrong";
+                masm.shlq(resRegister, alignment);
+            }
+        } else {
+            Label done = new Label();
+            masm.shlq(resRegister, alignment);
+            masm.jccb(ConditionFlag.Equal, done);
+            // Otherwise the heap base is added to the shifted address.
+            masm.addq(resRegister, heapBaseRegister);
+            masm.bind(done);
+        }
+    }
+
+    private static void encodeKlassPointer(AMD64MacroAssembler masm, Register scratchRegister, Register heapBaseRegister, long base, int shift, int alignment) {
+        if (base != 0) {
+            masm.subq(scratchRegister, heapBaseRegister);
+        }
+        if (shift != 0) {
+            assert alignment == shift : "Encode algorithm is wrong";
+            masm.shrq(scratchRegister, alignment);
+        }
+    }
+
+    private static void decodeKlassPointer(AMD64MacroAssembler masm, Register resRegister, Register heapBaseRegister, long base, int shift, int alignment) {
+        if (shift != 0) {
+            assert alignment == shift : "Decode algorithm is wrong";
+            masm.shlq(resRegister, alignment);
+            if (base != 0) {
+                masm.addq(resRegister, heapBaseRegister);
+            }
+        } else {
+            assert base == 0 : "Sanity";
+        }
+    }
+
+    public static void decodeKlassPointer(AMD64MacroAssembler masm, Register register, Register heapBaseRegister, AMD64Address address, long narrowKlassBase, int narrowKlassShift,
+                    int logKlassAlignment) {
+        masm.movl(register, address);
+        decodeKlassPointer(masm, register, heapBaseRegister, narrowKlassBase, narrowKlassShift, logKlassAlignment);
+    }
+}
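
One subtlety in encodePointer above: a null reference (0) minus a non-zero heap base would not compress to 0, so the op first replaces a null input with the heap base via cmovq, making the subsequent subtract-and-shift yield 0 again. A worked example with hypothetical constants:

    // Worked example of encodePointer's null path (hypothetical values).
    class NullEncodeDemo {
        public static void main(String[] args) {
            long base = 0x800000000L; // assumed narrowOopBase
            int shift = 3;            // assumed narrowOopShift
            long nullOop = 0;
            // The cmovq step: substitute the heap base when the input is null.
            long adjusted = (nullOop == 0) ? base : nullOop;
            long narrowNull = (adjusted - base) >>> shift; // 0: null stays null
            assert narrowNull == 0;
        }
    }
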
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java	Thu Aug 29 17:17:35 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java	Fri Aug 30 13:51:22 2013 +0200
@@ -361,6 +361,12 @@
     public abstract Register threadRegister();
 
     /**
+     * Returns the register used by the runtime for maintaining the heap base address for compressed
+     * pointers.
+     */
+    public abstract Register heapBaseRegister();
+
+    /**
      * Gets the stack pointer register.
      */
     public abstract Register stackPointerRegister();
@@ -494,6 +500,14 @@
         return Array.getLength(array.asObject());
     }
 
+    public boolean useCompressedOops() {
+        return config.useCompressedOops;
+    }
+
+    public boolean useCompressedKlassPointers() {
+        return config.useCompressedKlassPointers;
+    }
+
     @Override
     public void lower(Node n, LoweringTool tool) {
         StructuredGraph graph = (StructuredGraph) n.graph();
@@ -879,13 +893,13 @@
     private FloatingReadNode createReadHub(StructuredGraph graph, Kind wordKind, ValueNode object, GuardingNode guard) {
         LocationNode location = ConstantLocationNode.create(FINAL_LOCATION, wordKind, config.hubOffset, graph);
         assert !object.isConstant() || object.asConstant().isNull();
-        return graph.add(new FloatingReadNode(object, location, null, StampFactory.forKind(wordKind()), guard, BarrierType.NONE, config.useCompressedKlassPointers));
+        return graph.add(new FloatingReadNode(object, location, null, StampFactory.forKind(wordKind()), guard, BarrierType.NONE, useCompressedKlassPointers()));
     }
 
     private WriteNode createWriteHub(StructuredGraph graph, Kind wordKind, ValueNode object, ValueNode value) {
         LocationNode location = ConstantLocationNode.create(ANY_LOCATION, wordKind, config.hubOffset, graph);
         assert !object.isConstant() || object.asConstant().isNull();
-        return graph.add(new WriteNode(object, value, location, BarrierType.NONE, config.useCompressedKlassPointers));
+        return graph.add(new WriteNode(object, value, location, BarrierType.NONE, useCompressedKlassPointers()));
     }
 
     private static BarrierType getFieldLoadBarrierType(HotSpotResolvedJavaField loadField) {
@@ -943,7 +957,7 @@
     }
 
     public int getScalingFactor(Kind kind) {
-        if (config.useCompressedOops && kind == Kind.Object) {
+        if (useCompressedOops() && kind == Kind.Object) {
             return this.graalRuntime.getTarget().arch.getSizeInBytes(Kind.Int);
         } else {
             return this.graalRuntime.getTarget().arch.getSizeInBytes(kind);
@@ -1150,7 +1164,7 @@
             case Int:
                 return Constant.forInt(base == null ? unsafe.getInt(displacement) : unsafe.getInt(base, displacement));
             case Long:
-                if (displacement == config().hubOffset && this.getGraalRuntime().getRuntime().config.useCompressedKlassPointers) {
+                if (displacement == config().hubOffset && useCompressedKlassPointers()) {
                     if (base == null) {
                         throw new GraalInternalError("Base of object must not be null");
                     } else {
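
The getScalingFactor change in the hunk above reflects that, under compressed oops, an object-array element is a 4-byte narrow oop rather than a full-width pointer. A small illustration with assumed AMD64 sizes, not taken from the commit:

    class ScalingFactorDemo {
        public static void main(String[] args) {
            boolean useCompressedOops = true;          // assumed config flag
            int scalingFactor = useCompressedOops ? 4  // Kind.Int: narrow oop
                                                  : 8; // full-width oop
            long arrayBase = 0x1000;                   // hypothetical base offset
            int index = 5;
            long elementAddress = arrayBase + (long) index * scalingFactor;
            System.out.println(Long.toHexString(elementAddress)); // 1014
        }
    }
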
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java	Thu Aug 29 17:17:35 2013 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java	Fri Aug 30 13:51:22 2013 +0200
@@ -32,7 +32,6 @@
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.asm.*;
 import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.asm.amd64.AMD64Assembler.ConditionFlag;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.MoveOp;
@@ -117,34 +116,6 @@
         }
     }
 
-    public static class LoadCompressedPointer extends LoadOp {
-
-        private long base;
-        private int shift;
-        private int alignment;
-        @Alive({REG}) protected AllocatableValue heapBaseRegister;
-
-        public LoadCompressedPointer(Kind kind, AllocatableValue result, AllocatableValue heapBaseRegister, AMD64AddressValue address, LIRFrameState state, long base, int shift, int alignment) {
-            super(kind, result, address, state);
-            this.base = base;
-            this.shift = shift;
-            this.alignment = alignment;
-            this.heapBaseRegister = heapBaseRegister;
-            assert kind == Kind.Object || kind == Kind.Long;
-        }
-
-        @Override
-        public void emitMemAccess(AMD64MacroAssembler masm) {
-            Register resRegister = asRegister(result);
-            masm.movl(resRegister, address.toAddress());
-            if (kind == Kind.Object) {
-                decodePointer(masm, resRegister, asRegister(heapBaseRegister), base, shift, alignment);
-            } else {
-                decodeKlassPointer(masm, resRegister, asRegister(heapBaseRegister), base, shift, alignment);
-            }
-        }
-    }
-
     public static class LoadOp extends MemOp {
 
         @Def({REG}) protected AllocatableValue result;
@@ -188,44 +159,6 @@
         }
     }
 
-    public static class StoreCompressedPointer extends AMD64LIRInstruction {
-
-        protected final Kind kind;
-        private long base;
-        private int shift;
-        private int alignment;
-        @Temp({REG}) private AllocatableValue scratch;
-        @Alive({REG}) protected AllocatableValue input;
-        @Alive({COMPOSITE}) protected AMD64AddressValue address;
-        @State protected LIRFrameState state;
-
-        public StoreCompressedPointer(Kind kind, AMD64AddressValue address, AllocatableValue input, AllocatableValue scratch, LIRFrameState state, long base, int shift, int alignment) {
-            this.base = base;
-            this.shift = shift;
-            this.alignment = alignment;
-            this.scratch = scratch;
-            this.kind = kind;
-            this.address = address;
-            this.state = state;
-            this.input = input;
-            assert kind == Kind.Object || kind == Kind.Long;
-        }
-
-        @Override
-        public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler masm) {
-            masm.movq(asRegister(scratch), asRegister(input));
-            if (kind == Kind.Object) {
-                encodePointer(masm, asRegister(scratch), tasm.runtime.heapBaseRegister(), base, shift, alignment);
-            } else {
-                encodeKlassPointer(masm, asRegister(scratch), tasm.runtime.heapBaseRegister(), base, shift, alignment);
-            }
-            if (state != null) {
-                tasm.recordImplicitException(masm.codeBuffer.position(), state);
-            }
-            masm.movl(address.toAddress(), asRegister(scratch));
-        }
-    }
-
     public static class StoreOp extends MemOp {
 
         @Use({REG}) protected AllocatableValue input;
@@ -271,12 +204,10 @@
     public static class StoreConstantOp extends MemOp {
 
         protected final Constant input;
-        private final boolean compressible;
 
-        public StoreConstantOp(Kind kind, AMD64AddressValue address, Constant input, LIRFrameState state, boolean compressible) {
+        public StoreConstantOp(Kind kind, AMD64AddressValue address, Constant input, LIRFrameState state) {
             super(kind, address, state);
             this.input = input;
-            this.compressible = compressible;
         }
 
         @Override
@@ -295,11 +226,7 @@
                     break;
                 case Long:
                     if (NumUtil.isInt(input.asLong())) {
-                        if (compressible) {
-                            masm.movl(address.toAddress(), (int) input.asLong());
-                        } else {
-                            masm.movslq(address.toAddress(), (int) input.asLong());
-                        }
+                        masm.movslq(address.toAddress(), (int) input.asLong());
                     } else {
                         throw GraalInternalError.shouldNotReachHere("Cannot store 64-bit constants to memory");
                     }
@@ -311,11 +238,7 @@
                     throw GraalInternalError.shouldNotReachHere("Cannot store 64-bit constants to memory");
                 case Object:
                     if (input.isNull()) {
-                        if (compressible) {
-                            masm.movl(address.toAddress(), 0);
-                        } else {
-                            masm.movptr(address.toAddress(), 0);
-                        }
+                        masm.movptr(address.toAddress(), 0);
                     } else {
                         throw GraalInternalError.shouldNotReachHere("Cannot store 64-bit constants to memory");
                     }
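
The hunks above remove the compressible flag from StoreConstantOp; the compressed behavior now lives in the StoreCompressedConstantOp subclass added earlier in this changeset, which overrides emitMemAccess to emit the 32-bit movl forms. A minimal sketch of the flag-to-subclass pattern, using hypothetical simplified types:

    // Flag-to-subclass refactor, sketched (hypothetical types).
    class StoreConstant {
        void emitMemAccess() { /* movslq / movptr: full-width store */ }
    }
    class StoreCompressedConstant extends StoreConstant {
        @Override void emitMemAccess() { /* movl: 32-bit narrow store */ }
    }
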
@@ -410,38 +333,6 @@
         }
     }
 
-    @Opcode("CAS")
-    public static class CompareAndSwapCompressedOp extends AMD64LIRInstruction {
-
-        @Def protected AllocatableValue result;
-        @Alive({COMPOSITE}) protected AMD64AddressValue address;
-        @Alive protected AllocatableValue cmpValue;
-        @Alive protected AllocatableValue newValue;
-        @Temp({REG}) protected AllocatableValue scratch;
-
-        private long base;
-        private int shift;
-        private int alignment;
-
-        public CompareAndSwapCompressedOp(AllocatableValue result, AMD64AddressValue address, AllocatableValue cmpValue, AllocatableValue newValue, AllocatableValue scratch, long base, int shift,
-                        int alignment) {
-            this.base = base;
-            this.shift = shift;
-            this.alignment = alignment;
-            this.scratch = scratch;
-            this.result = result;
-            this.address = address;
-            this.cmpValue = cmpValue;
-            this.newValue = newValue;
-            assert cmpValue.getKind() == Kind.Object;
-        }
-
-        @Override
-        public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler masm) {
-            compareAndSwapCompressed(tasm, masm, result, address, cmpValue, newValue, scratch, base, shift, alignment);
-        }
-    }
-
     public static void move(TargetMethodAssembler tasm, AMD64MacroAssembler masm, Value result, Value input) {
         if (isRegister(input)) {
             if (isRegister(result)) {
@@ -650,85 +541,4 @@
                 throw GraalInternalError.shouldNotReachHere();
         }
     }
-
-    protected static void compareAndSwapCompressed(TargetMethodAssembler tasm, AMD64MacroAssembler masm, AllocatableValue result, AMD64AddressValue address, AllocatableValue cmpValue,
-                    AllocatableValue newValue, AllocatableValue scratch, long base, int shift, int alignment) {
-        assert AMD64.rax.equals(asRegister(cmpValue)) && AMD64.rax.equals(asRegister(result));
-        final Register scratchRegister = asRegister(scratch);
-        final Register cmpRegister = asRegister(cmpValue);
-        final Register newRegister = asRegister(newValue);
-        encodePointer(masm, cmpRegister, tasm.runtime.heapBaseRegister(), base, shift, alignment);
-        masm.movq(scratchRegister, newRegister);
-        encodePointer(masm, scratchRegister, tasm.runtime.heapBaseRegister(), base, shift, alignment);
-        if (tasm.target.isMP) {
-            masm.lock();
-        }
-        masm.cmpxchgl(scratchRegister, address.toAddress());
-    }
-
-    private static void encodePointer(AMD64MacroAssembler masm, Register scratchRegister, Register heapBaseRegister, long base, int shift, int alignment) {
-        // If the base is zero, the uncompressed address has to be shifted right
-        // in order to be compressed.
-        if (base == 0) {
-            if (shift != 0) {
-                assert alignment == shift : "Encode algorithm is wrong";
-                masm.shrq(scratchRegister, alignment);
-            }
-        } else {
-            // Otherwise the heap base, which resides always in register 12, is subtracted
-            // followed by right shift.
-            masm.testq(scratchRegister, scratchRegister);
-            // If the stored reference is null, move the heap to scratch
-            // register and then calculate the compressed oop value.
-            masm.cmovq(ConditionFlag.Equal, scratchRegister, heapBaseRegister);
-            masm.subq(scratchRegister, heapBaseRegister);
-            masm.shrq(scratchRegister, alignment);
-        }
-    }
-
-    private static void decodePointer(AMD64MacroAssembler masm, Register resRegister, Register heapBaseRegister, long base, int shift, int alignment) {
-        // If the base is zero, the compressed address has to be shifted left
-        // in order to be uncompressed.
-        if (base == 0) {
-            if (shift != 0) {
-                assert alignment == shift : "Decode algorithm is wrong";
-                masm.shlq(resRegister, alignment);
-            }
-        } else {
-            Label done = new Label();
-            masm.shlq(resRegister, alignment);
-            masm.jccb(ConditionFlag.Equal, done);
-            // Otherwise the heap base is added to the shifted address.
-            masm.addq(resRegister, heapBaseRegister);
-            masm.bind(done);
-        }
-    }
-
-    private static void encodeKlassPointer(AMD64MacroAssembler masm, Register scratchRegister, Register heapBaseRegister, long base, int shift, int alignment) {
-        if (base != 0) {
-            masm.subq(scratchRegister, heapBaseRegister);
-        }
-        if (shift != 0) {
-            assert alignment == shift : "Encode algorithm is wrong";
-            masm.shrq(scratchRegister, alignment);
-        }
-    }
-
-    private static void decodeKlassPointer(AMD64MacroAssembler masm, Register resRegister, Register heapBaseRegister, long base, int shift, int alignment) {
-        if (shift != 0) {
-            assert alignment == shift : "Decode algorithm is wrong";
-            masm.shlq(resRegister, alignment);
-            if (base != 0) {
-                masm.addq(resRegister, heapBaseRegister);
-            }
-        } else {
-            assert base == 0 : "Sanity";
-        }
-    }
-
-    public static void decodeKlassPointer(AMD64MacroAssembler masm, Register register, Register heapBaseRegister, AMD64Address address, long narrowKlassBase, int narrowKlassShift,
-                    int logKlassAlignment) {
-        masm.movl(register, address);
-        decodeKlassPointer(masm, register, heapBaseRegister, narrowKlassBase, narrowKlassShift, logKlassAlignment);
-    }
 }