changeset 19870:ab898f9f9c3c

Merge with f803f49c9ec423cb1c87a510c29fd61afd6a8c57
author Michael Van De Vanter <michael.van.de.vanter@oracle.com>
date Mon, 16 Mar 2015 16:54:10 -0700
parents 1d6a7ea5de59 (current diff) f803f49c9ec4 (diff)
children 825f5c7468bd 87c62a38f843
files graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCompare.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64BitManipulationOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Compare.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64TestMemoryOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64TestOp.java
diffstat 53 files changed, 2891 insertions(+), 3570 deletions(-) [+]
line wrap: on
line diff
--- a/graal/com.oracle.graal.asm.amd64.test/src/com/oracle/graal/asm/amd64/test/BitOpsTest.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.asm.amd64.test/src/com/oracle/graal/asm/amd64/test/BitOpsTest.java	Mon Mar 16 16:54:10 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,16 +24,18 @@
 package com.oracle.graal.asm.amd64.test;
 
 import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize.*;
 import static com.oracle.graal.compiler.common.UnsafeAccess.*;
 import static org.junit.Assume.*;
 
-import org.junit.*;
-
 import java.lang.reflect.*;
 import java.util.*;
 
+import org.junit.*;
+
 import com.oracle.graal.amd64.*;
-import com.oracle.graal.amd64.AMD64.*;
+import com.oracle.graal.amd64.AMD64.CPUFeature;
 import com.oracle.graal.api.code.*;
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.asm.amd64.*;
@@ -61,7 +63,7 @@
                     AMD64Assembler asm = new AMD64Assembler(target, registerConfig);
                     Register ret = registerConfig.getReturnRegister(Kind.Int);
                     Register arg = asRegister(cc.getArgument(0));
-                    asm.lzcntl(ret, arg);
+                    LZCNT.emit(asm, DWORD, ret, arg);
                     asm.ret(0);
                     return asm.close(true);
                 }
@@ -82,7 +84,7 @@
                     try {
                         Field f = IntField.class.getDeclaredField("x");
                         AMD64Address arg = new AMD64Address(asRegister(cc.getArgument(0)), (int) unsafe.objectFieldOffset(f));
-                        asm.lzcntl(ret, arg);
+                        LZCNT.emit(asm, DWORD, ret, arg);
                         asm.ret(0);
                         return asm.close(true);
                     } catch (Exception e) {
@@ -104,7 +106,7 @@
                     AMD64Assembler asm = new AMD64Assembler(target, registerConfig);
                     Register ret = registerConfig.getReturnRegister(Kind.Int);
                     Register arg = asRegister(cc.getArgument(0));
-                    asm.lzcntq(ret, arg);
+                    LZCNT.emit(asm, QWORD, ret, arg);
                     asm.ret(0);
                     return asm.close(true);
                 }
@@ -125,7 +127,7 @@
                     try {
                         Field f = LongField.class.getDeclaredField("x");
                         AMD64Address arg = new AMD64Address(asRegister(cc.getArgument(0)), (int) unsafe.objectFieldOffset(f));
-                        asm.lzcntq(ret, arg);
+                        LZCNT.emit(asm, QWORD, ret, arg);
                         asm.ret(0);
                         return asm.close(true);
                     } catch (Exception e) {
@@ -147,7 +149,7 @@
                     AMD64Assembler asm = new AMD64Assembler(target, registerConfig);
                     Register ret = registerConfig.getReturnRegister(Kind.Int);
                     Register arg = asRegister(cc.getArgument(0));
-                    asm.tzcntl(ret, arg);
+                    TZCNT.emit(asm, DWORD, ret, arg);
                     asm.ret(0);
                     return asm.close(true);
                 }
@@ -168,7 +170,7 @@
                     try {
                         Field f = IntField.class.getDeclaredField("x");
                         AMD64Address arg = new AMD64Address(asRegister(cc.getArgument(0)), (int) unsafe.objectFieldOffset(f));
-                        asm.tzcntl(ret, arg);
+                        TZCNT.emit(asm, DWORD, ret, arg);
                         asm.ret(0);
                         return asm.close(true);
                     } catch (Exception e) {
@@ -190,7 +192,7 @@
                     AMD64Assembler asm = new AMD64Assembler(target, registerConfig);
                     Register ret = registerConfig.getReturnRegister(Kind.Int);
                     Register arg = asRegister(cc.getArgument(0));
-                    asm.tzcntq(ret, arg);
+                    TZCNT.emit(asm, QWORD, ret, arg);
                     asm.ret(0);
                     return asm.close(true);
                 }
@@ -211,7 +213,7 @@
                     try {
                         Field f = LongField.class.getDeclaredField("x");
                         AMD64Address arg = new AMD64Address(asRegister(cc.getArgument(0)), (int) unsafe.objectFieldOffset(f));
-                        asm.tzcntq(ret, arg);
+                        TZCNT.emit(asm, QWORD, ret, arg);
                         asm.ret(0);
                         return asm.close(true);
                     } catch (Exception e) {
--- a/graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64Assembler.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64Assembler.java	Mon Mar 16 16:54:10 2015 -0700
@@ -26,10 +26,13 @@
 import static com.oracle.graal.api.code.MemoryBarriers.*;
 import static com.oracle.graal.asm.NumUtil.*;
 import static com.oracle.graal.asm.amd64.AMD64AsmOptions.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize.*;
 
 import com.oracle.graal.amd64.*;
 import com.oracle.graal.amd64.AMD64.CPUFeature;
 import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.code.Register.RegisterCategory;
 import com.oracle.graal.asm.*;
 
 /**
@@ -158,6 +161,131 @@
     }
 
     /**
+     * The x86 operand sizes.
+     */
+    public static enum OperandSize {
+        BYTE(1) {
+            @Override
+            protected void emitImmediate(AMD64Assembler asm, int imm) {
+                assert imm == (byte) imm;
+                asm.emitByte(imm);
+            }
+        },
+
+        WORD(2, 0x66) {
+            @Override
+            protected void emitImmediate(AMD64Assembler asm, int imm) {
+                assert imm == (short) imm;
+                asm.emitShort(imm);
+            }
+        },
+
+        DWORD(4) {
+            @Override
+            protected void emitImmediate(AMD64Assembler asm, int imm) {
+                asm.emitInt(imm);
+            }
+        },
+
+        QWORD(8) {
+            @Override
+            protected void emitImmediate(AMD64Assembler asm, int imm) {
+                asm.emitInt(imm);
+            }
+        },
+
+        SS(4, 0xF3, true),
+
+        SD(8, 0xF2, true),
+
+        PS(16, true),
+
+        PD(16, 0x66, true);
+
+        private final int sizePrefix;
+
+        private final int bytes;
+        private final boolean xmm;
+
+        private OperandSize(int bytes) {
+            this(bytes, 0);
+        }
+
+        private OperandSize(int bytes, int sizePrefix) {
+            this(bytes, sizePrefix, false);
+        }
+
+        private OperandSize(int bytes, boolean xmm) {
+            this(bytes, 0, xmm);
+        }
+
+        private OperandSize(int bytes, int sizePrefix, boolean xmm) {
+            this.sizePrefix = sizePrefix;
+            this.bytes = bytes;
+            this.xmm = xmm;
+        }
+
+        public int getBytes() {
+            return bytes;
+        }
+
+        public boolean isXmmType() {
+            return xmm;
+        }
+
+        /**
+         * Emit an immediate of this size. Note that immediate {@link #QWORD} operands are encoded
+         * as sign-extended 32-bit values.
+         *
+         * @param asm
+         * @param imm
+         */
+        protected void emitImmediate(AMD64Assembler asm, int imm) {
+            assert false;
+        }
+    }
+
+    /**
+     * Operand size and register type constraints.
+     */
+    private static enum OpAssertion {
+        ByteAssertion(CPU, CPU, BYTE),
+        IntegerAssertion(CPU, CPU, WORD, DWORD, QWORD),
+        No16BitAssertion(CPU, CPU, DWORD, QWORD),
+        QwordOnlyAssertion(CPU, CPU, QWORD),
+        FloatingAssertion(XMM, XMM, SS, SD, PS, PD),
+        PackedFloatingAssertion(XMM, XMM, PS, PD),
+        SingleAssertion(XMM, XMM, SS),
+        DoubleAssertion(XMM, XMM, SD),
+        IntToFloatingAssertion(XMM, CPU, DWORD, QWORD),
+        FloatingToIntAssertion(CPU, XMM, DWORD, QWORD);
+
+        private final RegisterCategory resultCategory;
+        private final RegisterCategory inputCategory;
+        private final OperandSize[] allowedSizes;
+
+        private OpAssertion(RegisterCategory resultCategory, RegisterCategory inputCategory, OperandSize... allowedSizes) {
+            this.resultCategory = resultCategory;
+            this.inputCategory = inputCategory;
+            this.allowedSizes = allowedSizes;
+        }
+
+        protected boolean checkOperands(AMD64Op op, OperandSize size, Register resultReg, Register inputReg) {
+            assert resultReg == null || resultCategory.equals(resultReg.getRegisterCategory()) : "invalid result register " + resultReg + " used in " + op;
+            assert inputReg == null || inputCategory.equals(inputReg.getRegisterCategory()) : "invalid input register " + inputReg + " used in " + op;
+
+            for (OperandSize s : allowedSizes) {
+                if (size == s) {
+                    return true;
+                }
+            }
+
+            assert false : "invalid operand size " + size + " used in " + op;
+            return false;
+        }
+    }
+
+    /**
      * The register to which {@link Register#Frame} and {@link Register#CallerFrame} are bound.
      */
     public final Register frameRegister;
@@ -184,97 +312,65 @@
         return r.encoding & 0x7;
     }
 
-    private void emitArithImm8(int op, Register dst, int imm8) {
-        int encode = prefixAndEncode(op, false, dst.encoding, true);
-        emitByte(0x80);
-        emitByte(0xC0 | encode);
-        emitByte(imm8);
-    }
-
-    private void emitArithImm16(int op, Register dst, int imm16) {
-        emitByte(0x66);
-        int encode = prefixAndEncode(op, dst.encoding);
-        if (isByte(imm16)) {
-            emitByte(0x83); // imm8 sign extend
-            emitByte(0xC0 | encode);
-            emitByte(imm16 & 0xFF);
-        } else {
-            emitByte(0x81);
-            emitByte(0xC0 | encode);
-            emitShort(imm16);
-        }
-    }
-
-    private void emitArithImm32(int op, Register dst, int imm32) {
-        int encode = prefixAndEncode(op, dst.encoding);
-        if (isByte(imm32)) {
-            emitByte(0x83); // imm8 sign extend
-            emitByte(0xC0 | encode);
-            emitByte(imm32 & 0xFF);
-        } else {
-            emitByte(0x81);
-            emitByte(0xC0 | encode);
-            emitInt(imm32);
-        }
-    }
-
-    private void emitArithImm32q(int op, Register dst, int imm32) {
-        emitArithImm32q(op, dst, imm32, false);
+    /**
+     * Get RXB bits for register-register instruction. In that encoding, ModRM.rm contains a
+     * register index. The R bit extends the ModRM.reg field and the B bit extends the ModRM.rm
+     * field. The X bit must be 0.
+     */
+    protected static int getRXB(Register reg, Register rm) {
+        int rxb = (reg == null ? 0 : reg.encoding & 0x08) >> 1;
+        rxb |= (rm == null ? 0 : rm.encoding & 0x08) >> 3;
+        return rxb;
     }
 
-    private void emitArithImm32q(int op, Register dst, int imm32, boolean force32Imm) {
-        int encode = prefixqAndEncode(op, dst.encoding);
-        if (isByte(imm32) && !force32Imm) {
-            emitByte(0x83); // imm8 sign extend
-            emitByte(0xC0 | encode);
-            emitByte(imm32 & 0xFF);
-        } else {
-            emitByte(0x81);
-            emitByte(0xC0 | encode);
-            emitInt(imm32);
+    /**
+     * Get RXB bits for register-memory instruction. The R bit extends the ModRM.reg field. There
+     * are two cases for the memory operand:<br>
+     * ModRM.rm contains the base register: In that case, B extends the ModRM.rm field and X = 0.<br>
+     * There is an SIB byte: In that case, X extends SIB.index and B extends SIB.base.
+     */
+    protected static int getRXB(Register reg, AMD64Address rm) {
+        int rxb = (reg == null ? 0 : reg.encoding & 0x08) >> 1;
+        if (!rm.getIndex().equals(Register.None)) {
+            rxb |= (rm.getIndex().encoding & 0x08) >> 2;
         }
-    }
-
-    // immediate-to-memory forms
-    private void emitArithImm8(int op, AMD64Address adr, int imm8) {
-        prefix(adr);
-        emitByte(0x80);
-        emitOperandHelper(op, adr);
-        emitByte(imm8);
+        if (!rm.getBase().equals(Register.None)) {
+            rxb |= (rm.getBase().encoding & 0x08) >> 3;
+        }
+        return rxb;
     }
 
-    private void emitArithImm16(int op, AMD64Address adr, int imm16) {
-        emitByte(0x66);
-        prefix(adr);
-        if (isByte(imm16)) {
-            emitByte(0x83); // imm8 sign extend
-            emitOperandHelper(op, adr);
-            emitByte(imm16 & 0xFF);
-        } else {
-            emitByte(0x81);
-            emitOperandHelper(op, adr);
-            emitShort(imm16);
-        }
+    /**
+     * Emit the ModR/M byte for one register operand and an opcode extension in the R field.
+     * <p>
+     * Format: [ 11 reg r/m ]
+     */
+    protected void emitModRM(int reg, Register rm) {
+        assert (reg & 0x07) == reg;
+        emitByte(0xC0 | (reg << 3) | (rm.encoding & 0x07));
     }
 
-    private void emitArithImm32(int op, AMD64Address adr, int imm32) {
-        prefix(adr);
-        if (isByte(imm32)) {
-            emitByte(0x83); // imm8 sign extend
-            emitOperandHelper(op, adr);
-            emitByte(imm32 & 0xFF);
-        } else {
-            emitByte(0x81);
-            emitOperandHelper(op, adr);
-            emitInt(imm32);
-        }
+    /**
+     * Emit the ModR/M byte for two register operands.
+     * <p>
+     * Format: [ 11 reg r/m ]
+     */
+    protected void emitModRM(Register reg, Register rm) {
+        emitModRM(reg.encoding & 0x07, rm);
     }
 
+    /**
+     * Emits the ModR/M byte and optionally the SIB byte for one register and one memory operand.
+     */
     protected void emitOperandHelper(Register reg, AMD64Address addr) {
         assert !reg.equals(Register.None);
         emitOperandHelper(encode(reg), addr);
     }
 
+    /**
+     * Emits the ModR/M byte and optionally the SIB byte for one memory operand and an opcode
+     * extension in the R field.
+     */
     protected void emitOperandHelper(int reg, AMD64Address addr) {
         assert (reg & 0x07) == reg;
         int regenc = reg << 3;
@@ -379,24 +475,470 @@
         }
     }
 
+    /**
+     * Base class for AMD64 opcodes.
+     */
+    public static class AMD64Op {
+
+        protected static final int P_0F = 0x0F;
+        protected static final int P_0F38 = 0x380F;
+        protected static final int P_0F3A = 0x3A0F;
+
+        private final String opcode;
+
+        private final int prefix1;
+        private final int prefix2;
+        private final int op;
+
+        private final boolean dstIsByte;
+        private final boolean srcIsByte;
+
+        private final OpAssertion assertion;
+        private final CPUFeature feature;
+
+        protected AMD64Op(String opcode, int prefix1, int prefix2, int op, OpAssertion assertion, CPUFeature feature) {
+            this(opcode, prefix1, prefix2, op, assertion == OpAssertion.ByteAssertion, assertion == OpAssertion.ByteAssertion, assertion, feature);
+        }
+
+        protected AMD64Op(String opcode, int prefix1, int prefix2, int op, boolean dstIsByte, boolean srcIsByte, OpAssertion assertion, CPUFeature feature) {
+            this.opcode = opcode;
+            this.prefix1 = prefix1;
+            this.prefix2 = prefix2;
+            this.op = op;
+
+            this.dstIsByte = dstIsByte;
+            this.srcIsByte = srcIsByte;
+
+            this.assertion = assertion;
+            this.feature = feature;
+        }
+
+        protected final void emitOpcode(AMD64Assembler asm, OperandSize size, int rxb, int dstEnc, int srcEnc) {
+            if (prefix1 != 0) {
+                asm.emitByte(prefix1);
+            }
+            if (size.sizePrefix != 0) {
+                asm.emitByte(size.sizePrefix);
+            }
+            int rexPrefix = 0x40 | rxb;
+            if (size == QWORD) {
+                rexPrefix |= 0x08;
+            }
+            if (rexPrefix != 0x40 || (dstIsByte && dstEnc >= 4) || (srcIsByte && srcEnc >= 4)) {
+                asm.emitByte(rexPrefix);
+            }
+            if (prefix2 > 0xFF) {
+                asm.emitShort(prefix2);
+            } else if (prefix2 > 0) {
+                asm.emitByte(prefix2);
+            }
+            asm.emitByte(op);
+        }
+
+        protected final boolean verify(AMD64Assembler asm, OperandSize size, Register resultReg, Register inputReg) {
+            assert feature == null || asm.supports(feature) : String.format("unsupported feature %s required for %s", feature, opcode);
+            assert assertion.checkOperands(this, size, resultReg, inputReg);
+            return true;
+        }
+
+        @Override
+        public String toString() {
+            return opcode;
+        }
+    }
+
+    /**
+     * Base class for AMD64 opcodes with immediate operands.
+     */
+    public static class AMD64ImmOp extends AMD64Op {
+
+        private final boolean immIsByte;
+
+        protected AMD64ImmOp(String opcode, boolean immIsByte, int prefix, int op, OpAssertion assertion) {
+            super(opcode, 0, prefix, op, assertion, null);
+            this.immIsByte = immIsByte;
+        }
+
+        protected final void emitImmediate(AMD64Assembler asm, OperandSize size, int imm) {
+            if (immIsByte) {
+                assert imm == (byte) imm;
+                asm.emitByte(imm);
+            } else {
+                size.emitImmediate(asm, imm);
+            }
+        }
+    }
+
+    /**
+     * Opcode with operand order of either RM or MR.
+     */
+    public abstract static class AMD64RROp extends AMD64Op {
+
+        protected AMD64RROp(String opcode, int prefix1, int prefix2, int op, OpAssertion assertion, CPUFeature feature) {
+            super(opcode, prefix1, prefix2, op, assertion, feature);
+        }
+
+        protected AMD64RROp(String opcode, int prefix1, int prefix2, int op, boolean dstIsByte, boolean srcIsByte, OpAssertion assertion, CPUFeature feature) {
+            super(opcode, prefix1, prefix2, op, dstIsByte, srcIsByte, assertion, feature);
+        }
+
+        public abstract void emit(AMD64Assembler asm, OperandSize size, Register dst, Register src);
+    }
+
+    /**
+     * Opcode with operand order of RM.
+     */
+    public static class AMD64RMOp extends AMD64RROp {
+        // @formatter:off
+        public static final AMD64RMOp IMUL   = new AMD64RMOp("IMUL",         P_0F, 0xAF);
+        public static final AMD64RMOp BSF    = new AMD64RMOp("BSF",          P_0F, 0xBC);
+        public static final AMD64RMOp BSR    = new AMD64RMOp("BSR",          P_0F, 0xBD);
+        public static final AMD64RMOp POPCNT = new AMD64RMOp("POPCNT", 0xF3, P_0F, 0xB8, CPUFeature.POPCNT);
+        public static final AMD64RMOp TZCNT  = new AMD64RMOp("TZCNT",  0xF3, P_0F, 0xBC, CPUFeature.BMI1);
+        public static final AMD64RMOp LZCNT  = new AMD64RMOp("LZCNT",  0xF3, P_0F, 0xBD, CPUFeature.LZCNT);
+        public static final AMD64RMOp MOVZXB = new AMD64RMOp("MOVZXB",       P_0F, 0xB6, false, true, OpAssertion.IntegerAssertion);
+        public static final AMD64RMOp MOVZX  = new AMD64RMOp("MOVZX",        P_0F, 0xB7, OpAssertion.No16BitAssertion);
+        public static final AMD64RMOp MOVSXB = new AMD64RMOp("MOVSXB",       P_0F, 0xBE, false, true, OpAssertion.IntegerAssertion);
+        public static final AMD64RMOp MOVSX  = new AMD64RMOp("MOVSX",        P_0F, 0xBF, OpAssertion.No16BitAssertion);
+        public static final AMD64RMOp MOVSXD = new AMD64RMOp("MOVSXD",             0x63, OpAssertion.QwordOnlyAssertion);
+        public static final AMD64RMOp MOV    = new AMD64RMOp("MOV",                0x8B);
+
+        // MOVD and MOVQ are the same opcode, just with different operand size prefix
+        public static final AMD64RMOp MOVD   = new AMD64RMOp("MOVD",   0x66, P_0F, 0x6E, OpAssertion.IntToFloatingAssertion, CPUFeature.SSE2);
+        public static final AMD64RMOp MOVQ   = new AMD64RMOp("MOVQ",   0x66, P_0F, 0x6E, OpAssertion.IntToFloatingAssertion, CPUFeature.SSE2);
+
+        // TEST is documented as MR operation, but it's symmetric, and using it as RM operation is more convenient.
+        public static final AMD64RMOp TESTB  = new AMD64RMOp("TEST",               0x84, OpAssertion.ByteAssertion);
+        public static final AMD64RMOp TEST   = new AMD64RMOp("TEST",               0x85);
+        // @formatter:on
+
+        protected AMD64RMOp(String opcode, int op) {
+            this(opcode, 0, op);
+        }
+
+        protected AMD64RMOp(String opcode, int op, OpAssertion assertion) {
+            this(opcode, 0, op, assertion);
+        }
+
+        protected AMD64RMOp(String opcode, int prefix, int op) {
+            this(opcode, 0, prefix, op, null);
+        }
+
+        protected AMD64RMOp(String opcode, int prefix, int op, OpAssertion assertion) {
+            this(opcode, 0, prefix, op, assertion, null);
+        }
+
+        protected AMD64RMOp(String opcode, int prefix, int op, boolean dstIsByte, boolean srcIsByte, OpAssertion assertion) {
+            super(opcode, 0, prefix, op, dstIsByte, srcIsByte, assertion, null);
+        }
+
+        protected AMD64RMOp(String opcode, int prefix1, int prefix2, int op, CPUFeature feature) {
+            this(opcode, prefix1, prefix2, op, OpAssertion.IntegerAssertion, feature);
+        }
+
+        protected AMD64RMOp(String opcode, int prefix1, int prefix2, int op, OpAssertion assertion, CPUFeature feature) {
+            super(opcode, prefix1, prefix2, op, assertion, feature);
+        }
+
+        @Override
+        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, Register src) {
+            assert verify(asm, size, dst, src);
+            emitOpcode(asm, size, getRXB(dst, src), dst.encoding, src.encoding);
+            asm.emitModRM(dst, src);
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, AMD64Address src) {
+            assert verify(asm, size, dst, null);
+            emitOpcode(asm, size, getRXB(dst, src), dst.encoding, 0);
+            asm.emitOperandHelper(dst, src);
+        }
+    }
+
+    /**
+     * Opcode with operand order of MR.
+     */
+    public static class AMD64MROp extends AMD64RROp {
+        // @formatter:off
+        public static final AMD64MROp MOV    = new AMD64MROp("MOV",                0x89);
+
+        // MOVD and MOVQ are the same opcode, just with different operand size prefix
+        // Note that as MR opcodes, they have reverse operand order, so the IntToFloatingAssertion must be used.
+        public static final AMD64MROp MOVD   = new AMD64MROp("MOVD",   0x66, P_0F, 0x7E, OpAssertion.IntToFloatingAssertion, CPUFeature.SSE2);
+        public static final AMD64MROp MOVQ   = new AMD64MROp("MOVQ",   0x66, P_0F, 0x7E, OpAssertion.IntToFloatingAssertion, CPUFeature.SSE2);
+        // @formatter:on
+
+        protected AMD64MROp(String opcode, int op) {
+            this(opcode, 0, op);
+        }
+
+        protected AMD64MROp(String opcode, int op, OpAssertion assertion) {
+            this(opcode, 0, op, assertion);
+        }
+
+        protected AMD64MROp(String opcode, int prefix, int op) {
+            this(opcode, prefix, op, OpAssertion.IntegerAssertion);
+        }
+
+        protected AMD64MROp(String opcode, int prefix, int op, OpAssertion assertion) {
+            this(opcode, 0, prefix, op, assertion, null);
+        }
+
+        protected AMD64MROp(String opcode, int prefix1, int prefix2, int op, OpAssertion assertion, CPUFeature feature) {
+            super(opcode, prefix1, prefix2, op, assertion, feature);
+        }
+
+        @Override
+        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, Register src) {
+            assert verify(asm, size, src, dst);
+            emitOpcode(asm, size, getRXB(src, dst), src.encoding, dst.encoding);
+            asm.emitModRM(src, dst);
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, AMD64Address dst, Register src) {
+            assert verify(asm, size, null, src);
+            emitOpcode(asm, size, getRXB(src, dst), src.encoding, 0);
+            asm.emitOperandHelper(src, dst);
+        }
+    }
+
+    /**
+     * Opcodes with operand order of M.
+     */
+    public static class AMD64MOp extends AMD64Op {
+        // @formatter:off
+        public static final AMD64MOp NOT  = new AMD64MOp("NOT",  0xF7, 2);
+        public static final AMD64MOp NEG  = new AMD64MOp("NEG",  0xF7, 3);
+        public static final AMD64MOp MUL  = new AMD64MOp("MUL",  0xF7, 4);
+        public static final AMD64MOp IMUL = new AMD64MOp("IMUL", 0xF7, 5);
+        public static final AMD64MOp DIV  = new AMD64MOp("DIV",  0xF7, 6);
+        public static final AMD64MOp IDIV = new AMD64MOp("IDIV", 0xF7, 7);
+        // @formatter:on
+
+        private final int ext;
+
+        protected AMD64MOp(String opcode, int op, int ext) {
+            this(opcode, 0, op, ext);
+        }
+
+        protected AMD64MOp(String opcode, int prefix, int op, int ext) {
+            this(opcode, prefix, op, ext, OpAssertion.IntegerAssertion);
+        }
+
+        protected AMD64MOp(String opcode, int prefix, int op, int ext, OpAssertion assertion) {
+            super(opcode, 0, prefix, op, assertion, null);
+            this.ext = ext;
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, Register dst) {
+            assert verify(asm, size, dst, null);
+            emitOpcode(asm, size, getRXB(null, dst), 0, dst.encoding);
+            asm.emitModRM(ext, dst);
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, AMD64Address dst) {
+            assert verify(asm, size, null, null);
+            emitOpcode(asm, size, getRXB(null, dst), 0, 0);
+            asm.emitOperandHelper(ext, dst);
+        }
+    }
+
+    /**
+     * Opcodes with operand order of MI.
+     */
+    public static class AMD64MIOp extends AMD64ImmOp {
+        // @formatter:off
+        public static final AMD64MIOp MOV  = new AMD64MIOp("MOV",  false, 0xC7, 0);
+        public static final AMD64MIOp TEST = new AMD64MIOp("TEST", false, 0xF7, 0);
+        // @formatter:on
+
+        private final int ext;
+
+        protected AMD64MIOp(String opcode, boolean immIsByte, int op, int ext) {
+            this(opcode, immIsByte, 0, op, ext, OpAssertion.IntegerAssertion);
+        }
+
+        protected AMD64MIOp(String opcode, boolean immIsByte, int prefix, int op, int ext, OpAssertion assertion) {
+            super(opcode, immIsByte, prefix, op, assertion);
+            this.ext = ext;
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, int imm) {
+            assert verify(asm, size, dst, null);
+            emitOpcode(asm, size, getRXB(null, dst), 0, dst.encoding);
+            asm.emitModRM(ext, dst);
+            emitImmediate(asm, size, imm);
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, AMD64Address dst, int imm) {
+            assert verify(asm, size, null, null);
+            emitOpcode(asm, size, getRXB(null, dst), 0, 0);
+            asm.emitOperandHelper(ext, dst);
+            emitImmediate(asm, size, imm);
+        }
+    }
+
+    /**
+     * Opcodes with operand order of RMI.
+     */
+    public static class AMD64RMIOp extends AMD64ImmOp {
+        // @formatter:off
+        public static final AMD64RMIOp IMUL    = new AMD64RMIOp("IMUL", false, 0x69);
+        public static final AMD64RMIOp IMUL_SX = new AMD64RMIOp("IMUL", true,  0x6B);
+        // @formatter:on
+
+        protected AMD64RMIOp(String opcode, boolean immIsByte, int op) {
+            this(opcode, immIsByte, 0, op, OpAssertion.IntegerAssertion);
+        }
+
+        protected AMD64RMIOp(String opcode, boolean immIsByte, int prefix, int op, OpAssertion assertion) {
+            super(opcode, immIsByte, prefix, op, assertion);
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, Register src, int imm) {
+            assert verify(asm, size, dst, src);
+            emitOpcode(asm, size, getRXB(dst, src), dst.encoding, src.encoding);
+            asm.emitModRM(dst, src);
+            emitImmediate(asm, size, imm);
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, AMD64Address src, int imm) {
+            assert verify(asm, size, dst, null);
+            emitOpcode(asm, size, getRXB(dst, src), dst.encoding, 0);
+            asm.emitOperandHelper(dst, src);
+            emitImmediate(asm, size, imm);
+        }
+    }
+
+    public static class SSEOp extends AMD64RMOp {
+        // @formatter:off
+        public static final SSEOp CVTSI2SS  = new SSEOp("CVTSI2SS",  0xF3, P_0F, 0x2A, OpAssertion.IntToFloatingAssertion);
+        public static final SSEOp CVTSI2SD  = new SSEOp("CVTSI2SD",  0xF2, P_0F, 0x2A, OpAssertion.IntToFloatingAssertion);
+        public static final SSEOp CVTTSS2SI = new SSEOp("CVTTSS2SI", 0xF3, P_0F, 0x2C, OpAssertion.FloatingToIntAssertion);
+        public static final SSEOp CVTTSD2SI = new SSEOp("CVTTSD2SI", 0xF2, P_0F, 0x2C, OpAssertion.FloatingToIntAssertion);
+        public static final SSEOp UCOMIS    = new SSEOp("UCOMIS",          P_0F, 0x2E, OpAssertion.PackedFloatingAssertion);
+        public static final SSEOp SQRT      = new SSEOp("SQRT",            P_0F, 0x51);
+        public static final SSEOp AND       = new SSEOp("AND",             P_0F, 0x54, OpAssertion.PackedFloatingAssertion);
+        public static final SSEOp ANDN      = new SSEOp("ANDN",            P_0F, 0x55, OpAssertion.PackedFloatingAssertion);
+        public static final SSEOp OR        = new SSEOp("OR",              P_0F, 0x56, OpAssertion.PackedFloatingAssertion);
+        public static final SSEOp XOR       = new SSEOp("XOR",             P_0F, 0x57, OpAssertion.PackedFloatingAssertion);
+        public static final SSEOp ADD       = new SSEOp("ADD",             P_0F, 0x58);
+        public static final SSEOp MUL       = new SSEOp("MUL",             P_0F, 0x59);
+        public static final SSEOp CVTSS2SD  = new SSEOp("CVTSS2SD",        P_0F, 0x5A, OpAssertion.SingleAssertion);
+        public static final SSEOp CVTSD2SS  = new SSEOp("CVTSD2SS",        P_0F, 0x5A, OpAssertion.DoubleAssertion);
+        public static final SSEOp SUB       = new SSEOp("SUB",             P_0F, 0x5C);
+        public static final SSEOp MIN       = new SSEOp("MIN",             P_0F, 0x5D);
+        public static final SSEOp DIV       = new SSEOp("DIV",             P_0F, 0x5E);
+        public static final SSEOp MAX       = new SSEOp("MAX",             P_0F, 0x5F);
+        // @formatter:on
+
+        protected SSEOp(String opcode, int prefix, int op) {
+            this(opcode, prefix, op, OpAssertion.FloatingAssertion);
+        }
+
+        protected SSEOp(String opcode, int prefix, int op, OpAssertion assertion) {
+            this(opcode, 0, prefix, op, assertion);
+        }
+
+        protected SSEOp(String opcode, int mandatoryPrefix, int prefix, int op, OpAssertion assertion) {
+            super(opcode, mandatoryPrefix, prefix, op, assertion, CPUFeature.SSE2);
+        }
+    }
+
+    /**
+     * Arithmetic operation with operand order of RM, MR or MI.
+     */
+    public static final class AMD64BinaryArithmetic {
+        // @formatter:off
+        public static final AMD64BinaryArithmetic ADD = new AMD64BinaryArithmetic("ADD", 0);
+        public static final AMD64BinaryArithmetic OR  = new AMD64BinaryArithmetic("OR",  1);
+        public static final AMD64BinaryArithmetic ADC = new AMD64BinaryArithmetic("ADC", 2);
+        public static final AMD64BinaryArithmetic SBB = new AMD64BinaryArithmetic("SBB", 3);
+        public static final AMD64BinaryArithmetic AND = new AMD64BinaryArithmetic("AND", 4);
+        public static final AMD64BinaryArithmetic SUB = new AMD64BinaryArithmetic("SUB", 5);
+        public static final AMD64BinaryArithmetic XOR = new AMD64BinaryArithmetic("XOR", 6);
+        public static final AMD64BinaryArithmetic CMP = new AMD64BinaryArithmetic("CMP", 7);
+        // @formatter:on
+
+        private final AMD64MIOp byteImmOp;
+        private final AMD64MROp byteMrOp;
+        private final AMD64RMOp byteRmOp;
+
+        private final AMD64MIOp immOp;
+        private final AMD64MIOp immSxOp;
+        private final AMD64MROp mrOp;
+        private final AMD64RMOp rmOp;
+
+        private AMD64BinaryArithmetic(String opcode, int code) {
+            int baseOp = code << 3;
+
+            byteImmOp = new AMD64MIOp(opcode, true, 0, 0x80, code, OpAssertion.ByteAssertion);
+            byteMrOp = new AMD64MROp(opcode, 0, baseOp, OpAssertion.ByteAssertion);
+            byteRmOp = new AMD64RMOp(opcode, 0, baseOp | 0x02, OpAssertion.ByteAssertion);
+
+            immOp = new AMD64MIOp(opcode, false, 0, 0x81, code, OpAssertion.IntegerAssertion);
+            immSxOp = new AMD64MIOp(opcode, true, 0, 0x83, code, OpAssertion.IntegerAssertion);
+            mrOp = new AMD64MROp(opcode, 0, baseOp | 0x01, OpAssertion.IntegerAssertion);
+            rmOp = new AMD64RMOp(opcode, 0, baseOp | 0x03, OpAssertion.IntegerAssertion);
+        }
+
+        public AMD64MIOp getMIOpcode(OperandSize size, boolean sx) {
+            if (size == BYTE) {
+                return byteImmOp;
+            } else if (sx) {
+                return immSxOp;
+            } else {
+                return immOp;
+            }
+        }
+
+        public AMD64MROp getMROpcode(OperandSize size) {
+            if (size == BYTE) {
+                return byteMrOp;
+            } else {
+                return mrOp;
+            }
+        }
+
+        public AMD64RMOp getRMOpcode(OperandSize size) {
+            if (size == BYTE) {
+                return byteRmOp;
+            } else {
+                return rmOp;
+            }
+        }
+    }
+
+    /**
+     * Shift operation with operand order of M1, MC or MI.
+     */
+    public static final class AMD64Shift {
+        // @formatter:off
+        public static final AMD64Shift ROL = new AMD64Shift("ROL", 0);
+        public static final AMD64Shift ROR = new AMD64Shift("ROR", 1);
+        public static final AMD64Shift RCL = new AMD64Shift("RCL", 2);
+        public static final AMD64Shift RCR = new AMD64Shift("RCR", 3);
+        public static final AMD64Shift SHL = new AMD64Shift("SHL", 4);
+        public static final AMD64Shift SHR = new AMD64Shift("SHR", 5);
+        public static final AMD64Shift SAR = new AMD64Shift("SAR", 7);
+        // @formatter:on
+
+        public final AMD64MOp m1Op;
+        public final AMD64MOp mcOp;
+        public final AMD64MIOp miOp;
+
+        private AMD64Shift(String opcode, int code) {
+            m1Op = new AMD64MOp(opcode, 0, 0xD1, code, OpAssertion.IntegerAssertion);
+            mcOp = new AMD64MOp(opcode, 0, 0xD3, code, OpAssertion.IntegerAssertion);
+            miOp = new AMD64MIOp(opcode, true, 0, 0xC1, code, OpAssertion.IntegerAssertion);
+        }
+    }
+
     public final void addl(AMD64Address dst, int imm32) {
-        emitArithImm32(0, dst, imm32);
+        ADD.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
     }
 
     public final void addl(Register dst, int imm32) {
-        emitArithImm32(0, dst, imm32);
-    }
-
-    public final void addl(Register dst, AMD64Address src) {
-        prefix(src, dst);
-        emitByte(0x03);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void addl(Register dst, Register src) {
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x03);
-        emitByte(0xC0 | encode);
+        ADD.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
     }
 
     private void addrNop4() {
@@ -433,98 +975,8 @@
         emitInt(0); // 32-bits offset (4 bytes)
     }
 
-    public final void addsd(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF2);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x58);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void addsd(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF2);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x58);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void addss(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF3);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x58);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void addss(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF3);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x58);
-        emitOperandHelper(dst, src);
-    }
-
     public final void andl(Register dst, int imm32) {
-        emitArithImm32(4, dst, imm32);
-    }
-
-    public final void andl(Register dst, AMD64Address src) {
-        prefix(src, dst);
-        emitByte(0x23);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void andl(Register dst, Register src) {
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x23);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void bsfq(Register dst, Register src) {
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0xBC);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void bsfq(Register dst, AMD64Address src) {
-        prefixq(src, dst);
-        emitByte(0x0F);
-        emitByte(0xBC);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void bsrq(Register dst, Register src) {
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0xBD);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void bsrq(Register dst, AMD64Address src) {
-        prefixq(src, dst);
-        emitByte(0x0F);
-        emitByte(0xBD);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void bsrl(Register dst, Register src) {
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0xBD);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void bsrl(Register dst, AMD64Address src) {
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0xBD);
-        emitOperandHelper(dst, src);
+        AND.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
     }
 
     public final void bswapl(Register reg) {
@@ -551,66 +1003,20 @@
         emitOperandHelper(dst, src);
     }
 
-    public final void cmpb(Register dst, int imm8) {
-        emitArithImm8(7, dst, imm8);
-    }
-
-    public final void cmpb(Register dst, Register src) {
-        int encode = prefixAndEncode(dst.encoding, true, src.encoding, true);
-        emitByte(0x3A);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void cmpb(Register dst, AMD64Address src) {
-        prefix(src, dst, true);
-        emitByte(0x3A);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void cmpb(AMD64Address dst, int imm8) {
-        emitArithImm8(7, dst, imm8);
-    }
-
-    public final void cmpw(Register dst, int imm16) {
-        emitArithImm16(7, dst, imm16);
-    }
-
-    public final void cmpw(Register dst, Register src) {
-        emitByte(0x66);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x3B);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void cmpw(Register dst, AMD64Address src) {
-        emitByte(0x66);
-        prefix(src, dst);
-        emitByte(0x3B);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void cmpw(AMD64Address dst, int imm16) {
-        emitArithImm16(7, dst, imm16);
-    }
-
     public final void cmpl(Register dst, int imm32) {
-        emitArithImm32(7, dst, imm32);
+        CMP.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
     }
 
     public final void cmpl(Register dst, Register src) {
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x3B);
-        emitByte(0xC0 | encode);
+        CMP.rmOp.emit(this, DWORD, dst, src);
     }
 
     public final void cmpl(Register dst, AMD64Address src) {
-        prefix(src, dst);
-        emitByte(0x3B);
-        emitOperandHelper(dst, src);
+        CMP.rmOp.emit(this, DWORD, dst, src);
     }
 
     public final void cmpl(AMD64Address dst, int imm32) {
-        emitArithImm32(7, dst, imm32);
+        CMP.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
     }
 
     // The 32-bit cmpxchg compares the value at adr with the contents of X86.rax,
@@ -623,235 +1029,21 @@
         emitOperandHelper(reg, adr);
     }
 
-    public final void cvtsd2ss(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF2);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x5A);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void cvtsd2ss(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF2);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x5A);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void cvtsi2sdl(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF2);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x2A);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void cvtsi2sdl(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF2);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x2A);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void cvtsi2ssl(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF3);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x2A);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void cvtsi2ssl(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF3);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x2A);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void cvtss2sd(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF3);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x5A);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void cvtss2sd(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF3);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x5A);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void cvttsd2sil(Register dst, AMD64Address src) {
-        emitByte(0xF2);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x2C);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void cvttsd2sil(Register dst, Register src) {
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF2);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x2C);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void cvttss2sil(Register dst, AMD64Address src) {
-        emitByte(0xF3);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x2C);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void cvttss2sil(Register dst, Register src) {
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF3);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x2C);
-        emitByte(0xC0 | encode);
-    }
-
     protected final void decl(AMD64Address dst) {
         prefix(dst);
         emitByte(0xFF);
         emitOperandHelper(1, dst);
     }
 
-    public final void divsd(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF2);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x5E);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void divsd(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF2);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x5E);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void divss(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF3);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x5E);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void divss(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF3);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x5E);
-        emitByte(0xC0 | encode);
-    }
-
     public final void hlt() {
         emitByte(0xF4);
     }
 
-    public final void idivl(Register src) {
-        int encode = prefixAndEncode(7, src.encoding);
-        emitByte(0xF7);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void divl(Register src) {
-        int encode = prefixAndEncode(6, src.encoding);
-        emitByte(0xF7);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void mull(Register src) {
-        int encode = prefixAndEncode(4, src.encoding);
-        emitByte(0xF7);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void mull(AMD64Address src) {
-        prefix(src);
-        emitByte(0xF7);
-        emitOperandHelper(4, src);
-    }
-
-    public final void imull(Register src) {
-        int encode = prefixAndEncode(5, src.encoding);
-        emitByte(0xF7);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void imull(AMD64Address src) {
-        prefix(src);
-        emitByte(0xF7);
-        emitOperandHelper(5, src);
-    }
-
-    public final void imull(Register dst, Register src) {
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0xAF);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void imull(Register dst, AMD64Address src) {
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0xAF);
-        emitOperandHelper(dst, src);
-    }
-
     public final void imull(Register dst, Register src, int value) {
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
         if (isByte(value)) {
-            emitByte(0x6B);
-            emitByte(0xC0 | encode);
-            emitByte(value & 0xFF);
+            AMD64RMIOp.IMUL_SX.emit(this, DWORD, dst, src, value);
         } else {
-            emitByte(0x69);
-            emitByte(0xC0 | encode);
-            emitInt(value);
-        }
-    }
-
-    public final void imull(Register dst, AMD64Address src, int value) {
-        prefix(src, dst);
-        if (isByte(value)) {
-            emitByte(0x6B);
-            emitOperandHelper(dst, src);
-            emitByte(value & 0xFF);
-        } else {
-            emitByte(0x69);
-            emitOperandHelper(dst, src);
-            emitInt(value);
+            AMD64RMIOp.IMUL.emit(this, DWORD, dst, src, value);
         }
     }
 
@@ -1039,25 +1231,6 @@
         emitOperandHelper(src, dst);
     }
 
-    public final void movdl(Register dst, Register src) {
-        if (dst.getRegisterCategory().equals(AMD64.XMM)) {
-            assert !src.getRegisterCategory().equals(AMD64.XMM) : "does this hold?";
-            emitByte(0x66);
-            int encode = prefixAndEncode(dst.encoding, src.encoding);
-            emitByte(0x0F);
-            emitByte(0x6E);
-            emitByte(0xC0 | encode);
-        } else if (src.getRegisterCategory().equals(AMD64.XMM)) {
-            assert !dst.getRegisterCategory().equals(AMD64.XMM);
-            emitByte(0x66);
-            // swap src/dst to get correct prefix
-            int encode = prefixAndEncode(src.encoding, dst.encoding);
-            emitByte(0x0F);
-            emitByte(0x7E);
-            emitByte(0xC0 | encode);
-        }
-    }
-
     public final void movl(Register dst, int imm32) {
         int encode = prefixAndEncode(dst.encoding);
         emitByte(0xB8 | encode);
@@ -1229,27 +1402,6 @@
         emitOperandHelper(dst, src);
     }
 
-    public final void movswl(Register dst, Register src) {
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0xBF);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void movswq(Register dst, AMD64Address src) {
-        prefixq(src, dst);
-        emitByte(0x0F);
-        emitByte(0xBF);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void movswq(Register dst, Register src) {
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0xBF);
-        emitByte(0xC0 | encode);
-    }
-
     public final void movw(AMD64Address dst, int imm16) {
         emitByte(0x66); // switch to 16-bit mode
         prefix(dst);
@@ -1279,99 +1431,11 @@
         emitOperandHelper(dst, src);
     }
 
-    public final void mulsd(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF2);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x59);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void mulsd(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-
-        emitByte(0xF2);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x59);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void mulss(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-
-        emitByte(0xF3);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x59);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void mulss(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF3);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x59);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void negl(Register dst) {
-        int encode = prefixAndEncode(dst.encoding);
-        emitByte(0xF7);
-        emitByte(0xD8 | encode);
-    }
-
-    public final void notl(Register dst) {
-        int encode = prefixAndEncode(dst.encoding);
-        emitByte(0xF7);
-        emitByte(0xD0 | encode);
-    }
-
     @Override
     public final void ensureUniquePC() {
         nop();
     }
 
-    public final void lzcntl(Register dst, Register src) {
-        assert supports(CPUFeature.LZCNT);
-        emitByte(0xF3);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0xBD);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void lzcntq(Register dst, Register src) {
-        assert supports(CPUFeature.LZCNT);
-        emitByte(0xF3);
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0xBD);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void lzcntl(Register dst, AMD64Address src) {
-        assert supports(CPUFeature.LZCNT);
-        emitByte(0xF3);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0xBD);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void lzcntq(Register dst, AMD64Address src) {
-        assert supports(CPUFeature.LZCNT);
-        emitByte(0xF3);
-        prefixq(src, dst);
-        emitByte(0x0F);
-        emitByte(0xBD);
-        emitOperandHelper(dst, src);
-    }
-
     public final void nop() {
         nop(1);
     }
@@ -1578,58 +1642,6 @@
         }
     }
 
-    public final void orl(Register dst, int imm32) {
-        emitArithImm32(1, dst, imm32);
-    }
-
-    public final void orl(Register dst, AMD64Address src) {
-        prefix(src, dst);
-        emitByte(0x0B);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void orl(Register dst, Register src) {
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0B);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void popcntl(Register dst, AMD64Address src) {
-        assert supports(CPUFeature.POPCNT);
-        emitByte(0xF3);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0xB8);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void popcntl(Register dst, Register src) {
-        assert supports(CPUFeature.POPCNT);
-        emitByte(0xF3);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0xB8);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void popcntq(Register dst, AMD64Address src) {
-        assert supports(CPUFeature.POPCNT);
-        emitByte(0xF3);
-        prefixq(src, dst);
-        emitByte(0x0F);
-        emitByte(0xB8);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void popcntq(Register dst, Register src) {
-        assert supports(CPUFeature.POPCNT);
-        emitByte(0xF3);
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0xB8);
-        emitByte(0xC0 | encode);
-    }
-
     public final void pop(Register dst) {
         int encode = prefixAndEncode(dst.encoding);
         emitByte(0x58 | encode);
@@ -1675,218 +1687,12 @@
         }
     }
 
-    public final void sarl(Register dst, int imm8) {
-        int encode = prefixAndEncode(dst.encoding);
-        assert isShiftCount(imm8) : "illegal shift count";
-        if (imm8 == 1) {
-            emitByte(0xD1);
-            emitByte(0xF8 | encode);
-        } else {
-            emitByte(0xC1);
-            emitByte(0xF8 | encode);
-            emitByte(imm8);
-        }
-    }
-
-    public final void sarl(Register dst) {
-        int encode = prefixAndEncode(dst.encoding);
-        emitByte(0xD3);
-        emitByte(0xF8 | encode);
-    }
-
-    public final void shll(Register dst, int imm8) {
-        assert isShiftCount(imm8) : "illegal shift count";
-        int encode = prefixAndEncode(dst.encoding);
-        if (imm8 == 1) {
-            emitByte(0xD1);
-            emitByte(0xE0 | encode);
-        } else {
-            emitByte(0xC1);
-            emitByte(0xE0 | encode);
-            emitByte(imm8);
-        }
-    }
-
-    public final void shll(Register dst) {
-        int encode = prefixAndEncode(dst.encoding);
-        emitByte(0xD3);
-        emitByte(0xE0 | encode);
-    }
-
-    public final void shrl(Register dst, int imm8) {
-        assert isShiftCount(imm8) : "illegal shift count";
-        int encode = prefixAndEncode(dst.encoding);
-        if (imm8 == 1) {
-            emitByte(0xD1);
-            emitByte(0xE8 | encode);
-        } else {
-            emitByte(0xC1);
-            emitByte(0xE8 | encode);
-            emitByte(imm8);
-        }
-    }
-
-    public final void shrl(Register dst) {
-        int encode = prefixAndEncode(dst.encoding);
-        emitByte(0xD3);
-        emitByte(0xE8 | encode);
-    }
-
-    public final void roll(Register dst, int imm8) {
-        assert isShiftCount(imm8) : "illegal shift count";
-        int encode = prefixAndEncode(dst.encoding);
-        if (imm8 == 1) {
-            emitByte(0xD1);
-            emitByte(0xC0 | encode);
-        } else {
-            emitByte(0xC1);
-            emitByte(0xC0 | encode);
-            emitByte(imm8);
-        }
-    }
-
-    public final void roll(Register dst) {
-        int encode = prefixAndEncode(dst.encoding);
-        emitByte(0xD3);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void rorl(Register dst, int imm8) {
-        assert isShiftCount(imm8) : "illegal shift count";
-        int encode = prefixAndEncode(dst.encoding);
-        if (imm8 == 1) {
-            emitByte(0xD1);
-            emitByte(0xC8 | encode);
-        } else {
-            emitByte(0xC1);
-            emitByte(0xC8 | encode);
-            emitByte(imm8);
-        }
-    }
-
-    public final void rorl(Register dst) {
-        int encode = prefixAndEncode(dst.encoding);
-        emitByte(0xD3);
-        emitByte(0xC8 | encode);
-    }
-
-    public final void rolq(Register dst, int imm8) {
-        assert isShiftCount(imm8) : "illegal shift count";
-        int encode = prefixqAndEncode(dst.encoding);
-        if (imm8 == 1) {
-            emitByte(0xD1);
-            emitByte(0xC0 | encode);
-        } else {
-            emitByte(0xC1);
-            emitByte(0xC0 | encode);
-            emitByte(imm8);
-        }
-    }
-
-    public final void rolq(Register dst) {
-        int encode = prefixqAndEncode(dst.encoding);
-        emitByte(0xD3);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void rorq(Register dst, int imm8) {
-        assert isShiftCount(imm8) : "illegal shift count";
-        int encode = prefixqAndEncode(dst.encoding);
-        if (imm8 == 1) {
-            emitByte(0xD1);
-            emitByte(0xC8 | encode);
-        } else {
-            emitByte(0xC1);
-            emitByte(0xC8 | encode);
-            emitByte(imm8);
-        }
-    }
-
-    public final void rorq(Register dst) {
-        int encode = prefixqAndEncode(dst.encoding);
-        emitByte(0xD3);
-        emitByte(0xC8 | encode);
-    }
-
-    public final void sqrtsd(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF2);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x51);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void sqrtsd(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        // HMM Table D-1 says sse2
-        // assert is64 || target.supportsSSE();
-        emitByte(0xF2);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x51);
-        emitByte(0xC0 | encode);
-    }
-
     public final void subl(AMD64Address dst, int imm32) {
-        emitArithImm32(5, dst, imm32);
+        SUB.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
     }
 
     public final void subl(Register dst, int imm32) {
-        emitArithImm32(5, dst, imm32);
-    }
-
-    public final void subl(Register dst, AMD64Address src) {
-        prefix(src, dst);
-        emitByte(0x2B);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void subl(Register dst, Register src) {
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x2B);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void subsd(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF2);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x5C);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void subsd(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-
-        emitByte(0xF2);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x5C);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void subss(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF3);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x5C);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void subss(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-
-        emitByte(0xF3);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x5C);
-        emitOperandHelper(dst, src);
+        SUB.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
     }
 
     public final void testl(Register dst, int imm32) {
@@ -1904,13 +1710,6 @@
         emitInt(imm32);
     }
 
-    public final void testl(AMD64Address dst, int imm32) {
-        prefix(dst);
-        emitByte(0xF7);
-        emitOperandHelper(0, dst);
-        emitInt(imm32);
-    }
-
     public final void testl(Register dst, Register src) {
         int encode = prefixAndEncode(dst.encoding, src.encoding);
         emitByte(0x85);
@@ -1923,139 +1722,8 @@
         emitOperandHelper(dst, src);
     }
 
-    public final void tzcntl(Register dst, Register src) {
-        assert supports(CPUFeature.BMI1);
-        emitByte(0xF3);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0xBC);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void tzcntq(Register dst, Register src) {
-        assert supports(CPUFeature.BMI1);
-        emitByte(0xF3);
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0xBC);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void tzcntl(Register dst, AMD64Address src) {
-        assert supports(CPUFeature.BMI1);
-        emitByte(0xF3);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0xBC);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void tzcntq(Register dst, AMD64Address src) {
-        assert supports(CPUFeature.BMI1);
-        emitByte(0xF3);
-        prefixq(src, dst);
-        emitByte(0x0F);
-        emitByte(0xBC);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void ucomisd(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0x66);
-        ucomiss(dst, src);
-    }
-
-    public final void ucomisd(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0x66);
-        ucomiss(dst, src);
-    }
-
-    public final void ucomiss(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x2E);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void ucomiss(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x2E);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void xorl(Register dst, int imm32) {
-        emitArithImm32(6, dst, imm32);
-    }
-
-    public final void xorl(Register dst, AMD64Address src) {
-        prefix(src, dst);
-        emitByte(0x33);
-        emitOperandHelper(dst, src);
-    }
-
     public final void xorl(Register dst, Register src) {
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x33);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void andpd(Register dst, Register src) {
-        emitByte(0x66);
-        andps(dst, src);
-    }
-
-    public final void andpd(Register dst, AMD64Address src) {
-        emitByte(0x66);
-        andps(dst, src);
-    }
-
-    public final void andps(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x54);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void andps(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x54);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void orpd(Register dst, Register src) {
-        emitByte(0x66);
-        orps(dst, src);
-    }
-
-    public final void orpd(Register dst, AMD64Address src) {
-        emitByte(0x66);
-        orps(dst, src);
-    }
-
-    public final void orps(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x56);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void orps(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x56);
-        emitOperandHelper(dst, src);
+        XOR.rmOp.emit(this, DWORD, dst, src);
     }
 
     public final void xorpd(Register dst, Register src) {
@@ -2063,11 +1731,6 @@
         xorps(dst, src);
     }
 
-    public final void xorpd(Register dst, AMD64Address src) {
-        emitByte(0x66);
-        xorps(dst, src);
-    }
-
     public final void xorps(Register dst, Register src) {
         assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
         int encode = prefixAndEncode(dst.encoding, src.encoding);
@@ -2076,14 +1739,6 @@
         emitByte(0xC0 | encode);
     }
 
-    public final void xorps(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x57);
-        emitOperandHelper(dst, src);
-    }
-
     protected final void decl(Register dst) {
         // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
         int encode = prefixAndEncode(dst.encoding);
@@ -2281,35 +1936,15 @@
     }
 
     public final void addq(Register dst, int imm32) {
-        emitArithImm32q(0, dst, imm32);
-    }
-
-    public final void addq(Register dst, AMD64Address src) {
-        prefixq(src, dst);
-        emitByte(0x03);
-        emitOperandHelper(dst, src);
+        ADD.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
     }
 
     public final void addq(Register dst, Register src) {
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x03);
-        emitByte(0xC0 | encode);
+        ADD.rmOp.emit(this, QWORD, dst, src);
     }
 
     public final void andq(Register dst, int imm32) {
-        emitArithImm32q(4, dst, imm32);
-    }
-
-    public final void andq(Register dst, AMD64Address src) {
-        prefixq(src, dst);
-        emitByte(0x23);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void andq(Register dst, Register src) {
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x23);
-        emitByte(0xC0 | encode);
+        AND.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
     }
 
     public final void bswapq(Register reg) {
@@ -2337,27 +1972,16 @@
         emitOperandHelper(dst, src);
     }
 
-    public final void cmpq(AMD64Address dst, int imm32) {
-        prefixq(dst);
-        emitByte(0x81);
-        emitOperandHelper(7, dst);
-        emitInt(imm32);
-    }
-
     public final void cmpq(Register dst, int imm32) {
-        emitArithImm32q(7, dst, imm32);
+        CMP.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
     }
 
     public final void cmpq(Register dst, Register src) {
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x3B);
-        emitByte(0xC0 | encode);
+        CMP.rmOp.emit(this, QWORD, dst, src);
     }
 
     public final void cmpq(Register dst, AMD64Address src) {
-        prefixq(src, dst);
-        emitByte(0x3B);
-        emitOperandHelper(dst, src);
+        CMP.rmOp.emit(this, QWORD, dst, src);
     }
 
     public final void cmpxchgq(Register reg, AMD64Address adr) {
@@ -2367,76 +1991,6 @@
         emitOperandHelper(reg, adr);
     }
 
-    public final void cvtsi2sdq(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF2);
-        prefixq(src, dst);
-        emitByte(0x0F);
-        emitByte(0x2A);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void cvtsi2sdq(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF2);
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x2A);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void cvtsi2ssq(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF3);
-        prefixq(src, dst);
-        emitByte(0x0F);
-        emitByte(0x2A);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void cvtsi2ssq(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF3);
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x2A);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void cvttsd2siq(Register dst, AMD64Address src) {
-        emitByte(0xF2);
-        prefixq(src, dst);
-        emitByte(0x0F);
-        emitByte(0x2C);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void cvttsd2siq(Register dst, Register src) {
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF2);
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x2C);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void cvttss2siq(Register dst, AMD64Address src) {
-        emitByte(0xF3);
-        prefixq(src, dst);
-        emitByte(0x0F);
-        emitByte(0x2C);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void cvttss2siq(Register dst, Register src) {
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF3);
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x2C);
-        emitByte(0xC0 | encode);
-    }
-
     protected final void decq(Register dst) {
         // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
         int encode = prefixqAndEncode(dst.encoding);
@@ -2444,88 +1998,6 @@
         emitByte(0xC8 | encode);
     }
 
-    protected final void decq(AMD64Address dst) {
-        prefixq(dst);
-        emitByte(0xFF);
-        emitOperandHelper(1, dst);
-    }
-
-    public final void divq(Register src) {
-        int encode = prefixqAndEncode(6, src.encoding);
-        emitByte(0xF7);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void idivq(Register src) {
-        int encode = prefixqAndEncode(7, src.encoding);
-        emitByte(0xF7);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void mulq(Register src) {
-        int encode = prefixqAndEncode(4, src.encoding);
-        emitByte(0xF7);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void mulq(AMD64Address src) {
-        prefixq(src);
-        emitByte(0xF7);
-        emitOperandHelper(4, src);
-    }
-
-    public final void imulq(Register src) {
-        int encode = prefixqAndEncode(5, src.encoding);
-        emitByte(0xF7);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void imulq(AMD64Address src) {
-        prefixq(src);
-        emitByte(0xF7);
-        emitOperandHelper(5, src);
-    }
-
-    public final void imulq(Register dst, Register src) {
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0xAF);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void imulq(Register dst, AMD64Address src) {
-        prefixq(src, dst);
-        emitByte(0x0F);
-        emitByte(0xAF);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void imulq(Register dst, Register src, int value) {
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        if (isByte(value)) {
-            emitByte(0x6B);
-            emitByte(0xC0 | encode);
-            emitByte(value & 0xFF);
-        } else {
-            emitByte(0x69);
-            emitByte(0xC0 | encode);
-            emitInt(value);
-        }
-    }
-
-    public final void imulq(Register dst, AMD64Address src, int value) {
-        prefixq(src, dst);
-        if (isByte(value)) {
-            emitByte(0x6B);
-            emitOperandHelper(dst, src);
-            emitByte(value & 0xFF);
-        } else {
-            emitByte(0x69);
-            emitOperandHelper(dst, src);
-            emitInt(value);
-        }
-    }
-
     public final void incq(Register dst) {
         // Don't use it directly. Use Macroincrementq() instead.
         // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
@@ -2602,47 +2074,6 @@
         emitByte(0xD8 | encode);
     }
 
-    public final void notq(Register dst) {
-        int encode = prefixqAndEncode(dst.encoding);
-        emitByte(0xF7);
-        emitByte(0xD0 | encode);
-    }
-
-    public final void orq(Register dst, int imm32) {
-        emitArithImm32q(1, dst, imm32);
-    }
-
-    public final void orq(Register dst, AMD64Address src) {
-        prefixq(src, dst);
-        emitByte(0x0B);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void orq(Register dst, Register src) {
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0B);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void sarq(Register dst, int imm8) {
-        assert isShiftCount(imm8 >> 1) : "illegal shift count";
-        int encode = prefixqAndEncode(dst.encoding);
-        if (imm8 == 1) {
-            emitByte(0xD1);
-            emitByte(0xF8 | encode);
-        } else {
-            emitByte(0xC1);
-            emitByte(0xF8 | encode);
-            emitByte(imm8);
-        }
-    }
-
-    public final void sarq(Register dst) {
-        int encode = prefixqAndEncode(dst.encoding);
-        emitByte(0xD3);
-        emitByte(0xF8 | encode);
-    }
-
     public final void shlq(Register dst, int imm8) {
         assert isShiftCount(imm8 >> 1) : "illegal shift count";
         int encode = prefixqAndEncode(dst.encoding);
@@ -2656,12 +2087,6 @@
         }
     }
 
-    public final void shlq(Register dst) {
-        int encode = prefixqAndEncode(dst.encoding);
-        emitByte(0xD3);
-        emitByte(0xE0 | encode);
-    }
-
     public final void shrq(Register dst, int imm8) {
         assert isShiftCount(imm8 >> 1) : "illegal shift count";
         int encode = prefixqAndEncode(dst.encoding);
@@ -2675,50 +2100,17 @@
         }
     }
 
-    public final void shrq(Register dst) {
-        int encode = prefixqAndEncode(dst.encoding);
-        emitByte(0xD3);
-        emitByte(0xE8 | encode);
-    }
-
     public final void subq(Register dst, int imm32) {
-        subq(dst, imm32, false);
+        SUB.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
     }
 
     public final void subqWide(Register dst, int imm32) {
-        subq(dst, imm32, true);
-    }
-
-    private void subq(Register dst, int imm32, boolean force32Imm) {
-        emitArithImm32q(5, dst, imm32, force32Imm);
-    }
-
-    public final void subq(Register dst, AMD64Address src) {
-        prefixq(src, dst);
-        emitByte(0x2B);
-        emitOperandHelper(dst, src);
+        // don't use the sign-extending version, forcing a 32-bit immediate
+        SUB.getMIOpcode(QWORD, false).emit(this, QWORD, dst, imm32);
     }
 
     public final void subq(Register dst, Register src) {
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x2B);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void testq(Register dst, int imm32) {
-        // not using emitArith because test
-        // doesn't support sign-extension of
-        // 8bit operands
-        int encode = dst.encoding;
-        if (encode == 0) {
-            emitByte(Prefix.REXW);
-            emitByte(0xA9);
-        } else {
-            encode = prefixqAndEncode(encode);
-            emitByte(0xF7);
-            emitByte(0xC0 | encode);
-        }
-        emitInt(imm32);
+        SUB.rmOp.emit(this, QWORD, dst, src);
     }
 
     public final void testq(Register dst, Register src) {
@@ -2727,19 +2119,6 @@
         emitByte(0xC0 | encode);
     }
 
-    public final void testq(Register dst, AMD64Address src) {
-        prefixq(src, dst);
-        emitByte(0x85);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void testq(AMD64Address dst, int imm32) {
-        prefixq(dst);
-        emitByte(0xF7);
-        emitOperandHelper(0, dst);
-        emitInt(imm32);
-    }
-
     public final void xaddl(AMD64Address dst, Register src) {
         prefix(dst, src);
         emitByte(0x0F);
@@ -2766,22 +2145,6 @@
         emitOperandHelper(dst, src);
     }
 
-    public final void xorq(Register dst, int imm32) {
-        emitArithImm32q(6, dst, imm32);
-    }
-
-    public final void xorq(Register dst, Register src) {
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x33);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void xorq(Register dst, AMD64Address src) {
-        prefixq(src, dst);
-        emitByte(0x33);
-        emitOperandHelper(dst, src);
-    }
-
     public final void membar(int barriers) {
         if (target.isMP) {
             // We only have to handle StoreLoad
--- a/graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64MacroAssembler.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64MacroAssembler.java	Mon Mar 16 16:54:10 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,10 +37,6 @@
         super(target, registerConfig);
     }
 
-    public final void xorptr(Register dst, Register src) {
-        xorq(dst, src);
-    }
-
     public final void decrementq(Register reg, int value) {
         if (value == Integer.MIN_VALUE) {
             subq(reg, value);
@@ -175,10 +171,6 @@
         }
     }
 
-    public final void signExtendShort(Register reg) {
-        movswl(reg, reg);
-    }
-
     public void movflt(Register dst, Register src) {
         assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
         if (UseXmmRegToRegMoveAll) {
--- a/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java	Mon Mar 16 16:54:10 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,9 +24,12 @@
 package com.oracle.graal.compiler.amd64;
 
 import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MOp.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64Shift.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize.*;
 import static com.oracle.graal.lir.amd64.AMD64Arithmetic.*;
-import static com.oracle.graal.lir.amd64.AMD64BitManipulationOp.IntrinsicOpcode.*;
-import static com.oracle.graal.lir.amd64.AMD64Compare.*;
 import static com.oracle.graal.lir.amd64.AMD64MathIntrinsicOp.IntrinsicOpcode.*;
 
 import com.oracle.graal.amd64.*;
@@ -34,7 +37,16 @@
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.asm.*;
 import com.oracle.graal.asm.amd64.AMD64Address.Scale;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MIOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MROp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMIOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64Shift;
 import com.oracle.graal.asm.amd64.AMD64Assembler.ConditionFlag;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.asm.amd64.AMD64Assembler.SSEOp;
 import com.oracle.graal.compiler.common.*;
 import com.oracle.graal.compiler.common.calc.*;
 import com.oracle.graal.compiler.common.spi.*;
@@ -42,21 +54,7 @@
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.JumpOp;
 import com.oracle.graal.lir.amd64.*;
-import com.oracle.graal.lir.amd64.AMD64Arithmetic.BinaryCommutative;
-import com.oracle.graal.lir.amd64.AMD64Arithmetic.BinaryMemory;
-import com.oracle.graal.lir.amd64.AMD64Arithmetic.BinaryRegConst;
-import com.oracle.graal.lir.amd64.AMD64Arithmetic.BinaryRegReg;
-import com.oracle.graal.lir.amd64.AMD64Arithmetic.BinaryRegStack;
-import com.oracle.graal.lir.amd64.AMD64Arithmetic.BinaryRegStackConst;
-import com.oracle.graal.lir.amd64.AMD64Arithmetic.DivRemOp;
 import com.oracle.graal.lir.amd64.AMD64Arithmetic.FPDivRemOp;
-import com.oracle.graal.lir.amd64.AMD64Arithmetic.MulHighOp;
-import com.oracle.graal.lir.amd64.AMD64Arithmetic.Unary1Op;
-import com.oracle.graal.lir.amd64.AMD64Arithmetic.Unary2MemoryOp;
-import com.oracle.graal.lir.amd64.AMD64Arithmetic.Unary2Op;
-import com.oracle.graal.lir.amd64.AMD64Arithmetic.Unary2RegOp;
-import com.oracle.graal.lir.amd64.AMD64Compare.CompareMemoryOp;
-import com.oracle.graal.lir.amd64.AMD64Compare.CompareOp;
 import com.oracle.graal.lir.amd64.AMD64ControlFlow.BranchOp;
 import com.oracle.graal.lir.amd64.AMD64ControlFlow.CondMoveOp;
 import com.oracle.graal.lir.amd64.AMD64ControlFlow.FloatBranchOp;
@@ -291,41 +289,61 @@
 
     private void emitIntegerTest(Value a, Value b) {
         assert a.getKind().isNumericInteger();
-        if (LIRValueUtil.isVariable(b)) {
-            append(new AMD64TestOp(load(b), loadNonConst(a)));
+        OperandSize size = a.getKind() == Kind.Long ? QWORD : DWORD;
+        if (isConstant(b)) {
+            append(new AMD64CompareConstOp(AMD64MIOp.TEST, size, asAllocatable(a), asConstant(b)));
+        } else if (isConstant(a)) {
+            append(new AMD64CompareConstOp(AMD64MIOp.TEST, size, asAllocatable(b), asConstant(a)));
+        } else if (isAllocatableValue(b)) {
+            append(new AMD64CompareOp(AMD64RMOp.TEST, size, asAllocatable(b), asAllocatable(a)));
         } else {
-            append(new AMD64TestOp(load(a), loadNonConst(b)));
+            append(new AMD64CompareOp(AMD64RMOp.TEST, size, asAllocatable(a), asAllocatable(b)));
         }
     }
 
     protected void emitCompareOp(PlatformKind cmpKind, Variable left, Value right) {
+        OperandSize size;
         switch ((Kind) cmpKind) {
             case Byte:
             case Boolean:
-                append(new CompareOp(BCMP, left, right));
+                size = BYTE;
                 break;
             case Short:
             case Char:
-                append(new CompareOp(SCMP, left, right));
+                size = WORD;
                 break;
             case Int:
-                append(new CompareOp(ICMP, left, right));
+                size = DWORD;
                 break;
             case Long:
-                append(new CompareOp(LCMP, left, right));
-                break;
             case Object:
-                append(new CompareOp(ACMP, left, right));
+                size = QWORD;
                 break;
             case Float:
-                append(new CompareOp(FCMP, left, right));
-                break;
+                append(new AMD64CompareOp(SSEOp.UCOMIS, PS, left, asAllocatable(right)));
+                return;
             case Double:
-                append(new CompareOp(DCMP, left, right));
-                break;
+                append(new AMD64CompareOp(SSEOp.UCOMIS, PD, left, asAllocatable(right)));
+                return;
             default:
-                throw GraalInternalError.shouldNotReachHere();
+                throw GraalInternalError.shouldNotReachHere("unexpected kind: " + cmpKind);
         }
+
+        if (isConstant(right)) {
+            JavaConstant c = asConstant(right);
+            if (c.isDefaultForKind()) {
+                AMD64RMOp op = size == BYTE ? TESTB : TEST;
+                append(new AMD64CompareOp(op, size, left, left));
+                return;
+            } else if (NumUtil.is32bit(c.asLong())) {
+                AMD64MIOp op = CMP.getMIOpcode(size, NumUtil.isByte(c.asLong()));
+                append(new AMD64CompareConstOp(op, size, left, c));
+                return;
+            }
+        }
+
+        AMD64RMOp op = CMP.getRMOpcode(size);
+        append(new AMD64CompareOp(op, size, left, asAllocatable(right)));
     }
 
     /**
@@ -336,74 +354,54 @@
      * @return true if the left and right operands were switched, false otherwise
      */
     private boolean emitCompareMemory(Kind cmpKind, Value a, AMD64AddressValue b, LIRFrameState state) {
-        boolean mirrored;
-        if (LIRValueUtil.isVariable(a)) {
-            Variable left = load(a);
-            emitCompareRegMemoryOp(cmpKind, left, b, state);
-            mirrored = false;
-        } else {
-            emitCompareMemoryConOp(cmpKind, b, (JavaConstant) a, state);
-            mirrored = true;
-        }
-        return mirrored;
-    }
-
-    protected void emitCompareMemoryConOp(Kind kind, AMD64AddressValue address, JavaConstant value, LIRFrameState state) {
-        assert kind.getStackKind() == value.getKind().getStackKind();
-        switch (kind) {
+        OperandSize size;
+        switch (cmpKind) {
             case Byte:
             case Boolean:
-                append(new CompareMemoryOp(BCMP, kind, address, value, state));
+                size = BYTE;
                 break;
             case Short:
             case Char:
-                append(new CompareMemoryOp(SCMP, kind, address, value, state));
+                size = WORD;
                 break;
             case Int:
-                append(new CompareMemoryOp(ICMP, kind, address, value, state));
+                size = DWORD;
                 break;
             case Long:
-                append(new CompareMemoryOp(LCMP, kind, address, value, state));
+            case Object:
+                size = QWORD;
                 break;
-            case Object:
-                assert value.isNull();
-                append(new CompareMemoryOp(ACMP, kind, address, value, state));
-                break;
+            case Float:
+                append(new AMD64CompareMemoryOp(SSEOp.UCOMIS, PS, asAllocatable(a), b, state));
+                return false;
+            case Double:
+                append(new AMD64CompareMemoryOp(SSEOp.UCOMIS, PD, asAllocatable(a), b, state));
+                return false;
             default:
-                throw GraalInternalError.shouldNotReachHere();
+                throw GraalInternalError.shouldNotReachHere("unexpected kind: " + cmpKind);
+        }
+
+        if (isConstant(a)) {
+            return emitCompareMemoryConOp(size, asConstant(a), b, state);
+        } else {
+            return emitCompareRegMemoryOp(size, a, b, state);
         }
     }
 
-    protected void emitCompareRegMemoryOp(Kind kind, Value value, AMD64AddressValue address, LIRFrameState state) {
-        AMD64Compare opcode = null;
-        switch (kind) {
-            case Byte:
-            case Boolean:
-                opcode = BCMP;
-                break;
-            case Short:
-            case Char:
-                opcode = SCMP;
-                break;
-            case Int:
-                opcode = ICMP;
-                break;
-            case Long:
-                opcode = LCMP;
-                break;
-            case Object:
-                opcode = ACMP;
-                break;
-            case Float:
-                opcode = FCMP;
-                break;
-            case Double:
-                opcode = DCMP;
-                break;
-            default:
-                throw GraalInternalError.shouldNotReachHere();
+    protected boolean emitCompareMemoryConOp(OperandSize size, JavaConstant a, AMD64AddressValue b, LIRFrameState state) {
+        if (NumUtil.is32bit(a.asLong())) {
+            AMD64MIOp op = CMP.getMIOpcode(size, NumUtil.isByte(a.asLong()));
+            append(new AMD64CompareMemoryConstOp(op, size, b, a, state));
+            return true;
+        } else {
+            return emitCompareRegMemoryOp(size, a, b, state);
         }
-        append(new CompareMemoryOp(opcode, kind, address, value, state));
+    }
+
+    private boolean emitCompareRegMemoryOp(OperandSize size, Value a, AMD64AddressValue b, LIRFrameState state) {
+        AMD64RMOp op = CMP.getRMOpcode(size);
+        append(new AMD64CompareMemoryOp(op, size, asAllocatable(a), b, state));
+        return false;
     }
 
     /**
@@ -437,16 +435,16 @@
         Variable result = newVariable(LIRKind.derive(input));
         switch (input.getKind()) {
             case Int:
-                append(new Unary1Op(INEG, result, input));
+                append(new AMD64UnaryMOp(NEG, DWORD, result, input));
                 break;
             case Long:
-                append(new Unary1Op(LNEG, result, input));
+                append(new AMD64UnaryMOp(NEG, QWORD, result, input));
                 break;
             case Float:
-                append(new BinaryRegConst(FXOR, result, input, JavaConstant.forFloat(Float.intBitsToFloat(0x80000000))));
+                append(new AMD64BinaryPatchOp(SSEOp.XOR, PS, result, input, JavaConstant.forFloat(Float.intBitsToFloat(0x80000000)), 16));
                 break;
             case Double:
-                append(new BinaryRegConst(DXOR, result, input, JavaConstant.forDouble(Double.longBitsToDouble(0x8000000000000000L))));
+                append(new AMD64BinaryPatchOp(SSEOp.XOR, PD, result, input, JavaConstant.forDouble(Double.longBitsToDouble(0x8000000000000000L)), 16));
                 break;
             default:
                 throw GraalInternalError.shouldNotReachHere();
@@ -460,10 +458,10 @@
         Variable result = newVariable(LIRKind.derive(input));
         switch (input.getKind()) {
             case Int:
-                append(new Unary1Op(INOT, result, input));
+                append(new AMD64UnaryMOp(NOT, DWORD, result, input));
                 break;
             case Long:
-                append(new Unary1Op(LNOT, result, input));
+                append(new AMD64UnaryMOp(NOT, QWORD, result, input));
                 break;
             default:
                 throw GraalInternalError.shouldNotReachHere();
@@ -471,54 +469,48 @@
         return result;
     }
 
-    private Variable emitBinary(AMD64Arithmetic op, boolean commutative, Value a, Value b) {
+    private Variable emitBinary(AMD64BinaryArithmetic op, OperandSize size, boolean commutative, Value a, Value b) {
         if (isConstant(b)) {
-            return emitBinaryConst(op, commutative, asAllocatable(a), asConstant(b));
+            return emitBinaryConst(op, size, commutative, asAllocatable(a), asConstant(b));
         } else if (commutative && isConstant(a)) {
-            return emitBinaryConst(op, commutative, asAllocatable(b), asConstant(a));
+            return emitBinaryConst(op, size, commutative, asAllocatable(b), asConstant(a));
         } else {
-            return emitBinaryVar(op, commutative, asAllocatable(a), asAllocatable(b));
+            return emitBinaryVar(op.getRMOpcode(size), size, commutative, asAllocatable(a), asAllocatable(b));
+        }
+    }
+
+    private Variable emitBinary(AMD64RMOp op, OperandSize size, boolean commutative, Value a, Value b) {
+        if (isConstant(b)) {
+            return emitBinaryConst(op, size, asAllocatable(a), asConstant(b));
+        } else if (commutative && isConstant(a)) {
+            return emitBinaryConst(op, size, asAllocatable(b), asConstant(a));
+        } else {
+            return emitBinaryVar(op, size, commutative, asAllocatable(a), asAllocatable(b));
         }
     }
 
-    private Variable emitBinaryConst(AMD64Arithmetic op, boolean commutative, AllocatableValue a, JavaConstant b) {
-        switch (op) {
-            case IADD:
-            case LADD:
-            case ISUB:
-            case LSUB:
-            case IAND:
-            case LAND:
-            case IOR:
-            case LOR:
-            case IXOR:
-            case LXOR:
-                if (NumUtil.isInt(b.asLong())) {
-                    Variable result = newVariable(LIRKind.derive(a, b));
-                    append(new BinaryRegConst(op, result, a, b));
-                    return result;
-                }
-                break;
-
-            case IMUL:
-            case LMUL:
-                if (NumUtil.isInt(b.asLong())) {
-                    Variable result = newVariable(LIRKind.derive(a, b));
-                    append(new BinaryRegStackConst(op, result, a, b));
-                    return result;
-                }
-                break;
+    private Variable emitBinaryConst(AMD64BinaryArithmetic op, OperandSize size, boolean commutative, AllocatableValue a, JavaConstant b) {
+        if (NumUtil.isInt(b.asLong())) {
+            Variable result = newVariable(LIRKind.derive(a, b));
+            append(new AMD64BinaryConstOp(op, size, result, a, b));
+            return result;
+        } else {
+            return emitBinaryVar(op.getRMOpcode(size), size, commutative, a, asAllocatable(b));
         }
-
-        return emitBinaryVar(op, commutative, a, asAllocatable(b));
     }
 
-    private Variable emitBinaryVar(AMD64Arithmetic op, boolean commutative, AllocatableValue a, AllocatableValue b) {
+    private Variable emitBinaryConst(AMD64RMOp op, OperandSize size, AllocatableValue a, JavaConstant b) {
+        Variable result = newVariable(LIRKind.derive(a, b));
+        append(new AMD64BinaryPatchOp(op, size, result, a, b));
+        return result;
+    }
+
+    private Variable emitBinaryVar(AMD64RMOp op, OperandSize size, boolean commutative, AllocatableValue a, AllocatableValue b) {
         Variable result = newVariable(LIRKind.derive(a, b));
         if (commutative) {
-            append(new BinaryCommutative(op, result, a, b));
+            append(new AMD64BinaryCommutativeOp(op, size, result, a, b));
         } else {
-            append(new BinaryRegStack(op, result, a, b));
+            append(new AMD64BinaryOp(op, size, result, a, b));
         }
         return result;
     }
@@ -527,13 +519,13 @@
     public Variable emitAdd(Value a, Value b, boolean setFlags) {
         switch (a.getKind().getStackKind()) {
             case Int:
-                return emitBinary(IADD, true, a, b);
+                return emitBinary(ADD, DWORD, true, a, b);
             case Long:
-                return emitBinary(LADD, true, a, b);
+                return emitBinary(ADD, QWORD, true, a, b);
             case Float:
-                return emitBinary(FADD, true, a, b);
+                return emitBinary(SSEOp.ADD, SS, true, a, b);
             case Double:
-                return emitBinary(DADD, true, a, b);
+                return emitBinary(SSEOp.ADD, SD, true, a, b);
             default:
                 throw GraalInternalError.shouldNotReachHere();
         }
@@ -543,48 +535,80 @@
     public Variable emitSub(Value a, Value b, boolean setFlags) {
         switch (a.getKind().getStackKind()) {
             case Int:
-                return emitBinary(ISUB, false, a, b);
+                return emitBinary(SUB, DWORD, false, a, b);
             case Long:
-                return emitBinary(LSUB, false, a, b);
+                return emitBinary(SUB, QWORD, false, a, b);
             case Float:
-                return emitBinary(FSUB, false, a, b);
+                return emitBinary(SSEOp.SUB, SS, false, a, b);
             case Double:
-                return emitBinary(DSUB, false, a, b);
+                return emitBinary(SSEOp.SUB, SD, false, a, b);
             default:
                 throw GraalInternalError.shouldNotReachHere();
         }
     }
 
+    private Variable emitIMULConst(OperandSize size, AllocatableValue a, JavaConstant b) {
+        if (NumUtil.isInt(b.asLong())) {
+            int imm = (int) b.asLong();
+            AMD64RMIOp op;
+            if (NumUtil.isByte(imm)) {
+                op = AMD64RMIOp.IMUL_SX;
+            } else {
+                op = AMD64RMIOp.IMUL;
+            }
+
+            Variable ret = newVariable(LIRKind.derive(a, b));
+            append(new AMD64MulConstOp(op, size, ret, a, b));
+            return ret;
+        } else {
+            return emitBinaryVar(AMD64RMOp.IMUL, size, true, a, asAllocatable(b));
+        }
+    }
+
+    private Variable emitIMUL(OperandSize size, Value a, Value b) {
+        if (isConstant(b)) {
+            return emitIMULConst(size, asAllocatable(a), asConstant(b));
+        } else if (isConstant(a)) {
+            return emitIMULConst(size, asAllocatable(b), asConstant(a));
+        } else {
+            return emitBinaryVar(AMD64RMOp.IMUL, size, true, asAllocatable(a), asAllocatable(b));
+        }
+    }
+
     @Override
     public Variable emitMul(Value a, Value b, boolean setFlags) {
         switch (a.getKind().getStackKind()) {
             case Int:
-                return emitBinary(IMUL, true, a, b);
+                return emitIMUL(DWORD, a, b);
             case Long:
-                return emitBinary(LMUL, true, a, b);
+                return emitIMUL(QWORD, a, b);
             case Float:
-                return emitBinary(FMUL, true, a, b);
+                return emitBinary(SSEOp.MUL, SS, true, a, b);
             case Double:
-                return emitBinary(DMUL, true, a, b);
+                return emitBinary(SSEOp.MUL, SD, true, a, b);
             default:
                 throw GraalInternalError.shouldNotReachHere();
         }
     }
 
-    private Value emitMulHigh(AMD64Arithmetic opcode, Value a, Value b) {
-        MulHighOp mulHigh = new MulHighOp(opcode, LIRKind.derive(a, b), asAllocatable(b));
-        emitMove(mulHigh.x, a);
-        append(mulHigh);
-        return emitMove(mulHigh.highResult);
+    private RegisterValue moveToReg(Register reg, Value v) {
+        RegisterValue ret = reg.asValue(v.getLIRKind());
+        emitMove(ret, v);
+        return ret;
+    }
+
+    private Value emitMulHigh(AMD64MOp opcode, OperandSize size, Value a, Value b) {
+        AMD64MulDivOp mulHigh = append(new AMD64MulDivOp(opcode, size, LIRKind.derive(a, b), moveToReg(AMD64.rax, a), asAllocatable(b)));
+        return emitMove(mulHigh.getHighResult());
     }
 
     @Override
     public Value emitMulHigh(Value a, Value b) {
         switch (a.getKind().getStackKind()) {
             case Int:
-                return emitMulHigh(IMUL, a, b);
+                return emitMulHigh(AMD64MOp.IMUL, DWORD, a, b);
             case Long:
-                return emitMulHigh(LMUL, a, b);
+                return emitMulHigh(AMD64MOp.IMUL, QWORD, a, b);
             default:
                 throw GraalInternalError.shouldNotReachHere();
         }
@@ -594,23 +618,23 @@
     public Value emitUMulHigh(Value a, Value b) {
         switch (a.getKind().getStackKind()) {
             case Int:
-                return emitMulHigh(IUMUL, a, b);
+                return emitMulHigh(AMD64MOp.MUL, DWORD, a, b);
             case Long:
-                return emitMulHigh(LUMUL, a, b);
+                return emitMulHigh(AMD64MOp.MUL, QWORD, a, b);
             default:
                 throw GraalInternalError.shouldNotReachHere();
         }
     }
 
-    public Value emitBinaryMemory(AMD64Arithmetic op, Kind kind, AllocatableValue a, AMD64AddressValue location, LIRFrameState state) {
+    public Value emitBinaryMemory(AMD64RMOp op, OperandSize size, AllocatableValue a, AMD64AddressValue location, LIRFrameState state) {
         Variable result = newVariable(LIRKind.derive(a));
-        append(new BinaryMemory(op, kind, result, a, location, state));
+        append(new AMD64BinaryMemoryOp(op, size, result, a, location, state));
         return result;
     }
 
-    protected Value emitConvert2MemoryOp(PlatformKind kind, AMD64Arithmetic op, AMD64AddressValue address, LIRFrameState state) {
+    protected Value emitConvertMemoryOp(PlatformKind kind, AMD64RMOp op, OperandSize size, AMD64AddressValue address, LIRFrameState state) {
         Variable result = newVariable(LIRKind.value(kind));
-        append(new Unary2MemoryOp(op, result, (Kind) null, address, state));
+        append(new AMD64UnaryMemoryOp(op, size, result, address, state));
         return result;
     }
 
@@ -622,48 +646,50 @@
         return result;
     }
 
-    private DivRemOp emitDivRem(AMD64Arithmetic op, Value a, Value b, LIRFrameState state) {
-        AllocatableValue rax = AMD64.rax.asValue(a.getLIRKind());
-        emitMove(rax, a);
-        DivRemOp ret = new DivRemOp(op, rax, asAllocatable(b), state);
-        append(ret);
-        return ret;
+    private AMD64MulDivOp emitIDIV(OperandSize size, Value a, Value b, LIRFrameState state) {
+        LIRKind kind = LIRKind.derive(a, b);
+
+        AMD64SignExtendOp sx = append(new AMD64SignExtendOp(size, kind, moveToReg(AMD64.rax, a)));
+        return append(new AMD64MulDivOp(AMD64MOp.IDIV, size, kind, sx.getHighResult(), sx.getLowResult(), asAllocatable(b), state));
+    }
+
+    private AMD64MulDivOp emitDIV(OperandSize size, Value a, Value b, LIRFrameState state) {
+        LIRKind kind = LIRKind.derive(a, b);
+
+        RegisterValue rax = moveToReg(AMD64.rax, a);
+        RegisterValue rdx = AMD64.rdx.asValue(kind);
+        append(new AMD64ClearRegisterOp(size, rdx));
+        return append(new AMD64MulDivOp(AMD64MOp.DIV, size, kind, rdx, rax, asAllocatable(b), state));
     }
 
     public Value[] emitIntegerDivRem(Value a, Value b, LIRFrameState state) {
-        DivRemOp op;
+        AMD64MulDivOp op;
         switch (a.getKind().getStackKind()) {
             case Int:
-                op = emitDivRem(IDIVREM, a, b, state);
+                op = emitIDIV(DWORD, a, b, state);
                 break;
             case Long:
-                op = emitDivRem(LDIVREM, a, b, state);
+                op = emitIDIV(QWORD, a, b, state);
                 break;
             default:
                 throw GraalInternalError.shouldNotReachHere();
         }
-        return new Value[]{emitMove(op.divResult), emitMove(op.remResult)};
+        return new Value[]{emitMove(op.getQuotient()), emitMove(op.getRemainder())};
     }
 
     @Override
     public Value emitDiv(Value a, Value b, LIRFrameState state) {
         switch (a.getKind().getStackKind()) {
             case Int:
-                DivRemOp op = emitDivRem(IDIV, a, b, state);
-                return emitMove(op.divResult);
+                AMD64MulDivOp op = emitIDIV(DWORD, a, b, state);
+                return emitMove(op.getQuotient());
             case Long:
-                DivRemOp lop = emitDivRem(LDIV, a, b, state);
-                return emitMove(lop.divResult);
-            case Float: {
-                Variable result = newVariable(LIRKind.derive(a, b));
-                append(new BinaryRegStack(FDIV, result, asAllocatable(a), asAllocatable(b)));
-                return result;
-            }
-            case Double: {
-                Variable result = newVariable(LIRKind.derive(a, b));
-                append(new BinaryRegStack(DDIV, result, asAllocatable(a), asAllocatable(b)));
-                return result;
-            }
+                AMD64MulDivOp lop = emitIDIV(QWORD, a, b, state);
+                return emitMove(lop.getQuotient());
+            case Float:
+                return emitBinary(SSEOp.DIV, SS, false, a, b);
+            case Double:
+                return emitBinary(SSEOp.DIV, SD, false, a, b);
             default:
                 throw GraalInternalError.shouldNotReachHere();
         }
@@ -673,11 +699,11 @@
     public Value emitRem(Value a, Value b, LIRFrameState state) {
         switch (a.getKind().getStackKind()) {
             case Int:
-                DivRemOp op = emitDivRem(IREM, a, b, state);
-                return emitMove(op.remResult);
+                AMD64MulDivOp op = emitIDIV(DWORD, a, b, state);
+                return emitMove(op.getRemainder());
             case Long:
-                DivRemOp lop = emitDivRem(LREM, a, b, state);
-                return emitMove(lop.remResult);
+                AMD64MulDivOp lop = emitIDIV(QWORD, a, b, state);
+                return emitMove(lop.getRemainder());
             case Float: {
                 Variable result = newVariable(LIRKind.derive(a, b));
                 append(new FPDivRemOp(FREM, result, load(a), load(b)));
@@ -695,43 +721,47 @@
 
     @Override
     public Variable emitUDiv(Value a, Value b, LIRFrameState state) {
-        DivRemOp op;
+        AMD64MulDivOp op;
         switch (a.getKind().getStackKind()) {
             case Int:
-                op = emitDivRem(IUDIV, a, b, state);
+                op = emitDIV(DWORD, a, b, state);
                 break;
             case Long:
-                op = emitDivRem(LUDIV, a, b, state);
+                op = emitDIV(QWORD, a, b, state);
                 break;
             default:
                 throw GraalInternalError.shouldNotReachHere();
         }
-        return emitMove(op.divResult);
+        return emitMove(op.getQuotient());
     }
 
     @Override
     public Variable emitURem(Value a, Value b, LIRFrameState state) {
-        DivRemOp op;
+        AMD64MulDivOp op;
         switch (a.getKind().getStackKind()) {
             case Int:
-                op = emitDivRem(IUREM, a, b, state);
+                op = emitDIV(DWORD, a, b, state);
                 break;
             case Long:
-                op = emitDivRem(LUREM, a, b, state);
+                op = emitDIV(QWORD, a, b, state);
                 break;
             default:
                 throw GraalInternalError.shouldNotReachHere();
         }
-        return emitMove(op.remResult);
+        return emitMove(op.getRemainder());
     }
 
     @Override
     public Variable emitAnd(Value a, Value b) {
         switch (a.getKind().getStackKind()) {
             case Int:
-                return emitBinary(IAND, true, a, b);
+                return emitBinary(AND, DWORD, true, a, b);
             case Long:
-                return emitBinary(LAND, true, a, b);
+                return emitBinary(AND, QWORD, true, a, b);
+            case Float:
+                return emitBinary(SSEOp.AND, PS, true, a, b);
+            case Double:
+                return emitBinary(SSEOp.AND, PD, true, a, b);
             default:
                 throw GraalInternalError.shouldNotReachHere();
         }
@@ -741,9 +771,13 @@
     public Variable emitOr(Value a, Value b) {
         switch (a.getKind().getStackKind()) {
             case Int:
-                return emitBinary(IOR, true, a, b);
+                return emitBinary(OR, DWORD, true, a, b);
             case Long:
-                return emitBinary(LOR, true, a, b);
+                return emitBinary(OR, QWORD, true, a, b);
+            case Float:
+                return emitBinary(SSEOp.OR, PS, true, a, b);
+            case Double:
+                return emitBinary(SSEOp.OR, PD, true, a, b);
             default:
                 throw GraalInternalError.shouldNotReachHere();
         }
@@ -753,22 +787,31 @@
     public Variable emitXor(Value a, Value b) {
         switch (a.getKind().getStackKind()) {
             case Int:
-                return emitBinary(IXOR, true, a, b);
+                return emitBinary(XOR, DWORD, true, a, b);
             case Long:
-                return emitBinary(LXOR, true, a, b);
+                return emitBinary(XOR, QWORD, true, a, b);
+            case Float:
+                return emitBinary(SSEOp.XOR, PS, true, a, b);
+            case Double:
+                return emitBinary(SSEOp.XOR, PD, true, a, b);
             default:
                 throw GraalInternalError.shouldNotReachHere();
         }
     }
 
-    private Variable emitShift(AMD64Arithmetic op, Value a, Value b) {
+    private Variable emitShift(AMD64Shift op, OperandSize size, Value a, Value b) {
         Variable result = newVariable(LIRKind.derive(a, b).changeType(a.getPlatformKind()));
         AllocatableValue input = asAllocatable(a);
         if (isConstant(b)) {
-            append(new BinaryRegConst(op, result, input, asConstant(b)));
+            JavaConstant c = asConstant(b);
+            if (c.asLong() == 1) {
+                append(new AMD64UnaryMOp(op.m1Op, size, result, input));
+            } else {
+                append(new AMD64BinaryConstOp(op.miOp, size, result, input, c));
+            }
         } else {
             emitMove(RCX_I, b);
-            append(new BinaryRegReg(op, result, input, RCX_I));
+            append(new AMD64ShiftOp(op.mcOp, size, result, input, RCX_I));
         }
         return result;
     }
@@ -777,9 +820,9 @@
     public Variable emitShl(Value a, Value b) {
         switch (a.getKind().getStackKind()) {
             case Int:
-                return emitShift(ISHL, a, b);
+                return emitShift(SHL, DWORD, a, b);
             case Long:
-                return emitShift(LSHL, a, b);
+                return emitShift(SHL, QWORD, a, b);
             default:
                 throw GraalInternalError.shouldNotReachHere();
         }
@@ -789,9 +832,9 @@
     public Variable emitShr(Value a, Value b) {
         switch (a.getKind().getStackKind()) {
             case Int:
-                return emitShift(ISHR, a, b);
+                return emitShift(SAR, DWORD, a, b);
             case Long:
-                return emitShift(LSHR, a, b);
+                return emitShift(SAR, QWORD, a, b);
             default:
                 throw GraalInternalError.shouldNotReachHere();
         }
@@ -801,9 +844,9 @@
     public Variable emitUShr(Value a, Value b) {
         switch (a.getKind().getStackKind()) {
             case Int:
-                return emitShift(IUSHR, a, b);
+                return emitShift(SHR, DWORD, a, b);
             case Long:
-                return emitShift(LUSHR, a, b);
+                return emitShift(SHR, QWORD, a, b);
             default:
                 throw GraalInternalError.shouldNotReachHere();
         }
@@ -812,9 +855,9 @@
     public Variable emitRol(Value a, Value b) {
         switch (a.getKind().getStackKind()) {
             case Int:
-                return emitShift(IROL, a, b);
+                return emitShift(ROL, DWORD, a, b);
             case Long:
-                return emitShift(LROL, a, b);
+                return emitShift(ROL, QWORD, a, b);
             default:
                 throw GraalInternalError.shouldNotReachHere();
         }
@@ -823,23 +866,23 @@
     public Variable emitRor(Value a, Value b) {
         switch (a.getKind().getStackKind()) {
             case Int:
-                return emitShift(IROR, a, b);
+                return emitShift(ROR, DWORD, a, b);
             case Long:
-                return emitShift(LROR, a, b);
+                return emitShift(ROR, QWORD, a, b);
             default:
                 throw GraalInternalError.shouldNotReachHere();
         }
     }
 
-    private AllocatableValue emitConvert2RegOp(LIRKind kind, AMD64Arithmetic op, AllocatableValue input) {
+    private AllocatableValue emitConvertOp(LIRKind kind, AMD64RMOp op, OperandSize size, Value input) {
         Variable result = newVariable(kind);
-        append(new Unary2RegOp(op, result, input));
+        append(new AMD64UnaryRMOp(op, size, result, asAllocatable(input)));
         return result;
     }
 
-    private AllocatableValue emitConvert2Op(LIRKind kind, AMD64Arithmetic op, AllocatableValue input) {
+    private AllocatableValue emitConvertOp(LIRKind kind, AMD64MROp op, OperandSize size, Value input) {
         Variable result = newVariable(kind);
-        append(new Unary2Op(op, result, input));
+        append(new AMD64UnaryMROp(op, size, result, asAllocatable(input)));
         return result;
     }
 
@@ -860,54 +903,53 @@
             case Int:
                 switch (fromKind) {
                     case Float:
-                        return emitConvert2Op(to, MOV_F2I, input);
+                        return emitConvertOp(to, AMD64MROp.MOVD, DWORD, input);
                 }
                 break;
             case Long:
                 switch (fromKind) {
                     case Double:
-                        return emitConvert2Op(to, MOV_D2L, input);
+                        return emitConvertOp(to, AMD64MROp.MOVQ, QWORD, input);
                 }
                 break;
             case Float:
                 switch (fromKind) {
                     case Int:
-                        return emitConvert2Op(to, MOV_I2F, input);
+                        return emitConvertOp(to, AMD64RMOp.MOVD, DWORD, input);
                 }
                 break;
             case Double:
                 switch (fromKind) {
                     case Long:
-                        return emitConvert2Op(to, MOV_L2D, input);
+                        return emitConvertOp(to, AMD64RMOp.MOVQ, QWORD, input);
                 }
                 break;
         }
         throw GraalInternalError.shouldNotReachHere();
     }
 
-    public Value emitFloatConvert(FloatConvert op, Value inputVal) {
-        AllocatableValue input = asAllocatable(inputVal);
+    public Value emitFloatConvert(FloatConvert op, Value input) {
         switch (op) {
             case D2F:
-                return emitConvert2Op(LIRKind.derive(inputVal).changeType(Kind.Float), D2F, input);
+                return emitConvertOp(LIRKind.derive(input).changeType(Kind.Float), SSEOp.CVTSD2SS, SD, input);
             case D2I:
-                return emitConvert2Op(LIRKind.derive(inputVal).changeType(Kind.Int), D2I, input);
+                return emitConvertOp(LIRKind.derive(input).changeType(Kind.Int), SSEOp.CVTTSD2SI, DWORD, input);
             case D2L:
-                return emitConvert2Op(LIRKind.derive(inputVal).changeType(Kind.Long), D2L, input);
+                return emitConvertOp(LIRKind.derive(input).changeType(Kind.Long), SSEOp.CVTTSD2SI, QWORD, input);
             case F2D:
-                return emitConvert2Op(LIRKind.derive(inputVal).changeType(Kind.Double), F2D, input);
+                return emitConvertOp(LIRKind.derive(input).changeType(Kind.Double), SSEOp.CVTSS2SD, SS, input);
             case F2I:
-                return emitConvert2Op(LIRKind.derive(inputVal).changeType(Kind.Int), F2I, input);
+                return emitConvertOp(LIRKind.derive(input).changeType(Kind.Int), SSEOp.CVTTSS2SI, DWORD, input);
             case F2L:
-                return emitConvert2Op(LIRKind.derive(inputVal).changeType(Kind.Long), F2L, input);
+                return emitConvertOp(LIRKind.derive(input).changeType(Kind.Long), SSEOp.CVTTSS2SI, QWORD, input);
             case I2D:
-                return emitConvert2Op(LIRKind.derive(inputVal).changeType(Kind.Double), I2D, input);
+                return emitConvertOp(LIRKind.derive(input).changeType(Kind.Double), SSEOp.CVTSI2SD, DWORD, input);
             case I2F:
-                return emitConvert2Op(LIRKind.derive(inputVal).changeType(Kind.Float), I2F, input);
+                return emitConvertOp(LIRKind.derive(input).changeType(Kind.Float), SSEOp.CVTSI2SS, DWORD, input);
             case L2D:
-                return emitConvert2Op(LIRKind.derive(inputVal).changeType(Kind.Double), L2D, input);
+                return emitConvertOp(LIRKind.derive(input).changeType(Kind.Double), SSEOp.CVTSI2SD, QWORD, input);
             case L2F:
-                return emitConvert2Op(LIRKind.derive(inputVal).changeType(Kind.Float), L2F, input);
+                return emitConvertOp(LIRKind.derive(input).changeType(Kind.Float), SSEOp.CVTSI2SS, QWORD, input);
             default:
                 throw GraalInternalError.shouldNotReachHere();
         }
@@ -917,7 +959,7 @@
     public Value emitNarrow(Value inputVal, int bits) {
         if (inputVal.getKind() == Kind.Long && bits <= 32) {
             // TODO make it possible to reinterpret Long as Int in LIR without move
-            return emitConvert2RegOp(LIRKind.derive(inputVal).changeType(Kind.Int), L2I, asAllocatable(inputVal));
+            return emitConvertOp(LIRKind.derive(inputVal).changeType(Kind.Int), AMD64RMOp.MOV, DWORD, inputVal);
         } else {
             return inputVal;
         }
@@ -932,11 +974,11 @@
             // sign extend to 64 bits
             switch (fromBits) {
                 case 8:
-                    return emitConvert2Op(LIRKind.derive(inputVal).changeType(Kind.Long), B2L, asAllocatable(inputVal));
+                    return emitConvertOp(LIRKind.derive(inputVal).changeType(Kind.Long), MOVSXB, QWORD, inputVal);
                 case 16:
-                    return emitConvert2Op(LIRKind.derive(inputVal).changeType(Kind.Long), S2L, asAllocatable(inputVal));
+                    return emitConvertOp(LIRKind.derive(inputVal).changeType(Kind.Long), MOVSX, QWORD, inputVal);
                 case 32:
-                    return emitConvert2Op(LIRKind.derive(inputVal).changeType(Kind.Long), I2L, asAllocatable(inputVal));
+                    return emitConvertOp(LIRKind.derive(inputVal).changeType(Kind.Long), MOVSXD, QWORD, inputVal);
                 default:
                     throw GraalInternalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
             }
@@ -944,9 +986,9 @@
             // sign extend to 32 bits (smaller values are internally represented as 32 bit values)
             switch (fromBits) {
                 case 8:
-                    return emitConvert2Op(LIRKind.derive(inputVal).changeType(Kind.Int), B2I, asAllocatable(inputVal));
+                    return emitConvertOp(LIRKind.derive(inputVal).changeType(Kind.Int), MOVSXB, DWORD, inputVal);
                 case 16:
-                    return emitConvert2Op(LIRKind.derive(inputVal).changeType(Kind.Int), S2I, asAllocatable(inputVal));
+                    return emitConvertOp(LIRKind.derive(inputVal).changeType(Kind.Int), MOVSX, DWORD, inputVal);
                 case 32:
                     return inputVal;
                 default:
@@ -964,13 +1006,13 @@
             assert inputVal.getKind() == Kind.Long;
             Variable result = newVariable(LIRKind.derive(inputVal).changeType(Kind.Long));
             long mask = CodeUtil.mask(fromBits);
-            append(new BinaryRegConst(AMD64Arithmetic.LAND, result, asAllocatable(inputVal), JavaConstant.forLong(mask)));
+            append(new AMD64BinaryPatchOp(AND.getRMOpcode(QWORD), QWORD, result, asAllocatable(inputVal), JavaConstant.forLong(mask)));
             return result;
         } else {
             assert inputVal.getKind().getStackKind() == Kind.Int;
             Variable result = newVariable(LIRKind.derive(inputVal).changeType(Kind.Int));
             int mask = (int) CodeUtil.mask(fromBits);
-            append(new BinaryRegConst(AMD64Arithmetic.IAND, result, asAllocatable(inputVal), JavaConstant.forInt(mask)));
+            append(new AMD64BinaryPatchOp(AND.getRMOpcode(DWORD), DWORD, result, asAllocatable(inputVal), JavaConstant.forInt(mask)));
             if (toBits > 32) {
                 Variable longResult = newVariable(LIRKind.derive(inputVal).changeType(Kind.Long));
                 emitMove(longResult, result);
@@ -1005,9 +1047,9 @@
     public Variable emitBitCount(Value value) {
         Variable result = newVariable(LIRKind.derive(value).changeType(Kind.Int));
         if (value.getKind().getStackKind() == Kind.Int) {
-            append(new AMD64BitManipulationOp(IPOPCNT, result, asAllocatable(value)));
+            append(new AMD64UnaryRMOp(POPCNT, DWORD, result, asAllocatable(value)));
         } else {
-            append(new AMD64BitManipulationOp(LPOPCNT, result, asAllocatable(value)));
+            append(new AMD64UnaryRMOp(POPCNT, QWORD, result, asAllocatable(value)));
         }
         return result;
     }
@@ -1015,7 +1057,7 @@
     @Override
     public Variable emitBitScanForward(Value value) {
         Variable result = newVariable(LIRKind.derive(value).changeType(Kind.Int));
-        append(new AMD64BitManipulationOp(BSF, result, asAllocatable(value)));
+        append(new AMD64UnaryRMOp(BSF, QWORD, result, asAllocatable(value)));
         return result;
     }
 
@@ -1023,9 +1065,9 @@
     public Variable emitBitScanReverse(Value value) {
         Variable result = newVariable(LIRKind.derive(value).changeType(Kind.Int));
         if (value.getKind().getStackKind() == Kind.Int) {
-            append(new AMD64BitManipulationOp(IBSR, result, asAllocatable(value)));
+            append(new AMD64UnaryRMOp(BSR, DWORD, result, asAllocatable(value)));
         } else {
-            append(new AMD64BitManipulationOp(LBSR, result, asAllocatable(value)));
+            append(new AMD64UnaryRMOp(BSR, QWORD, result, asAllocatable(value)));
         }
         return result;
     }
@@ -1033,9 +1075,9 @@
     public Value emitCountLeadingZeros(Value value) {
         Variable result = newVariable(LIRKind.derive(value).changeType(Kind.Int));
         if (value.getKind().getStackKind() == Kind.Int) {
-            append(new AMD64BitManipulationOp(ILZCNT, result, asAllocatable(value)));
+            append(new AMD64UnaryRMOp(LZCNT, DWORD, result, asAllocatable(value)));
         } else {
-            append(new AMD64BitManipulationOp(LLZCNT, result, asAllocatable(value)));
+            append(new AMD64UnaryRMOp(LZCNT, QWORD, result, asAllocatable(value)));
         }
         return result;
     }
@@ -1043,9 +1085,9 @@
     public Value emitCountTrailingZeros(Value value) {
         Variable result = newVariable(LIRKind.derive(value).changeType(Kind.Int));
         if (value.getKind().getStackKind() == Kind.Int) {
-            append(new AMD64BitManipulationOp(ITZCNT, result, asAllocatable(value)));
+            append(new AMD64UnaryRMOp(TZCNT, DWORD, result, asAllocatable(value)));
         } else {
-            append(new AMD64BitManipulationOp(LTZCNT, result, asAllocatable(value)));
+            append(new AMD64UnaryRMOp(TZCNT, QWORD, result, asAllocatable(value)));
         }
         return result;
     }
@@ -1055,10 +1097,10 @@
         Variable result = newVariable(LIRKind.derive(input));
         switch (input.getKind()) {
             case Float:
-                append(new BinaryRegConst(FAND, result, asAllocatable(input), JavaConstant.forFloat(Float.intBitsToFloat(0x7FFFFFFF))));
+                append(new AMD64BinaryPatchOp(SSEOp.AND, PS, result, asAllocatable(input), JavaConstant.forFloat(Float.intBitsToFloat(0x7FFFFFFF)), 16));
                 break;
             case Double:
-                append(new BinaryRegConst(DAND, result, asAllocatable(input), JavaConstant.forDouble(Double.longBitsToDouble(0x7FFFFFFFFFFFFFFFL))));
+                append(new AMD64BinaryPatchOp(SSEOp.AND, PD, result, asAllocatable(input), JavaConstant.forDouble(Double.longBitsToDouble(0x7FFFFFFFFFFFFFFFL)), 16));
                 break;
             default:
                 throw GraalInternalError.shouldNotReachHere();
@@ -1069,7 +1111,16 @@
     @Override
     public Value emitMathSqrt(Value input) {
         Variable result = newVariable(LIRKind.derive(input));
-        append(new Unary2Op(SQRT, result, asAllocatable(input)));
+        switch (input.getKind()) {
+            case Float:
+                append(new AMD64UnaryRMOp(SSEOp.SQRT, SS, result, asAllocatable(input)));
+                break;
+            case Double:
+                append(new AMD64UnaryRMOp(SSEOp.SQRT, SD, result, asAllocatable(input)));
+                break;
+            default:
+                throw GraalInternalError.shouldNotReachHere();
+        }
         return result;
     }
 
--- a/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64NodeLIRBuilder.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64NodeLIRBuilder.java	Mon Mar 16 16:54:10 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,12 +23,18 @@
 
 package com.oracle.graal.compiler.amd64;
 
-import static com.oracle.graal.lir.amd64.AMD64Arithmetic.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize.*;
 
 import com.oracle.graal.amd64.*;
 import com.oracle.graal.api.code.*;
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.asm.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MIOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.asm.amd64.AMD64Assembler.SSEOp;
 import com.oracle.graal.compiler.common.*;
 import com.oracle.graal.compiler.common.calc.*;
 import com.oracle.graal.compiler.gen.*;
@@ -108,6 +114,27 @@
         return (Kind) gen.getLIRKind(access.asNode().stamp()).getPlatformKind();
     }
 
+    protected OperandSize getMemorySize(Access access) {
+        switch (getMemoryKind(access)) {
+            case Boolean:
+            case Byte:
+                return OperandSize.BYTE;
+            case Char:
+            case Short:
+                return OperandSize.WORD;
+            case Int:
+                return OperandSize.DWORD;
+            case Long:
+                return OperandSize.QWORD;
+            case Float:
+                return OperandSize.SS;
+            case Double:
+                return OperandSize.SD;
+            default:
+                throw GraalInternalError.shouldNotReachHere("unsupported memory access type " + getMemoryKind(access));
+        }
+    }
+
     protected AMD64AddressValue makeAddress(Access access) {
         return (AMD64AddressValue) access.accessLocation().generateAddress(this, gen, operand(access.object()));
     }
@@ -176,6 +203,7 @@
         LabelRef falseLabel = getLIRBlock(x.falseSuccessor());
         double trueLabelProbability = x.probability(x.trueSuccessor());
         Kind kind = getMemoryKind(access);
+        OperandSize size = kind == Kind.Long ? QWORD : DWORD;
         if (value.isConstant()) {
             if (kind != kind.getStackKind()) {
                 return null;
@@ -186,83 +214,61 @@
                 return null;
             }
             return builder -> {
-                gen.append(new AMD64TestMemoryOp(kind, makeAddress(access), constant, getState(access)));
+                gen.append(new AMD64CompareMemoryConstOp(AMD64MIOp.TEST, size, makeAddress(access), constant, getState(access)));
                 gen.append(new BranchOp(Condition.EQ, trueLabel, falseLabel, trueLabelProbability));
                 return null;
             };
         } else {
             return builder -> {
-                gen.append(new AMD64TestMemoryOp(kind, makeAddress(access), operand(value), getState(access)));
+                gen.append(new AMD64CompareMemoryOp(AMD64RMOp.TEST, size, gen.asAllocatable(operand(value)), makeAddress(access), getState(access)));
                 gen.append(new BranchOp(Condition.EQ, trueLabel, falseLabel, trueLabelProbability));
                 return null;
             };
         }
     }
 
-    protected Value emitConvert2MemoryOp(PlatformKind kind, AMD64Arithmetic op, Access access) {
-        AMD64AddressValue address = makeAddress(access);
-        LIRFrameState state = getState(access);
-        return getLIRGeneratorTool().emitConvert2MemoryOp(kind, op, address, state);
-    }
-
-    private Value emitFloatConvertMemory(FloatConvertNode op, Access access) {
-        switch (op.getFloatConvert()) {
-            case D2F:
-                return emitConvert2MemoryOp(Kind.Float, D2F, access);
-            case D2I:
-                return emitConvert2MemoryOp(Kind.Int, D2I, access);
-            case D2L:
-                return emitConvert2MemoryOp(Kind.Long, D2L, access);
-            case F2D:
-                return emitConvert2MemoryOp(Kind.Double, F2D, access);
-            case F2I:
-                return emitConvert2MemoryOp(Kind.Int, F2I, access);
-            case F2L:
-                return emitConvert2MemoryOp(Kind.Long, F2L, access);
-            case I2D:
-                return emitConvert2MemoryOp(Kind.Double, I2D, access);
-            case I2F:
-                return emitConvert2MemoryOp(Kind.Float, I2F, access);
-            case L2D:
-                return emitConvert2MemoryOp(Kind.Double, L2D, access);
-            case L2F:
-                return emitConvert2MemoryOp(Kind.Float, L2F, access);
-            default:
-                throw GraalInternalError.shouldNotReachHere();
-        }
+    protected ComplexMatchResult emitConvertMemoryOp(PlatformKind kind, AMD64RMOp op, OperandSize size, Access access) {
+        return builder -> {
+            AMD64AddressValue address = makeAddress(access);
+            LIRFrameState state = getState(access);
+            return getLIRGeneratorTool().emitConvertMemoryOp(kind, op, size, address, state);
+        };
     }
 
     private ComplexMatchResult emitSignExtendMemory(Access access, int fromBits, int toBits) {
         assert fromBits <= toBits && toBits <= 64;
         Kind kind = null;
-        AMD64Arithmetic op = null;
+        AMD64RMOp op;
+        OperandSize size;
         if (fromBits == toBits) {
             return null;
         } else if (toBits > 32) {
             kind = Kind.Long;
+            size = QWORD;
             // sign extend to 64 bits
             switch (fromBits) {
                 case 8:
-                    op = B2L;
+                    op = MOVSXB;
                     break;
                 case 16:
-                    op = S2L;
+                    op = MOVSX;
                     break;
                 case 32:
-                    op = I2L;
+                    op = MOVSXD;
                     break;
                 default:
                     throw GraalInternalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
             }
         } else {
             kind = Kind.Int;
+            size = DWORD;
             // sign extend to 32 bits (smaller values are internally represented as 32 bit values)
             switch (fromBits) {
                 case 8:
-                    op = B2I;
+                    op = MOVSXB;
                     break;
                 case 16:
-                    op = S2I;
+                    op = MOVSX;
                     break;
                 case 32:
                     return null;
@@ -271,13 +277,7 @@
             }
         }
         if (kind != null && op != null) {
-            Kind localKind = kind;
-            AMD64Arithmetic localOp = op;
-            return new ComplexMatchResult() {
-                public Value evaluate(NodeLIRBuilder builder) {
-                    return emitConvert2MemoryOp(localKind, localOp, access);
-                }
-            };
+            return emitConvertMemoryOp(kind, op, size, access);
         }
         return null;
     }
@@ -288,66 +288,6 @@
         return getLIRGeneratorTool().emitLoad(to, address, state);
     }
 
-    protected AMD64Arithmetic getOp(ValueNode operation, Access access) {
-        Kind memoryKind = getMemoryKind(access);
-        if (operation.getClass() == AddNode.class) {
-            switch (memoryKind) {
-                case Int:
-                    return IADD;
-                case Long:
-                    return LADD;
-                case Float:
-                    return FADD;
-                case Double:
-                    return DADD;
-            }
-        } else if (operation.getClass() == AndNode.class) {
-            switch (memoryKind) {
-                case Int:
-                    return IAND;
-                case Long:
-                    return LAND;
-            }
-        } else if (operation.getClass() == OrNode.class) {
-            switch (memoryKind) {
-                case Int:
-                    return IOR;
-                case Long:
-                    return LOR;
-            }
-        } else if (operation.getClass() == XorNode.class) {
-            switch (memoryKind) {
-                case Int:
-                    return IXOR;
-                case Long:
-                    return LXOR;
-            }
-        } else if (operation.getClass() == SubNode.class) {
-            switch (memoryKind) {
-                case Int:
-                    return ISUB;
-                case Long:
-                    return LSUB;
-                case Float:
-                    return FSUB;
-                case Double:
-                    return DSUB;
-            }
-        } else if (operation.getClass() == MulNode.class) {
-            switch (memoryKind) {
-                case Int:
-                    return IMUL;
-                case Long:
-                    return LMUL;
-                case Float:
-                    return FMUL;
-                case Double:
-                    return DMUL;
-            }
-        }
-        return null;
-    }
-
     @MatchRule("(If (IntegerTest Read=access value))")
     @MatchRule("(If (IntegerTest FloatingRead=access value))")
     public ComplexMatchResult integerTestBranchMemory(IfNode root, Access access, ValueNode value) {
@@ -392,24 +332,74 @@
         return null;
     }
 
+    private ComplexMatchResult binaryRead(AMD64RMOp op, OperandSize size, ValueNode value, Access access) {
+        return builder -> getLIRGeneratorTool().emitBinaryMemory(op, size, getLIRGeneratorTool().asAllocatable(operand(value)), makeAddress(access), getState(access));
+    }
+
     @MatchRule("(Add value Read=access)")
+    @MatchRule("(Add value FloatingRead=access)")
+    public ComplexMatchResult addMemory(ValueNode value, Access access) {
+        OperandSize size = getMemorySize(access);
+        if (size.isXmmType()) {
+            return binaryRead(SSEOp.ADD, size, value, access);
+        } else {
+            return binaryRead(ADD.getRMOpcode(size), size, value, access);
+        }
+    }
+
     @MatchRule("(Sub value Read=access)")
+    @MatchRule("(Sub value FloatingRead=access)")
+    public ComplexMatchResult subMemory(ValueNode value, Access access) {
+        OperandSize size = getMemorySize(access);
+        if (size.isXmmType()) {
+            return binaryRead(SSEOp.SUB, size, value, access);
+        } else {
+            return binaryRead(SUB.getRMOpcode(size), size, value, access);
+        }
+    }
+
     @MatchRule("(Mul value Read=access)")
-    @MatchRule("(Or value Read=access)")
-    @MatchRule("(Xor value Read=access)")
-    @MatchRule("(And value Read=access)")
-    @MatchRule("(Add value FloatingRead=access)")
-    @MatchRule("(Sub value FloatingRead=access)")
     @MatchRule("(Mul value FloatingRead=access)")
-    @MatchRule("(Or value FloatingRead=access)")
-    @MatchRule("(Xor value FloatingRead=access)")
+    public ComplexMatchResult mulMemory(ValueNode value, Access access) {
+        OperandSize size = getMemorySize(access);
+        if (size.isXmmType()) {
+            return binaryRead(SSEOp.MUL, size, value, access);
+        } else {
+            return binaryRead(AMD64RMOp.IMUL, size, value, access);
+        }
+    }
+
+    @MatchRule("(And value Read=access)")
     @MatchRule("(And value FloatingRead=access)")
-    public ComplexMatchResult binaryRead(BinaryNode root, ValueNode value, Access access) {
-        AMD64Arithmetic op = getOp(root, access);
-        if (op != null) {
-            return builder -> getLIRGeneratorTool().emitBinaryMemory(op, getMemoryKind(access), getLIRGeneratorTool().asAllocatable(operand(value)), makeAddress(access), getState(access));
+    public ComplexMatchResult andMemory(ValueNode value, Access access) {
+        OperandSize size = getMemorySize(access);
+        if (size.isXmmType()) {
+            return null;
+        } else {
+            return binaryRead(AND.getRMOpcode(size), size, value, access);
         }
-        return null;
+    }
+
+    @MatchRule("(Or value Read=access)")
+    @MatchRule("(Or value FloatingRead=access)")
+    public ComplexMatchResult orMemory(ValueNode value, Access access) {
+        OperandSize size = getMemorySize(access);
+        if (size.isXmmType()) {
+            return null;
+        } else {
+            return binaryRead(OR.getRMOpcode(size), size, value, access);
+        }
+    }
+
+    @MatchRule("(Xor value Read=access)")
+    @MatchRule("(Xor value FloatingRead=access)")
+    public ComplexMatchResult xorMemory(ValueNode value, Access access) {
+        OperandSize size = getMemorySize(access);
+        if (size.isXmmType()) {
+            return null;
+        } else {
+            return binaryRead(XOR.getRMOpcode(size), size, value, access);
+        }
     }
 
     @MatchRule("(Write Narrow=narrow location value)")
@@ -447,8 +437,30 @@
     @MatchRule("(FloatConvert Read=access)")
     @MatchRule("(FloatConvert FloatingRead=access)")
     public ComplexMatchResult floatConvert(FloatConvertNode root, Access access) {
-        return builder -> emitFloatConvertMemory(root, access);
-
+        switch (root.getFloatConvert()) {
+            case D2F:
+                return emitConvertMemoryOp(Kind.Float, SSEOp.CVTSD2SS, SD, access);
+            case D2I:
+                return emitConvertMemoryOp(Kind.Int, SSEOp.CVTTSD2SI, DWORD, access);
+            case D2L:
+                return emitConvertMemoryOp(Kind.Long, SSEOp.CVTTSD2SI, QWORD, access);
+            case F2D:
+                return emitConvertMemoryOp(Kind.Double, SSEOp.CVTSS2SD, SS, access);
+            case F2I:
+                return emitConvertMemoryOp(Kind.Int, SSEOp.CVTTSS2SI, DWORD, access);
+            case F2L:
+                return emitConvertMemoryOp(Kind.Long, SSEOp.CVTTSS2SI, QWORD, access);
+            case I2D:
+                return emitConvertMemoryOp(Kind.Double, SSEOp.CVTSI2SD, DWORD, access);
+            case I2F:
+                return emitConvertMemoryOp(Kind.Float, SSEOp.CVTSI2SS, DWORD, access);
+            case L2D:
+                return emitConvertMemoryOp(Kind.Double, SSEOp.CVTSI2SD, QWORD, access);
+            case L2F:
+                return emitConvertMemoryOp(Kind.Float, SSEOp.CVTSI2SS, QWORD, access);
+            default:
+                throw GraalInternalError.shouldNotReachHere();
+        }
     }
 
     @MatchRule("(Reinterpret Read=access)")
--- a/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/Edges.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/Edges.java	Mon Mar 16 16:54:10 2015 -0700
@@ -528,22 +528,56 @@
             }
             index++;
         }
-        int count = getCount();
+        int count = curOffsets.length;
         while (index < count) {
             NodeList<Node> list = getNodeList(node, curOffsets, index);
-            if (list != null) {
-                for (int i = 0; i < list.size(); ++i) {
-                    Node curNode = list.get(i);
-                    if (curNode != null) {
-                        consumer.accept(node, curNode);
-                    }
+            acceptHelper(node, consumer, list);
+            index++;
+        }
+    }
+
+    private static void acceptHelper(Node node, BiConsumer<Node, Node> consumer, NodeList<Node> list) {
+        if (list != null) {
+            for (int i = 0; i < list.size(); ++i) {
+                Node curNode = list.get(i);
+                if (curNode != null) {
+                    consumer.accept(node, curNode);
                 }
             }
-            index++;
         }
     }
 
     public long[] getOffsets() {
         return this.offsets;
     }
+
+    public void pushAll(Node node, NodeStack stack) {
+        int index = 0;
+        int curDirectCount = this.directCount;
+        final long[] curOffsets = this.offsets;
+        while (index < curDirectCount) {
+            Node curNode = getNode(node, curOffsets, index);
+            if (curNode != null) {
+                stack.push(curNode);
+            }
+            index++;
+        }
+        int count = curOffsets.length;
+        while (index < count) {
+            NodeList<Node> list = getNodeList(node, curOffsets, index);
+            pushAllHelper(stack, list);
+            index++;
+        }
+    }
+
+    private static void pushAllHelper(NodeStack stack, NodeList<Node> list) {
+        if (list != null) {
+            for (int i = 0; i < list.size(); ++i) {
+                Node curNode = list.get(i);
+                if (curNode != null) {
+                    stack.push(curNode);
+                }
+            }
+        }
+    }
 }
--- a/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/Node.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/Node.java	Mon Mar 16 16:54:10 2015 -0700
@@ -1074,4 +1074,8 @@
     public boolean valueEquals(Node other) {
         return getNodeClass().dataEquals(this, other);
     }
+
+    public final void pushInputs(NodeStack stack) {
+        getNodeClass().getInputEdges().pushAll(this, stack);
+    }
 }
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCompare.java	Mon Mar 16 15:59:57 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.hotspot.amd64;
-
-import static com.oracle.graal.api.code.ValueUtil.*;
-import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
-
-import com.oracle.graal.hotspot.HotSpotGraalRuntime;
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.compiler.common.*;
-import com.oracle.graal.hotspot.meta.*;
-import com.oracle.graal.lir.*;
-import com.oracle.graal.lir.amd64.*;
-import com.oracle.graal.lir.amd64.AMD64Move.MemOp;
-import com.oracle.graal.lir.asm.*;
-
-public class AMD64HotSpotCompare {
-
-    @Opcode("CMP")
-    public static final class HotSpotCompareConstantOp extends AMD64LIRInstruction {
-        public static final LIRInstructionClass<HotSpotCompareConstantOp> TYPE = LIRInstructionClass.create(HotSpotCompareConstantOp.class);
-
-        @Use({REG}) protected AllocatableValue x;
-        protected JavaConstant y;
-
-        public HotSpotCompareConstantOp(AllocatableValue x, JavaConstant y) {
-            super(TYPE);
-            this.x = x;
-            this.y = y;
-        }
-
-        @Override
-        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
-            assert isRegister(x);
-            if (HotSpotCompressedNullConstant.COMPRESSED_NULL.equals(y)) {
-                // compressed null
-                masm.testl(asRegister(x), asRegister(x));
-            } else if (y instanceof HotSpotObjectConstant) {
-                HotSpotObjectConstant yConst = (HotSpotObjectConstant) y;
-                if (yConst.isCompressed()) {
-                    // compressed oop
-                    crb.recordInlineDataInCode(y);
-                    masm.cmpl(asRegister(x), 0xDEADDEAD);
-                } else {
-                    // uncompressed oop
-                    AMD64Address patch = (AMD64Address) crb.recordDataReferenceInCode(y, 8);
-                    masm.cmpq(asRegister(x), patch);
-                }
-            } else if (y instanceof HotSpotMetaspaceConstant) {
-                boolean isImmutable = GraalOptions.ImmutableCode.getValue();
-                boolean generatePIC = GraalOptions.GeneratePIC.getValue();
-                if (y.getKind() == Kind.Int) {
-                    // compressed metaspace pointer
-                    crb.recordInlineDataInCode(y);
-                    if (isImmutable && generatePIC) {
-                        Kind hostWordKind = HotSpotGraalRuntime.getHostWordKind();
-                        int alignment = hostWordKind.getBitCount() / Byte.SIZE;
-                        // recordDataReferenceInCode forces the mov to be rip-relative
-                        masm.cmpl(asRegister(x), (AMD64Address) crb.recordDataReferenceInCode(JavaConstant.INT_0, alignment));
-                    } else {
-                        masm.cmpl(asRegister(x), y.asInt());
-                    }
-                } else {
-                    // uncompressed metaspace pointer
-                    if (isImmutable && generatePIC) {
-                        crb.recordInlineDataInCode(y);
-                        Kind hostWordKind = HotSpotGraalRuntime.getHostWordKind();
-                        int alignment = hostWordKind.getBitCount() / Byte.SIZE;
-                        // recordDataReferenceInCode forces the mov to be rip-relative
-                        masm.cmpq(asRegister(x), (AMD64Address) crb.recordDataReferenceInCode(JavaConstant.INT_0, alignment));
-                    } else {
-                        AMD64Address patch = (AMD64Address) crb.recordDataReferenceInCode(y, 8);
-                        masm.cmpq(asRegister(x), patch);
-                    }
-                }
-            } else {
-                throw GraalInternalError.shouldNotReachHere();
-            }
-        }
-    }
-
-    @Opcode("CMP")
-    public static final class HotSpotCompareMemoryConstantOp extends MemOp {
-        public static final LIRInstructionClass<HotSpotCompareMemoryConstantOp> TYPE = LIRInstructionClass.create(HotSpotCompareMemoryConstantOp.class);
-
-        protected JavaConstant y;
-
-        public HotSpotCompareMemoryConstantOp(Kind kind, AMD64AddressValue x, JavaConstant y, LIRFrameState state) {
-            super(TYPE, kind, x, state);
-            this.y = y;
-        }
-
-        @Override
-        protected void emitMemAccess(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
-            if (HotSpotCompressedNullConstant.COMPRESSED_NULL.equals(y)) {
-                // compressed null
-                masm.cmpl(address.toAddress(), 0);
-            } else if (y instanceof HotSpotObjectConstant) {
-                HotSpotObjectConstant yConst = (HotSpotObjectConstant) y;
-                if (yConst.isCompressed() && crb.target.inlineObjects) {
-                    // compressed oop
-                    crb.recordInlineDataInCode(y);
-                    masm.cmpl(address.toAddress(), 0xDEADDEAD);
-                } else {
-                    // uncompressed oop
-                    throw GraalInternalError.shouldNotReachHere();
-                }
-            } else if (y instanceof HotSpotMetaspaceConstant) {
-                if (y.getKind() == Kind.Int) {
-                    // compressed metaspace pointer
-                    crb.recordInlineDataInCode(y);
-                    masm.cmpl(address.toAddress(), y.asInt());
-                } else if (y.getKind() == Kind.Long && NumUtil.is32bit(y.asLong())) {
-                    // uncompressed metaspace pointer
-                    crb.recordInlineDataInCode(y);
-                    masm.cmpq(address.toAddress(), (int) y.asLong());
-                } else {
-                    throw GraalInternalError.shouldNotReachHere();
-                }
-            } else {
-                throw GraalInternalError.shouldNotReachHere();
-            }
-        }
-    }
-
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCompareConstOp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MIOp;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.hotspot.meta.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.amd64.*;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64HotSpotCompareConstOp extends AMD64LIRInstruction {
+    public static final LIRInstructionClass<AMD64HotSpotCompareConstOp> TYPE = LIRInstructionClass.create(AMD64HotSpotCompareConstOp.class);
+
+    @Opcode private final AMD64MIOp opcode;
+
+    @Use({REG, STACK}) protected AllocatableValue x;
+    protected HotSpotConstant y;
+
+    public AMD64HotSpotCompareConstOp(AMD64MIOp opcode, AllocatableValue x, HotSpotConstant y) {
+        super(TYPE);
+        this.opcode = opcode;
+        this.x = x;
+        this.y = y;
+
+        assert y.isCompressed();
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        int imm32;
+        if (y instanceof HotSpotMetaspaceConstant) {
+            imm32 = (int) ((HotSpotMetaspaceConstant) y).rawValue();
+        } else {
+            assert y instanceof HotSpotObjectConstant;
+            imm32 = 0xDEADDEAD;
+        }
+
+        crb.recordInlineDataInCode(y);
+        if (isRegister(x)) {
+            opcode.emit(masm, DWORD, asRegister(x), imm32);
+        } else {
+            assert isStackSlot(x);
+            opcode.emit(masm, DWORD, (AMD64Address) crb.asAddress(x), imm32);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCompareMemoryConstOp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MIOp;
+import com.oracle.graal.hotspot.meta.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.StandardOp.ImplicitNullCheck;
+import com.oracle.graal.lir.amd64.*;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64HotSpotCompareMemoryConstOp extends AMD64LIRInstruction implements ImplicitNullCheck {
+    public static final LIRInstructionClass<AMD64HotSpotCompareMemoryConstOp> TYPE = LIRInstructionClass.create(AMD64HotSpotCompareMemoryConstOp.class);
+
+    @Opcode private final AMD64MIOp opcode;
+
+    @Use({COMPOSITE}) protected AMD64AddressValue x;
+    protected HotSpotConstant y;
+
+    @State protected LIRFrameState state;
+
+    public AMD64HotSpotCompareMemoryConstOp(AMD64MIOp opcode, AMD64AddressValue x, HotSpotConstant y, LIRFrameState state) {
+        super(TYPE);
+        this.opcode = opcode;
+        this.x = x;
+        this.y = y;
+        this.state = state;
+
+        assert y.isCompressed();
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        int imm32;
+        if (y instanceof HotSpotMetaspaceConstant) {
+            imm32 = (int) ((HotSpotMetaspaceConstant) y).rawValue();
+        } else {
+            assert y instanceof HotSpotObjectConstant;
+            imm32 = 0xDEADDEAD;
+        }
+
+        crb.recordInlineDataInCode(y);
+        if (isRegister(x)) {
+            opcode.emit(masm, DWORD, asRegister(x), imm32);
+        } else {
+            assert isStackSlot(x);
+            opcode.emit(masm, DWORD, (AMD64Address) crb.asAddress(x), imm32);
+        }
+    }
+
+    public boolean makeNullCheckFor(Value value, LIRFrameState nullCheckState, int implicitNullCheckLimit) {
+        if (state == null && x.isValidImplicitNullCheckFor(value, implicitNullCheckLimit)) {
+            state = nullCheckState;
+            return true;
+        }
+        return false;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotComparePatchOp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.hotspot.meta.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.amd64.*;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64HotSpotComparePatchOp extends AMD64LIRInstruction {
+    public static final LIRInstructionClass<AMD64HotSpotComparePatchOp> TYPE = LIRInstructionClass.create(AMD64HotSpotComparePatchOp.class);
+
+    @Opcode private final AMD64RMOp opcode;
+    private final OperandSize size;
+
+    @Use({REG}) protected AllocatableValue x;
+    protected HotSpotConstant y;
+
+    public AMD64HotSpotComparePatchOp(AMD64RMOp opcode, OperandSize size, AllocatableValue x, HotSpotConstant y) {
+        super(TYPE);
+        this.opcode = opcode;
+        this.size = size;
+
+        this.x = x;
+        this.y = y;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        AMD64Address addr = (AMD64Address) crb.recordDataReferenceInCode(y, size.getBytes());
+        opcode.emit(masm, size, asRegister(x), addr);
+    }
+}
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java	Mon Mar 16 16:54:10 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,9 @@
 
 import static com.oracle.graal.amd64.AMD64.*;
 import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize.*;
 import static com.oracle.graal.hotspot.HotSpotBackend.*;
 
 import java.util.*;
@@ -31,6 +34,7 @@
 import com.oracle.graal.amd64.*;
 import com.oracle.graal.api.code.*;
 import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
 import com.oracle.graal.compiler.amd64.*;
 import com.oracle.graal.compiler.common.*;
 import com.oracle.graal.compiler.common.calc.*;
@@ -120,11 +124,12 @@
     List<AMD64HotSpotEpilogueOp> epilogueOps = new ArrayList<>(2);
 
     @Override
-    public void append(LIRInstruction op) {
-        super.append(op);
+    public <I extends LIRInstruction> I append(I op) {
+        I ret = super.append(op);
         if (op instanceof AMD64HotSpotEpilogueOp) {
             epilogueOps.add((AMD64HotSpotEpilogueOp) op);
         }
+        return ret;
     }
 
     @Override
@@ -579,19 +584,35 @@
 
     @Override
     protected void emitCompareOp(PlatformKind cmpKind, Variable left, Value right) {
-        if (right instanceof HotSpotConstant) {
-            append(new AMD64HotSpotCompare.HotSpotCompareConstantOp(left, (JavaConstant) right));
+        if (HotSpotCompressedNullConstant.COMPRESSED_NULL.equals(right)) {
+            append(new AMD64CompareOp(TEST, DWORD, left, left));
+        } else if (right instanceof HotSpotConstant) {
+            HotSpotConstant c = (HotSpotConstant) right;
+
+            boolean isImmutable = GraalOptions.ImmutableCode.getValue();
+            boolean generatePIC = GraalOptions.GeneratePIC.getValue();
+            if (c.isCompressed() && !(isImmutable && generatePIC)) {
+                append(new AMD64HotSpotCompareConstOp(CMP.getMIOpcode(DWORD, false), left, c));
+            } else {
+                OperandSize size = c.isCompressed() ? DWORD : QWORD;
+                append(new AMD64HotSpotComparePatchOp(CMP.getRMOpcode(size), size, left, c));
+            }
         } else {
             super.emitCompareOp(cmpKind, left, right);
         }
     }
 
     @Override
-    protected void emitCompareMemoryConOp(Kind kind, AMD64AddressValue address, JavaConstant value, LIRFrameState state) {
-        if (value instanceof HotSpotConstant) {
-            append(new AMD64HotSpotCompare.HotSpotCompareMemoryConstantOp(kind, address, value, state));
+    protected boolean emitCompareMemoryConOp(OperandSize size, JavaConstant a, AMD64AddressValue b, LIRFrameState state) {
+        if (a.isNull()) {
+            append(new AMD64CompareMemoryConstOp(CMP.getMIOpcode(size, true), size, b, PrimitiveConstant.INT_0, state));
+            return true;
+        } else if (a instanceof HotSpotConstant && size == DWORD) {
+            assert ((HotSpotConstant) a).isCompressed();
+            append(new AMD64HotSpotCompareMemoryConstOp(CMP.getMIOpcode(size, false), b, (HotSpotConstant) a, state));
+            return true;
         } else {
-            super.emitCompareMemoryConOp(kind, address, value, state);
+            return super.emitCompareMemoryConOp(size, a, b, state);
         }
     }
 
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotNodeLIRBuilder.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotNodeLIRBuilder.java	Mon Mar 16 16:54:10 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,12 +24,14 @@
 
 import static com.oracle.graal.amd64.AMD64.*;
 import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
 import static com.oracle.graal.hotspot.HotSpotBackend.*;
 
 import com.oracle.graal.amd64.*;
 import com.oracle.graal.api.code.*;
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.asm.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.*;
 import com.oracle.graal.compiler.amd64.*;
 import com.oracle.graal.compiler.common.calc.*;
 import com.oracle.graal.compiler.common.type.*;
@@ -270,27 +272,78 @@
         return null;
     }
 
+    private ComplexMatchResult binaryReadCompressed(AMD64RMOp op, OperandSize size, ValueNode value, Access access, CompressionNode compress, ConstantLocationNode location) {
+        if (canFormCompressedMemory(compress, location)) {
+            return builder -> getLIRGeneratorTool().emitBinaryMemory(op, size, getLIRGeneratorTool().asAllocatable(operand(value)), makeCompressedAddress(compress, location), getState(access));
+        } else {
+            return null;
+        }
+    }
+
     @MatchRule("(Add value (Read=access (Compression=compress object) ConstantLocation=location))")
+    @MatchRule("(Add value (FloatingRead=access (Compression=compress object) ConstantLocation=location))")
+    public ComplexMatchResult addMemoryCompressed(ValueNode value, Access access, CompressionNode compress, ConstantLocationNode location) {
+        OperandSize size = getMemorySize(access);
+        if (size.isXmmType()) {
+            return binaryReadCompressed(SSEOp.ADD, size, value, access, compress, location);
+        } else {
+            return binaryReadCompressed(ADD.getRMOpcode(size), size, value, access, compress, location);
+        }
+    }
+
     @MatchRule("(Sub value (Read=access (Compression=compress object) ConstantLocation=location))")
+    @MatchRule("(Sub value (FloatingRead=access (Compression=compress object) ConstantLocation=location))")
+    public ComplexMatchResult subMemoryCompressed(ValueNode value, Access access, CompressionNode compress, ConstantLocationNode location) {
+        OperandSize size = getMemorySize(access);
+        if (size.isXmmType()) {
+            return binaryReadCompressed(SSEOp.SUB, size, value, access, compress, location);
+        } else {
+            return binaryReadCompressed(SUB.getRMOpcode(size), size, value, access, compress, location);
+        }
+    }
+
     @MatchRule("(Mul value (Read=access (Compression=compress object) ConstantLocation=location))")
+    @MatchRule("(Mul value (FloatingRead=access (Compression=compress object) ConstantLocation=location))")
+    public ComplexMatchResult mulMemoryCompressed(ValueNode value, Access access, CompressionNode compress, ConstantLocationNode location) {
+        OperandSize size = getMemorySize(access);
+        if (size.isXmmType()) {
+            return binaryReadCompressed(SSEOp.MUL, size, value, access, compress, location);
+        } else {
+            return binaryReadCompressed(AMD64RMOp.IMUL, size, value, access, compress, location);
+        }
+    }
+
+    @MatchRule("(And value (Read=access (Compression=compress object) ConstantLocation=location))")
+    @MatchRule("(And value (FloatingRead=access (Compression=compress object) ConstantLocation=location))")
+    public ComplexMatchResult andMemoryCompressed(ValueNode value, Access access, CompressionNode compress, ConstantLocationNode location) {
+        OperandSize size = getMemorySize(access);
+        if (size.isXmmType()) {
+            return null;
+        } else {
+            return binaryReadCompressed(AND.getRMOpcode(size), size, value, access, compress, location);
+        }
+    }
+
     @MatchRule("(Or value (Read=access (Compression=compress object) ConstantLocation=location))")
+    @MatchRule("(Or value (FloatingRead=access (Compression=compress object) ConstantLocation=location))")
+    public ComplexMatchResult orMemoryCompressed(ValueNode value, Access access, CompressionNode compress, ConstantLocationNode location) {
+        OperandSize size = getMemorySize(access);
+        if (size.isXmmType()) {
+            return null;
+        } else {
+            return binaryReadCompressed(OR.getRMOpcode(size), size, value, access, compress, location);
+        }
+    }
+
     @MatchRule("(Xor value (Read=access (Compression=compress object) ConstantLocation=location))")
-    @MatchRule("(And value (Read=access (Compression=compress object) ConstantLocation=location))")
-    @MatchRule("(Add value (FloatingRead=access (Compression=compress object) ConstantLocation=location))")
-    @MatchRule("(Sub value (FloatingRead=access (Compression=compress object) ConstantLocation=location))")
-    @MatchRule("(Mul value (FloatingRead=access (Compression=compress object) ConstantLocation=location))")
-    @MatchRule("(Or value (FloatingRead=access (Compression=compress object) ConstantLocation=location))")
     @MatchRule("(Xor value (FloatingRead=access (Compression=compress object) ConstantLocation=location))")
-    @MatchRule("(And value (FloatingRead=access (Compression=compress object) ConstantLocation=location))")
-    public ComplexMatchResult binaryReadCompressed(BinaryNode root, ValueNode value, Access access, CompressionNode compress, ConstantLocationNode location) {
-        if (canFormCompressedMemory(compress, location)) {
-            AMD64Arithmetic op = getOp(root, access);
-            if (op != null) {
-                return builder -> getLIRGeneratorTool().emitBinaryMemory(op, getMemoryKind(access), getLIRGeneratorTool().asAllocatable(operand(value)), makeCompressedAddress(compress, location),
-                                getState(access));
-            }
+    public ComplexMatchResult xorMemoryCompressed(ValueNode value, Access access, CompressionNode compress, ConstantLocationNode location) {
+        OperandSize size = getMemorySize(access);
+        if (size.isXmmType()) {
+            return null;
+        } else {
+            return binaryReadCompressed(XOR.getRMOpcode(size), size, value, access, compress, location);
         }
-        return null;
     }
 
     @MatchRule("(Read (Compression=compress object) ConstantLocation=location)")
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotCompressedNullConstant.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotCompressedNullConstant.java	Mon Mar 16 16:54:10 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,6 +43,11 @@
     }
 
     @Override
+    public boolean isCompressed() {
+        return true;
+    }
+
+    @Override
     public boolean isDefaultForKind() {
         return true;
     }
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotConstant.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotConstant.java	Mon Mar 16 16:54:10 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,4 +28,6 @@
  * Marker interface for hotspot specific constants.
  */
 public interface HotSpotConstant extends Constant {
+
+    boolean isCompressed();
 }
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotMetaspaceConstant.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotMetaspaceConstant.java	Mon Mar 16 16:54:10 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,8 +27,6 @@
 
 public interface HotSpotMetaspaceConstant extends HotSpotConstant, VMConstant {
 
-    boolean isCompressed();
-
     Constant compress(CompressEncoding encoding);
 
     Constant uncompress(CompressEncoding encoding);
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotObjectConstant.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotObjectConstant.java	Mon Mar 16 16:54:10 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,8 +38,6 @@
 
     JavaConstant uncompress();
 
-    boolean isCompressed();
-
     /**
      * Gets the resolved Java type of the object represented by this constant.
      */
--- a/graal/com.oracle.graal.java/src/com/oracle/graal/java/LocalLiveness.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.java/src/com/oracle/graal/java/LocalLiveness.java	Mon Mar 16 16:54:10 2015 -0700
@@ -53,24 +53,18 @@
         boolean changed;
         int iteration = 0;
         do {
-            Debug.log("Iteration %d", iteration);
+            assert traceIteration(iteration);
             changed = false;
             for (int i = blocks.length - 1; i >= 0; i--) {
                 BciBlock block = blocks[i];
                 int blockID = block.getId();
-                // log statements in IFs because debugLiveX creates a new String
-                if (Debug.isLogEnabled()) {
-                    Debug.logv("  start B%d  [%d, %d]  in: %s  out: %s  gen: %s  kill: %s", block.getId(), block.startBci, block.endBci, debugLiveIn(blockID), debugLiveOut(blockID),
-                                    debugLiveGen(blockID), debugLiveKill(blockID));
-                }
+                assert traceStart(block, blockID);
 
                 boolean blockChanged = (iteration == 0);
                 if (block.getSuccessorCount() > 0) {
                     int oldCardinality = liveOutCardinality(blockID);
                     for (BciBlock sux : block.getSuccessors()) {
-                        if (Debug.isLogEnabled()) {
-                            Debug.log("    Successor B%d: %s", sux.getId(), debugLiveIn(sux.getId()));
-                        }
+                        assert traceSuccessor(sux);
                         propagateLiveness(blockID, sux.getId());
                     }
                     blockChanged |= (oldCardinality != liveOutCardinality(blockID));
@@ -78,10 +72,7 @@
 
                 if (blockChanged) {
                     updateLiveness(blockID);
-                    if (Debug.isLogEnabled()) {
-                        Debug.logv("  end   B%d  [%d, %d]  in: %s  out: %s  gen: %s  kill: %s", block.getId(), block.startBci, block.endBci, debugLiveIn(blockID), debugLiveOut(blockID),
-                                        debugLiveGen(blockID), debugLiveKill(blockID));
-                    }
+                    assert traceEnd(block, blockID);
                 }
                 changed |= blockChanged;
             }
@@ -89,6 +80,34 @@
         } while (changed);
     }
 
+    private static boolean traceIteration(int iteration) {
+        Debug.log("Iteration %d", iteration);
+        return true;
+    }
+
+    private boolean traceEnd(BciBlock block, int blockID) {
+        if (Debug.isLogEnabled()) {
+            Debug.logv("  end   B%d  [%d, %d]  in: %s  out: %s  gen: %s  kill: %s", block.getId(), block.startBci, block.endBci, debugLiveIn(blockID), debugLiveOut(blockID), debugLiveGen(blockID),
+                            debugLiveKill(blockID));
+        }
+        return true;
+    }
+
+    private boolean traceSuccessor(BciBlock sux) {
+        if (Debug.isLogEnabled()) {
+            Debug.log("    Successor B%d: %s", sux.getId(), debugLiveIn(sux.getId()));
+        }
+        return true;
+    }
+
+    private boolean traceStart(BciBlock block, int blockID) {
+        if (Debug.isLogEnabled()) {
+            Debug.logv("  start B%d  [%d, %d]  in: %s  out: %s  gen: %s  kill: %s", block.getId(), block.startBci, block.endBci, debugLiveIn(blockID), debugLiveOut(blockID), debugLiveGen(blockID),
+                            debugLiveKill(blockID));
+        }
+        return true;
+    }
+
     /**
      * Returns whether the local is live at the beginning of the given block.
      */
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64AddressValue.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64AddressValue.java	Mon Mar 16 16:54:10 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -90,6 +90,10 @@
         return s.toString();
     }
 
+    public boolean isValidImplicitNullCheckFor(Value value, int implicitNullCheckLimit) {
+        return value.equals(base) && index.equals(Value.ILLEGAL) && displacement >= 0 && displacement < implicitNullCheckLimit;
+    }
+
     @Override
     public boolean equals(Object obj) {
         if (obj instanceof AMD64AddressValue) {
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Arithmetic.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Arithmetic.java	Mon Mar 16 16:54:10 2015 -0700
@@ -23,460 +23,18 @@
 package com.oracle.graal.lir.amd64;
 
 import static com.oracle.graal.api.code.ValueUtil.*;
-import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
 import com.oracle.graal.amd64.*;
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.asm.*;
 import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.asm.amd64.AMD64Assembler.ConditionFlag;
-import com.oracle.graal.compiler.common.*;
 import com.oracle.graal.lir.*;
-import com.oracle.graal.lir.amd64.AMD64Move.MemOp;
 import com.oracle.graal.lir.asm.*;
 
 public enum AMD64Arithmetic {
-
-    // @formatter:off
-
-    IADD, ISUB, IMUL, IUMUL, IDIV, IDIVREM, IREM, IUDIV, IUREM, IAND, IOR, IXOR, ISHL, ISHR, IUSHR, IROL, IROR,
-    LADD, LSUB, LMUL, LUMUL, LDIV, LDIVREM, LREM, LUDIV, LUREM, LAND, LOR, LXOR, LSHL, LSHR, LUSHR, LROL, LROR,
-    FADD, FSUB, FMUL, FDIV, FREM, FAND, FOR, FXOR,
-    DADD, DSUB, DMUL, DDIV, DREM, DAND, DOR, DXOR,
-    INEG, LNEG, INOT, LNOT,
-    SQRT,
-    L2I, B2I, S2I, B2L, S2L, I2L,
-    F2D, D2F,
-    I2F, I2D,
-    L2F, L2D,
-    MOV_I2F, MOV_L2D, MOV_F2I, MOV_D2L,
-    MOV_B2UI, MOV_B2UL, // Zero extending byte loads
-
-    /*
-     * Converts a float/double to an int/long. The result of the conversion does not comply with Java semantics
-     * when the input is a NaN, infinity or the conversion result is greater than Integer.MAX_VALUE/Long.MAX_VALUE.
-     */
-    F2I, D2I, F2L, D2L;
-
-    // @formatter:on
-
-    /**
-     * Unary operation with separate source and destination operand.
-     */
-    public static final class Unary2Op extends AMD64LIRInstruction {
-
-        public static final LIRInstructionClass<Unary2Op> TYPE = LIRInstructionClass.create(Unary2Op.class);
-        @Opcode private final AMD64Arithmetic opcode;
-        @Def({REG}) protected AllocatableValue result;
-        @Use({REG, STACK}) protected AllocatableValue x;
-
-        public Unary2Op(AMD64Arithmetic opcode, AllocatableValue result, AllocatableValue x) {
-            super(TYPE);
-            this.opcode = opcode;
-            this.result = result;
-            this.x = x;
-        }
-
-        @Override
-        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
-            emit(crb, masm, opcode, result, x, null);
-        }
-    }
-
-    /**
-     * Unary operation with separate source and destination operand but register only.
-     */
-    public static final class Unary2RegOp extends AMD64LIRInstruction {
-        public static final LIRInstructionClass<Unary2RegOp> TYPE = LIRInstructionClass.create(Unary2RegOp.class);
-
-        @Opcode private final AMD64Arithmetic opcode;
-        @Def({REG}) protected AllocatableValue result;
-        @Use({REG}) protected AllocatableValue x;
-
-        public Unary2RegOp(AMD64Arithmetic opcode, AllocatableValue result, AllocatableValue x) {
-            super(TYPE);
-            this.opcode = opcode;
-            this.result = result;
-            this.x = x;
-        }
-
-        @Override
-        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
-            emit(crb, masm, opcode, result, x, null);
-        }
-    }
-
-    /**
-     * Unary operation with single operand for source and destination.
-     */
-    public static class Unary1Op extends AMD64LIRInstruction {
-        public static final LIRInstructionClass<Unary1Op> TYPE = LIRInstructionClass.create(Unary1Op.class);
-
-        @Opcode private final AMD64Arithmetic opcode;
-        @Def({REG, HINT}) protected AllocatableValue result;
-        @Use({REG, STACK}) protected AllocatableValue x;
-
-        public Unary1Op(AMD64Arithmetic opcode, AllocatableValue result, AllocatableValue x) {
-            super(TYPE);
-            this.opcode = opcode;
-            this.result = result;
-            this.x = x;
-        }
-
-        @Override
-        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
-            AMD64Move.move(crb, masm, result, x);
-            emit(crb, masm, opcode, result);
-        }
-    }
-
-    /**
-     * Unary operation with separate memory source and destination operand.
-     */
-    public static final class Unary2MemoryOp extends MemOp {
-        public static final LIRInstructionClass<Unary2MemoryOp> TYPE = LIRInstructionClass.create(Unary2MemoryOp.class);
-
-        @Opcode private final AMD64Arithmetic opcode;
-        @Def({REG}) protected AllocatableValue result;
-
-        public Unary2MemoryOp(AMD64Arithmetic opcode, AllocatableValue result, Kind kind, AMD64AddressValue address, LIRFrameState state) {
-            super(TYPE, kind, address, state);
-            this.opcode = opcode;
-            this.result = result;
-        }
-
-        @Override
-        public void emitMemAccess(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
-            emit(crb, masm, opcode, result, address, null);
-        }
-    }
-
-    /**
-     * Binary operation with two operands. The first source operand is combined with the
-     * destination. The second source operand may be a stack slot.
-     */
-    public static class BinaryRegStack extends AMD64LIRInstruction {
-        public static final LIRInstructionClass<BinaryRegStack> TYPE = LIRInstructionClass.create(BinaryRegStack.class);
-
-        @Opcode private final AMD64Arithmetic opcode;
-        @Def({REG, HINT}) protected AllocatableValue result;
-        @Use({REG, STACK}) protected AllocatableValue x;
-        @Alive({REG, STACK}) protected AllocatableValue y;
-
-        public BinaryRegStack(AMD64Arithmetic opcode, AllocatableValue result, AllocatableValue x, AllocatableValue y) {
-            super(TYPE);
-            this.opcode = opcode;
-            this.result = result;
-            this.x = x;
-            this.y = y;
-        }
-
-        @Override
-        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
-            AMD64Move.move(crb, masm, result, x);
-            emit(crb, masm, opcode, result, y, null);
-        }
-
-        @Override
-        public void verify() {
-            super.verify();
-            assert differentRegisters(result, y) || sameRegister(x, y);
-            verifyKind(opcode, result, x, y);
-        }
-    }
-
-    /**
-     * Binary operation with two operands. The first source operand is combined with the
-     * destination. The second source operand may be a stack slot.
-     */
-    public static final class BinaryMemory extends AMD64LIRInstruction {
-        public static final LIRInstructionClass<BinaryMemory> TYPE = LIRInstructionClass.create(BinaryMemory.class);
-
-        @Opcode private final AMD64Arithmetic opcode;
-        @Def({REG, HINT}) protected AllocatableValue result;
-        @Use({REG}) protected AllocatableValue x;
-        protected final Kind kind;
-        @Alive({COMPOSITE}) protected AMD64AddressValue location;
-        @State protected LIRFrameState state;
-
-        public BinaryMemory(AMD64Arithmetic opcode, Kind kind, AllocatableValue result, AllocatableValue x, AMD64AddressValue location, LIRFrameState state) {
-            super(TYPE);
-            this.opcode = opcode;
-            this.result = result;
-            this.x = x;
-            this.location = location;
-            this.kind = kind;
-            this.state = state;
-        }
-
-        @Override
-        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
-            AMD64Move.move(crb, masm, result, x);
-            if (state != null) {
-                crb.recordImplicitException(masm.position(), state);
-            }
-            emit(crb, masm, opcode, result, location, null);
-        }
-
-        @Override
-        public void verify() {
-            super.verify();
-            assert differentRegisters(result, location) || sameRegister(x, location);
-            // verifyKind(opcode, result, x, location);
-        }
-    }
-
-    /**
-     * Binary operation with two operands. The first source operand is combined with the
-     * destination. The second source operand must be a register.
-     */
-    public static final class BinaryRegReg extends AMD64LIRInstruction {
-        public static final LIRInstructionClass<BinaryRegReg> TYPE = LIRInstructionClass.create(BinaryRegReg.class);
-
-        @Opcode private final AMD64Arithmetic opcode;
-        @Def({REG, HINT}) protected AllocatableValue result;
-        @Use({REG, STACK}) protected AllocatableValue x;
-        @Alive({REG}) protected AllocatableValue y;
-
-        public BinaryRegReg(AMD64Arithmetic opcode, AllocatableValue result, AllocatableValue x, AllocatableValue y) {
-            super(TYPE);
-            this.opcode = opcode;
-            this.result = result;
-            this.x = x;
-            this.y = y;
-        }
-
-        @Override
-        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
-            AMD64Move.move(crb, masm, result, x);
-            emit(crb, masm, opcode, result, y, null);
-        }
-
-        @Override
-        public void verify() {
-            super.verify();
-            assert differentRegisters(result, y) || sameRegister(x, y);
-            verifyKind(opcode, result, x, y);
-        }
-    }
-
-    /**
-     * Binary operation with single source/destination operand and one constant.
-     */
-    public static final class BinaryRegConst extends AMD64LIRInstruction {
-        public static final LIRInstructionClass<BinaryRegConst> TYPE = LIRInstructionClass.create(BinaryRegConst.class);
-
-        @Opcode private final AMD64Arithmetic opcode;
-        @Def({REG, HINT}) protected AllocatableValue result;
-        @Use({REG, STACK}) protected AllocatableValue x;
-        protected JavaConstant y;
-
-        public BinaryRegConst(AMD64Arithmetic opcode, AllocatableValue result, AllocatableValue x, JavaConstant y) {
-            super(TYPE);
-            this.opcode = opcode;
-            this.result = result;
-            this.x = x;
-            this.y = y;
-        }
-
-        @Override
-        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
-            AMD64Move.move(crb, masm, result, x);
-            emit(crb, masm, opcode, result, y, null);
-        }
-
-        @Override
-        public void verify() {
-            super.verify();
-            verifyKind(opcode, result, x, y);
-        }
-    }
-
-    /**
-     * Commutative binary operation with two operands. One of the operands is combined with the
-     * result.
-     */
-    public static final class BinaryCommutative extends AMD64LIRInstruction {
-        public static final LIRInstructionClass<BinaryCommutative> TYPE = LIRInstructionClass.create(BinaryCommutative.class);
-
-        @Opcode private final AMD64Arithmetic opcode;
-        @Def({REG, HINT}) protected AllocatableValue result;
-        @Use({REG, STACK}) protected AllocatableValue x;
-        @Use({REG, STACK}) protected AllocatableValue y;
-
-        public BinaryCommutative(AMD64Arithmetic opcode, AllocatableValue result, AllocatableValue x, AllocatableValue y) {
-            super(TYPE);
-            this.opcode = opcode;
-            this.result = result;
-            this.x = x;
-            this.y = y;
-        }
-
-        @Override
-        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
-            if (sameRegister(result, y)) {
-                emit(crb, masm, opcode, result, x, null);
-            } else {
-                AMD64Move.move(crb, masm, result, x);
-                emit(crb, masm, opcode, result, y, null);
-            }
-        }
-
-        @Override
-        public void verify() {
-            super.verify();
-            verifyKind(opcode, result, x, y);
-        }
-    }
-
-    /**
-     * Binary operation with separate source and destination and one constant operand.
-     */
-    public static final class BinaryRegStackConst extends AMD64LIRInstruction {
-        public static final LIRInstructionClass<BinaryRegStackConst> TYPE = LIRInstructionClass.create(BinaryRegStackConst.class);
-
-        @Opcode private final AMD64Arithmetic opcode;
-        @Def({REG}) protected AllocatableValue result;
-        @Use({REG, STACK}) protected AllocatableValue x;
-        protected JavaConstant y;
-
-        public BinaryRegStackConst(AMD64Arithmetic opcode, AllocatableValue result, AllocatableValue x, JavaConstant y) {
-            super(TYPE);
-            this.opcode = opcode;
-            this.result = result;
-            this.x = x;
-            this.y = y;
-        }
-
-        @Override
-        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
-            if (isRegister(x)) {
-                switch (opcode) {
-                    case IMUL:
-                        masm.imull(asIntReg(result), asIntReg(x), crb.asIntConst(y));
-                        break;
-                    case LMUL:
-                        masm.imulq(asLongReg(result), asLongReg(x), crb.asIntConst(y));
-                        break;
-                    default:
-                        throw GraalInternalError.shouldNotReachHere();
-                }
-            } else {
-                assert isStackSlot(x);
-                switch (opcode) {
-                    case IMUL:
-                        masm.imull(asIntReg(result), (AMD64Address) crb.asIntAddr(x), crb.asIntConst(y));
-                        break;
-                    case LMUL:
-                        masm.imulq(asLongReg(result), (AMD64Address) crb.asLongAddr(x), crb.asIntConst(y));
-                        break;
-                    default:
-                        throw GraalInternalError.shouldNotReachHere();
-                }
-            }
-        }
-
-        @Override
-        public void verify() {
-            super.verify();
-            verifyKind(opcode, result, x, y);
-        }
-    }
-
-    public static final class MulHighOp extends AMD64LIRInstruction {
-        public static final LIRInstructionClass<MulHighOp> TYPE = LIRInstructionClass.create(MulHighOp.class);
-
-        @Opcode private final AMD64Arithmetic opcode;
-        @Def({REG}) public AllocatableValue lowResult;
-        @Def({REG}) public AllocatableValue highResult;
-        @Use({REG}) public AllocatableValue x;
-        @Use({REG, STACK}) public AllocatableValue y;
-
-        public MulHighOp(AMD64Arithmetic opcode, LIRKind kind, AllocatableValue y) {
-            super(TYPE);
-            this.opcode = opcode;
-            this.x = AMD64.rax.asValue(kind);
-            this.y = y;
-            this.lowResult = AMD64.rax.asValue(kind);
-            this.highResult = AMD64.rdx.asValue(kind);
-        }
-
-        @Override
-        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
-            if (isRegister(y)) {
-                switch (opcode) {
-                    case IMUL:
-                        masm.imull(asRegister(y));
-                        break;
-                    case IUMUL:
-                        masm.mull(asRegister(y));
-                        break;
-                    case LMUL:
-                        masm.imulq(asRegister(y));
-                        break;
-                    case LUMUL:
-                        masm.mulq(asRegister(y));
-                        break;
-                    default:
-                        throw GraalInternalError.shouldNotReachHere();
-                }
-            } else {
-                switch (opcode) {
-                    case IMUL:
-                        masm.imull((AMD64Address) crb.asAddress(y));
-                        break;
-                    case IUMUL:
-                        masm.mull((AMD64Address) crb.asAddress(y));
-                        break;
-                    case LMUL:
-                        masm.imulq((AMD64Address) crb.asAddress(y));
-                        break;
-                    case LUMUL:
-                        masm.mulq((AMD64Address) crb.asAddress(y));
-                        break;
-                    default:
-                        throw GraalInternalError.shouldNotReachHere();
-                }
-            }
-        }
-    }
-
-    public static final class DivRemOp extends AMD64LIRInstruction {
-        public static final LIRInstructionClass<DivRemOp> TYPE = LIRInstructionClass.create(DivRemOp.class);
-
-        @Opcode private final AMD64Arithmetic opcode;
-        @Def public AllocatableValue divResult;
-        @Def public AllocatableValue remResult;
-        @Use protected AllocatableValue x;
-        @Alive protected AllocatableValue y;
-        @State protected LIRFrameState state;
-
-        public DivRemOp(AMD64Arithmetic opcode, AllocatableValue x, AllocatableValue y, LIRFrameState state) {
-            super(TYPE);
-            this.opcode = opcode;
-            this.divResult = AMD64.rax.asValue(LIRKind.derive(x, y));
-            this.remResult = AMD64.rdx.asValue(LIRKind.derive(x, y));
-            this.x = x;
-            this.y = y;
-            this.state = state;
-        }
-
-        @Override
-        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
-            emit(crb, masm, opcode, null, y, state);
-        }
-
-        @Override
-        public void verify() {
-            super.verify();
-            // left input in rax, right input in any register but rax and rdx, result quotient in
-            // rax, result remainder in rdx
-            assert asRegister(x).equals(AMD64.rax);
-            assert differentRegisters(y, AMD64.rax.asValue(), AMD64.rdx.asValue());
-            verifyKind(opcode, divResult, x, y);
-            verifyKind(opcode, remResult, x, y);
-        }
-    }
+    FREM,
+    DREM;
 
     public static class FPDivRemOp extends AMD64LIRInstruction {
         public static final LIRInstructionClass<FPDivRemOp> TYPE = LIRInstructionClass.create(FPDivRemOp.class);
@@ -536,647 +94,8 @@
         @Override
         public void verify() {
             super.verify();
-            verifyKind(opcode, result, x, y);
-        }
-    }
-
-    @SuppressWarnings("unused")
-    protected static void emit(CompilationResultBuilder crb, AMD64MacroAssembler masm, AMD64Arithmetic opcode, AllocatableValue result) {
-        switch (opcode) {
-            case INEG:
-                masm.negl(asIntReg(result));
-                break;
-            case LNEG:
-                masm.negq(asLongReg(result));
-                break;
-            case INOT:
-                masm.notl(asIntReg(result));
-                break;
-            case LNOT:
-                masm.notq(asLongReg(result));
-                break;
-            default:
-                throw GraalInternalError.shouldNotReachHere();
+            assert (opcode.name().startsWith("F") && result.getKind() == Kind.Float && x.getKind() == Kind.Float && y.getKind() == Kind.Float) ||
+                            (opcode.name().startsWith("D") && result.getKind() == Kind.Double && x.getKind() == Kind.Double && y.getKind() == Kind.Double);
         }
     }
-
-    public static void emit(CompilationResultBuilder crb, AMD64MacroAssembler masm, AMD64Arithmetic opcode, Value dst, Value src, LIRFrameState info) {
-        int exceptionOffset = -1;
-        if (isRegister(src)) {
-            switch (opcode) {
-                case IADD:
-                    masm.addl(asIntReg(dst), asIntReg(src));
-                    break;
-                case ISUB:
-                    masm.subl(asIntReg(dst), asIntReg(src));
-                    break;
-                case IAND:
-                    masm.andl(asIntReg(dst), asIntReg(src));
-                    break;
-                case IMUL:
-                    masm.imull(asIntReg(dst), asIntReg(src));
-                    break;
-                case IOR:
-                    masm.orl(asIntReg(dst), asIntReg(src));
-                    break;
-                case IXOR:
-                    masm.xorl(asIntReg(dst), asIntReg(src));
-                    break;
-                case ISHL:
-                    assert asIntReg(src).equals(AMD64.rcx);
-                    masm.shll(asIntReg(dst));
-                    break;
-                case ISHR:
-                    assert asIntReg(src).equals(AMD64.rcx);
-                    masm.sarl(asIntReg(dst));
-                    break;
-                case IUSHR:
-                    assert asIntReg(src).equals(AMD64.rcx);
-                    masm.shrl(asIntReg(dst));
-                    break;
-                case IROL:
-                    assert asIntReg(src).equals(AMD64.rcx);
-                    masm.roll(asIntReg(dst));
-                    break;
-                case IROR:
-                    assert asIntReg(src).equals(AMD64.rcx);
-                    masm.rorl(asIntReg(dst));
-                    break;
-
-                case LADD:
-                    masm.addq(asLongReg(dst), asLongReg(src));
-                    break;
-                case LSUB:
-                    masm.subq(asLongReg(dst), asLongReg(src));
-                    break;
-                case LMUL:
-                    masm.imulq(asLongReg(dst), asLongReg(src));
-                    break;
-                case LAND:
-                    masm.andq(asLongReg(dst), asLongReg(src));
-                    break;
-                case LOR:
-                    masm.orq(asLongReg(dst), asLongReg(src));
-                    break;
-                case LXOR:
-                    masm.xorq(asLongReg(dst), asLongReg(src));
-                    break;
-                case LSHL:
-                    assert asIntReg(src).equals(AMD64.rcx);
-                    masm.shlq(asLongReg(dst));
-                    break;
-                case LSHR:
-                    assert asIntReg(src).equals(AMD64.rcx);
-                    masm.sarq(asLongReg(dst));
-                    break;
-                case LUSHR:
-                    assert asIntReg(src).equals(AMD64.rcx);
-                    masm.shrq(asLongReg(dst));
-                    break;
-                case LROL:
-                    assert asIntReg(src).equals(AMD64.rcx);
-                    masm.rolq(asLongReg(dst));
-                    break;
-                case LROR:
-                    assert asIntReg(src).equals(AMD64.rcx);
-                    masm.rorq(asLongReg(dst));
-                    break;
-
-                case FADD:
-                    masm.addss(asFloatReg(dst), asFloatReg(src));
-                    break;
-                case FSUB:
-                    masm.subss(asFloatReg(dst), asFloatReg(src));
-                    break;
-                case FMUL:
-                    masm.mulss(asFloatReg(dst), asFloatReg(src));
-                    break;
-                case FDIV:
-                    masm.divss(asFloatReg(dst), asFloatReg(src));
-                    break;
-                case FAND:
-                    masm.andps(asFloatReg(dst), asFloatReg(src));
-                    break;
-                case FOR:
-                    masm.orps(asFloatReg(dst), asFloatReg(src));
-                    break;
-                case FXOR:
-                    masm.xorps(asFloatReg(dst), asFloatReg(src));
-                    break;
-
-                case DADD:
-                    masm.addsd(asDoubleReg(dst), asDoubleReg(src));
-                    break;
-                case DSUB:
-                    masm.subsd(asDoubleReg(dst), asDoubleReg(src));
-                    break;
-                case DMUL:
-                    masm.mulsd(asDoubleReg(dst), asDoubleReg(src));
-                    break;
-                case DDIV:
-                    masm.divsd(asDoubleReg(dst), asDoubleReg(src));
-                    break;
-                case DAND:
-                    masm.andpd(asDoubleReg(dst), asDoubleReg(src));
-                    break;
-                case DOR:
-                    masm.orpd(asDoubleReg(dst), asDoubleReg(src));
-                    break;
-                case DXOR:
-                    masm.xorpd(asDoubleReg(dst), asDoubleReg(src));
-                    break;
-
-                case SQRT:
-                    masm.sqrtsd(asDoubleReg(dst), asDoubleReg(src));
-                    break;
-
-                case B2I:
-                    masm.movsbl(asIntReg(dst), asIntReg(src));
-                    break;
-                case S2I:
-                    masm.movswl(asIntReg(dst), asIntReg(src));
-                    break;
-                case B2L:
-                    masm.movsbq(asLongReg(dst), asIntReg(src));
-                    break;
-                case S2L:
-                    masm.movswq(asLongReg(dst), asIntReg(src));
-                    break;
-                case I2L:
-                    masm.movslq(asLongReg(dst), asIntReg(src));
-                    break;
-                case L2I:
-                    masm.movl(asIntReg(dst), asLongReg(src));
-                    break;
-                case F2D:
-                    masm.cvtss2sd(asDoubleReg(dst), asFloatReg(src));
-                    break;
-                case D2F:
-                    masm.cvtsd2ss(asFloatReg(dst), asDoubleReg(src));
-                    break;
-                case I2F:
-                    masm.cvtsi2ssl(asFloatReg(dst), asIntReg(src));
-                    break;
-                case I2D:
-                    masm.cvtsi2sdl(asDoubleReg(dst), asIntReg(src));
-                    break;
-                case L2F:
-                    masm.cvtsi2ssq(asFloatReg(dst), asLongReg(src));
-                    break;
-                case L2D:
-                    masm.cvtsi2sdq(asDoubleReg(dst), asLongReg(src));
-                    break;
-                case F2I:
-                    masm.cvttss2sil(asIntReg(dst), asFloatReg(src));
-                    break;
-                case D2I:
-                    masm.cvttsd2sil(asIntReg(dst), asDoubleReg(src));
-                    break;
-                case F2L:
-                    masm.cvttss2siq(asLongReg(dst), asFloatReg(src));
-                    break;
-                case D2L:
-                    masm.cvttsd2siq(asLongReg(dst), asDoubleReg(src));
-                    break;
-                case MOV_I2F:
-                    masm.movdl(asFloatReg(dst), asIntReg(src));
-                    break;
-                case MOV_L2D:
-                    masm.movdq(asDoubleReg(dst), asLongReg(src));
-                    break;
-                case MOV_F2I:
-                    masm.movdl(asIntReg(dst), asFloatReg(src));
-                    break;
-                case MOV_D2L:
-                    masm.movdq(asLongReg(dst), asDoubleReg(src));
-                    break;
-
-                case IDIVREM:
-                case IDIV:
-                case IREM:
-                    masm.cdql();
-                    exceptionOffset = masm.position();
-                    masm.idivl(asRegister(src));
-                    break;
-
-                case LDIVREM:
-                case LDIV:
-                case LREM:
-                    masm.cdqq();
-                    exceptionOffset = masm.position();
-                    masm.idivq(asRegister(src));
-                    break;
-
-                case IUDIV:
-                case IUREM:
-                    // Must zero the high 64-bit word (in RDX) of the dividend
-                    masm.xorq(AMD64.rdx, AMD64.rdx);
-                    exceptionOffset = masm.position();
-                    masm.divl(asRegister(src));
-                    break;
-
-                case LUDIV:
-                case LUREM:
-                    // Must zero the high 64-bit word (in RDX) of the dividend
-                    masm.xorq(AMD64.rdx, AMD64.rdx);
-                    exceptionOffset = masm.position();
-                    masm.divq(asRegister(src));
-                    break;
-                default:
-                    throw GraalInternalError.shouldNotReachHere();
-            }
-        } else if (isConstant(src)) {
-            switch (opcode) {
-                case IADD:
-                    masm.incrementl(asIntReg(dst), crb.asIntConst(src));
-                    break;
-                case ISUB:
-                    masm.decrementl(asIntReg(dst), crb.asIntConst(src));
-                    break;
-                case IAND:
-                    masm.andl(asIntReg(dst), crb.asIntConst(src));
-                    break;
-                case IOR:
-                    masm.orl(asIntReg(dst), crb.asIntConst(src));
-                    break;
-                case IXOR:
-                    masm.xorl(asIntReg(dst), crb.asIntConst(src));
-                    break;
-                case ISHL:
-                    masm.shll(asIntReg(dst), crb.asIntConst(src) & 31);
-                    break;
-                case ISHR:
-                    masm.sarl(asIntReg(dst), crb.asIntConst(src) & 31);
-                    break;
-                case IUSHR:
-                    masm.shrl(asIntReg(dst), crb.asIntConst(src) & 31);
-                    break;
-                case IROL:
-                    masm.roll(asIntReg(dst), crb.asIntConst(src) & 31);
-                    break;
-                case IROR:
-                    masm.rorl(asIntReg(dst), crb.asIntConst(src) & 31);
-                    break;
-
-                case LADD:
-                    masm.addq(asLongReg(dst), crb.asIntConst(src));
-                    break;
-                case LSUB:
-                    masm.subq(asLongReg(dst), crb.asIntConst(src));
-                    break;
-                case LAND:
-                    masm.andq(asLongReg(dst), crb.asIntConst(src));
-                    break;
-                case LOR:
-                    masm.orq(asLongReg(dst), crb.asIntConst(src));
-                    break;
-                case LXOR:
-                    masm.xorq(asLongReg(dst), crb.asIntConst(src));
-                    break;
-                case LSHL:
-                    masm.shlq(asLongReg(dst), crb.asIntConst(src) & 63);
-                    break;
-                case LSHR:
-                    masm.sarq(asLongReg(dst), crb.asIntConst(src) & 63);
-                    break;
-                case LUSHR:
-                    masm.shrq(asLongReg(dst), crb.asIntConst(src) & 63);
-                    break;
-                case LROL:
-                    masm.rolq(asLongReg(dst), crb.asIntConst(src) & 31);
-                    break;
-                case LROR:
-                    masm.rorq(asLongReg(dst), crb.asIntConst(src) & 31);
-                    break;
-
-                case FADD:
-                    masm.addss(asFloatReg(dst), (AMD64Address) crb.asFloatConstRef(src));
-                    break;
-                case FSUB:
-                    masm.subss(asFloatReg(dst), (AMD64Address) crb.asFloatConstRef(src));
-                    break;
-                case FMUL:
-                    masm.mulss(asFloatReg(dst), (AMD64Address) crb.asFloatConstRef(src));
-                    break;
-                case FAND:
-                    masm.andps(asFloatReg(dst), (AMD64Address) crb.asFloatConstRef(src, 16));
-                    break;
-                case FOR:
-                    masm.orps(asFloatReg(dst), (AMD64Address) crb.asFloatConstRef(src, 16));
-                    break;
-                case FXOR:
-                    masm.xorps(asFloatReg(dst), (AMD64Address) crb.asFloatConstRef(src, 16));
-                    break;
-                case FDIV:
-                    masm.divss(asFloatReg(dst), (AMD64Address) crb.asFloatConstRef(src));
-                    break;
-
-                case DADD:
-                    masm.addsd(asDoubleReg(dst), (AMD64Address) crb.asDoubleConstRef(src));
-                    break;
-                case DSUB:
-                    masm.subsd(asDoubleReg(dst), (AMD64Address) crb.asDoubleConstRef(src));
-                    break;
-                case DMUL:
-                    masm.mulsd(asDoubleReg(dst), (AMD64Address) crb.asDoubleConstRef(src));
-                    break;
-                case DDIV:
-                    masm.divsd(asDoubleReg(dst), (AMD64Address) crb.asDoubleConstRef(src));
-                    break;
-                case DAND:
-                    masm.andpd(asDoubleReg(dst), (AMD64Address) crb.asDoubleConstRef(src, 16));
-                    break;
-                case DOR:
-                    masm.orpd(asDoubleReg(dst), (AMD64Address) crb.asDoubleConstRef(src, 16));
-                    break;
-                case DXOR:
-                    masm.xorpd(asDoubleReg(dst), (AMD64Address) crb.asDoubleConstRef(src, 16));
-                    break;
-                default:
-                    throw GraalInternalError.shouldNotReachHere();
-            }
-        } else if (isStackSlot(src)) {
-
-            switch (opcode) {
-                case IADD:
-                    masm.addl(asIntReg(dst), (AMD64Address) crb.asIntAddr(src));
-                    break;
-                case ISUB:
-                    masm.subl(asIntReg(dst), (AMD64Address) crb.asIntAddr(src));
-                    break;
-                case IAND:
-                    masm.andl(asIntReg(dst), (AMD64Address) crb.asIntAddr(src));
-                    break;
-                case IMUL:
-                    masm.imull(asIntReg(dst), (AMD64Address) crb.asIntAddr(src));
-                    break;
-                case IOR:
-                    masm.orl(asIntReg(dst), (AMD64Address) crb.asIntAddr(src));
-                    break;
-                case IXOR:
-                    masm.xorl(asIntReg(dst), (AMD64Address) crb.asIntAddr(src));
-                    break;
-
-                case LADD:
-                    masm.addq(asLongReg(dst), (AMD64Address) crb.asLongAddr(src));
-                    break;
-                case LSUB:
-                    masm.subq(asLongReg(dst), (AMD64Address) crb.asLongAddr(src));
-                    break;
-                case LMUL:
-                    masm.imulq(asLongReg(dst), (AMD64Address) crb.asLongAddr(src));
-                    break;
-                case LAND:
-                    masm.andq(asLongReg(dst), (AMD64Address) crb.asLongAddr(src));
-                    break;
-                case LOR:
-                    masm.orq(asLongReg(dst), (AMD64Address) crb.asLongAddr(src));
-                    break;
-                case LXOR:
-                    masm.xorq(asLongReg(dst), (AMD64Address) crb.asLongAddr(src));
-                    break;
-
-                case FADD:
-                    masm.addss(asFloatReg(dst), (AMD64Address) crb.asFloatAddr(src));
-                    break;
-                case FSUB:
-                    masm.subss(asFloatReg(dst), (AMD64Address) crb.asFloatAddr(src));
-                    break;
-                case FMUL:
-                    masm.mulss(asFloatReg(dst), (AMD64Address) crb.asFloatAddr(src));
-                    break;
-                case FDIV:
-                    masm.divss(asFloatReg(dst), (AMD64Address) crb.asFloatAddr(src));
-                    break;
-
-                case DADD:
-                    masm.addsd(asDoubleReg(dst), (AMD64Address) crb.asDoubleAddr(src));
-                    break;
-                case DSUB:
-                    masm.subsd(asDoubleReg(dst), (AMD64Address) crb.asDoubleAddr(src));
-                    break;
-                case DMUL:
-                    masm.mulsd(asDoubleReg(dst), (AMD64Address) crb.asDoubleAddr(src));
-                    break;
-                case DDIV:
-                    masm.divsd(asDoubleReg(dst), (AMD64Address) crb.asDoubleAddr(src));
-                    break;
-
-                case SQRT:
-                    masm.sqrtsd(asDoubleReg(dst), (AMD64Address) crb.asDoubleAddr(src));
-                    break;
-
-                case B2I:
-                    masm.movsbl(asIntReg(dst), (AMD64Address) crb.asByteAddr(src));
-                    break;
-                case S2I:
-                    masm.movswl(asIntReg(dst), (AMD64Address) crb.asShortAddr(src));
-                    break;
-                case B2L:
-                    masm.movsbq(asLongReg(dst), (AMD64Address) crb.asByteAddr(src));
-                    break;
-                case S2L:
-                    masm.movswq(asLongReg(dst), (AMD64Address) crb.asShortAddr(src));
-                    break;
-                case I2L:
-                    masm.movslq(asLongReg(dst), (AMD64Address) crb.asIntAddr(src));
-                    break;
-                case F2D:
-                    masm.cvtss2sd(asDoubleReg(dst), (AMD64Address) crb.asFloatAddr(src));
-                    break;
-                case D2F:
-                    masm.cvtsd2ss(asFloatReg(dst), (AMD64Address) crb.asDoubleAddr(src));
-                    break;
-                case I2F:
-                    masm.cvtsi2ssl(asFloatReg(dst), (AMD64Address) crb.asIntAddr(src));
-                    break;
-                case I2D:
-                    masm.cvtsi2sdl(asDoubleReg(dst), (AMD64Address) crb.asIntAddr(src));
-                    break;
-                case L2F:
-                    masm.cvtsi2ssq(asFloatReg(dst), (AMD64Address) crb.asLongAddr(src));
-                    break;
-                case L2D:
-                    masm.cvtsi2sdq(asDoubleReg(dst), (AMD64Address) crb.asLongAddr(src));
-                    break;
-                case F2I:
-                    masm.cvttss2sil(asIntReg(dst), (AMD64Address) crb.asFloatAddr(src));
-                    break;
-                case D2I:
-                    masm.cvttsd2sil(asIntReg(dst), (AMD64Address) crb.asDoubleAddr(src));
-                    break;
-                case F2L:
-                    masm.cvttss2siq(asLongReg(dst), (AMD64Address) crb.asFloatAddr(src));
-                    break;
-                case D2L:
-                    masm.cvttsd2siq(asLongReg(dst), (AMD64Address) crb.asDoubleAddr(src));
-                    break;
-                case MOV_I2F:
-                    masm.movss(asFloatReg(dst), (AMD64Address) crb.asIntAddr(src));
-                    break;
-                case MOV_L2D:
-                    masm.movsd(asDoubleReg(dst), (AMD64Address) crb.asLongAddr(src));
-                    break;
-                case MOV_F2I:
-                    masm.movl(asIntReg(dst), (AMD64Address) crb.asFloatAddr(src));
-                    break;
-                case MOV_D2L:
-                    masm.movq(asLongReg(dst), (AMD64Address) crb.asDoubleAddr(src));
-                    break;
-
-                default:
-                    throw GraalInternalError.shouldNotReachHere();
-            }
-        } else {
-            switch (opcode) {
-                case IADD:
-                    masm.addl(asIntReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case ISUB:
-                    masm.subl(asIntReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case IAND:
-                    masm.andl(asIntReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case IMUL:
-                    masm.imull(asIntReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case IOR:
-                    masm.orl(asIntReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case IXOR:
-                    masm.xorl(asIntReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-
-                case LADD:
-                    masm.addq(asLongReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case LSUB:
-                    masm.subq(asLongReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case LMUL:
-                    masm.imulq(asLongReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case LAND:
-                    masm.andq(asLongReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case LOR:
-                    masm.orq(asLongReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case LXOR:
-                    masm.xorq(asLongReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-
-                case FADD:
-                    masm.addss(asFloatReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case FSUB:
-                    masm.subss(asFloatReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case FMUL:
-                    masm.mulss(asFloatReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case FDIV:
-                    masm.divss(asFloatReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-
-                case DADD:
-                    masm.addsd(asDoubleReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case DSUB:
-                    masm.subsd(asDoubleReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case DMUL:
-                    masm.mulsd(asDoubleReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case DDIV:
-                    masm.divsd(asDoubleReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-
-                case SQRT:
-                    masm.sqrtsd(asDoubleReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-
-                case B2I:
-                    masm.movsbl(asIntReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case S2I:
-                    masm.movswl(asIntReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case B2L:
-                    masm.movsbq(asLongReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case S2L:
-                    masm.movswq(asLongReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case I2L:
-                    masm.movslq(asLongReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case F2D:
-                    masm.cvtss2sd(asDoubleReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case D2F:
-                    masm.cvtsd2ss(asFloatReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case I2F:
-                    masm.cvtsi2ssl(asFloatReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case I2D:
-                    masm.cvtsi2sdl(asDoubleReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case L2F:
-                    masm.cvtsi2ssq(asFloatReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case L2D:
-                    masm.cvtsi2sdq(asDoubleReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case F2I:
-                    masm.cvttss2sil(asIntReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case D2I:
-                    masm.cvttsd2sil(asIntReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case F2L:
-                    masm.cvttss2siq(asLongReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case D2L:
-                    masm.cvttsd2siq(asLongReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case MOV_I2F:
-                    masm.movss(asFloatReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case MOV_L2D:
-                    masm.movsd(asDoubleReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case MOV_F2I:
-                    masm.movl(asIntReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case MOV_D2L:
-                    masm.movq(asLongReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case MOV_B2UI:
-                    masm.movzbl(asIntReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-                case MOV_B2UL:
-                    masm.movzbl(asLongReg(dst), ((AMD64AddressValue) src).toAddress());
-                    break;
-
-                default:
-                    throw GraalInternalError.shouldNotReachHere();
-            }
-        }
-
-        if (info != null) {
-            assert exceptionOffset != -1;
-            crb.recordImplicitException(exceptionOffset, info);
-        }
-    }
-
-    private static void verifyKind(AMD64Arithmetic opcode, Value result, Value x, Value y) {
-        assert (opcode.name().startsWith("I") && result.getKind().getStackKind() == Kind.Int && x.getKind().getStackKind() == Kind.Int && y.getKind().getStackKind() == Kind.Int) ||
-                        (opcode.name().startsWith("L") && result.getKind() == Kind.Long && x.getKind() == Kind.Long && y.getKind() == Kind.Long) ||
-                        (opcode.name().startsWith("F") && result.getKind() == Kind.Float && x.getKind() == Kind.Float && y.getKind() == Kind.Float) ||
-                        (opcode.name().startsWith("D") && result.getKind() == Kind.Double && x.getKind() == Kind.Double && y.getKind() == Kind.Double) ||
-                        (opcode.name().matches(".U?SH.") && result.getKind() == x.getKind() && y.getKind() == Kind.Int && (isConstant(y) || asRegister(y).equals(AMD64.rcx)));
-    }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64BinaryCommutativeOp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64BinaryCommutativeOp extends AMD64LIRInstruction {
+    public static final LIRInstructionClass<AMD64BinaryCommutativeOp> TYPE = LIRInstructionClass.create(AMD64BinaryCommutativeOp.class);
+
+    @Opcode private final AMD64RMOp opcode;
+    private final OperandSize size;
+
+    @Def({REG, HINT}) protected AllocatableValue result;
+    @Use({REG, STACK}) protected AllocatableValue x;
+    @Use({REG, STACK}) protected AllocatableValue y;
+
+    public AMD64BinaryCommutativeOp(AMD64RMOp opcode, OperandSize size, AllocatableValue result, AllocatableValue x, AllocatableValue y) {
+        super(TYPE);
+        this.opcode = opcode;
+        this.size = size;
+
+        this.result = result;
+        this.x = x;
+        this.y = y;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        AllocatableValue input;
+        if (sameRegister(result, y)) {
+            input = x;
+        } else {
+            AMD64Move.move(crb, masm, result, x);
+            input = y;
+        }
+
+        if (isRegister(input)) {
+            opcode.emit(masm, size, asRegister(result), asRegister(input));
+        } else {
+            assert isStackSlot(input);
+            opcode.emit(masm, size, asRegister(result), (AMD64Address) crb.asAddress(input));
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64BinaryConstOp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MIOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64BinaryConstOp extends AMD64LIRInstruction {
+    public static final LIRInstructionClass<AMD64BinaryConstOp> TYPE = LIRInstructionClass.create(AMD64BinaryConstOp.class);
+
+    @Opcode private final AMD64MIOp opcode;
+    private final OperandSize size;
+
+    @Def({REG, HINT}) protected AllocatableValue result;
+    @Use({REG, STACK}) protected AllocatableValue x;
+    protected JavaConstant y;
+
+    public AMD64BinaryConstOp(AMD64BinaryArithmetic opcode, OperandSize size, AllocatableValue result, AllocatableValue x, JavaConstant y) {
+        this(opcode.getMIOpcode(size, NumUtil.isByte(y.asLong())), size, result, x, y);
+    }
+
+    public AMD64BinaryConstOp(AMD64MIOp opcode, OperandSize size, AllocatableValue result, AllocatableValue x, JavaConstant y) {
+        super(TYPE);
+        this.opcode = opcode;
+        this.size = size;
+
+        this.result = result;
+        this.x = x;
+        this.y = y;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        AMD64Move.move(crb, masm, result, x);
+        assert NumUtil.is32bit(y.asLong());
+        opcode.emit(masm, size, asRegister(result), (int) y.asLong());
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64BinaryMemoryOp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.StandardOp.ImplicitNullCheck;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64BinaryMemoryOp extends AMD64LIRInstruction implements ImplicitNullCheck {
+    public static final LIRInstructionClass<AMD64BinaryMemoryOp> TYPE = LIRInstructionClass.create(AMD64BinaryMemoryOp.class);
+
+    @Opcode private final AMD64RMOp opcode;
+    private final OperandSize size;
+
+    @Def({REG, HINT}) protected AllocatableValue result;
+    @Use({REG, STACK}) protected AllocatableValue x;
+    @Alive({COMPOSITE}) protected AMD64AddressValue y;
+
+    @State protected LIRFrameState state;
+
+    public AMD64BinaryMemoryOp(AMD64RMOp opcode, OperandSize size, AllocatableValue result, AllocatableValue x, AMD64AddressValue y, LIRFrameState state) {
+        super(TYPE);
+        this.opcode = opcode;
+        this.size = size;
+
+        this.result = result;
+        this.x = x;
+        this.y = y;
+
+        this.state = state;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        AMD64Move.move(crb, masm, result, x);
+        if (state != null) {
+            crb.recordImplicitException(masm.position(), state);
+        }
+        opcode.emit(masm, size, asRegister(result), y.toAddress());
+    }
+
+    @Override
+    public void verify() {
+        super.verify();
+        assert differentRegisters(result, y) || sameRegister(x, y);
+    }
+
+    @Override
+    public boolean makeNullCheckFor(Value value, LIRFrameState nullCheckState, int implicitNullCheckLimit) {
+        if (state == null && y.isValidImplicitNullCheckFor(value, implicitNullCheckLimit)) {
+            state = nullCheckState;
+            return true;
+        }
+        return false;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64BinaryOp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64BinaryOp extends AMD64LIRInstruction {
+    public static final LIRInstructionClass<AMD64BinaryOp> TYPE = LIRInstructionClass.create(AMD64BinaryOp.class);
+
+    @Opcode private final AMD64RMOp opcode;
+    private final OperandSize size;
+
+    @Def({REG, HINT}) protected AllocatableValue result;
+    @Use({REG, STACK}) protected AllocatableValue x;
+    @Alive({REG, STACK}) protected AllocatableValue y;
+
+    public AMD64BinaryOp(AMD64RMOp opcode, OperandSize size, AllocatableValue result, AllocatableValue x, AllocatableValue y) {
+        super(TYPE);
+        this.opcode = opcode;
+        this.size = size;
+
+        this.result = result;
+        this.x = x;
+        this.y = y;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        AMD64Move.move(crb, masm, result, x);
+        if (isRegister(y)) {
+            opcode.emit(masm, size, asRegister(result), asRegister(y));
+        } else {
+            assert isStackSlot(y);
+            opcode.emit(masm, size, asRegister(result), (AMD64Address) crb.asAddress(y));
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64BinaryPatchOp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64BinaryPatchOp extends AMD64LIRInstruction {
+    public static final LIRInstructionClass<AMD64BinaryPatchOp> TYPE = LIRInstructionClass.create(AMD64BinaryPatchOp.class);
+
+    @Opcode private final AMD64RMOp opcode;
+    private final OperandSize size;
+
+    @Def({REG, HINT}) protected AllocatableValue result;
+    @Use({REG, STACK}) protected AllocatableValue x;
+    protected JavaConstant y;
+
+    private final int alignment;
+
+    public AMD64BinaryPatchOp(AMD64RMOp opcode, OperandSize size, AllocatableValue result, AllocatableValue x, JavaConstant y) {
+        this(opcode, size, result, x, y, y.getKind().getByteCount());
+    }
+
+    public AMD64BinaryPatchOp(AMD64RMOp opcode, OperandSize size, AllocatableValue result, AllocatableValue x, JavaConstant y, int alignment) {
+        super(TYPE);
+        this.opcode = opcode;
+        this.size = size;
+
+        this.result = result;
+        this.x = x;
+        this.y = y;
+
+        this.alignment = alignment;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        AMD64Move.move(crb, masm, result, x);
+        opcode.emit(masm, size, asRegister(result), (AMD64Address) crb.recordDataReferenceInCode(y, alignment));
+    }
+}
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64BitManipulationOp.java	Mon Mar 16 15:59:57 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,125 +0,0 @@
-/*
- * Copyright (c) 2012, 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.lir.amd64;
-
-import com.oracle.graal.api.code.*;
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.lir.*;
-import com.oracle.graal.lir.asm.*;
-
-public final class AMD64BitManipulationOp extends AMD64LIRInstruction {
-    public static final LIRInstructionClass<AMD64BitManipulationOp> TYPE = LIRInstructionClass.create(AMD64BitManipulationOp.class);
-
-    public enum IntrinsicOpcode {
-        IPOPCNT,
-        LPOPCNT,
-        IBSR,
-        LBSR,
-        BSF,
-        ILZCNT,
-        LLZCNT,
-        ITZCNT,
-        LTZCNT
-    }
-
-    @Opcode private final IntrinsicOpcode opcode;
-    @Def protected AllocatableValue result;
-    @Use({OperandFlag.REG, OperandFlag.STACK}) protected AllocatableValue input;
-
-    public AMD64BitManipulationOp(IntrinsicOpcode opcode, AllocatableValue result, AllocatableValue input) {
-        super(TYPE);
-        this.opcode = opcode;
-        this.result = result;
-        this.input = input;
-    }
-
-    @Override
-    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
-        Register dst = ValueUtil.asIntReg(result);
-        if (ValueUtil.isRegister(input)) {
-            Register src = ValueUtil.asRegister(input);
-            switch (opcode) {
-                case IPOPCNT:
-                    masm.popcntl(dst, src);
-                    break;
-                case LPOPCNT:
-                    masm.popcntq(dst, src);
-                    break;
-                case BSF:
-                    masm.bsfq(dst, src);
-                    break;
-                case IBSR:
-                    masm.bsrl(dst, src);
-                    break;
-                case LBSR:
-                    masm.bsrq(dst, src);
-                    break;
-                case ILZCNT:
-                    masm.lzcntl(dst, src);
-                    break;
-                case LLZCNT:
-                    masm.lzcntq(dst, src);
-                    break;
-                case ITZCNT:
-                    masm.tzcntl(dst, src);
-                    break;
-                case LTZCNT:
-                    masm.tzcntq(dst, src);
-                    break;
-            }
-        } else {
-            AMD64Address src = (AMD64Address) crb.asAddress(input);
-            switch (opcode) {
-                case IPOPCNT:
-                    masm.popcntl(dst, src);
-                    break;
-                case LPOPCNT:
-                    masm.popcntq(dst, src);
-                    break;
-                case BSF:
-                    masm.bsfq(dst, src);
-                    break;
-                case IBSR:
-                    masm.bsrl(dst, src);
-                    break;
-                case LBSR:
-                    masm.bsrq(dst, src);
-                    break;
-                case ILZCNT:
-                    masm.lzcntl(dst, src);
-                    break;
-                case LLZCNT:
-                    masm.lzcntq(dst, src);
-                    break;
-                case ITZCNT:
-                    masm.tzcntl(dst, src);
-                    break;
-                case LTZCNT:
-                    masm.tzcntq(dst, src);
-                    break;
-            }
-        }
-    }
-
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ClearRegisterOp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64ClearRegisterOp extends AMD64LIRInstruction {
+    public static final LIRInstructionClass<AMD64ClearRegisterOp> TYPE = LIRInstructionClass.create(AMD64ClearRegisterOp.class);
+
+    @Opcode private final AMD64RMOp op;
+    private final OperandSize size;
+
+    @Def({REG}) protected AllocatableValue result;
+
+    public AMD64ClearRegisterOp(OperandSize size, AllocatableValue result) {
+        super(TYPE);
+        this.op = XOR.getRMOpcode(size);
+        this.size = size;
+        this.result = result;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        op.emit(masm, size, asRegister(result), asRegister(result));
+    }
+}
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Compare.java	Mon Mar 16 15:59:57 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,263 +0,0 @@
-/*
- * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.lir.amd64;
-
-import static com.oracle.graal.api.code.ValueUtil.*;
-import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
-
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.compiler.common.*;
-import com.oracle.graal.lir.*;
-import com.oracle.graal.lir.amd64.AMD64Move.MemOp;
-import com.oracle.graal.lir.asm.*;
-
-public enum AMD64Compare {
-    BCMP,
-    SCMP,
-    ICMP,
-    LCMP,
-    ACMP,
-    FCMP,
-    DCMP;
-
-    public static final class CompareOp extends AMD64LIRInstruction {
-        public static final LIRInstructionClass<CompareOp> TYPE = LIRInstructionClass.create(CompareOp.class);
-        @Opcode private final AMD64Compare opcode;
-        @Use({REG}) protected Value x;
-        @Use({REG, STACK, CONST}) protected Value y;
-
-        public CompareOp(AMD64Compare opcode, Value x, Value y) {
-            super(TYPE);
-            this.opcode = opcode;
-            this.x = x;
-            this.y = y;
-        }
-
-        @Override
-        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
-            emit(crb, masm, opcode, x, y);
-        }
-
-        @Override
-        public void verify() {
-            super.verify();
-            assert (name().startsWith("B") && x.getKind().getStackKind() == Kind.Int && y.getKind().getStackKind() == Kind.Int) ||
-                            (name().startsWith("S") && x.getKind().getStackKind() == Kind.Int && y.getKind().getStackKind() == Kind.Int) ||
-                            (name().startsWith("I") && x.getKind() == Kind.Int && y.getKind() == Kind.Int) || (name().startsWith("L") && x.getKind() == Kind.Long && y.getKind() == Kind.Long) ||
-                            (name().startsWith("A") && x.getKind() == Kind.Object && y.getKind() == Kind.Object) ||
-                            (name().startsWith("F") && x.getKind() == Kind.Float && y.getKind() == Kind.Float) || (name().startsWith("D") && x.getKind() == Kind.Double && y.getKind() == Kind.Double) : String.format(
-                            "%s(%s, %s)", opcode, x, y);
-        }
-    }
-
-    public static final class CompareMemoryOp extends MemOp {
-        public static final LIRInstructionClass<CompareMemoryOp> TYPE = LIRInstructionClass.create(CompareMemoryOp.class);
-        @Opcode private final AMD64Compare opcode;
-        @Use({REG, CONST}) protected Value y;
-
-        /**
-         * Compare memory, constant or register, memory.
-         */
-        public CompareMemoryOp(AMD64Compare opcode, Kind kind, AMD64AddressValue address, Value y, LIRFrameState state) {
-            super(TYPE, kind, address, state);
-            this.opcode = opcode;
-            this.y = y;
-        }
-
-        @Override
-        protected void emitMemAccess(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
-            if (isRegister(y)) {
-                switch (opcode) {
-                    case BCMP:
-                        masm.cmpb(asIntReg(y), address.toAddress());
-                        break;
-                    case SCMP:
-                        masm.cmpw(asIntReg(y), address.toAddress());
-                        break;
-                    case ICMP:
-                        masm.cmpl(asIntReg(y), address.toAddress());
-                        break;
-                    case LCMP:
-                        masm.cmpq(asLongReg(y), address.toAddress());
-                        break;
-                    case ACMP:
-                        masm.cmpptr(asObjectReg(y), address.toAddress());
-                        break;
-                    case FCMP:
-                        masm.ucomiss(asFloatReg(y), address.toAddress());
-                        break;
-                    case DCMP:
-                        masm.ucomisd(asDoubleReg(y), address.toAddress());
-                        break;
-                    default:
-                        throw GraalInternalError.shouldNotReachHere();
-                }
-            } else if (isConstant(y)) {
-                switch (opcode) {
-                    case BCMP:
-                        masm.cmpb(address.toAddress(), crb.asIntConst(y));
-                        break;
-                    case SCMP:
-                        masm.cmpw(address.toAddress(), crb.asIntConst(y));
-                        break;
-                    case ICMP:
-                        masm.cmpl(address.toAddress(), crb.asIntConst(y));
-                        break;
-                    case LCMP:
-                        if (NumUtil.isInt(crb.asLongConst(y))) {
-                            masm.cmpq(address.toAddress(), (int) crb.asLongConst(y));
-                        } else {
-                            throw GraalInternalError.shouldNotReachHere();
-                        }
-                        break;
-                    case ACMP:
-                        if (asConstant(y).isNull()) {
-                            masm.cmpq(address.toAddress(), 0);
-                        } else {
-                            throw GraalInternalError.shouldNotReachHere();
-                        }
-                        break;
-                    default:
-                        throw GraalInternalError.shouldNotReachHere();
-                }
-
-            } else {
-                throw GraalInternalError.shouldNotReachHere();
-            }
-        }
-
-        @Override
-        public void verify() {
-            super.verify();
-            assert y instanceof Variable || y instanceof JavaConstant;
-            assert kind != Kind.Long || !(y instanceof JavaConstant) || NumUtil.isInt(((JavaConstant) y).asLong());
-        }
-    }
-
-    public static void emit(CompilationResultBuilder crb, AMD64MacroAssembler masm, AMD64Compare opcode, Value x, Value y) {
-        if (isRegister(x) && isRegister(y)) {
-            switch (opcode) {
-                case BCMP:
-                    masm.cmpb(asIntReg(x), asIntReg(y));
-                    break;
-                case SCMP:
-                    masm.cmpw(asIntReg(x), asIntReg(y));
-                    break;
-                case ICMP:
-                    masm.cmpl(asIntReg(x), asIntReg(y));
-                    break;
-                case LCMP:
-                    masm.cmpq(asLongReg(x), asLongReg(y));
-                    break;
-                case ACMP:
-                    masm.cmpptr(asObjectReg(x), asObjectReg(y));
-                    break;
-                case FCMP:
-                    masm.ucomiss(asFloatReg(x), asFloatReg(y));
-                    break;
-                case DCMP:
-                    masm.ucomisd(asDoubleReg(x), asDoubleReg(y));
-                    break;
-                default:
-                    throw GraalInternalError.shouldNotReachHere();
-            }
-        } else if (isRegister(x) && isConstant(y)) {
-            boolean isZero = ((JavaConstant) y).isDefaultForKind();
-            switch (opcode) {
-                case BCMP:
-                    if (isZero) {
-                        masm.testl(asIntReg(x), asIntReg(x));
-                    } else {
-                        masm.cmpb(asIntReg(x), crb.asIntConst(y));
-                    }
-                    break;
-                case SCMP:
-                    if (isZero) {
-                        masm.testl(asIntReg(x), asIntReg(x));
-                    } else {
-                        masm.cmpw(asIntReg(x), crb.asIntConst(y));
-                    }
-                    break;
-                case ICMP:
-                    if (isZero) {
-                        masm.testl(asIntReg(x), asIntReg(x));
-                    } else {
-                        masm.cmpl(asIntReg(x), crb.asIntConst(y));
-                    }
-                    break;
-                case LCMP:
-                    if (isZero) {
-                        masm.testq(asLongReg(x), asLongReg(x));
-                    } else {
-                        masm.cmpq(asLongReg(x), crb.asIntConst(y));
-                    }
-                    break;
-                case ACMP:
-                    if (isZero) {
-                        masm.testq(asObjectReg(x), asObjectReg(x));
-                        break;
-                    } else {
-                        throw GraalInternalError.shouldNotReachHere("Only null object constants are allowed in comparisons");
-                    }
-                case FCMP:
-                    masm.ucomiss(asFloatReg(x), (AMD64Address) crb.asFloatConstRef(y));
-                    break;
-                case DCMP:
-                    masm.ucomisd(asDoubleReg(x), (AMD64Address) crb.asDoubleConstRef(y));
-                    break;
-                default:
-                    throw GraalInternalError.shouldNotReachHere();
-            }
-        } else if (isRegister(x) && isStackSlot(y)) {
-            switch (opcode) {
-                case BCMP:
-                    masm.cmpb(asIntReg(x), (AMD64Address) crb.asByteAddr(y));
-                    break;
-                case SCMP:
-                    masm.cmpw(asIntReg(x), (AMD64Address) crb.asShortAddr(y));
-                    break;
-                case ICMP:
-                    masm.cmpl(asIntReg(x), (AMD64Address) crb.asIntAddr(y));
-                    break;
-                case LCMP:
-                    masm.cmpq(asLongReg(x), (AMD64Address) crb.asLongAddr(y));
-                    break;
-                case ACMP:
-                    masm.cmpptr(asObjectReg(x), (AMD64Address) crb.asObjectAddr(y));
-                    break;
-                case FCMP:
-                    masm.ucomiss(asFloatReg(x), (AMD64Address) crb.asFloatAddr(y));
-                    break;
-                case DCMP:
-                    masm.ucomisd(asDoubleReg(x), (AMD64Address) crb.asDoubleAddr(y));
-                    break;
-                default:
-                    throw GraalInternalError.shouldNotReachHere();
-            }
-        } else {
-            throw GraalInternalError.shouldNotReachHere();
-        }
-    }
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64CompareConstOp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.*;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MIOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64CompareConstOp extends AMD64LIRInstruction {
+    public static final LIRInstructionClass<AMD64CompareConstOp> TYPE = LIRInstructionClass.create(AMD64CompareConstOp.class);
+
+    @Opcode private final AMD64MIOp opcode;
+    private final OperandSize size;
+
+    @Use({REG, STACK}) protected AllocatableValue x;
+    protected JavaConstant y;
+
+    public AMD64CompareConstOp(AMD64MIOp opcode, OperandSize size, AllocatableValue x, JavaConstant y) {
+        super(TYPE);
+        this.opcode = opcode;
+        this.size = size;
+
+        this.x = x;
+        this.y = y;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        assert NumUtil.is32bit(y.asLong());
+        if (isRegister(x)) {
+            opcode.emit(masm, size, asRegister(x), (int) y.asLong());
+        } else {
+            assert isStackSlot(x);
+            opcode.emit(masm, size, (AMD64Address) crb.asAddress(x), (int) y.asLong());
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64CompareMemoryConstOp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MIOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.StandardOp.ImplicitNullCheck;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64CompareMemoryConstOp extends AMD64LIRInstruction implements ImplicitNullCheck {
+    public static final LIRInstructionClass<AMD64CompareMemoryConstOp> TYPE = LIRInstructionClass.create(AMD64CompareMemoryConstOp.class);
+
+    @Opcode private final AMD64MIOp opcode;
+    private final OperandSize size;
+
+    @Use({COMPOSITE}) protected AMD64AddressValue x;
+    protected JavaConstant y;
+
+    @State protected LIRFrameState state;
+
+    public AMD64CompareMemoryConstOp(AMD64MIOp opcode, OperandSize size, AMD64AddressValue x, JavaConstant y, LIRFrameState state) {
+        super(TYPE);
+        this.opcode = opcode;
+        this.size = size;
+
+        this.x = x;
+        this.y = y;
+
+        this.state = state;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        if (state != null) {
+            crb.recordImplicitException(masm.position(), state);
+        }
+        assert NumUtil.is32bit(y.asLong());
+        opcode.emit(masm, size, x.toAddress(), (int) y.asLong());
+    }
+
+    @Override
+    public boolean makeNullCheckFor(Value value, LIRFrameState nullCheckState, int implicitNullCheckLimit) {
+        if (state == null && x.isValidImplicitNullCheckFor(value, implicitNullCheckLimit)) {
+            state = nullCheckState;
+            return true;
+        }
+        return false;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64CompareMemoryOp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.StandardOp.ImplicitNullCheck;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64CompareMemoryOp extends AMD64LIRInstruction implements ImplicitNullCheck {
+    public static final LIRInstructionClass<AMD64CompareMemoryOp> TYPE = LIRInstructionClass.create(AMD64CompareMemoryOp.class);
+
+    @Opcode private final AMD64RMOp opcode;
+    private final OperandSize size;
+
+    @Use({REG}) protected AllocatableValue x;
+    @Use({COMPOSITE}) protected AMD64AddressValue y;
+
+    @State protected LIRFrameState state;
+
+    public AMD64CompareMemoryOp(AMD64RMOp opcode, OperandSize size, AllocatableValue x, AMD64AddressValue y, LIRFrameState state) {
+        super(TYPE);
+        this.opcode = opcode;
+        this.size = size;
+
+        this.x = x;
+        this.y = y;
+
+        this.state = state;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        if (state != null) {
+            crb.recordImplicitException(masm.position(), state);
+        }
+        opcode.emit(masm, size, asRegister(x), y.toAddress());
+    }
+
+    @Override
+    public boolean makeNullCheckFor(Value value, LIRFrameState nullCheckState, int implicitNullCheckLimit) {
+        if (state == null && y.isValidImplicitNullCheckFor(value, implicitNullCheckLimit)) {
+            state = nullCheckState;
+            return true;
+        }
+        return false;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64CompareOp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64CompareOp extends AMD64LIRInstruction {
+    public static final LIRInstructionClass<AMD64CompareOp> TYPE = LIRInstructionClass.create(AMD64CompareOp.class);
+
+    @Opcode private final AMD64RMOp opcode;
+    private final OperandSize size;
+
+    @Use({REG}) protected AllocatableValue x;
+    @Use({REG, STACK}) protected AllocatableValue y;
+
+    public AMD64CompareOp(AMD64RMOp opcode, OperandSize size, AllocatableValue x, AllocatableValue y) {
+        super(TYPE);
+        this.opcode = opcode;
+        this.size = size;
+
+        this.x = x;
+        this.y = y;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        if (isRegister(y)) {
+            opcode.emit(masm, size, asRegister(x), asRegister(y));
+        } else {
+            assert isStackSlot(y);
+            opcode.emit(masm, size, asRegister(x), (AMD64Address) crb.asAddress(y));
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64MulConstOp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.*;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMIOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64MulConstOp extends AMD64LIRInstruction {
+    public static final LIRInstructionClass<AMD64MulConstOp> TYPE = LIRInstructionClass.create(AMD64MulConstOp.class);
+
+    @Opcode private final AMD64RMIOp opcode;
+    private final OperandSize size;
+
+    @Def({REG}) protected AllocatableValue result;
+    @Use({REG, STACK}) protected AllocatableValue x;
+    protected JavaConstant y;
+
+    public AMD64MulConstOp(AMD64RMIOp opcode, OperandSize size, AllocatableValue result, AllocatableValue x, JavaConstant y) {
+        super(TYPE);
+        this.opcode = opcode;
+        this.size = size;
+
+        this.result = result;
+        this.x = x;
+        this.y = y;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        assert NumUtil.isInt(y.asLong());
+        int imm = (int) y.asLong();
+        if (isRegister(x)) {
+            opcode.emit(masm, size, asRegister(result), asRegister(x), imm);
+        } else {
+            assert isStackSlot(x);
+            opcode.emit(masm, size, asRegister(result), (AMD64Address) crb.asAddress(x), imm);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64MulDivOp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MOp.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.amd64.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64MulDivOp extends AMD64LIRInstruction {
+    public static final LIRInstructionClass<AMD64MulDivOp> TYPE = LIRInstructionClass.create(AMD64MulDivOp.class);
+
+    @Opcode private final AMD64MOp opcode;
+    private final OperandSize size;
+
+    @Def({REG}) protected AllocatableValue highResult;
+    @Def({REG}) protected AllocatableValue lowResult;
+
+    @Use({REG, ILLEGAL}) protected AllocatableValue highX;
+    @Use({REG}) protected AllocatableValue lowX;
+
+    @Use({REG, STACK}) protected AllocatableValue y;
+
+    @State protected LIRFrameState state;
+
+    public AMD64MulDivOp(AMD64MOp opcode, OperandSize size, LIRKind resultKind, AllocatableValue x, AllocatableValue y) {
+        this(opcode, size, resultKind, Value.ILLEGAL, x, y, null);
+    }
+
+    public AMD64MulDivOp(AMD64MOp opcode, OperandSize size, LIRKind resultKind, AllocatableValue highX, AllocatableValue lowX, AllocatableValue y, LIRFrameState state) {
+        super(TYPE);
+        this.opcode = opcode;
+        this.size = size;
+
+        this.highResult = AMD64.rdx.asValue(resultKind);
+        this.lowResult = AMD64.rax.asValue(resultKind);
+
+        this.highX = highX;
+        this.lowX = lowX;
+
+        this.y = y;
+
+        this.state = state;
+    }
+
+    public AllocatableValue getHighResult() {
+        return highResult;
+    }
+
+    public AllocatableValue getLowResult() {
+        return lowResult;
+    }
+
+    public AllocatableValue getQuotient() {
+        return lowResult;
+    }
+
+    public AllocatableValue getRemainder() {
+        return highResult;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        if (state != null) {
+            crb.recordImplicitException(masm.position(), state);
+        }
+        if (isRegister(y)) {
+            opcode.emit(masm, size, asRegister(y));
+        } else {
+            assert isStackSlot(y);
+            opcode.emit(masm, size, (AMD64Address) crb.asAddress(y));
+        }
+    }
+
+    @Override
+    public void verify() {
+        assert asRegister(highResult).equals(AMD64.rdx);
+        assert asRegister(lowResult).equals(AMD64.rax);
+
+        assert asRegister(lowX).equals(AMD64.rax);
+        if (opcode == DIV || opcode == IDIV) {
+            assert asRegister(highX).equals(AMD64.rdx);
+        } else if (opcode == MUL || opcode == IMUL) {
+            assert isIllegal(highX);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64RegStackConstOp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMIOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64RegStackConstOp extends AMD64LIRInstruction {
+    public static final LIRInstructionClass<AMD64RegStackConstOp> TYPE = LIRInstructionClass.create(AMD64RegStackConstOp.class);
+
+    @Opcode private final AMD64RMIOp opcode;
+    private final OperandSize size;
+
+    @Def({REG}) protected AllocatableValue result;
+    @Use({REG, STACK}) protected AllocatableValue x;
+    protected JavaConstant y;
+
+    public AMD64RegStackConstOp(AMD64RMIOp opcode, OperandSize size, AllocatableValue result, AllocatableValue x, JavaConstant y) {
+        super(TYPE);
+        this.opcode = opcode;
+        this.size = size;
+
+        this.result = result;
+        this.x = x;
+        this.y = y;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        assert NumUtil.is32bit(y.asLong());
+        if (isRegister(x)) {
+            opcode.emit(masm, size, asRegister(result), asRegister(x), (int) y.asLong());
+        } else {
+            assert isStackSlot(x);
+            opcode.emit(masm, size, asRegister(result), (AMD64Address) crb.asAddress(x), (int) y.asLong());
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ShiftOp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.amd64.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64ShiftOp extends AMD64LIRInstruction {
+    public static final LIRInstructionClass<AMD64ShiftOp> TYPE = LIRInstructionClass.create(AMD64ShiftOp.class);
+
+    @Opcode private final AMD64MOp opcode;
+    private final OperandSize size;
+
+    @Def({REG, HINT}) protected AllocatableValue result;
+    @Use({REG, STACK}) protected AllocatableValue x;
+    @Alive({REG}) protected AllocatableValue y;
+
+    public AMD64ShiftOp(AMD64MOp opcode, OperandSize size, AllocatableValue result, AllocatableValue x, AllocatableValue y) {
+        super(TYPE);
+        this.opcode = opcode;
+        this.size = size;
+
+        this.result = result;
+        this.x = x;
+        this.y = y;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        AMD64Move.move(crb, masm, result, x);
+        opcode.emit(masm, size, asRegister(result));
+    }
+
+    @Override
+    public void verify() {
+        assert asRegister(y).equals(AMD64.rcx);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64SignExtendOp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.amd64.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.asm.*;
+
+@Opcode("CDQ")
+public class AMD64SignExtendOp extends AMD64LIRInstruction {
+    public static final LIRInstructionClass<AMD64SignExtendOp> TYPE = LIRInstructionClass.create(AMD64SignExtendOp.class);
+
+    private final OperandSize size;
+
+    @Def({REG}) protected AllocatableValue highResult;
+    @Def({REG}) protected AllocatableValue lowResult;
+
+    @Use({REG}) protected AllocatableValue input;
+
+    public AMD64SignExtendOp(OperandSize size, LIRKind resultKind, AllocatableValue input) {
+        super(TYPE);
+        this.size = size;
+
+        this.highResult = AMD64.rdx.asValue(resultKind);
+        this.lowResult = AMD64.rax.asValue(resultKind);
+        this.input = input;
+    }
+
+    public AllocatableValue getHighResult() {
+        return highResult;
+    }
+
+    public AllocatableValue getLowResult() {
+        return lowResult;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        if (size == DWORD) {
+            masm.cdql();
+        } else {
+            assert size == QWORD;
+            masm.cdqq();
+        }
+    }
+
+    @Override
+    public void verify() {
+        assert asRegister(highResult).equals(AMD64.rdx);
+        assert asRegister(lowResult).equals(AMD64.rax);
+        assert asRegister(input).equals(AMD64.rax);
+    }
+}
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64TestMemoryOp.java	Mon Mar 16 15:59:57 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.lir.amd64;
-
-import static com.oracle.graal.api.code.ValueUtil.*;
-import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
-
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.compiler.common.*;
-import com.oracle.graal.lir.*;
-import com.oracle.graal.lir.amd64.AMD64Move.MemOp;
-import com.oracle.graal.lir.asm.*;
-
-public final class AMD64TestMemoryOp extends MemOp {
-    public static final LIRInstructionClass<AMD64TestMemoryOp> TYPE = LIRInstructionClass.create(AMD64TestMemoryOp.class);
-
-    @Use({REG, CONST}) protected Value y;
-
-    public AMD64TestMemoryOp(Kind kind, AMD64AddressValue x, Value y, LIRFrameState state) {
-        super(TYPE, kind, x, state);
-        this.y = y;
-        this.state = state;
-    }
-
-    @Override
-    public void emitMemAccess(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
-        if (isRegister(y)) {
-            switch (kind) {
-                case Int:
-                    masm.testl(asIntReg(y), address.toAddress());
-                    break;
-                case Long:
-                    masm.testq(asLongReg(y), address.toAddress());
-                    break;
-                default:
-                    throw GraalInternalError.shouldNotReachHere();
-            }
-        } else if (isConstant(y)) {
-            switch (kind) {
-                case Int:
-                    masm.testl(address.toAddress(), crb.asIntConst(y));
-                    break;
-                case Long:
-                    masm.testq(address.toAddress(), crb.asIntConst(y));
-                    break;
-                default:
-                    throw GraalInternalError.shouldNotReachHere();
-            }
-        } else {
-            throw GraalInternalError.shouldNotReachHere();
-        }
-    }
-
-    @Override
-    public void verify() {
-        super.verify();
-        assert (kind == Kind.Int || kind == Kind.Long) && kind == y.getKind() : address + " " + y;
-    }
-}
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64TestOp.java	Mon Mar 16 15:59:57 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.lir.amd64;
-
-import static com.oracle.graal.api.code.ValueUtil.*;
-import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
-
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.compiler.common.*;
-import com.oracle.graal.lir.*;
-import com.oracle.graal.lir.asm.*;
-
-public final class AMD64TestOp extends AMD64LIRInstruction {
-    public static final LIRInstructionClass<AMD64TestOp> TYPE = LIRInstructionClass.create(AMD64TestOp.class);
-
-    @Use({REG}) protected Value x;
-    @Use({REG, STACK, CONST}) protected Value y;
-
-    public AMD64TestOp(Value x, Value y) {
-        super(TYPE);
-        this.x = x;
-        this.y = y;
-    }
-
-    @Override
-    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
-        emit(crb, masm, x, y);
-    }
-
-    @Override
-    public void verify() {
-        super.verify();
-        assert (x.getKind() == Kind.Int && y.getKind().getStackKind() == Kind.Int) || (x.getKind() == Kind.Long && y.getKind() == Kind.Long) : x + " " + y;
-    }
-
-    public static void emit(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value x, Value y) {
-        if (isRegister(y)) {
-            switch (x.getKind()) {
-                case Int:
-                    masm.testl(asIntReg(x), asIntReg(y));
-                    break;
-                case Long:
-                    masm.testq(asLongReg(x), asLongReg(y));
-                    break;
-                default:
-                    throw GraalInternalError.shouldNotReachHere();
-            }
-        } else if (isConstant(y)) {
-            switch (x.getKind()) {
-                case Int:
-                    masm.testl(asIntReg(x), crb.asIntConst(y));
-                    break;
-                case Long:
-                    masm.testq(asLongReg(x), crb.asIntConst(y));
-                    break;
-                default:
-                    throw GraalInternalError.shouldNotReachHere();
-            }
-        } else {
-            switch (x.getKind()) {
-                case Int:
-                    masm.testl(asIntReg(x), (AMD64Address) crb.asIntAddr(y));
-                    break;
-                case Long:
-                    masm.testq(asLongReg(x), (AMD64Address) crb.asLongAddr(y));
-                    break;
-                default:
-                    throw GraalInternalError.shouldNotReachHere();
-            }
-        }
-    }
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64UnaryMOp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64UnaryMOp extends AMD64LIRInstruction {
+    public static final LIRInstructionClass<AMD64UnaryMOp> TYPE = LIRInstructionClass.create(AMD64UnaryMOp.class);
+
+    @Opcode private final AMD64MOp opcode;
+    private final OperandSize size;
+
+    @Def({REG, HINT}) protected AllocatableValue result;
+    @Use({REG, STACK}) protected AllocatableValue value;
+
+    public AMD64UnaryMOp(AMD64MOp opcode, OperandSize size, AllocatableValue result, AllocatableValue value) {
+        super(TYPE);
+        this.opcode = opcode;
+        this.size = size;
+
+        this.result = result;
+        this.value = value;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        AMD64Move.move(crb, masm, result, value);
+        opcode.emit(masm, size, asRegister(result));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64UnaryMROp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MROp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64UnaryMROp extends AMD64LIRInstruction {
+    public static final LIRInstructionClass<AMD64UnaryMROp> TYPE = LIRInstructionClass.create(AMD64UnaryMROp.class);
+
+    @Opcode private final AMD64MROp opcode;
+    private final OperandSize size;
+
+    @Def({REG, STACK}) protected AllocatableValue result;
+    @Use({REG}) protected AllocatableValue value;
+
+    public AMD64UnaryMROp(AMD64MROp opcode, OperandSize size, AllocatableValue result, AllocatableValue value) {
+        super(TYPE);
+        this.opcode = opcode;
+        this.size = size;
+
+        this.result = result;
+        this.value = value;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        if (isRegister(result)) {
+            opcode.emit(masm, size, asRegister(result), asRegister(value));
+        } else {
+            assert isStackSlot(result);
+            opcode.emit(masm, size, (AMD64Address) crb.asAddress(result), asRegister(value));
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64UnaryMemoryOp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.StandardOp.ImplicitNullCheck;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64UnaryMemoryOp extends AMD64LIRInstruction implements ImplicitNullCheck {
+    public static final LIRInstructionClass<AMD64UnaryMemoryOp> TYPE = LIRInstructionClass.create(AMD64UnaryMemoryOp.class);
+
+    @Opcode private final AMD64RMOp opcode;
+    private final OperandSize size;
+
+    @Def({REG}) protected AllocatableValue result;
+    @Use({COMPOSITE}) protected AMD64AddressValue input;
+
+    @State protected LIRFrameState state;
+
+    public AMD64UnaryMemoryOp(AMD64RMOp opcode, OperandSize size, AllocatableValue result, AMD64AddressValue input, LIRFrameState state) {
+        super(TYPE);
+        this.opcode = opcode;
+        this.size = size;
+
+        this.result = result;
+        this.input = input;
+
+        this.state = state;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        if (state != null) {
+            crb.recordImplicitException(masm.position(), state);
+        }
+        opcode.emit(masm, size, asRegister(result), input.toAddress());
+    }
+
+    public boolean makeNullCheckFor(Value value, LIRFrameState nullCheckState, int implicitNullCheckLimit) {
+        if (state == null && input.isValidImplicitNullCheckFor(value, implicitNullCheckLimit)) {
+            state = nullCheckState;
+            return true;
+        }
+        return false;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64UnaryRMOp.java	Mon Mar 16 16:54:10 2015 -0700
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp;
+import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64UnaryRMOp extends AMD64LIRInstruction {
+    public static final LIRInstructionClass<AMD64UnaryRMOp> TYPE = LIRInstructionClass.create(AMD64UnaryRMOp.class);
+
+    @Opcode private final AMD64RMOp opcode;
+    private final OperandSize size;
+
+    @Def({REG}) protected AllocatableValue result;
+    @Use({REG, STACK}) protected AllocatableValue value;
+
+    public AMD64UnaryRMOp(AMD64RMOp opcode, OperandSize size, AllocatableValue result, AllocatableValue value) {
+        super(TYPE);
+        this.opcode = opcode;
+        this.size = size;
+
+        this.result = result;
+        this.value = value;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+        if (isRegister(value)) {
+            opcode.emit(masm, size, asRegister(result), asRegister(value));
+        } else {
+            assert isStackSlot(value);
+            opcode.emit(masm, size, asRegister(result), (AMD64Address) crb.asAddress(value));
+        }
+    }
+}
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/asm/CompilationResultBuilder.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/asm/CompilationResultBuilder.java	Mon Mar 16 16:54:10 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -162,7 +162,7 @@
         compilationResult.recordInfopoint(pos, debugInfo, reason);
     }
 
-    public void recordInlineDataInCode(JavaConstant data) {
+    public void recordInlineDataInCode(Constant data) {
         assert data != null;
         int pos = asm.position();
         Debug.log("Inline data in code: pos = %d, data = %s", pos, data);
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/gen/LIRGenerator.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/gen/LIRGenerator.java	Mon Mar 16 16:54:10 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -177,13 +177,14 @@
         return res.getFrameMapBuilder().getRegisterConfig().getReturnRegister((Kind) kind.getPlatformKind()).asValue(kind);
     }
 
-    public void append(LIRInstruction op) {
+    public <I extends LIRInstruction> I append(I op) {
         if (Options.PrintIRWithLIR.getValue() && !TTY.isSuppressed()) {
             TTY.println(op.toStringWithIdPrefix());
             TTY.println();
         }
         assert LIRVerifier.verify(op);
         res.getLIR().getLIRforBlock(currentBlock).add(op);
+        return op;
     }
 
     public boolean hasBlockEnd(AbstractBlockBase<?> block) {
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/gen/LIRGeneratorTool.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/gen/LIRGeneratorTool.java	Mon Mar 16 16:54:10 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -159,7 +159,7 @@
      */
     AllocatableValue resultOperandFor(LIRKind kind);
 
-    void append(LIRInstruction op);
+    <I extends LIRInstruction> I append(I op);
 
     void emitJump(LabelRef label);
 
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/ConstantNode.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/ConstantNode.java	Mon Mar 16 16:54:10 2015 -0700
@@ -28,7 +28,6 @@
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.compiler.common.*;
 import com.oracle.graal.compiler.common.type.*;
-import com.oracle.graal.debug.*;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.graph.iterators.*;
 import com.oracle.graal.nodeinfo.*;
@@ -42,7 +41,6 @@
 public final class ConstantNode extends FloatingNode implements LIRLowerable {
 
     public static final NodeClass<ConstantNode> TYPE = NodeClass.create(ConstantNode.class);
-    private static final DebugMetric ConstantNodes = Debug.metric("ConstantNodes");
 
     protected final Constant value;
 
@@ -60,7 +58,6 @@
         super(TYPE, stamp);
         assert stamp != null && isCompatible(value, stamp);
         this.value = value;
-        ConstantNodes.increment();
     }
 
     private static boolean isCompatible(Constant value, Stamp stamp) {
--- a/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/CanonicalizerPhase.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/CanonicalizerPhase.java	Mon Mar 16 16:54:10 2015 -0700
@@ -24,7 +24,6 @@
 
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.debug.*;
-import com.oracle.graal.debug.Debug.Scope;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.graph.Graph.Mark;
 import com.oracle.graal.graph.Graph.NodeEventListener;
@@ -254,30 +253,24 @@
             }
             if (nodeClass.isCanonicalizable()) {
                 METRIC_CANONICALIZATION_CONSIDERED_NODES.increment();
-                try (Scope s = Debug.scope("CanonicalizeNode", node)) {
-                    Node canonical;
-                    try (AutoCloseable verify = getCanonicalizeableContractAssertion(node)) {
-                        canonical = ((Canonicalizable) node).canonical(tool);
-                        if (canonical == node && nodeClass.isCommutative()) {
-                            canonical = ((BinaryCommutative<?>) node).maybeCommuteInputs();
-                        }
-                    }
-                    if (performReplacement(node, canonical)) {
-                        return true;
+                Node canonical;
+                try (AutoCloseable verify = getCanonicalizeableContractAssertion(node)) {
+                    canonical = ((Canonicalizable) node).canonical(tool);
+                    if (canonical == node && nodeClass.isCommutative()) {
+                        canonical = ((BinaryCommutative<?>) node).maybeCommuteInputs();
                     }
                 } catch (Throwable e) {
                     throw Debug.handle(e);
                 }
+                if (performReplacement(node, canonical)) {
+                    return true;
+                }
             }
 
             if (nodeClass.isSimplifiable() && simplify) {
                 Debug.log(3, "Canonicalizer: simplifying %s", node);
                 METRIC_SIMPLIFICATION_CONSIDERED_NODES.increment();
-                try (Scope s = Debug.scope("SimplifyNode", node)) {
-                    node.simplify(tool);
-                } catch (Throwable e) {
-                    throw Debug.handle(e);
-                }
+                node.simplify(tool);
                 return node.isDeleted();
             }
             return false;
--- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/schedule/SchedulePhase.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/schedule/SchedulePhase.java	Mon Mar 16 16:54:10 2015 -0700
@@ -547,8 +547,9 @@
             if (n instanceof FloatingReadNode) {
                 FloatingReadNode floatingReadNode = (FloatingReadNode) n;
                 LocationIdentity locationIdentity = floatingReadNode.getLocationIdentity();
-                if (locationIdentity.isMutable()) {
-                    ValueNode lastAccessLocation = floatingReadNode.getLastLocationAccess().asNode();
+                MemoryNode lastLocationAccess = floatingReadNode.getLastLocationAccess();
+                if (locationIdentity.isMutable() && lastLocationAccess != null) {
+                    ValueNode lastAccessLocation = lastLocationAccess.asNode();
                     if (nodeToBlock.get(lastAccessLocation) == b && lastAccessLocation != beginNode) {
                         // This node's last access location is within this block. Add to watch list
                         // when processing the last access location.
@@ -588,7 +589,7 @@
                 newList.add(n);
             } else {
                 // This node was pulled up.
-                assert !(n instanceof FixedNode);
+                assert !(n instanceof FixedNode) : n;
             }
         }
 
@@ -627,12 +628,16 @@
                         }
                     }
                 } else {
-                    for (Node input : current.inputs()) {
-                        if (current instanceof FrameState && input instanceof StateSplit && ((StateSplit) input).stateAfter() == current) {
-                            // Ignore the cycle.
-                        } else {
-                            stack.push(input);
+                    if (current instanceof FrameState) {
+                        for (Node input : current.inputs()) {
+                            if (input instanceof StateSplit && ((StateSplit) input).stateAfter() == current) {
+                                // Ignore the cycle.
+                            } else {
+                                stack.push(input);
+                            }
                         }
+                    } else {
+                        current.pushInputs(stack);
                     }
                 }
             } else {
--- a/graal/com.oracle.graal.printer/src/com/oracle/graal/printer/BinaryGraphPrinter.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.printer/src/com/oracle/graal/printer/BinaryGraphPrinter.java	Mon Mar 16 16:54:10 2015 -0700
@@ -469,7 +469,7 @@
     }
 
     private void writeBlocks(List<Block> blocks, BlockMap<List<Node>> blockToNodes) throws IOException {
-        if (blocks != null) {
+        if (blocks != null && blockToNodes != null) {
             for (Block block : blocks) {
                 List<Node> nodes = blockToNodes.get(block);
                 if (nodes == null) {
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/OptimizedCallTarget.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/OptimizedCallTarget.java	Mon Mar 16 16:54:10 2015 -0700
@@ -344,18 +344,22 @@
     }
 
     public void notifyCompilationFailed(Throwable t) {
-        if (!(t instanceof BailoutException) || ((BailoutException) t).isPermanent()) {
+        if (t instanceof BailoutException && !((BailoutException) t).isPermanent()) {
+            /*
+             * Non permanent bailouts are expected cases. A non permanent bailout would be for
+             * example class redefinition during code installation. As opposed to permanent
+             * bailouts, non permanent bailouts will trigger recompilation and are not considered a
+             * failure state.
+             */
+        } else {
             compilationPolicy.recordCompilationFailure(t);
             if (TruffleCompilationExceptionsAreThrown.getValue()) {
                 throw new OptimizationFailedException(t, this);
             }
-        }
-
-        if (t instanceof BailoutException) {
-            // Bailout => move on.
-        } else if (TruffleCompilationExceptionsAreFatal.getValue()) {
-            t.printStackTrace(OUT);
-            System.exit(-1);
+            if (TruffleCompilationExceptionsAreFatal.getValue()) {
+                t.printStackTrace(OUT);
+                System.exit(-1);
+            }
         }
     }
 
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/PartialEvaluator.java	Mon Mar 16 15:59:57 2015 -0700
+++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/PartialEvaluator.java	Mon Mar 16 16:54:10 2015 -0700
@@ -262,6 +262,7 @@
         plugins.setInlineInvokePlugin(new InlineInvokePlugin(callTarget.getInlining(), providers.getReplacements()));
         plugins.setLoopExplosionPlugin(new LoopExplosionPlugin());
         TruffleGraphBuilderPlugins.registerInvocationPlugins(providers.getMetaAccess(), newConfig.getPlugins().getInvocationPlugins());
+
         new GraphBuilderPhase.Instance(providers.getMetaAccess(), providers.getStampProvider(), this.snippetReflection, providers.getConstantReflection(), newConfig,
                         TruffleCompilerImpl.Optimizations, null).apply(graph);
         Debug.dump(graph, "After FastPE");
--- a/mxtool/mx.py	Mon Mar 16 15:59:57 2015 -0700
+++ b/mxtool/mx.py	Mon Mar 16 16:54:10 2015 -0700
@@ -2823,13 +2823,16 @@
         else:
             # Using just SC_ARG_MAX without extra downwards adjustment
             # results in "[Errno 7] Argument list too long" on MacOS.
-            syslimit = os.sysconf('SC_ARG_MAX') - 20000
+            commandLinePrefixAllowance -= 20000
+            syslimit = os.sysconf('SC_ARG_MAX')
             if syslimit == -1:
                 syslimit = 262144 # we could use sys.maxint but we prefer a more robust smaller value
             limit = syslimit - commandLinePrefixAllowance
+            assert limit > 0
     for i in range(len(files)):
         path = pathFunction(files[i])
         size = len(path) + 1
+        assert size < limit
         if chunkSize + size < limit:
             chunkSize += size
         else: