changeset 21708:6df25b1418be

moved com.oracle.graal.asm.** to jvmci-util.jar (JBS:GRAAL-53)
author Doug Simon <doug.simon@oracle.com>
date Wed, 03 Jun 2015 18:06:44 +0200
parents e0f311284930
children 3c17c0c41a6b
files graal/com.oracle.graal.asm.amd64.test/src/com/oracle/graal/asm/amd64/test/BitOpsTest.java graal/com.oracle.graal.asm.amd64.test/src/com/oracle/graal/asm/amd64/test/IncrementDecrementMacroTest.java graal/com.oracle.graal.asm.amd64.test/src/com/oracle/graal/asm/amd64/test/SimpleAssemblerTest.java graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64Address.java graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64AsmOptions.java graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64Assembler.java graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64MacroAssembler.java graal/com.oracle.graal.asm.sparc/src/com/oracle/graal/asm/sparc/SPARCAddress.java graal/com.oracle.graal.asm.sparc/src/com/oracle/graal/asm/sparc/SPARCAssembler.java graal/com.oracle.graal.asm.sparc/src/com/oracle/graal/asm/sparc/SPARCInstructionCounter.java graal/com.oracle.graal.asm.sparc/src/com/oracle/graal/asm/sparc/SPARCMacroAssembler.java graal/com.oracle.graal.asm/overview.html graal/com.oracle.graal.asm/src/com/oracle/graal/asm/AsmOptions.java graal/com.oracle.graal.asm/src/com/oracle/graal/asm/Assembler.java graal/com.oracle.graal.asm/src/com/oracle/graal/asm/Buffer.java graal/com.oracle.graal.asm/src/com/oracle/graal/asm/Label.java graal/com.oracle.graal.asm/src/com/oracle/graal/asm/NumUtil.java graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64NodeLIRBuilder.java graal/com.oracle.graal.compiler.sparc/src/com/oracle/graal/compiler/sparc/SPARCLIRGenerator.java graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/target/Backend.java graal/com.oracle.graal.hotspot.amd64.test/src/com/oracle/graal/hotspot/amd64/test/AMD64HotSpotFrameOmissionTest.java graal/com.oracle.graal.hotspot.amd64.test/src/com/oracle/graal/hotspot/amd64/test/DataPatchInConstantsTest.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64DeoptimizeOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBinaryConsumer.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCRuntimeCallEpilogueOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCRuntimeCallPrologueOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCardTableAddressOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCardTableShiftOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCounterOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotDeoptimizeCallerOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotEnterUnpackFramesStackFrameOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotEpilogueOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotJumpToExceptionHandlerInCallerOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLeaveCurrentStackFrameOp.java 
graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLeaveDeoptimizedStackFrameOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLeaveUnpackFramesStackFrameOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotMove.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotNodeLIRBuilder.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotPatchReturnAddressOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotPushInterpreterFrameOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotReturnOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotSafepointOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotUnwindOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotspotDirectStaticCallOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotspotDirectVirtualCallOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64IndirectCallOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64PrefetchOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64TailcallOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCDeoptimizeOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotBackend.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotCRuntimeCallEpilogueOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotCRuntimeCallPrologueOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotCounterOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotDeoptimizeCallerOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotEnterUnpackFramesStackFrameOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotJumpToExceptionHandlerInCallerOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotJumpToExceptionHandlerOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLeaveCurrentStackFrameOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLeaveDeoptimizedStackFrameOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLeaveUnpackFramesStackFrameOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotMove.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotPatchReturnAddressOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotPushInterpreterFrameOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotReturnOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotSafepointOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotUnwindOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotspotDirectStaticCallOp.java 
graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotspotDirectVirtualCallOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCIndirectCallOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCPrefetchOp.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotCounterOp.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotInstructionProfiling.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/DimensionsNode.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/arraycopy/UnsafeArrayCopySnippets.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/DeoptimizationStub.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/UncommonTrapStub.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64AddressValue.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Arithmetic.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ArrayEqualsOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Binary.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64BinaryConsumer.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64BreakpointOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ByteSwapOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64CCall.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Call.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ClearRegisterOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ControlFlow.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64FrameMap.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64LIRInstruction.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64MathIntrinsicOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64MulDivOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64RestoreRegistersOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64SaveRegistersOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ShiftOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64SignExtendOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Unary.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ZapRegistersOp.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCAddressValue.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCArithmetic.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCArrayEqualsOp.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCBitManipulationOp.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCBreakpointOp.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCByteSwapOp.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCCall.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCCompare.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCControlFlow.java 
graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCDelayedControlTransfer.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCFrameMap.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCJumpOp.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCLIRInstruction.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCMathIntrinsicOp.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCMove.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCSaveRegistersOp.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCTailDelayedLIRInstruction.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCTestOp.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/LabelRef.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/StandardOp.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/SwitchStrategy.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/asm/CompilationResultBuilder.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/asm/CompilationResultBuilderFactory.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/framemap/FrameMap.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/gen/LIRGenerator.java graal/com.oracle.graal.printer/src/com/oracle/graal/printer/HexCodeFile.java graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/DefaultJavaLoweringProvider.java graal/com.oracle.graal.truffle.hotspot.amd64/src/com/oracle/graal/truffle/hotspot/amd64/AMD64OptimizedCallTargetInstrumentationFactory.java graal/com.oracle.graal.truffle.hotspot.sparc/src/com/oracle/graal/truffle/hotspot/sparc/SPARCOptimizedCallTargetInstumentationFactory.java graal/com.oracle.graal.truffle.hotspot/src/com/oracle/graal/truffle/hotspot/OptimizedCallTargetInstrumentation.java graal/com.oracle.jvmci.asm.amd64/src/com/oracle/jvmci/asm/amd64/AMD64Address.java graal/com.oracle.jvmci.asm.amd64/src/com/oracle/jvmci/asm/amd64/AMD64AsmOptions.java graal/com.oracle.jvmci.asm.amd64/src/com/oracle/jvmci/asm/amd64/AMD64Assembler.java graal/com.oracle.jvmci.asm.amd64/src/com/oracle/jvmci/asm/amd64/AMD64MacroAssembler.java graal/com.oracle.jvmci.asm.sparc/src/com/oracle/jvmci/asm/sparc/SPARCAddress.java graal/com.oracle.jvmci.asm.sparc/src/com/oracle/jvmci/asm/sparc/SPARCAssembler.java graal/com.oracle.jvmci.asm.sparc/src/com/oracle/jvmci/asm/sparc/SPARCInstructionCounter.java graal/com.oracle.jvmci.asm.sparc/src/com/oracle/jvmci/asm/sparc/SPARCMacroAssembler.java graal/com.oracle.jvmci.asm/overview.html graal/com.oracle.jvmci.asm/src/com/oracle/jvmci/asm/AsmOptions.java graal/com.oracle.jvmci.asm/src/com/oracle/jvmci/asm/Assembler.java graal/com.oracle.jvmci.asm/src/com/oracle/jvmci/asm/Buffer.java graal/com.oracle.jvmci.asm/src/com/oracle/jvmci/asm/Label.java graal/com.oracle.jvmci.asm/src/com/oracle/jvmci/asm/NumUtil.java make/defs.make mx/mx_graal.py mx/suite.py
diffstat 147 files changed, 6559 insertions(+), 6587 deletions(-)
--- a/graal/com.oracle.graal.asm.amd64.test/src/com/oracle/graal/asm/amd64/test/BitOpsTest.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.asm.amd64.test/src/com/oracle/graal/asm/amd64/test/BitOpsTest.java	Wed Jun 03 18:06:44 2015 +0200
@@ -23,8 +23,10 @@
 
 package com.oracle.graal.asm.amd64.test;
 
+import com.oracle.graal.asm.test.*;
 import com.oracle.jvmci.amd64.*;
 import com.oracle.jvmci.amd64.AMD64.*;
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.RegisterConfig;
 import com.oracle.jvmci.code.TargetDescription;
 import com.oracle.jvmci.code.Register;
@@ -32,9 +34,9 @@
 import com.oracle.jvmci.code.CompilationResult;
 import com.oracle.jvmci.meta.Kind;
 
+import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64RMOp.*;
+import static com.oracle.jvmci.asm.amd64.AMD64Assembler.OperandSize.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
-import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp.*;
-import static com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize.*;
 import static com.oracle.jvmci.common.UnsafeAccess.*;
 import static org.junit.Assume.*;
 
@@ -43,9 +45,6 @@
 
 import org.junit.*;
 
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.asm.test.*;
-
 public class BitOpsTest extends AssemblerTest {
     private static boolean lzcntSupported;
     private static boolean tzcntSupported;
--- a/graal/com.oracle.graal.asm.amd64.test/src/com/oracle/graal/asm/amd64/test/IncrementDecrementMacroTest.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.asm.amd64.test/src/com/oracle/graal/asm/amd64/test/IncrementDecrementMacroTest.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,7 +22,9 @@
  */
 package com.oracle.graal.asm.amd64.test;
 
+import com.oracle.graal.asm.test.*;
 import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.RegisterConfig;
 import com.oracle.jvmci.code.TargetDescription;
 import com.oracle.jvmci.code.Register;
@@ -38,9 +40,6 @@
 
 import org.junit.*;
 
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.asm.test.*;
-
 public class IncrementDecrementMacroTest extends AssemblerTest {
 
     @Before
--- a/graal/com.oracle.graal.asm.amd64.test/src/com/oracle/graal/asm/amd64/test/SimpleAssemblerTest.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.asm.amd64.test/src/com/oracle/graal/asm/amd64/test/SimpleAssemblerTest.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,7 +22,9 @@
  */
 package com.oracle.graal.asm.amd64.test;
 
+import com.oracle.graal.asm.test.*;
 import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.RegisterConfig;
 import com.oracle.jvmci.code.TargetDescription;
 import com.oracle.jvmci.code.Register;
@@ -40,8 +42,6 @@
 import com.oracle.jvmci.code.CompilationResult.DataSectionReference;
 import com.oracle.jvmci.code.DataSection.Data;
 import com.oracle.jvmci.code.DataSection.DataBuilder;
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.asm.test.*;
 
 public class SimpleAssemblerTest extends AssemblerTest {
 
--- a/graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64Address.java	Wed Jun 03 17:12:05 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,169 +0,0 @@
-/*
- * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.asm.amd64;
-
-import com.oracle.jvmci.code.Register;
-import com.oracle.jvmci.code.AbstractAddress;
-
-/**
- * Represents an address in target machine memory, specified via some combination of a base
- * register, an index register, a displacement and a scale. Note that the base and index registers
- * may each be a variable that will get a register assigned later by the register allocator.
- */
-public final class AMD64Address extends AbstractAddress {
-
-    private final Register base;
-    private final Register index;
-    private final Scale scale;
-    private final int displacement;
-
-    /**
-     * Creates an {@link AMD64Address} with given base register, no scaling and no displacement.
-     *
-     * @param base the base register
-     */
-    public AMD64Address(Register base) {
-        this(base, Register.None, Scale.Times1, 0);
-    }
-
-    /**
-     * Creates an {@link AMD64Address} with given base register, no scaling and a given
-     * displacement.
-     *
-     * @param base the base register
-     * @param displacement the displacement
-     */
-    public AMD64Address(Register base, int displacement) {
-        this(base, Register.None, Scale.Times1, displacement);
-    }
-
-    /**
-     * Creates an {@link AMD64Address} with given base and index registers, scaling and
-     * displacement. This is the most general constructor.
-     *
-     * @param base the base register
-     * @param index the index register
-     * @param scale the scaling factor
-     * @param displacement the displacement
-     */
-    public AMD64Address(Register base, Register index, Scale scale, int displacement) {
-        this.base = base;
-        this.index = index;
-        this.scale = scale;
-        this.displacement = displacement;
-
-        assert scale != null;
-    }
-
-    /**
-     * A scaling factor used in the SIB addressing mode.
-     */
-    public enum Scale {
-        Times1(1, 0),
-        Times2(2, 1),
-        Times4(4, 2),
-        Times8(8, 3);
-
-        private Scale(int value, int log2) {
-            this.value = value;
-            this.log2 = log2;
-        }
-
-        /**
-         * The value (or multiplier) of this scale.
-         */
-        public final int value;
-
-        /**
-         * The log base 2 of the {@linkplain #value value} of this scale.
-         */
-        public final int log2;
-
-        public static Scale fromInt(int scale) {
-            switch (scale) {
-                case 1:
-                    return Times1;
-                case 2:
-                    return Times2;
-                case 4:
-                    return Times4;
-                case 8:
-                    return Times8;
-                default:
-                    return null;
-            }
-        }
-    }
-
-    @Override
-    public String toString() {
-        StringBuilder s = new StringBuilder();
-        s.append("[");
-        String sep = "";
-        if (!getBase().equals(Register.None)) {
-            s.append(getBase());
-            sep = " + ";
-        }
-        if (!getIndex().equals(Register.None)) {
-            s.append(sep).append(getIndex()).append(" * ").append(getScale().value);
-            sep = " + ";
-        }
-        if (getDisplacement() < 0) {
-            s.append(" - ").append(-getDisplacement());
-        } else if (getDisplacement() > 0) {
-            s.append(sep).append(getDisplacement());
-        }
-        s.append("]");
-        return s.toString();
-    }
-
-    /**
-     * @return Base register that defines the start of the address computation. If not present, is
-     *         denoted by {@link Register#None}.
-     */
-    public Register getBase() {
-        return base;
-    }
-
-    /**
-     * @return Index register, the value of which (possibly scaled by {@link #getScale}) is added to
-     *         {@link #getBase}. If not present, is denoted by {@link Register#None}.
-     */
-    public Register getIndex() {
-        return index;
-    }
-
-    /**
-     * @return Scaling factor for indexing, dependent on target operand size.
-     */
-    public Scale getScale() {
-        return scale;
-    }
-
-    /**
-     * @return Optional additive displacement.
-     */
-    public int getDisplacement() {
-        return displacement;
-    }
-}
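For reference, a minimal usage sketch (not part of this changeset) of the AMD64Address class removed above; the same class reappears under com.oracle.jvmci.asm.amd64 later in this changeset, so after the move only the package in the imports changes.

    import static com.oracle.jvmci.amd64.AMD64.*;

    import com.oracle.graal.asm.amd64.AMD64Address;
    import com.oracle.graal.asm.amd64.AMD64Address.Scale;

    public class AMD64AddressExample {
        public static void main(String[] args) {
            // [rsp + 16]: base register plus a plain displacement
            AMD64Address spillSlot = new AMD64Address(rsp, 16);

            // [rbx + rcx * 8 + 24]: base, index, scale and displacement (the general constructor)
            AMD64Address arrayElement = new AMD64Address(rbx, rcx, Scale.Times8, 24);

            // toString() renders the bracketed form, e.g. "[rbx + rcx * 8 + 24]"
            System.out.println(spillSlot + " and " + arrayElement);
        }
    }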
--- a/graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64AsmOptions.java	Wed Jun 03 17:12:05 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.asm.amd64;
-
-public class AMD64AsmOptions {
-    public static final boolean UseNormalNop = false;
-    public static final boolean UseAddressNop = true;
-    public static final boolean UseIncDec = true;
-    public static final boolean UseXmmLoadAndClearUpper = true;
-    public static final boolean UseXmmRegToRegMoveAll = true;
-}
--- a/graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64Assembler.java	Wed Jun 03 17:12:05 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2445 +0,0 @@
-/*
- * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.asm.amd64;
-
-import com.oracle.jvmci.amd64.*;
-import com.oracle.jvmci.amd64.AMD64.*;
-import com.oracle.jvmci.code.Register;
-import com.oracle.jvmci.code.TargetDescription;
-import com.oracle.jvmci.code.RegisterConfig;
-
-import static com.oracle.jvmci.amd64.AMD64.*;
-import static com.oracle.jvmci.code.MemoryBarriers.*;
-import static com.oracle.graal.asm.NumUtil.*;
-import static com.oracle.graal.asm.amd64.AMD64AsmOptions.*;
-import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
-import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MOp.*;
-import static com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize.*;
-
-import com.oracle.jvmci.code.Register.RegisterCategory;
-import com.oracle.graal.asm.*;
-
-/**
- * This class implements an assembler that can encode most X86 instructions.
- */
-public class AMD64Assembler extends Assembler {
-
-    private static final int MinEncodingNeedsRex = 8;
-
-    /**
-     * A sentinel value used as a place holder in an instruction stream for an address that will be
-     * patched.
-     */
-    private static final AMD64Address Placeholder = new AMD64Address(rip);
-
-    /**
-     * The x86 condition codes used for conditional jumps/moves.
-     */
-    public enum ConditionFlag {
-        Zero(0x4, "|zero|"),
-        NotZero(0x5, "|nzero|"),
-        Equal(0x4, "="),
-        NotEqual(0x5, "!="),
-        Less(0xc, "<"),
-        LessEqual(0xe, "<="),
-        Greater(0xf, ">"),
-        GreaterEqual(0xd, ">="),
-        Below(0x2, "|<|"),
-        BelowEqual(0x6, "|<=|"),
-        Above(0x7, "|>|"),
-        AboveEqual(0x3, "|>=|"),
-        Overflow(0x0, "|of|"),
-        NoOverflow(0x1, "|nof|"),
-        CarrySet(0x2, "|carry|"),
-        CarryClear(0x3, "|ncarry|"),
-        Negative(0x8, "|neg|"),
-        Positive(0x9, "|pos|"),
-        Parity(0xa, "|par|"),
-        NoParity(0xb, "|npar|");
-
-        private final int value;
-        private final String operator;
-
-        private ConditionFlag(int value, String operator) {
-            this.value = value;
-            this.operator = operator;
-        }
-
-        public ConditionFlag negate() {
-            switch (this) {
-                case Zero:
-                    return NotZero;
-                case NotZero:
-                    return Zero;
-                case Equal:
-                    return NotEqual;
-                case NotEqual:
-                    return Equal;
-                case Less:
-                    return GreaterEqual;
-                case LessEqual:
-                    return Greater;
-                case Greater:
-                    return LessEqual;
-                case GreaterEqual:
-                    return Less;
-                case Below:
-                    return AboveEqual;
-                case BelowEqual:
-                    return Above;
-                case Above:
-                    return BelowEqual;
-                case AboveEqual:
-                    return Below;
-                case Overflow:
-                    return NoOverflow;
-                case NoOverflow:
-                    return Overflow;
-                case CarrySet:
-                    return CarryClear;
-                case CarryClear:
-                    return CarrySet;
-                case Negative:
-                    return Positive;
-                case Positive:
-                    return Negative;
-                case Parity:
-                    return NoParity;
-                case NoParity:
-                    return Parity;
-            }
-            throw new IllegalArgumentException();
-        }
-
-        public int getValue() {
-            return value;
-        }
-
-        @Override
-        public String toString() {
-            return operator;
-        }
-    }
-
-    /**
-     * Constants for X86 prefix bytes.
-     */
-    private static class Prefix {
-
-        private static final int REX = 0x40;
-        private static final int REXB = 0x41;
-        private static final int REXX = 0x42;
-        private static final int REXXB = 0x43;
-        private static final int REXR = 0x44;
-        private static final int REXRB = 0x45;
-        private static final int REXRX = 0x46;
-        private static final int REXRXB = 0x47;
-        private static final int REXW = 0x48;
-        private static final int REXWB = 0x49;
-        private static final int REXWX = 0x4A;
-        private static final int REXWXB = 0x4B;
-        private static final int REXWR = 0x4C;
-        private static final int REXWRB = 0x4D;
-        private static final int REXWRX = 0x4E;
-        private static final int REXWRXB = 0x4F;
-    }
-
-    /**
-     * The x86 operand sizes.
-     */
-    public static enum OperandSize {
-        BYTE(1) {
-            @Override
-            protected void emitImmediate(AMD64Assembler asm, int imm) {
-                assert imm == (byte) imm;
-                asm.emitByte(imm);
-            }
-        },
-
-        WORD(2, 0x66) {
-            @Override
-            protected void emitImmediate(AMD64Assembler asm, int imm) {
-                assert imm == (short) imm;
-                asm.emitShort(imm);
-            }
-        },
-
-        DWORD(4) {
-            @Override
-            protected void emitImmediate(AMD64Assembler asm, int imm) {
-                asm.emitInt(imm);
-            }
-        },
-
-        QWORD(8) {
-            @Override
-            protected void emitImmediate(AMD64Assembler asm, int imm) {
-                asm.emitInt(imm);
-            }
-        },
-
-        SS(4, 0xF3, true),
-
-        SD(8, 0xF2, true),
-
-        PS(16, true),
-
-        PD(16, 0x66, true);
-
-        private final int sizePrefix;
-
-        private final int bytes;
-        private final boolean xmm;
-
-        private OperandSize(int bytes) {
-            this(bytes, 0);
-        }
-
-        private OperandSize(int bytes, int sizePrefix) {
-            this(bytes, sizePrefix, false);
-        }
-
-        private OperandSize(int bytes, boolean xmm) {
-            this(bytes, 0, xmm);
-        }
-
-        private OperandSize(int bytes, int sizePrefix, boolean xmm) {
-            this.sizePrefix = sizePrefix;
-            this.bytes = bytes;
-            this.xmm = xmm;
-        }
-
-        public int getBytes() {
-            return bytes;
-        }
-
-        public boolean isXmmType() {
-            return xmm;
-        }
-
-        /**
-         * Emit an immediate of this size. Note that immediate {@link #QWORD} operands are encoded
-         * as sign-extended 32-bit values.
-         *
-         * @param asm
-         * @param imm
-         */
-        protected void emitImmediate(AMD64Assembler asm, int imm) {
-            assert false;
-        }
-    }
-
-    /**
-     * Operand size and register type constraints.
-     */
-    private static enum OpAssertion {
-        ByteAssertion(CPU, CPU, BYTE),
-        IntegerAssertion(CPU, CPU, WORD, DWORD, QWORD),
-        No16BitAssertion(CPU, CPU, DWORD, QWORD),
-        QwordOnlyAssertion(CPU, CPU, QWORD),
-        FloatingAssertion(XMM, XMM, SS, SD, PS, PD),
-        PackedFloatingAssertion(XMM, XMM, PS, PD),
-        SingleAssertion(XMM, XMM, SS),
-        DoubleAssertion(XMM, XMM, SD),
-        IntToFloatingAssertion(XMM, CPU, DWORD, QWORD),
-        FloatingToIntAssertion(CPU, XMM, DWORD, QWORD);
-
-        private final RegisterCategory resultCategory;
-        private final RegisterCategory inputCategory;
-        private final OperandSize[] allowedSizes;
-
-        private OpAssertion(RegisterCategory resultCategory, RegisterCategory inputCategory, OperandSize... allowedSizes) {
-            this.resultCategory = resultCategory;
-            this.inputCategory = inputCategory;
-            this.allowedSizes = allowedSizes;
-        }
-
-        protected boolean checkOperands(AMD64Op op, OperandSize size, Register resultReg, Register inputReg) {
-            assert resultReg == null || resultCategory.equals(resultReg.getRegisterCategory()) : "invalid result register " + resultReg + " used in " + op;
-            assert inputReg == null || inputCategory.equals(inputReg.getRegisterCategory()) : "invalid input register " + inputReg + " used in " + op;
-
-            for (OperandSize s : allowedSizes) {
-                if (size == s) {
-                    return true;
-                }
-            }
-
-            assert false : "invalid operand size " + size + " used in " + op;
-            return false;
-        }
-    }
-
-    /**
-     * The register to which {@link Register#Frame} and {@link Register#CallerFrame} are bound.
-     */
-    public final Register frameRegister;
-
-    /**
-     * Constructs an assembler for the AMD64 architecture.
-     *
-     * @param registerConfig the register configuration used to bind {@link Register#Frame} and
-     *            {@link Register#CallerFrame} to physical registers. This value can be null if this
-     *            assembler instance will not be used to assemble instructions using these logical
-     *            registers.
-     */
-    public AMD64Assembler(TargetDescription target, RegisterConfig registerConfig) {
-        super(target);
-        this.frameRegister = registerConfig == null ? null : registerConfig.getFrameRegister();
-    }
-
-    private boolean supports(CPUFeature feature) {
-        return ((AMD64) target.arch).getFeatures().contains(feature);
-    }
-
-    private static int encode(Register r) {
-        assert r.encoding < 16 && r.encoding >= 0 : "encoding out of range: " + r.encoding;
-        return r.encoding & 0x7;
-    }
-
-    /**
-     * Get RXB bits for register-register instruction. In that encoding, ModRM.rm contains a
-     * register index. The R bit extends the ModRM.reg field and the B bit extends the ModRM.rm
-     * field. The X bit must be 0.
-     */
-    protected static int getRXB(Register reg, Register rm) {
-        int rxb = (reg == null ? 0 : reg.encoding & 0x08) >> 1;
-        rxb |= (rm == null ? 0 : rm.encoding & 0x08) >> 3;
-        return rxb;
-    }
-
-    /**
-     * Get RXB bits for register-memory instruction. The R bit extends the ModRM.reg field. There
-     * are two cases for the memory operand:<br>
-     * ModRM.rm contains the base register: In that case, B extends the ModRM.rm field and X = 0.<br>
-     * There is an SIB byte: In that case, X extends SIB.index and B extends SIB.base.
-     */
-    protected static int getRXB(Register reg, AMD64Address rm) {
-        int rxb = (reg == null ? 0 : reg.encoding & 0x08) >> 1;
-        if (!rm.getIndex().equals(Register.None)) {
-            rxb |= (rm.getIndex().encoding & 0x08) >> 2;
-        }
-        if (!rm.getBase().equals(Register.None)) {
-            rxb |= (rm.getBase().encoding & 0x08) >> 3;
-        }
-        return rxb;
-    }
-
-    /**
-     * Emit the ModR/M byte for one register operand and an opcode extension in the R field.
-     * <p>
-     * Format: [ 11 reg r/m ]
-     */
-    protected void emitModRM(int reg, Register rm) {
-        assert (reg & 0x07) == reg;
-        emitByte(0xC0 | (reg << 3) | (rm.encoding & 0x07));
-    }
-
-    /**
-     * Emit the ModR/M byte for two register operands.
-     * <p>
-     * Format: [ 11 reg r/m ]
-     */
-    protected void emitModRM(Register reg, Register rm) {
-        emitModRM(reg.encoding & 0x07, rm);
-    }
-
-    /**
-     * Emits the ModR/M byte and optionally the SIB byte for one register and one memory operand.
-     */
-    protected void emitOperandHelper(Register reg, AMD64Address addr) {
-        assert !reg.equals(Register.None);
-        emitOperandHelper(encode(reg), addr);
-    }
-
-    /**
-     * Emits the ModR/M byte and optionally the SIB byte for one memory operand and an opcode
-     * extension in the R field.
-     */
-    protected void emitOperandHelper(int reg, AMD64Address addr) {
-        assert (reg & 0x07) == reg;
-        int regenc = reg << 3;
-
-        Register base = addr.getBase();
-        Register index = addr.getIndex();
-
-        AMD64Address.Scale scale = addr.getScale();
-        int disp = addr.getDisplacement();
-
-        if (base.equals(Register.Frame)) {
-            assert frameRegister != null : "cannot use register " + Register.Frame + " in assembler with null register configuration";
-            base = frameRegister;
-        }
-
-        if (base.equals(AMD64.rip)) { // also matches Placeholder
-            // [00 000 101] disp32
-            assert index.equals(Register.None) : "cannot use RIP relative addressing with index register";
-            emitByte(0x05 | regenc);
-            emitInt(disp);
-        } else if (base.isValid()) {
-            int baseenc = base.isValid() ? encode(base) : 0;
-            if (index.isValid()) {
-                int indexenc = encode(index) << 3;
-                // [base + indexscale + disp]
-                if (disp == 0 && !base.equals(rbp) && !base.equals(r13)) {
-                    // [base + indexscale]
-                    // [00 reg 100][ss index base]
-                    assert !index.equals(rsp) : "illegal addressing mode";
-                    emitByte(0x04 | regenc);
-                    emitByte(scale.log2 << 6 | indexenc | baseenc);
-                } else if (isByte(disp)) {
-                    // [base + indexscale + imm8]
-                    // [01 reg 100][ss index base] imm8
-                    assert !index.equals(rsp) : "illegal addressing mode";
-                    emitByte(0x44 | regenc);
-                    emitByte(scale.log2 << 6 | indexenc | baseenc);
-                    emitByte(disp & 0xFF);
-                } else {
-                    // [base + indexscale + disp32]
-                    // [10 reg 100][ss index base] disp32
-                    assert !index.equals(rsp) : "illegal addressing mode";
-                    emitByte(0x84 | regenc);
-                    emitByte(scale.log2 << 6 | indexenc | baseenc);
-                    emitInt(disp);
-                }
-            } else if (base.equals(rsp) || base.equals(r12)) {
-                // [rsp + disp]
-                if (disp == 0) {
-                    // [rsp]
-                    // [00 reg 100][00 100 100]
-                    emitByte(0x04 | regenc);
-                    emitByte(0x24);
-                } else if (isByte(disp)) {
-                    // [rsp + imm8]
-                    // [01 reg 100][00 100 100] disp8
-                    emitByte(0x44 | regenc);
-                    emitByte(0x24);
-                    emitByte(disp & 0xFF);
-                } else {
-                    // [rsp + imm32]
-                    // [10 reg 100][00 100 100] disp32
-                    emitByte(0x84 | regenc);
-                    emitByte(0x24);
-                    emitInt(disp);
-                }
-            } else {
-                // [base + disp]
-                assert !base.equals(rsp) && !base.equals(r12) : "illegal addressing mode";
-                if (disp == 0 && !base.equals(rbp) && !base.equals(r13)) {
-                    // [base]
-                    // [00 reg base]
-                    emitByte(0x00 | regenc | baseenc);
-                } else if (isByte(disp)) {
-                    // [base + disp8]
-                    // [01 reg base] disp8
-                    emitByte(0x40 | regenc | baseenc);
-                    emitByte(disp & 0xFF);
-                } else {
-                    // [base + disp32]
-                    // [10 reg base] disp32
-                    emitByte(0x80 | regenc | baseenc);
-                    emitInt(disp);
-                }
-            }
-        } else {
-            if (index.isValid()) {
-                int indexenc = encode(index) << 3;
-                // [indexscale + disp]
-                // [00 reg 100][ss index 101] disp32
-                assert !index.equals(rsp) : "illegal addressing mode";
-                emitByte(0x04 | regenc);
-                emitByte(scale.log2 << 6 | indexenc | 0x05);
-                emitInt(disp);
-            } else {
-                // [disp] ABSOLUTE
-                // [00 reg 100][00 100 101] disp32
-                emitByte(0x04 | regenc);
-                emitByte(0x25);
-                emitInt(disp);
-            }
-        }
-    }
-
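To make the encoding comments in emitOperandHelper above concrete, here is a small standalone sketch, not code from this changeset, that computes the [base + indexscale + imm8] case by hand for the operand [rbx + rcx * 8 + 24] with reg = rax; the register encodings rax = 0, rcx = 1, rbx = 3 are the standard x86 ones and are assumed here rather than taken from this diff.

    public class SibEncodingExample {
        public static void main(String[] args) {
            int regenc = 0 << 3;       // encode(rax) << 3
            int indexenc = 1 << 3;     // encode(rcx) << 3
            int baseenc = 3;           // encode(rbx)
            int log2Scale = 3;         // Scale.Times8
            int disp = 24;             // fits in a byte, so the disp8 form is used

            int modRM = 0x44 | regenc;                      // [01 reg 100]: disp8 follows, SIB byte present
            int sib = log2Scale << 6 | indexenc | baseenc;  // [ss index base]
            // Prints "44 CB 18": the bytes emitted after the opcode in this case.
            System.out.printf("%02X %02X %02X%n", modRM, sib, disp & 0xFF);
        }
    }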
-    /**
-     * Base class for AMD64 opcodes.
-     */
-    public static class AMD64Op {
-
-        protected static final int P_0F = 0x0F;
-        protected static final int P_0F38 = 0x380F;
-        protected static final int P_0F3A = 0x3A0F;
-
-        private final String opcode;
-
-        private final int prefix1;
-        private final int prefix2;
-        private final int op;
-
-        private final boolean dstIsByte;
-        private final boolean srcIsByte;
-
-        private final OpAssertion assertion;
-        private final CPUFeature feature;
-
-        protected AMD64Op(String opcode, int prefix1, int prefix2, int op, OpAssertion assertion, CPUFeature feature) {
-            this(opcode, prefix1, prefix2, op, assertion == OpAssertion.ByteAssertion, assertion == OpAssertion.ByteAssertion, assertion, feature);
-        }
-
-        protected AMD64Op(String opcode, int prefix1, int prefix2, int op, boolean dstIsByte, boolean srcIsByte, OpAssertion assertion, CPUFeature feature) {
-            this.opcode = opcode;
-            this.prefix1 = prefix1;
-            this.prefix2 = prefix2;
-            this.op = op;
-
-            this.dstIsByte = dstIsByte;
-            this.srcIsByte = srcIsByte;
-
-            this.assertion = assertion;
-            this.feature = feature;
-        }
-
-        protected final void emitOpcode(AMD64Assembler asm, OperandSize size, int rxb, int dstEnc, int srcEnc) {
-            if (prefix1 != 0) {
-                asm.emitByte(prefix1);
-            }
-            if (size.sizePrefix != 0) {
-                asm.emitByte(size.sizePrefix);
-            }
-            int rexPrefix = 0x40 | rxb;
-            if (size == QWORD) {
-                rexPrefix |= 0x08;
-            }
-            if (rexPrefix != 0x40 || (dstIsByte && dstEnc >= 4) || (srcIsByte && srcEnc >= 4)) {
-                asm.emitByte(rexPrefix);
-            }
-            if (prefix2 > 0xFF) {
-                asm.emitShort(prefix2);
-            } else if (prefix2 > 0) {
-                asm.emitByte(prefix2);
-            }
-            asm.emitByte(op);
-        }
-
-        protected final boolean verify(AMD64Assembler asm, OperandSize size, Register resultReg, Register inputReg) {
-            assert feature == null || asm.supports(feature) : String.format("unsupported feature %s required for %s", feature, opcode);
-            assert assertion.checkOperands(this, size, resultReg, inputReg);
-            return true;
-        }
-
-        @Override
-        public String toString() {
-            return opcode;
-        }
-    }
-
-    /**
-     * Base class for AMD64 opcodes with immediate operands.
-     */
-    public static class AMD64ImmOp extends AMD64Op {
-
-        private final boolean immIsByte;
-
-        protected AMD64ImmOp(String opcode, boolean immIsByte, int prefix, int op, OpAssertion assertion) {
-            super(opcode, 0, prefix, op, assertion, null);
-            this.immIsByte = immIsByte;
-        }
-
-        protected final void emitImmediate(AMD64Assembler asm, OperandSize size, int imm) {
-            if (immIsByte) {
-                assert imm == (byte) imm;
-                asm.emitByte(imm);
-            } else {
-                size.emitImmediate(asm, imm);
-            }
-        }
-    }
-
-    /**
-     * Opcode with operand order of either RM or MR.
-     */
-    public abstract static class AMD64RROp extends AMD64Op {
-
-        protected AMD64RROp(String opcode, int prefix1, int prefix2, int op, OpAssertion assertion, CPUFeature feature) {
-            super(opcode, prefix1, prefix2, op, assertion, feature);
-        }
-
-        protected AMD64RROp(String opcode, int prefix1, int prefix2, int op, boolean dstIsByte, boolean srcIsByte, OpAssertion assertion, CPUFeature feature) {
-            super(opcode, prefix1, prefix2, op, dstIsByte, srcIsByte, assertion, feature);
-        }
-
-        public abstract void emit(AMD64Assembler asm, OperandSize size, Register dst, Register src);
-    }
-
-    /**
-     * Opcode with operand order of RM.
-     */
-    public static class AMD64RMOp extends AMD64RROp {
-        // @formatter:off
-        public static final AMD64RMOp IMUL   = new AMD64RMOp("IMUL",         P_0F, 0xAF);
-        public static final AMD64RMOp BSF    = new AMD64RMOp("BSF",          P_0F, 0xBC);
-        public static final AMD64RMOp BSR    = new AMD64RMOp("BSR",          P_0F, 0xBD);
-        public static final AMD64RMOp POPCNT = new AMD64RMOp("POPCNT", 0xF3, P_0F, 0xB8, CPUFeature.POPCNT);
-        public static final AMD64RMOp TZCNT  = new AMD64RMOp("TZCNT",  0xF3, P_0F, 0xBC, CPUFeature.BMI1);
-        public static final AMD64RMOp LZCNT  = new AMD64RMOp("LZCNT",  0xF3, P_0F, 0xBD, CPUFeature.LZCNT);
-        public static final AMD64RMOp MOVZXB = new AMD64RMOp("MOVZXB",       P_0F, 0xB6, false, true, OpAssertion.IntegerAssertion);
-        public static final AMD64RMOp MOVZX  = new AMD64RMOp("MOVZX",        P_0F, 0xB7, OpAssertion.No16BitAssertion);
-        public static final AMD64RMOp MOVSXB = new AMD64RMOp("MOVSXB",       P_0F, 0xBE, false, true, OpAssertion.IntegerAssertion);
-        public static final AMD64RMOp MOVSX  = new AMD64RMOp("MOVSX",        P_0F, 0xBF, OpAssertion.No16BitAssertion);
-        public static final AMD64RMOp MOVSXD = new AMD64RMOp("MOVSXD",             0x63, OpAssertion.QwordOnlyAssertion);
-        public static final AMD64RMOp MOVB   = new AMD64RMOp("MOVB",               0x8A, OpAssertion.ByteAssertion);
-        public static final AMD64RMOp MOV    = new AMD64RMOp("MOV",                0x8B);
-
-        // MOVD/MOVQ and MOVSS/MOVSD are the same opcode, just with different operand size prefix
-        public static final AMD64RMOp MOVD   = new AMD64RMOp("MOVD",   0x66, P_0F, 0x6E, OpAssertion.IntToFloatingAssertion, CPUFeature.SSE2);
-        public static final AMD64RMOp MOVQ   = new AMD64RMOp("MOVQ",   0x66, P_0F, 0x6E, OpAssertion.IntToFloatingAssertion, CPUFeature.SSE2);
-        public static final AMD64RMOp MOVSS  = new AMD64RMOp("MOVSS",        P_0F, 0x10, OpAssertion.FloatingAssertion, CPUFeature.SSE);
-        public static final AMD64RMOp MOVSD  = new AMD64RMOp("MOVSD",        P_0F, 0x10, OpAssertion.FloatingAssertion, CPUFeature.SSE);
-
-        // TEST is documented as an MR operation, but it is symmetric, so using it as an RM operation is more convenient.
-        public static final AMD64RMOp TESTB  = new AMD64RMOp("TEST",               0x84, OpAssertion.ByteAssertion);
-        public static final AMD64RMOp TEST   = new AMD64RMOp("TEST",               0x85);
-        // @formatter:on
-
-        protected AMD64RMOp(String opcode, int op) {
-            this(opcode, 0, op);
-        }
-
-        protected AMD64RMOp(String opcode, int op, OpAssertion assertion) {
-            this(opcode, 0, op, assertion);
-        }
-
-        protected AMD64RMOp(String opcode, int prefix, int op) {
-            this(opcode, 0, prefix, op, null);
-        }
-
-        protected AMD64RMOp(String opcode, int prefix, int op, OpAssertion assertion) {
-            this(opcode, 0, prefix, op, assertion, null);
-        }
-
-        protected AMD64RMOp(String opcode, int prefix, int op, OpAssertion assertion, CPUFeature feature) {
-            this(opcode, 0, prefix, op, assertion, feature);
-        }
-
-        protected AMD64RMOp(String opcode, int prefix, int op, boolean dstIsByte, boolean srcIsByte, OpAssertion assertion) {
-            super(opcode, 0, prefix, op, dstIsByte, srcIsByte, assertion, null);
-        }
-
-        protected AMD64RMOp(String opcode, int prefix1, int prefix2, int op, CPUFeature feature) {
-            this(opcode, prefix1, prefix2, op, OpAssertion.IntegerAssertion, feature);
-        }
-
-        protected AMD64RMOp(String opcode, int prefix1, int prefix2, int op, OpAssertion assertion, CPUFeature feature) {
-            super(opcode, prefix1, prefix2, op, assertion, feature);
-        }
-
-        @Override
-        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, Register src) {
-            assert verify(asm, size, dst, src);
-            emitOpcode(asm, size, getRXB(dst, src), dst.encoding, src.encoding);
-            asm.emitModRM(dst, src);
-        }
-
-        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, AMD64Address src) {
-            assert verify(asm, size, dst, null);
-            emitOpcode(asm, size, getRXB(dst, src), dst.encoding, 0);
-            asm.emitOperandHelper(dst, src);
-        }
-    }
-
-    /**
-     * Opcode with operand order of MR.
-     */
-    public static class AMD64MROp extends AMD64RROp {
-        // @formatter:off
-        public static final AMD64MROp MOVB   = new AMD64MROp("MOVB",               0x88, OpAssertion.ByteAssertion);
-        public static final AMD64MROp MOV    = new AMD64MROp("MOV",                0x89);
-
-        // MOVD and MOVQ are the same opcode, just with different operand size prefix
-        // Note that as MR opcodes, they have reverse operand order, so the IntToFloatingAssertion must be used.
-        public static final AMD64MROp MOVD   = new AMD64MROp("MOVD",   0x66, P_0F, 0x7E, OpAssertion.IntToFloatingAssertion, CPUFeature.SSE2);
-        public static final AMD64MROp MOVQ   = new AMD64MROp("MOVQ",   0x66, P_0F, 0x7E, OpAssertion.IntToFloatingAssertion, CPUFeature.SSE2);
-
-        // MOVSS and MOVSD are the same opcode, just with different operand size prefix
-        public static final AMD64MROp MOVSS  = new AMD64MROp("MOVSS",        P_0F, 0x11, OpAssertion.FloatingAssertion, CPUFeature.SSE);
-        public static final AMD64MROp MOVSD  = new AMD64MROp("MOVSD",        P_0F, 0x11, OpAssertion.FloatingAssertion, CPUFeature.SSE);
-        // @formatter:on
-
-        protected AMD64MROp(String opcode, int op) {
-            this(opcode, 0, op);
-        }
-
-        protected AMD64MROp(String opcode, int op, OpAssertion assertion) {
-            this(opcode, 0, op, assertion);
-        }
-
-        protected AMD64MROp(String opcode, int prefix, int op) {
-            this(opcode, prefix, op, OpAssertion.IntegerAssertion);
-        }
-
-        protected AMD64MROp(String opcode, int prefix, int op, OpAssertion assertion) {
-            this(opcode, prefix, op, assertion, null);
-        }
-
-        protected AMD64MROp(String opcode, int prefix, int op, OpAssertion assertion, CPUFeature feature) {
-            this(opcode, 0, prefix, op, assertion, feature);
-        }
-
-        protected AMD64MROp(String opcode, int prefix1, int prefix2, int op, OpAssertion assertion, CPUFeature feature) {
-            super(opcode, prefix1, prefix2, op, assertion, feature);
-        }
-
-        @Override
-        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, Register src) {
-            assert verify(asm, size, src, dst);
-            emitOpcode(asm, size, getRXB(src, dst), src.encoding, dst.encoding);
-            asm.emitModRM(src, dst);
-        }
-
-        public final void emit(AMD64Assembler asm, OperandSize size, AMD64Address dst, Register src) {
-            assert verify(asm, size, null, src);
-            emitOpcode(asm, size, getRXB(src, dst), src.encoding, 0);
-            asm.emitOperandHelper(src, dst);
-        }
-    }
-
-    /**
-     * Opcodes with operand order of M.
-     */
-    public static class AMD64MOp extends AMD64Op {
-        // @formatter:off
-        public static final AMD64MOp NOT  = new AMD64MOp("NOT",  0xF7, 2);
-        public static final AMD64MOp NEG  = new AMD64MOp("NEG",  0xF7, 3);
-        public static final AMD64MOp MUL  = new AMD64MOp("MUL",  0xF7, 4);
-        public static final AMD64MOp IMUL = new AMD64MOp("IMUL", 0xF7, 5);
-        public static final AMD64MOp DIV  = new AMD64MOp("DIV",  0xF7, 6);
-        public static final AMD64MOp IDIV = new AMD64MOp("IDIV", 0xF7, 7);
-        public static final AMD64MOp INC  = new AMD64MOp("INC",  0xFF, 0);
-        public static final AMD64MOp DEC  = new AMD64MOp("DEC",  0xFF, 1);
-        // @formatter:on
-
-        private final int ext;
-
-        protected AMD64MOp(String opcode, int op, int ext) {
-            this(opcode, 0, op, ext);
-        }
-
-        protected AMD64MOp(String opcode, int prefix, int op, int ext) {
-            this(opcode, prefix, op, ext, OpAssertion.IntegerAssertion);
-        }
-
-        protected AMD64MOp(String opcode, int prefix, int op, int ext, OpAssertion assertion) {
-            super(opcode, 0, prefix, op, assertion, null);
-            this.ext = ext;
-        }
-
-        public final void emit(AMD64Assembler asm, OperandSize size, Register dst) {
-            assert verify(asm, size, dst, null);
-            emitOpcode(asm, size, getRXB(null, dst), 0, dst.encoding);
-            asm.emitModRM(ext, dst);
-        }
-
-        public final void emit(AMD64Assembler asm, OperandSize size, AMD64Address dst) {
-            assert verify(asm, size, null, null);
-            emitOpcode(asm, size, getRXB(null, dst), 0, 0);
-            asm.emitOperandHelper(ext, dst);
-        }
-    }
-
-    /**
-     * Opcodes with operand order of MI.
-     */
-    public static class AMD64MIOp extends AMD64ImmOp {
-        // @formatter:off
-        public static final AMD64MIOp MOVB = new AMD64MIOp("MOVB", true,  0xC6, 0, OpAssertion.ByteAssertion);
-        public static final AMD64MIOp MOV  = new AMD64MIOp("MOV",  false, 0xC7, 0);
-        public static final AMD64MIOp TEST = new AMD64MIOp("TEST", false, 0xF7, 0);
-        // @formatter:on
-
-        private final int ext;
-
-        protected AMD64MIOp(String opcode, boolean immIsByte, int op, int ext) {
-            this(opcode, immIsByte, op, ext, OpAssertion.IntegerAssertion);
-        }
-
-        protected AMD64MIOp(String opcode, boolean immIsByte, int op, int ext, OpAssertion assertion) {
-            this(opcode, immIsByte, 0, op, ext, assertion);
-        }
-
-        protected AMD64MIOp(String opcode, boolean immIsByte, int prefix, int op, int ext, OpAssertion assertion) {
-            super(opcode, immIsByte, prefix, op, assertion);
-            this.ext = ext;
-        }
-
-        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, int imm) {
-            assert verify(asm, size, dst, null);
-            emitOpcode(asm, size, getRXB(null, dst), 0, dst.encoding);
-            asm.emitModRM(ext, dst);
-            emitImmediate(asm, size, imm);
-        }
-
-        public final void emit(AMD64Assembler asm, OperandSize size, AMD64Address dst, int imm) {
-            assert verify(asm, size, null, null);
-            emitOpcode(asm, size, getRXB(null, dst), 0, 0);
-            asm.emitOperandHelper(ext, dst);
-            emitImmediate(asm, size, imm);
-        }
-    }
-
-    /**
-     * Opcodes with operand order of RMI.
-     */
-    public static class AMD64RMIOp extends AMD64ImmOp {
-        // @formatter:off
-        public static final AMD64RMIOp IMUL    = new AMD64RMIOp("IMUL", false, 0x69);
-        public static final AMD64RMIOp IMUL_SX = new AMD64RMIOp("IMUL", true,  0x6B);
-        // @formatter:on
-
-        protected AMD64RMIOp(String opcode, boolean immIsByte, int op) {
-            this(opcode, immIsByte, 0, op, OpAssertion.IntegerAssertion);
-        }
-
-        protected AMD64RMIOp(String opcode, boolean immIsByte, int prefix, int op, OpAssertion assertion) {
-            super(opcode, immIsByte, prefix, op, assertion);
-        }
-
-        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, Register src, int imm) {
-            assert verify(asm, size, dst, src);
-            emitOpcode(asm, size, getRXB(dst, src), dst.encoding, src.encoding);
-            asm.emitModRM(dst, src);
-            emitImmediate(asm, size, imm);
-        }
-
-        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, AMD64Address src, int imm) {
-            assert verify(asm, size, dst, null);
-            emitOpcode(asm, size, getRXB(dst, src), dst.encoding, 0);
-            asm.emitOperandHelper(dst, src);
-            emitImmediate(asm, size, imm);
-        }
-    }
-
-    public static class SSEOp extends AMD64RMOp {
-        // @formatter:off
-        public static final SSEOp CVTSI2SS  = new SSEOp("CVTSI2SS",  0xF3, P_0F, 0x2A, OpAssertion.IntToFloatingAssertion);
-        public static final SSEOp CVTSI2SD  = new SSEOp("CVTSI2SD",  0xF2, P_0F, 0x2A, OpAssertion.IntToFloatingAssertion);
-        public static final SSEOp CVTTSS2SI = new SSEOp("CVTTSS2SI", 0xF3, P_0F, 0x2C, OpAssertion.FloatingToIntAssertion);
-        public static final SSEOp CVTTSD2SI = new SSEOp("CVTTSD2SI", 0xF2, P_0F, 0x2C, OpAssertion.FloatingToIntAssertion);
-        public static final SSEOp UCOMIS    = new SSEOp("UCOMIS",          P_0F, 0x2E, OpAssertion.PackedFloatingAssertion);
-        public static final SSEOp SQRT      = new SSEOp("SQRT",            P_0F, 0x51);
-        public static final SSEOp AND       = new SSEOp("AND",             P_0F, 0x54, OpAssertion.PackedFloatingAssertion);
-        public static final SSEOp ANDN      = new SSEOp("ANDN",            P_0F, 0x55, OpAssertion.PackedFloatingAssertion);
-        public static final SSEOp OR        = new SSEOp("OR",              P_0F, 0x56, OpAssertion.PackedFloatingAssertion);
-        public static final SSEOp XOR       = new SSEOp("XOR",             P_0F, 0x57, OpAssertion.PackedFloatingAssertion);
-        public static final SSEOp ADD       = new SSEOp("ADD",             P_0F, 0x58);
-        public static final SSEOp MUL       = new SSEOp("MUL",             P_0F, 0x59);
-        public static final SSEOp CVTSS2SD  = new SSEOp("CVTSS2SD",        P_0F, 0x5A, OpAssertion.SingleAssertion);
-        public static final SSEOp CVTSD2SS  = new SSEOp("CVTSD2SS",        P_0F, 0x5A, OpAssertion.DoubleAssertion);
-        public static final SSEOp SUB       = new SSEOp("SUB",             P_0F, 0x5C);
-        public static final SSEOp MIN       = new SSEOp("MIN",             P_0F, 0x5D);
-        public static final SSEOp DIV       = new SSEOp("DIV",             P_0F, 0x5E);
-        public static final SSEOp MAX       = new SSEOp("MAX",             P_0F, 0x5F);
-        // @formatter:on
-
-        protected SSEOp(String opcode, int prefix, int op) {
-            this(opcode, prefix, op, OpAssertion.FloatingAssertion);
-        }
-
-        protected SSEOp(String opcode, int prefix, int op, OpAssertion assertion) {
-            this(opcode, 0, prefix, op, assertion);
-        }
-
-        protected SSEOp(String opcode, int mandatoryPrefix, int prefix, int op, OpAssertion assertion) {
-            super(opcode, mandatoryPrefix, prefix, op, assertion, CPUFeature.SSE2);
-        }
-    }
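-    /*
-     * Usage sketch (assuming an AMD64Assembler asm, two XMM registers and the scalar
-     * floating-point OperandSize constants defined elsewhere in this file): since SSEOp extends
-     * AMD64RMOp, a scalar double-precision add could be emitted as
-     * SSEOp.ADD.emit(asm, SD, xmm0, xmm1), which should produce the ADDSD encoding F2 0F 58 /r,
-     * with the F2 prefix contributed by the operand size rather than by the opcode itself.
-     */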
-
-    /**
-     * Arithmetic operation with operand order of RM, MR or MI.
-     */
-    public static final class AMD64BinaryArithmetic {
-        // @formatter:off
-        public static final AMD64BinaryArithmetic ADD = new AMD64BinaryArithmetic("ADD", 0);
-        public static final AMD64BinaryArithmetic OR  = new AMD64BinaryArithmetic("OR",  1);
-        public static final AMD64BinaryArithmetic ADC = new AMD64BinaryArithmetic("ADC", 2);
-        public static final AMD64BinaryArithmetic SBB = new AMD64BinaryArithmetic("SBB", 3);
-        public static final AMD64BinaryArithmetic AND = new AMD64BinaryArithmetic("AND", 4);
-        public static final AMD64BinaryArithmetic SUB = new AMD64BinaryArithmetic("SUB", 5);
-        public static final AMD64BinaryArithmetic XOR = new AMD64BinaryArithmetic("XOR", 6);
-        public static final AMD64BinaryArithmetic CMP = new AMD64BinaryArithmetic("CMP", 7);
-        // @formatter:on
-
-        private final AMD64MIOp byteImmOp;
-        private final AMD64MROp byteMrOp;
-        private final AMD64RMOp byteRmOp;
-
-        private final AMD64MIOp immOp;
-        private final AMD64MIOp immSxOp;
-        private final AMD64MROp mrOp;
-        private final AMD64RMOp rmOp;
-
-        private AMD64BinaryArithmetic(String opcode, int code) {
-            int baseOp = code << 3;
-
-            byteImmOp = new AMD64MIOp(opcode, true, 0, 0x80, code, OpAssertion.ByteAssertion);
-            byteMrOp = new AMD64MROp(opcode, 0, baseOp, OpAssertion.ByteAssertion);
-            byteRmOp = new AMD64RMOp(opcode, 0, baseOp | 0x02, OpAssertion.ByteAssertion);
-
-            immOp = new AMD64MIOp(opcode, false, 0, 0x81, code, OpAssertion.IntegerAssertion);
-            immSxOp = new AMD64MIOp(opcode, true, 0, 0x83, code, OpAssertion.IntegerAssertion);
-            mrOp = new AMD64MROp(opcode, 0, baseOp | 0x01, OpAssertion.IntegerAssertion);
-            rmOp = new AMD64RMOp(opcode, 0, baseOp | 0x03, OpAssertion.IntegerAssertion);
-        }
-
-        public AMD64MIOp getMIOpcode(OperandSize size, boolean sx) {
-            if (size == BYTE) {
-                return byteImmOp;
-            } else if (sx) {
-                return immSxOp;
-            } else {
-                return immOp;
-            }
-        }
-
-        public AMD64MROp getMROpcode(OperandSize size) {
-            if (size == BYTE) {
-                return byteMrOp;
-            } else {
-                return mrOp;
-            }
-        }
-
-        public AMD64RMOp getRMOpcode(OperandSize size) {
-            if (size == BYTE) {
-                return byteRmOp;
-            } else {
-                return rmOp;
-            }
-        }
-    }
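-    /*
-     * Worked example of the encoding scheme above, using SUB (code 5): the base opcode is
-     * 5 << 3 = 0x28, giving 0x29 for the MR form (sub r/m32, r32), 0x2A and 0x2B for the byte and
-     * 32-bit RM forms, and the immediate forms 0x80 /5 ib, 0x81 /5 id and 0x83 /5 ib, where /5 is
-     * the code placed in the reg field of the ModRM byte. Assuming an assembler asm and a low
-     * register reg, SUB.getMIOpcode(DWORD, true).emit(asm, DWORD, reg, 1) should therefore emit
-     * the sign-extended-immediate form 83 /5 01.
-     */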
-
-    /**
-     * Shift operation with operand order of M1, MC or MI.
-     */
-    public static final class AMD64Shift {
-        // @formatter:off
-        public static final AMD64Shift ROL = new AMD64Shift("ROL", 0);
-        public static final AMD64Shift ROR = new AMD64Shift("ROR", 1);
-        public static final AMD64Shift RCL = new AMD64Shift("RCL", 2);
-        public static final AMD64Shift RCR = new AMD64Shift("RCR", 3);
-        public static final AMD64Shift SHL = new AMD64Shift("SHL", 4);
-        public static final AMD64Shift SHR = new AMD64Shift("SHR", 5);
-        public static final AMD64Shift SAR = new AMD64Shift("SAR", 7);
-        // @formatter:on
-
-        public final AMD64MOp m1Op;
-        public final AMD64MOp mcOp;
-        public final AMD64MIOp miOp;
-
-        private AMD64Shift(String opcode, int code) {
-            m1Op = new AMD64MOp(opcode, 0, 0xD1, code, OpAssertion.IntegerAssertion);
-            mcOp = new AMD64MOp(opcode, 0, 0xD3, code, OpAssertion.IntegerAssertion);
-            miOp = new AMD64MIOp(opcode, true, 0, 0xC1, code, OpAssertion.IntegerAssertion);
-        }
-    }
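-    /*
-     * Usage sketch (assuming an AMD64Assembler asm and a general-purpose register reg): the three
-     * variants correspond to the x86 shift forms. For SHL (code 4), miOp is C1 /4 ib, so
-     * SHL.miOp.emit(asm, DWORD, reg, 3) should emit "shl reg, 3"; m1Op (D1 /4) shifts by one and
-     * mcOp (D3 /4) shifts by the count held in CL.
-     */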
-
-    public final void addl(AMD64Address dst, int imm32) {
-        ADD.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
-    }
-
-    public final void addl(Register dst, int imm32) {
-        ADD.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
-    }
-
-    private void addrNop4() {
-        // 4 bytes: NOP DWORD PTR [EAX+0]
-        emitByte(0x0F);
-        emitByte(0x1F);
-        emitByte(0x40); // emitRm(cbuf, 0x1, EAXEnc, EAXEnc);
-        emitByte(0); // 8-bit offset (1 byte)
-    }
-
-    private void addrNop5() {
-        // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bit offset
-        emitByte(0x0F);
-        emitByte(0x1F);
-        emitByte(0x44); // emitRm(cbuf, 0x1, EAXEnc, 0x4);
-        emitByte(0x00); // emitRm(cbuf, 0x0, EAXEnc, EAXEnc);
-        emitByte(0); // 8-bit offset (1 byte)
-    }
-
-    private void addrNop7() {
-        // 7 bytes: NOP DWORD PTR [EAX+0] 32-bit offset
-        emitByte(0x0F);
-        emitByte(0x1F);
-        emitByte(0x80); // emitRm(cbuf, 0x2, EAXEnc, EAXEnc);
-        emitInt(0); // 32-bit offset (4 bytes)
-    }
-
-    private void addrNop8() {
-        // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bit offset
-        emitByte(0x0F);
-        emitByte(0x1F);
-        emitByte(0x84); // emitRm(cbuf, 0x2, EAXEnc, 0x4);
-        emitByte(0x00); // emitRm(cbuf, 0x0, EAXEnc, EAXEnc);
-        emitInt(0); // 32-bit offset (4 bytes)
-    }
-
-    public final void andl(Register dst, int imm32) {
-        AND.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
-    }
-
-    public final void bswapl(Register reg) {
-        int encode = prefixAndEncode(reg.encoding);
-        emitByte(0x0F);
-        emitByte(0xC8 | encode);
-    }
-
-    public final void cdql() {
-        emitByte(0x99);
-    }
-
-    public final void cmovl(ConditionFlag cc, Register dst, Register src) {
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x40 | cc.getValue());
-        emitByte(0xC0 | encode);
-    }
-
-    public final void cmovl(ConditionFlag cc, Register dst, AMD64Address src) {
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x40 | cc.getValue());
-        emitOperandHelper(dst, src);
-    }
-
-    public final void cmpl(Register dst, int imm32) {
-        CMP.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
-    }
-
-    public final void cmpl(Register dst, Register src) {
-        CMP.rmOp.emit(this, DWORD, dst, src);
-    }
-
-    public final void cmpl(Register dst, AMD64Address src) {
-        CMP.rmOp.emit(this, DWORD, dst, src);
-    }
-
-    public final void cmpl(AMD64Address dst, int imm32) {
-        CMP.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
-    }
-
-    // The 32-bit cmpxchg compares the value at adr with the contents of rax:
-    // if they are equal, reg is stored into adr; otherwise, the value at adr is loaded into rax.
-    // The ZF flag is set if the compared values were equal, and cleared otherwise.
-    public final void cmpxchgl(Register reg, AMD64Address adr) { // cmpxchg
-        prefix(adr, reg);
-        emitByte(0x0F);
-        emitByte(0xB1);
-        emitOperandHelper(reg, adr);
-    }
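-    /*
-     * A simple compare-and-swap retry loop built from the primitives above might look as follows.
-     * This is only a sketch: it assumes an AMD64Assembler asm, that rax already holds the expected
-     * value, that newVal and addr are a Register and an AMD64Address, and that the inherited
-     * Assembler.bind(Label) and a ConditionFlag.NotEqual constant are available.
-     *
-     *     Label retry = new Label();
-     *     asm.bind(retry);
-     *     asm.lock();
-     *     asm.cmpxchgl(newVal, addr); // on failure, rax is refreshed with the current value
-     *     asm.jcc(ConditionFlag.NotEqual, retry);
-     */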
-
-    protected final void decl(AMD64Address dst) {
-        prefix(dst);
-        emitByte(0xFF);
-        emitOperandHelper(1, dst);
-    }
-
-    public final void hlt() {
-        emitByte(0xF4);
-    }
-
-    public final void imull(Register dst, Register src, int value) {
-        if (isByte(value)) {
-            AMD64RMIOp.IMUL_SX.emit(this, DWORD, dst, src, value);
-        } else {
-            AMD64RMIOp.IMUL.emit(this, DWORD, dst, src, value);
-        }
-    }
-
-    protected final void incl(AMD64Address dst) {
-        prefix(dst);
-        emitByte(0xFF);
-        emitOperandHelper(0, dst);
-    }
-
-    public void jcc(ConditionFlag cc, int jumpTarget, boolean forceDisp32) {
-        int shortSize = 2;
-        int longSize = 6;
-        long disp = jumpTarget - position();
-        if (!forceDisp32 && isByte(disp - shortSize)) {
-            // 0111 tttn #8-bit disp
-            emitByte(0x70 | cc.getValue());
-            emitByte((int) ((disp - shortSize) & 0xFF));
-        } else {
-            // 0000 1111 1000 tttn #32-bit disp
-            assert isInt(disp - longSize) : "must be a 32-bit offset";
-            emitByte(0x0F);
-            emitByte(0x80 | cc.getValue());
-            emitInt((int) (disp - longSize));
-        }
-    }
-
-    public final void jcc(ConditionFlag cc, Label l) {
-        assert (0 <= cc.getValue()) && (cc.getValue() < 16) : "illegal cc";
-        if (l.isBound()) {
-            jcc(cc, l.position(), false);
-        } else {
-            // Note: we could eliminate conditional jumps to this jump if the condition
-            // is the same; however, that seems to be a rather unlikely case.
-            // Note: use jccb() if the label to be bound is very close, to get
-            // an 8-bit displacement.
-            l.addPatchAt(position());
-            emitByte(0x0F);
-            emitByte(0x80 | cc.getValue());
-            emitInt(0);
-        }
-
-    }
-
-    public final void jccb(ConditionFlag cc, Label l) {
-        if (l.isBound()) {
-            int shortSize = 2;
-            int entry = l.position();
-            assert isByte(entry - (position() + shortSize)) : "Displacement too large for a short jmp";
-            long disp = entry - position();
-            // 0111 tttn #8-bit disp
-            emitByte(0x70 | cc.getValue());
-            emitByte((int) ((disp - shortSize) & 0xFF));
-        } else {
-            l.addPatchAt(position());
-            emitByte(0x70 | cc.getValue());
-            emitByte(0);
-        }
-    }
-
-    public final void jmp(int jumpTarget, boolean forceDisp32) {
-        int shortSize = 2;
-        int longSize = 5;
-        long disp = jumpTarget - position();
-        if (!forceDisp32 && isByte(disp - shortSize)) {
-            emitByte(0xEB);
-            emitByte((int) ((disp - shortSize) & 0xFF));
-        } else {
-            emitByte(0xE9);
-            emitInt((int) (disp - longSize));
-        }
-    }
-
-    @Override
-    public final void jmp(Label l) {
-        if (l.isBound()) {
-            jmp(l.position(), false);
-        } else {
-            // By default, forward jumps are always 32-bit displacements, since
-            // we can't yet know where the label will be bound. If you're sure that
-            // the forward jump will not run beyond 256 bytes, use jmpb to
-            // force an 8-bit displacement.
-
-            l.addPatchAt(position());
-            emitByte(0xE9);
-            emitInt(0);
-        }
-    }
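-    /*
-     * Forward-jump sketch (assuming the inherited Assembler.bind(Label) is available): jumping to
-     * an unbound label records a patch at the current position and emits E9 with a placeholder
-     * 32-bit displacement, which patchJumpTarget() fills in once the label is bound.
-     *
-     *     Label done = new Label();
-     *     asm.jmp(done);   // emits E9 00 00 00 00 and registers a patch
-     *     // ... code to be skipped ...
-     *     asm.bind(done);  // binding the label patches the recorded displacement
-     */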
-
-    public final void jmp(Register entry) {
-        int encode = prefixAndEncode(entry.encoding);
-        emitByte(0xFF);
-        emitByte(0xE0 | encode);
-    }
-
-    public final void jmpb(Label l) {
-        if (l.isBound()) {
-            int shortSize = 2;
-            int entry = l.position();
-            assert isByte((entry - position()) + shortSize) : "Displacement too large for a short jmp";
-            long offs = entry - position();
-            emitByte(0xEB);
-            emitByte((int) ((offs - shortSize) & 0xFF));
-        } else {
-
-            l.addPatchAt(position());
-            emitByte(0xEB);
-            emitByte(0);
-        }
-    }
-
-    public final void leaq(Register dst, AMD64Address src) {
-        prefixq(src, dst);
-        emitByte(0x8D);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void leave() {
-        emitByte(0xC9);
-    }
-
-    public final void lock() {
-        emitByte(0xF0);
-    }
-
-    public final void movapd(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        int dstenc = dst.encoding;
-        int srcenc = src.encoding;
-        emitByte(0x66);
-        if (dstenc < 8) {
-            if (srcenc >= 8) {
-                emitByte(Prefix.REXB);
-                srcenc -= 8;
-            }
-        } else {
-            if (srcenc < 8) {
-                emitByte(Prefix.REXR);
-            } else {
-                emitByte(Prefix.REXRB);
-                srcenc -= 8;
-            }
-            dstenc -= 8;
-        }
-        emitByte(0x0F);
-        emitByte(0x28);
-        emitByte(0xC0 | dstenc << 3 | srcenc);
-    }
-
-    public final void movaps(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        int dstenc = dst.encoding;
-        int srcenc = src.encoding;
-        if (dstenc < 8) {
-            if (srcenc >= 8) {
-                emitByte(Prefix.REXB);
-                srcenc -= 8;
-            }
-        } else {
-            if (srcenc < 8) {
-                emitByte(Prefix.REXR);
-            } else {
-                emitByte(Prefix.REXRB);
-                srcenc -= 8;
-            }
-            dstenc -= 8;
-        }
-        emitByte(0x0F);
-        emitByte(0x28);
-        emitByte(0xC0 | dstenc << 3 | srcenc);
-    }
-
-    public final void movb(AMD64Address dst, int imm8) {
-        prefix(dst);
-        emitByte(0xC6);
-        emitOperandHelper(0, dst);
-        emitByte(imm8);
-    }
-
-    public final void movb(AMD64Address dst, Register src) {
-        assert src.getRegisterCategory().equals(AMD64.CPU) : "must have byte register";
-        prefix(dst, src, true);
-        emitByte(0x88);
-        emitOperandHelper(src, dst);
-    }
-
-    public final void movl(Register dst, int imm32) {
-        int encode = prefixAndEncode(dst.encoding);
-        emitByte(0xB8 | encode);
-        emitInt(imm32);
-    }
-
-    public final void movl(Register dst, Register src) {
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x8B);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void movl(Register dst, AMD64Address src) {
-        prefix(src, dst);
-        emitByte(0x8B);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void movl(AMD64Address dst, int imm32) {
-        prefix(dst);
-        emitByte(0xC7);
-        emitOperandHelper(0, dst);
-        emitInt(imm32);
-    }
-
-    public final void movl(AMD64Address dst, Register src) {
-        prefix(dst, src);
-        emitByte(0x89);
-        emitOperandHelper(src, dst);
-    }
-
-    /**
-     * Newer CPUs require the use of movsd and movss to avoid a partial register stall when loading
-     * from memory. On older Opterons, however, movlpd should be used instead of movsd. The selection is done in
-     * {@link AMD64MacroAssembler#movdbl(Register, AMD64Address)} and
-     * {@link AMD64MacroAssembler#movflt(Register, Register)}.
-     */
-    public final void movlpd(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0x66);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x12);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void movq(Register dst, AMD64Address src) {
-        if (dst.getRegisterCategory().equals(AMD64.XMM)) {
-            emitByte(0xF3);
-            prefixq(src, dst);
-            emitByte(0x0F);
-            emitByte(0x7E);
-            emitOperandHelper(dst, src);
-        } else {
-            prefixq(src, dst);
-            emitByte(0x8B);
-            emitOperandHelper(dst, src);
-        }
-    }
-
-    public final void movq(Register dst, Register src) {
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x8B);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void movq(AMD64Address dst, Register src) {
-        if (src.getRegisterCategory().equals(AMD64.XMM)) {
-            emitByte(0x66);
-            prefixq(dst, src);
-            emitByte(0x0F);
-            emitByte(0xD6);
-            emitOperandHelper(src, dst);
-        } else {
-            prefixq(dst, src);
-            emitByte(0x89);
-            emitOperandHelper(src, dst);
-        }
-    }
-
-    public final void movsbl(Register dst, AMD64Address src) {
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0xBE);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void movsbl(Register dst, Register src) {
-        int encode = prefixAndEncode(dst.encoding, false, src.encoding, true);
-        emitByte(0x0F);
-        emitByte(0xBE);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void movsbq(Register dst, AMD64Address src) {
-        prefixq(src, dst);
-        emitByte(0x0F);
-        emitByte(0xBE);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void movsbq(Register dst, Register src) {
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0xBE);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void movsd(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF2);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x10);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void movsd(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF2);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x10);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void movsd(AMD64Address dst, Register src) {
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF2);
-        prefix(dst, src);
-        emitByte(0x0F);
-        emitByte(0x11);
-        emitOperandHelper(src, dst);
-    }
-
-    public final void movss(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF3);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x10);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void movss(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF3);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x10);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void movss(AMD64Address dst, Register src) {
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        emitByte(0xF3);
-        prefix(dst, src);
-        emitByte(0x0F);
-        emitByte(0x11);
-        emitOperandHelper(src, dst);
-    }
-
-    public final void movswl(Register dst, AMD64Address src) {
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0xBF);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void movw(AMD64Address dst, int imm16) {
-        emitByte(0x66); // operand-size override prefix: 16-bit operand
-        prefix(dst);
-        emitByte(0xC7);
-        emitOperandHelper(0, dst);
-        emitShort(imm16);
-    }
-
-    public final void movw(AMD64Address dst, Register src) {
-        emitByte(0x66);
-        prefix(dst, src);
-        emitByte(0x89);
-        emitOperandHelper(src, dst);
-    }
-
-    public final void movzbl(Register dst, AMD64Address src) {
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0xB6);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void movzwl(Register dst, AMD64Address src) {
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0xB7);
-        emitOperandHelper(dst, src);
-    }
-
-    @Override
-    public final void ensureUniquePC() {
-        nop();
-    }
-
-    public final void nop() {
-        nop(1);
-    }
-
-    public void nop(int count) {
-        int i = count;
-        if (UseNormalNop) {
-            assert i > 0 : " ";
-            // The fancy nops aren't currently recognized by debuggers, making it a
-            // pain to disassemble code while debugging. If asserts are on, speed is
-            // clearly not an issue, so simply use the traditional single-byte nop
-            // for alignment.
-
-            for (; i > 0; i--) {
-                emitByte(0x90);
-            }
-            return;
-        }
-
-        if (UseAddressNop) {
-            //
-            // Using multi-byte nops "0x0F 0x1F [Address]" for AMD.
-            // 1: 0x90
-            // 2: 0x66 0x90
-            // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
-            // 4: 0x0F 0x1F 0x40 0x00
-            // 5: 0x0F 0x1F 0x44 0x00 0x00
-            // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
-            // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
-            // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
-            // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
-            // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
-            // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
-
-            // The remaining encodings are AMD-specific - use consecutive address nops
-
-            // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
-            // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
-            // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
-            // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
-            // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
-            // Size prefixes (0x66) are added for larger sizes
-
-            while (i >= 22) {
-                i -= 11;
-                emitByte(0x66); // size prefix
-                emitByte(0x66); // size prefix
-                emitByte(0x66); // size prefix
-                addrNop8();
-            }
-            // Generate the first nop for sizes between 21 and 12
-            switch (i) {
-                case 21:
-                    i -= 1;
-                    emitByte(0x66); // size prefix
-                    // fall through
-                case 20:
-                    // fall through
-                case 19:
-                    i -= 1;
-                    emitByte(0x66); // size prefix
-                    // fall through
-                case 18:
-                    // fall through
-                case 17:
-                    i -= 1;
-                    emitByte(0x66); // size prefix
-                    // fall through
-                case 16:
-                    // fall through
-                case 15:
-                    i -= 8;
-                    addrNop8();
-                    break;
-                case 14:
-                case 13:
-                    i -= 7;
-                    addrNop7();
-                    break;
-                case 12:
-                    i -= 6;
-                    emitByte(0x66); // size prefix
-                    addrNop5();
-                    break;
-                default:
-                    assert i < 12;
-            }
-
-            // Generate the second nop for sizes between 11 and 1
-            switch (i) {
-                case 11:
-                    emitByte(0x66); // size prefix
-                    emitByte(0x66); // size prefix
-                    emitByte(0x66); // size prefix
-                    addrNop8();
-                    break;
-                case 10:
-                    emitByte(0x66); // size prefix
-                    emitByte(0x66); // size prefix
-                    addrNop8();
-                    break;
-                case 9:
-                    emitByte(0x66); // size prefix
-                    addrNop8();
-                    break;
-                case 8:
-                    addrNop8();
-                    break;
-                case 7:
-                    addrNop7();
-                    break;
-                case 6:
-                    emitByte(0x66); // size prefix
-                    addrNop5();
-                    break;
-                case 5:
-                    addrNop5();
-                    break;
-                case 4:
-                    addrNop4();
-                    break;
-                case 3:
-                    // Don't use "0x0F 0x1F 0x00" - need patching safe padding
-                    emitByte(0x66); // size prefix
-                    emitByte(0x66); // size prefix
-                    emitByte(0x90); // nop
-                    break;
-                case 2:
-                    emitByte(0x66); // size prefix
-                    emitByte(0x90); // nop
-                    break;
-                case 1:
-                    emitByte(0x90); // nop
-                    break;
-                default:
-                    assert i == 0;
-            }
-            return;
-        }
-
-        // Using nops with size prefixes "0x66 0x90".
-        // From AMD Optimization Guide:
-        // 1: 0x90
-        // 2: 0x66 0x90
-        // 3: 0x66 0x66 0x90
-        // 4: 0x66 0x66 0x66 0x90
-        // 5: 0x66 0x66 0x90 0x66 0x90
-        // 6: 0x66 0x66 0x90 0x66 0x66 0x90
-        // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
-        // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
-        // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
-        // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
-        //
-        while (i > 12) {
-            i -= 4;
-            emitByte(0x66); // size prefix
-            emitByte(0x66);
-            emitByte(0x66);
-            emitByte(0x90); // nop
-        }
-        // 1 - 12 nops
-        if (i > 8) {
-            if (i > 9) {
-                i -= 1;
-                emitByte(0x66);
-            }
-            i -= 3;
-            emitByte(0x66);
-            emitByte(0x66);
-            emitByte(0x90);
-        }
-        // 1 - 8 nops
-        if (i > 4) {
-            if (i > 6) {
-                i -= 1;
-                emitByte(0x66);
-            }
-            i -= 3;
-            emitByte(0x66);
-            emitByte(0x66);
-            emitByte(0x90);
-        }
-        switch (i) {
-            case 4:
-                emitByte(0x66);
-                emitByte(0x66);
-                emitByte(0x66);
-                emitByte(0x90);
-                break;
-            case 3:
-                emitByte(0x66);
-                emitByte(0x66);
-                emitByte(0x90);
-                break;
-            case 2:
-                emitByte(0x66);
-                emitByte(0x90);
-                break;
-            case 1:
-                emitByte(0x90);
-                break;
-            default:
-                assert i == 0;
-        }
-    }
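-    /*
-     * For example, with UseAddressNop set, nop(5) falls through both switches to the 5-byte case
-     * and emits the single instruction 0F 1F 44 00 00 (nop dword ptr [eax+eax*1+0]), whereas with
-     * UseNormalNop it would emit five plain 0x90 bytes.
-     */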
-
-    public final void pop(Register dst) {
-        int encode = prefixAndEncode(dst.encoding);
-        emitByte(0x58 | encode);
-    }
-
-    public void popfq() {
-        emitByte(0x9D);
-    }
-
-    public final void ptest(Register dst, Register src) {
-        assert supports(CPUFeature.SSE4_1);
-        emitByte(0x66);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x38);
-        emitByte(0x17);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void push(Register src) {
-        int encode = prefixAndEncode(src.encoding);
-        emitByte(0x50 | encode);
-    }
-
-    public void pushfq() {
-        emitByte(0x9c);
-    }
-
-    public final void pxor(Register dst, Register src) {
-        emitByte(0x66);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0xEF);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void ret(int imm16) {
-        if (imm16 == 0) {
-            emitByte(0xC3);
-        } else {
-            emitByte(0xC2);
-            emitShort(imm16);
-        }
-    }
-
-    public final void subl(AMD64Address dst, int imm32) {
-        SUB.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
-    }
-
-    public final void subl(Register dst, int imm32) {
-        SUB.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
-    }
-
-    public final void testl(Register dst, int imm32) {
-        // Not using emitArith because test
-        // does not support sign-extension of
-        // 8-bit operands.
-        int encode = dst.encoding;
-        if (encode == 0) {
-            emitByte(0xA9);
-        } else {
-            encode = prefixAndEncode(encode);
-            emitByte(0xF7);
-            emitByte(0xC0 | encode);
-        }
-        emitInt(imm32);
-    }
-
-    public final void testl(Register dst, Register src) {
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x85);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void testl(Register dst, AMD64Address src) {
-        prefix(src, dst);
-        emitByte(0x85);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void xorl(Register dst, Register src) {
-        XOR.rmOp.emit(this, DWORD, dst, src);
-    }
-
-    public final void xorpd(Register dst, Register src) {
-        emitByte(0x66);
-        xorps(dst, src);
-    }
-
-    public final void xorps(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
-        int encode = prefixAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x57);
-        emitByte(0xC0 | encode);
-    }
-
-    protected final void decl(Register dst) {
-        // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
-        int encode = prefixAndEncode(dst.encoding);
-        emitByte(0xFF);
-        emitByte(0xC8 | encode);
-    }
-
-    protected final void incl(Register dst) {
-        // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
-        int encode = prefixAndEncode(dst.encoding);
-        emitByte(0xFF);
-        emitByte(0xC0 | encode);
-    }
-
-    private int prefixAndEncode(int regEnc) {
-        return prefixAndEncode(regEnc, false);
-    }
-
-    private int prefixAndEncode(int regEnc, boolean byteinst) {
-        if (regEnc >= 8) {
-            emitByte(Prefix.REXB);
-            return regEnc - 8;
-        } else if (byteinst && regEnc >= 4) {
-            emitByte(Prefix.REX);
-        }
-        return regEnc;
-    }
-
-    private int prefixqAndEncode(int regEnc) {
-        if (regEnc < 8) {
-            emitByte(Prefix.REXW);
-            return regEnc;
-        } else {
-            emitByte(Prefix.REXWB);
-            return regEnc - 8;
-        }
-    }
-
-    private int prefixAndEncode(int dstEnc, int srcEnc) {
-        return prefixAndEncode(dstEnc, false, srcEnc, false);
-    }
-
-    private int prefixAndEncode(int dstEncoding, boolean dstIsByte, int srcEncoding, boolean srcIsByte) {
-        int srcEnc = srcEncoding;
-        int dstEnc = dstEncoding;
-        if (dstEnc < 8) {
-            if (srcEnc >= 8) {
-                emitByte(Prefix.REXB);
-                srcEnc -= 8;
-            } else if ((srcIsByte && srcEnc >= 4) || (dstIsByte && dstEnc >= 4)) {
-                emitByte(Prefix.REX);
-            }
-        } else {
-            if (srcEnc < 8) {
-                emitByte(Prefix.REXR);
-            } else {
-                emitByte(Prefix.REXRB);
-                srcEnc -= 8;
-            }
-            dstEnc -= 8;
-        }
-        return dstEnc << 3 | srcEnc;
-    }
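-    /*
-     * Worked example (assuming the usual AMD64 register constants): movl(r8, rax) calls
-     * prefixAndEncode(8, 0), which emits REX.R (0x44) and returns 0, so the complete encoding is
-     * 44 8B C0, i.e. "mov r8d, eax". When both encodings fit in 3 bits and no byte register is
-     * involved, no REX prefix is emitted at all.
-     */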
-
-    /**
-     * Creates the REX.W prefix and the encoding of the lower 6 bits of the ModRM byte. The prefix
-     * is emitted directly; if an operand encoding exceeds 3 bits, its 4th bit is encoded in the prefix.
-     *
-     * @param regEncoding the encoding of the register part of the ModRM-Byte
-     * @param rmEncoding the encoding of the r/m part of the ModRM-Byte
-     * @return the lower 6 bits of the ModRM-Byte that should be emitted
-     */
-    private int prefixqAndEncode(int regEncoding, int rmEncoding) {
-        int rmEnc = rmEncoding;
-        int regEnc = regEncoding;
-        if (regEnc < 8) {
-            if (rmEnc < 8) {
-                emitByte(Prefix.REXW);
-            } else {
-                emitByte(Prefix.REXWB);
-                rmEnc -= 8;
-            }
-        } else {
-            if (rmEnc < 8) {
-                emitByte(Prefix.REXWR);
-            } else {
-                emitByte(Prefix.REXWRB);
-                rmEnc -= 8;
-            }
-            regEnc -= 8;
-        }
-        return regEnc << 3 | rmEnc;
-    }
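-    /*
-     * Worked example: movq(rax, r9) calls prefixqAndEncode(0, 9), which emits REX.WB (0x49) and
-     * returns 1, so the emitted sequence is 49 8B C1, i.e. "mov rax, r9". Unlike the 32-bit
-     * variant above, a REX.W prefix is emitted even when both encodings fit in 3 bits.
-     */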
-
-    private static boolean needsRex(Register reg) {
-        return reg.encoding >= MinEncodingNeedsRex;
-    }
-
-    private void prefix(AMD64Address adr) {
-        if (needsRex(adr.getBase())) {
-            if (needsRex(adr.getIndex())) {
-                emitByte(Prefix.REXXB);
-            } else {
-                emitByte(Prefix.REXB);
-            }
-        } else {
-            if (needsRex(adr.getIndex())) {
-                emitByte(Prefix.REXX);
-            }
-        }
-    }
-
-    private void prefixq(AMD64Address adr) {
-        if (needsRex(adr.getBase())) {
-            if (needsRex(adr.getIndex())) {
-                emitByte(Prefix.REXWXB);
-            } else {
-                emitByte(Prefix.REXWB);
-            }
-        } else {
-            if (needsRex(adr.getIndex())) {
-                emitByte(Prefix.REXWX);
-            } else {
-                emitByte(Prefix.REXW);
-            }
-        }
-    }
-
-    private void prefix(AMD64Address adr, Register reg) {
-        prefix(adr, reg, false);
-    }
-
-    private void prefix(AMD64Address adr, Register reg, boolean byteinst) {
-        if (reg.encoding < 8) {
-            if (needsRex(adr.getBase())) {
-                if (needsRex(adr.getIndex())) {
-                    emitByte(Prefix.REXXB);
-                } else {
-                    emitByte(Prefix.REXB);
-                }
-            } else {
-                if (needsRex(adr.getIndex())) {
-                    emitByte(Prefix.REXX);
-                } else if (byteinst && reg.encoding >= 4) {
-                    emitByte(Prefix.REX);
-                }
-            }
-        } else {
-            if (needsRex(adr.getBase())) {
-                if (needsRex(adr.getIndex())) {
-                    emitByte(Prefix.REXRXB);
-                } else {
-                    emitByte(Prefix.REXRB);
-                }
-            } else {
-                if (needsRex(adr.getIndex())) {
-                    emitByte(Prefix.REXRX);
-                } else {
-                    emitByte(Prefix.REXR);
-                }
-            }
-        }
-    }
-
-    private void prefixq(AMD64Address adr, Register src) {
-        if (src.encoding < 8) {
-            if (needsRex(adr.getBase())) {
-                if (needsRex(adr.getIndex())) {
-                    emitByte(Prefix.REXWXB);
-                } else {
-                    emitByte(Prefix.REXWB);
-                }
-            } else {
-                if (needsRex(adr.getIndex())) {
-                    emitByte(Prefix.REXWX);
-                } else {
-                    emitByte(Prefix.REXW);
-                }
-            }
-        } else {
-            if (needsRex(adr.getBase())) {
-                if (needsRex(adr.getIndex())) {
-                    emitByte(Prefix.REXWRXB);
-                } else {
-                    emitByte(Prefix.REXWRB);
-                }
-            } else {
-                if (needsRex(adr.getIndex())) {
-                    emitByte(Prefix.REXWRX);
-                } else {
-                    emitByte(Prefix.REXWR);
-                }
-            }
-        }
-    }
-
-    public final void addq(Register dst, int imm32) {
-        ADD.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
-    }
-
-    public final void addq(AMD64Address dst, int imm32) {
-        ADD.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
-    }
-
-    public final void addq(Register dst, Register src) {
-        ADD.rmOp.emit(this, QWORD, dst, src);
-    }
-
-    public final void addq(AMD64Address dst, Register src) {
-        ADD.mrOp.emit(this, QWORD, dst, src);
-    }
-
-    public final void andq(Register dst, int imm32) {
-        AND.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
-    }
-
-    public final void bswapq(Register reg) {
-        int encode = prefixqAndEncode(reg.encoding);
-        emitByte(0x0F);
-        emitByte(0xC8 | encode);
-    }
-
-    public final void cdqq() {
-        emitByte(Prefix.REXW);
-        emitByte(0x99);
-    }
-
-    public final void cmovq(ConditionFlag cc, Register dst, Register src) {
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x0F);
-        emitByte(0x40 | cc.getValue());
-        emitByte(0xC0 | encode);
-    }
-
-    public final void cmovq(ConditionFlag cc, Register dst, AMD64Address src) {
-        prefixq(src, dst);
-        emitByte(0x0F);
-        emitByte(0x40 | cc.getValue());
-        emitOperandHelper(dst, src);
-    }
-
-    public final void cmpq(Register dst, int imm32) {
-        CMP.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
-    }
-
-    public final void cmpq(Register dst, Register src) {
-        CMP.rmOp.emit(this, QWORD, dst, src);
-    }
-
-    public final void cmpq(Register dst, AMD64Address src) {
-        CMP.rmOp.emit(this, QWORD, dst, src);
-    }
-
-    public final void cmpxchgq(Register reg, AMD64Address adr) {
-        prefixq(adr, reg);
-        emitByte(0x0F);
-        emitByte(0xB1);
-        emitOperandHelper(reg, adr);
-    }
-
-    protected final void decq(Register dst) {
-        // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
-        int encode = prefixqAndEncode(dst.encoding);
-        emitByte(0xFF);
-        emitByte(0xC8 | encode);
-    }
-
-    public final void decq(AMD64Address dst) {
-        DEC.emit(this, QWORD, dst);
-    }
-
-    public final void incq(Register dst) {
-        // Don't use this directly; use AMD64MacroAssembler.incrementq() instead.
-        // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
-        int encode = prefixqAndEncode(dst.encoding);
-        emitByte(0xFF);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void incq(AMD64Address dst) {
-        INC.emit(this, QWORD, dst);
-    }
-
-    public final void movq(Register dst, long imm64) {
-        int encode = prefixqAndEncode(dst.encoding);
-        emitByte(0xB8 | encode);
-        emitLong(imm64);
-    }
-
-    public final void movslq(Register dst, int imm32) {
-        int encode = prefixqAndEncode(dst.encoding);
-        emitByte(0xC7);
-        emitByte(0xC0 | encode);
-        emitInt(imm32);
-    }
-
-    public final void movdq(Register dst, Register src) {
-
-        // table D-1 says MMX/SSE2
-        emitByte(0x66);
-
-        if (dst.getRegisterCategory().equals(AMD64.XMM)) {
-            int encode = prefixqAndEncode(dst.encoding, src.encoding);
-            emitByte(0x0F);
-            emitByte(0x6E);
-            emitByte(0xC0 | encode);
-        } else if (src.getRegisterCategory().equals(AMD64.XMM)) {
-
-            // swap src/dst to get correct prefix
-            int encode = prefixqAndEncode(src.encoding, dst.encoding);
-            emitByte(0x0F);
-            emitByte(0x7E);
-            emitByte(0xC0 | encode);
-        } else {
-            throw new InternalError("should not reach here");
-        }
-    }
-
-    public final void movdqu(Register dst, AMD64Address src) {
-        emitByte(0xF3);
-        prefix(src, dst);
-        emitByte(0x0F);
-        emitByte(0x6F);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void movslq(AMD64Address dst, int imm32) {
-        prefixq(dst);
-        emitByte(0xC7);
-        emitOperandHelper(0, dst);
-        emitInt(imm32);
-    }
-
-    public final void movslq(Register dst, AMD64Address src) {
-        prefixq(src, dst);
-        emitByte(0x63);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void movslq(Register dst, Register src) {
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x63);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void negq(Register dst) {
-        int encode = prefixqAndEncode(dst.encoding);
-        emitByte(0xF7);
-        emitByte(0xD8 | encode);
-    }
-
-    public final void shlq(Register dst, int imm8) {
-        assert isShiftCount(imm8 >> 1) : "illegal shift count";
-        int encode = prefixqAndEncode(dst.encoding);
-        if (imm8 == 1) {
-            emitByte(0xD1);
-            emitByte(0xE0 | encode);
-        } else {
-            emitByte(0xC1);
-            emitByte(0xE0 | encode);
-            emitByte(imm8);
-        }
-    }
-
-    public final void shrq(Register dst, int imm8) {
-        assert isShiftCount(imm8 >> 1) : "illegal shift count";
-        int encode = prefixqAndEncode(dst.encoding);
-        if (imm8 == 1) {
-            emitByte(0xD1);
-            emitByte(0xE8 | encode);
-        } else {
-            emitByte(0xC1);
-            emitByte(0xE8 | encode);
-            emitByte(imm8);
-        }
-    }
-
-    public final void subq(Register dst, int imm32) {
-        SUB.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
-    }
-
-    public final void subq(AMD64Address dst, int imm32) {
-        SUB.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
-    }
-
-    public final void subqWide(Register dst, int imm32) {
-        // don't use the sign-extending version, forcing a 32-bit immediate
-        SUB.getMIOpcode(QWORD, false).emit(this, QWORD, dst, imm32);
-    }
-
-    public final void subq(Register dst, Register src) {
-        SUB.rmOp.emit(this, QWORD, dst, src);
-    }
-
-    public final void testq(Register dst, Register src) {
-        int encode = prefixqAndEncode(dst.encoding, src.encoding);
-        emitByte(0x85);
-        emitByte(0xC0 | encode);
-    }
-
-    public final void xaddl(AMD64Address dst, Register src) {
-        prefix(dst, src);
-        emitByte(0x0F);
-        emitByte(0xC1);
-        emitOperandHelper(src, dst);
-    }
-
-    public final void xaddq(AMD64Address dst, Register src) {
-        prefixq(dst, src);
-        emitByte(0x0F);
-        emitByte(0xC1);
-        emitOperandHelper(src, dst);
-    }
-
-    public final void xchgl(Register dst, AMD64Address src) {
-        prefix(src, dst);
-        emitByte(0x87);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void xchgq(Register dst, AMD64Address src) {
-        prefixq(src, dst);
-        emitByte(0x87);
-        emitOperandHelper(dst, src);
-    }
-
-    public final void membar(int barriers) {
-        if (target.isMP) {
-            // We only have to handle StoreLoad
-            if ((barriers & STORE_LOAD) != 0) {
-                // All usable chips support "locked" instructions, which suffice
-                // as barriers and are much faster than the alternative of
-                // using the cpuid instruction. We use a locked add [rsp], 0 here;
-                // apart from clobbering the flags, this is conveniently a no-op.
-                // Any change to this code may require revisiting other places in
-                // the code where this idiom is used, in particular the
-                // orderAccess code.
-                lock();
-                addl(new AMD64Address(rsp, 0), 0); // Assert the lock# signal here
-            }
-        }
-    }
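-    /*
-     * With the ADD opcodes defined above, this barrier should expand to the byte sequence
-     * F0 83 04 24 00 (lock add dword ptr [rsp], 0): the 0xF0 lock prefix from lock(), followed by
-     * the sign-extended-immediate form 83 /0 addressing [rsp] through a SIB byte and a zero
-     * immediate.
-     */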
-
-    @Override
-    protected final void patchJumpTarget(int branch, int branchTarget) {
-        int op = getByte(branch);
-        assert op == 0xE8 // call
-                        ||
-                        op == 0x00 // jump table entry
-                        || op == 0xE9 // jmp
-                        || op == 0xEB // short jmp
-                        || (op & 0xF0) == 0x70 // short jcc
-                        || op == 0x0F && (getByte(branch + 1) & 0xF0) == 0x80 // jcc
-        : "Invalid opcode at patch point branch=" + branch + ", branchTarget=" + branchTarget + ", op=" + op;
-
-        if (op == 0x00) {
-            int offsetToJumpTableBase = getShort(branch + 1);
-            int jumpTableBase = branch - offsetToJumpTableBase;
-            int imm32 = branchTarget - jumpTableBase;
-            emitInt(imm32, branch);
-        } else if (op == 0xEB || (op & 0xF0) == 0x70) {
-
-            // short offset operators (jmp and jcc)
-            final int imm8 = branchTarget - (branch + 2);
-            /*
-             * Since a wrongly patched short branch can potentially lead to code that works but
-             * behaves very badly, we always fail with an exception here instead of relying on an assert.
-             */
-            if (!NumUtil.isByte(imm8)) {
-                throw new InternalError("branch displacement out of range: " + imm8);
-            }
-            emitByte(imm8, branch + 1);
-
-        } else {
-
-            int off = 1;
-            if (op == 0x0F) {
-                off = 2;
-            }
-
-            int imm32 = branchTarget - (branch + 4 + off);
-            emitInt(imm32, branch + off);
-        }
-    }
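-    /*
-     * Example of the short-branch case: if a jmpb was emitted at position 100 (opcode EB at 100,
-     * placeholder byte at 101) and the label is later bound at position 112, the displacement is
-     * branchTarget - (branch + 2) = 10, so 0x0A is written at offset 101. Displacements outside
-     * the signed 8-bit range raise an InternalError rather than silently producing bad code.
-     */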
-
-    public void nullCheck(AMD64Address address) {
-        testl(AMD64.rax, address);
-    }
-
-    @Override
-    public void align(int modulus) {
-        if (position() % modulus != 0) {
-            nop(modulus - (position() % modulus));
-        }
-    }
-
-    /**
-     * Emits a direct call instruction. Note that the actual call target is not specified, because
-     * all calls need patching anyway. Therefore, 0 is emitted as the call target, and the user is
-     * responsible for adding the call address to the appropriate patching tables.
-     */
-    public final void call() {
-        emitByte(0xE8);
-        emitInt(0);
-    }
-
-    public final void call(Register src) {
-        int encode = prefixAndEncode(src.encoding);
-        emitByte(0xFF);
-        emitByte(0xD0 | encode);
-    }
-
-    public final void int3() {
-        emitByte(0xCC);
-    }
-
-    private void emitx87(int b1, int b2, int i) {
-        assert 0 <= i && i < 8 : "illegal stack offset";
-        emitByte(b1);
-        emitByte(b2 + i);
-    }
-
-    public final void fldd(AMD64Address src) {
-        emitByte(0xDD);
-        emitOperandHelper(0, src);
-    }
-
-    public final void flds(AMD64Address src) {
-        emitByte(0xD9);
-        emitOperandHelper(0, src);
-    }
-
-    public final void fldln2() {
-        emitByte(0xD9);
-        emitByte(0xED);
-    }
-
-    public final void fldlg2() {
-        emitByte(0xD9);
-        emitByte(0xEC);
-    }
-
-    public final void fyl2x() {
-        emitByte(0xD9);
-        emitByte(0xF1);
-    }
-
-    public final void fstps(AMD64Address src) {
-        emitByte(0xD9);
-        emitOperandHelper(3, src);
-    }
-
-    public final void fstpd(AMD64Address src) {
-        emitByte(0xDD);
-        emitOperandHelper(3, src);
-    }
-
-    private void emitFPUArith(int b1, int b2, int i) {
-        assert 0 <= i && i < 8 : "illegal FPU register: " + i;
-        emitByte(b1);
-        emitByte(b2 + i);
-    }
-
-    public void ffree(int i) {
-        emitFPUArith(0xDD, 0xC0, i);
-    }
-
-    public void fincstp() {
-        emitByte(0xD9);
-        emitByte(0xF7);
-    }
-
-    public void fxch(int i) {
-        emitFPUArith(0xD9, 0xC8, i);
-    }
-
-    public void fnstswAX() {
-        emitByte(0xDF);
-        emitByte(0xE0);
-    }
-
-    public void fwait() {
-        emitByte(0x9B);
-    }
-
-    public void fprem() {
-        emitByte(0xD9);
-        emitByte(0xF8);
-    }
-
-    public final void fsin() {
-        emitByte(0xD9);
-        emitByte(0xFE);
-    }
-
-    public final void fcos() {
-        emitByte(0xD9);
-        emitByte(0xFF);
-    }
-
-    public final void fptan() {
-        emitByte(0xD9);
-        emitByte(0xF2);
-    }
-
-    public final void fstp(int i) {
-        emitx87(0xDD, 0xD8, i);
-    }
-
-    @Override
-    public AMD64Address makeAddress(Register base, int displacement) {
-        return new AMD64Address(base, displacement);
-    }
-
-    @Override
-    public AMD64Address getPlaceholder() {
-        return Placeholder;
-    }
-
-    private void prefetchPrefix(AMD64Address src) {
-        prefix(src);
-        emitByte(0x0F);
-    }
-
-    public void prefetchnta(AMD64Address src) {
-        prefetchPrefix(src);
-        emitByte(0x18);
-        emitOperandHelper(0, src);
-    }
-
-    void prefetchr(AMD64Address src) {
-        assert supports(CPUFeature.AMD_3DNOW_PREFETCH);
-        prefetchPrefix(src);
-        emitByte(0x0D);
-        emitOperandHelper(0, src);
-    }
-
-    public void prefetcht0(AMD64Address src) {
-        assert supports(CPUFeature.SSE);
-        prefetchPrefix(src);
-        emitByte(0x18);
-        emitOperandHelper(1, src);
-    }
-
-    public void prefetcht1(AMD64Address src) {
-        assert supports(CPUFeature.SSE);
-        prefetchPrefix(src);
-        emitByte(0x18);
-        emitOperandHelper(2, src);
-    }
-
-    public void prefetcht2(AMD64Address src) {
-        assert supports(CPUFeature.SSE);
-        prefix(src);
-        emitByte(0x0f);
-        emitByte(0x18);
-        emitOperandHelper(3, src);
-    }
-
-    public void prefetchw(AMD64Address src) {
-        assert supports(CPUFeature.AMD_3DNOW_PREFETCH);
-        prefix(src);
-        emitByte(0x0f);
-        emitByte(0x0D);
-        emitOperandHelper(1, src);
-    }
-
-    /**
-     * Emits an instruction which is considered to be illegal. This is used when we deliberately
-     * want to crash the program (e.g., for debugging).
-     */
-    public void illegal() {
-        emitByte(0x0f);
-        emitByte(0x0b);
-    }
-}
--- a/graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64MacroAssembler.java	Wed Jun 03 17:12:05 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,345 +0,0 @@
-/*
- * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.asm.amd64;
-
-import com.oracle.jvmci.amd64.*;
-import com.oracle.jvmci.code.Register;
-import com.oracle.jvmci.code.CalleeSaveLayout;
-import com.oracle.jvmci.code.TargetDescription;
-import com.oracle.jvmci.code.RegisterConfig;
-import com.oracle.jvmci.meta.Kind;
-
-import static com.oracle.graal.asm.amd64.AMD64AsmOptions.*;
-
-import com.oracle.graal.asm.*;
-
-/**
- * This class implements commonly used X86 code patterns.
- */
-public class AMD64MacroAssembler extends AMD64Assembler {
-
-    public AMD64MacroAssembler(TargetDescription target, RegisterConfig registerConfig) {
-        super(target, registerConfig);
-    }
-
-    public final void decrementq(Register reg, int value) {
-        if (value == Integer.MIN_VALUE) {
-            subq(reg, value);
-            return;
-        }
-        if (value < 0) {
-            incrementq(reg, -value);
-            return;
-        }
-        if (value == 0) {
-            return;
-        }
-        if (value == 1 && UseIncDec) {
-            decq(reg);
-        } else {
-            subq(reg, value);
-        }
-    }
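-    /*
-     * The Integer.MIN_VALUE check above is needed because -Integer.MIN_VALUE overflows back to
-     * itself, so such a value cannot be routed through incrementq(reg, -value) and is subtracted
-     * directly instead. For example, decrementq(reg, 1) emits decq(reg) when UseIncDec is set and
-     * subq(reg, 1) otherwise.
-     */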
-
-    public final void decrementq(AMD64Address dst, int value) {
-        if (value == Integer.MIN_VALUE) {
-            subq(dst, value);
-            return;
-        }
-        if (value < 0) {
-            incrementq(dst, -value);
-            return;
-        }
-        if (value == 0) {
-            return;
-        }
-        if (value == 1 && UseIncDec) {
-            decq(dst);
-        } else {
-            subq(dst, value);
-        }
-    }
-
-    public void incrementq(Register reg, int value) {
-        if (value == Integer.MIN_VALUE) {
-            addq(reg, value);
-            return;
-        }
-        if (value < 0) {
-            decrementq(reg, -value);
-            return;
-        }
-        if (value == 0) {
-            return;
-        }
-        if (value == 1 && UseIncDec) {
-            incq(reg);
-        } else {
-            addq(reg, value);
-        }
-    }
-
-    public final void incrementq(AMD64Address dst, int value) {
-        if (value == Integer.MIN_VALUE) {
-            addq(dst, value);
-            return;
-        }
-        if (value < 0) {
-            decrementq(dst, -value);
-            return;
-        }
-        if (value == 0) {
-            return;
-        }
-        if (value == 1 && UseIncDec) {
-            incq(dst);
-        } else {
-            addq(dst, value);
-        }
-    }
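A note on the Integer.MIN_VALUE special case in the increment/decrement helpers above: negating Integer.MIN_VALUE overflows back to Integer.MIN_VALUE, so without the early-out the negative branch (incrementq calling decrementq with the negated value, and vice versa) would recurse forever. A minimal, self-contained sketch (plain Java) of the overflow:

    public class MinValueNegation {
        public static void main(String[] args) {
            int v = Integer.MIN_VALUE;
            // Two's-complement negation of MIN_VALUE wraps around to itself,
            // which is why the helpers handle it with a plain add/sub up front.
            System.out.println(-v == Integer.MIN_VALUE); // prints: true
        }
    }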
-
-    public final void movptr(Register dst, AMD64Address src) {
-        movq(dst, src);
-    }
-
-    public final void movptr(AMD64Address dst, Register src) {
-        movq(dst, src);
-    }
-
-    public final void movptr(AMD64Address dst, int src) {
-        movslq(dst, src);
-    }
-
-    public final void cmpptr(Register src1, Register src2) {
-        cmpq(src1, src2);
-    }
-
-    public final void cmpptr(Register src1, AMD64Address src2) {
-        cmpq(src1, src2);
-    }
-
-    public final void decrementl(Register reg, int value) {
-        if (value == Integer.MIN_VALUE) {
-            subl(reg, value);
-            return;
-        }
-        if (value < 0) {
-            incrementl(reg, -value);
-            return;
-        }
-        if (value == 0) {
-            return;
-        }
-        if (value == 1 && UseIncDec) {
-            decl(reg);
-        } else {
-            subl(reg, value);
-        }
-    }
-
-    public final void decrementl(AMD64Address dst, int value) {
-        if (value == Integer.MIN_VALUE) {
-            subl(dst, value);
-            return;
-        }
-        if (value < 0) {
-            incrementl(dst, -value);
-            return;
-        }
-        if (value == 0) {
-            return;
-        }
-        if (value == 1 && UseIncDec) {
-            decl(dst);
-        } else {
-            subl(dst, value);
-        }
-    }
-
-    public final void incrementl(Register reg, int value) {
-        if (value == Integer.MIN_VALUE) {
-            addl(reg, value);
-            return;
-        }
-        if (value < 0) {
-            decrementl(reg, -value);
-            return;
-        }
-        if (value == 0) {
-            return;
-        }
-        if (value == 1 && UseIncDec) {
-            incl(reg);
-        } else {
-            addl(reg, value);
-        }
-    }
-
-    public final void incrementl(AMD64Address dst, int value) {
-        if (value == Integer.MIN_VALUE) {
-            addl(dst, value);
-            return;
-        }
-        if (value < 0) {
-            decrementl(dst, -value);
-            return;
-        }
-        if (value == 0) {
-            return;
-        }
-        if (value == 1 && UseIncDec) {
-            incl(dst);
-        } else {
-            addl(dst, value);
-        }
-    }
-
-    public void movflt(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
-        if (UseXmmRegToRegMoveAll) {
-            movaps(dst, src);
-        } else {
-            movss(dst, src);
-        }
-    }
-
-    public void movflt(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        movss(dst, src);
-    }
-
-    public void movflt(AMD64Address dst, Register src) {
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        movss(dst, src);
-    }
-
-    public void movdbl(Register dst, Register src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
-        if (UseXmmRegToRegMoveAll) {
-            movapd(dst, src);
-        } else {
-            movsd(dst, src);
-        }
-    }
-
-    public void movdbl(Register dst, AMD64Address src) {
-        assert dst.getRegisterCategory().equals(AMD64.XMM);
-        if (UseXmmLoadAndClearUpper) {
-            movsd(dst, src);
-        } else {
-            movlpd(dst, src);
-        }
-    }
-
-    public void movdbl(AMD64Address dst, Register src) {
-        assert src.getRegisterCategory().equals(AMD64.XMM);
-        movsd(dst, src);
-    }
-
-    /**
-     * Non-atomic write of a 64-bit constant to memory. Do not use if the address might be a
-     * volatile field!
-     */
-    public final void movlong(AMD64Address dst, long src) {
-        if (NumUtil.isInt(src)) {
-            AMD64MIOp.MOV.emit(this, OperandSize.QWORD, dst, (int) src);
-        } else {
-            AMD64Address high = new AMD64Address(dst.getBase(), dst.getIndex(), dst.getScale(), dst.getDisplacement() + 4);
-            movl(dst, (int) (src & 0xFFFFFFFF));
-            movl(high, (int) (src >> 32));
-        }
-
-    }
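The fallback path of movlong writes the constant as two 32-bit halves: the low word at the original address and the high word at an address displaced by 4 (little-endian layout). A standalone sketch (plain Java, independent of the assembler API) of that split:

    public class LongSplit {
        public static void main(String[] args) {
            long src = 0x1122334455667788L;
            int lo = (int) (src & 0xFFFFFFFFL); // 0x55667788, stored at dst
            int hi = (int) (src >> 32);         // 0x11223344, stored at dst + 4
            long rebuilt = ((long) hi << 32) | (lo & 0xFFFFFFFFL);
            System.out.println(rebuilt == src); // prints: true
        }
    }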
-
-    public final void flog(Register dest, Register value, boolean base10) {
-        if (base10) {
-            fldlg2();
-        } else {
-            fldln2();
-        }
-        AMD64Address tmp = trigPrologue(value);
-        fyl2x();
-        trigEpilogue(dest, tmp);
-    }
-
-    public final void fsin(Register dest, Register value) {
-        AMD64Address tmp = trigPrologue(value);
-        fsin();
-        trigEpilogue(dest, tmp);
-    }
-
-    public final void fcos(Register dest, Register value) {
-        AMD64Address tmp = trigPrologue(value);
-        fcos();
-        trigEpilogue(dest, tmp);
-    }
-
-    public final void ftan(Register dest, Register value) {
-        AMD64Address tmp = trigPrologue(value);
-        fptan();
-        fstp(0); // fptan pushes 1.0 in addition to the actual result, so pop it
-        trigEpilogue(dest, tmp);
-    }
-
-    public final void fpop() {
-        ffree(0);
-        fincstp();
-    }
-
-    private AMD64Address trigPrologue(Register value) {
-        assert value.getRegisterCategory().equals(AMD64.XMM);
-        AMD64Address tmp = new AMD64Address(AMD64.rsp);
-        subq(AMD64.rsp, target.getSizeInBytes(Kind.Double));
-        movdbl(tmp, value);
-        fldd(tmp);
-        return tmp;
-    }
-
-    private void trigEpilogue(Register dest, AMD64Address tmp) {
-        assert dest.getRegisterCategory().equals(AMD64.XMM);
-        fstpd(tmp);
-        movdbl(dest, tmp);
-        addq(AMD64.rsp, target.getSizeInBytes(Kind.Double));
-    }
-
-    /**
-     * Emit code to save a given set of callee save registers in the {@linkplain CalleeSaveLayout
-     * CSA} within the frame.
-     *
-     * @param csl the description of the CSA
-     * @param frameToCSA offset from the frame pointer to the CSA
-     */
-    public final void save(CalleeSaveLayout csl, int frameToCSA) {
-        for (Register r : csl.registers) {
-            int offset = csl.offsetOf(r);
-            movq(new AMD64Address(frameRegister, frameToCSA + offset), r);
-        }
-    }
-
-    public final void restore(CalleeSaveLayout csl, int frameToCSA) {
-        for (Register r : csl.registers) {
-            int offset = csl.offsetOf(r);
-            movq(r, new AMD64Address(frameRegister, frameToCSA + offset));
-        }
-    }
-}
--- a/graal/com.oracle.graal.asm.sparc/src/com/oracle/graal/asm/sparc/SPARCAddress.java	Wed Jun 03 17:12:05 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.asm.sparc;
-
-import com.oracle.jvmci.code.AbstractAddress;
-import com.oracle.jvmci.code.Register;
-
-import static com.oracle.jvmci.sparc.SPARC.*;
-
-import com.oracle.jvmci.common.*;
-import com.oracle.jvmci.sparc.*;
-
-public class SPARCAddress extends AbstractAddress {
-
-    private final Register base;
-    private final Register index;
-    private final int displacement;
-
-    /**
-     * Creates a {@link SPARCAddress} with a given base register, no scaling and a given
-     * displacement.
-     *
-     * @param base the base register
-     * @param displacement the displacement
-     */
-    public SPARCAddress(Register base, int displacement) {
-        this.base = base;
-        this.index = Register.None;
-        this.displacement = displacement;
-    }
-
-    /**
-     * Creates a {@link SPARCAddress} with a given base register, no scaling and a given index.
-     *
-     * @param base the base register
-     * @param index the index register
-     */
-    public SPARCAddress(Register base, Register index) {
-        this.base = base;
-        this.index = index;
-        this.displacement = 0;
-    }
-
-    @Override
-    public String toString() {
-        StringBuilder s = new StringBuilder();
-        s.append("[");
-        String sep = "";
-        if (!getBase().equals(Register.None)) {
-            s.append(getBase());
-            sep = " + ";
-        }
-        if (!getIndex().equals(Register.None)) {
-            s.append(sep).append(getIndex());
-            sep = " + ";
-        } else {
-            if (getDisplacement() < 0) {
-                s.append(" - ").append(-getDisplacement());
-            } else if (getDisplacement() > 0) {
-                s.append(sep).append(getDisplacement());
-            }
-        }
-        s.append("]");
-        return s.toString();
-    }
-
-    /**
-     * @return Base register that defines the start of the address computation. If not present, it
-     *         is denoted by {@link Register#None}.
-     */
-    public Register getBase() {
-        return base;
-    }
-
-    /**
-     * @return Index register, the value of which is added to {@link #getBase}. If not present, it
-     *         is denoted by {@link Register#None}.
-     */
-    public Register getIndex() {
-        return index;
-    }
-
-    /**
-     * @return true if this address has an index register
-     */
-    public boolean hasIndex() {
-        return !getIndex().equals(Register.None);
-    }
-
-    /**
-     * This method adds the stack bias to the displacement if the base register is either
-     * {@link SPARC#sp} or {@link SPARC#fp}.
-     *
-     * @return the displacement, with the stack bias added when applicable.
-     */
-    public int getDisplacement() {
-        if (hasIndex()) {
-            throw JVMCIError.shouldNotReachHere("address has index register");
-        }
-        // TODO Should we also hide the register save area size here?
-        if (getBase().equals(sp) || getBase().equals(fp)) {
-            return displacement + STACK_BIAS;
-        }
-        return displacement;
-    }
-}
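The displacement rule above folds the SPARC V9 stack bias into addresses based on %sp or %fp. A rough, self-contained sketch of that rule, assuming the conventional V9 bias of 2047 (the real code takes the value from SPARC.STACK_BIAS):

    public class BiasedDisplacement {
        static final int STACK_BIAS = 2047; // assumed value, for illustration only

        static int effectiveDisplacement(boolean baseIsSpOrFp, int displacement) {
            // Mirrors getDisplacement(): the bias only applies to %sp/%fp based addresses.
            return baseIsSpOrFp ? displacement + STACK_BIAS : displacement;
        }

        public static void main(String[] args) {
            System.out.println(effectiveDisplacement(true, 0));   // 2047
            System.out.println(effectiveDisplacement(false, 64)); // 64
        }
    }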
--- a/graal/com.oracle.graal.asm.sparc/src/com/oracle/graal/asm/sparc/SPARCAssembler.java	Wed Jun 03 17:12:05 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1805 +0,0 @@
-/*
- * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.asm.sparc;
-
-import com.oracle.jvmci.code.TargetDescription;
-import com.oracle.jvmci.code.RegisterConfig;
-import com.oracle.jvmci.code.Register;
-import com.oracle.jvmci.meta.Kind;
-import com.oracle.jvmci.meta.JavaConstant;
-import com.oracle.jvmci.sparc.*;
-import com.oracle.jvmci.sparc.SPARC.*;
-
-import static com.oracle.graal.asm.sparc.SPARCAssembler.CC.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.ConditionFlag.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.Op.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.Op3s.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.Opfs.*;
-import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*;
-import static com.oracle.jvmci.sparc.SPARC.*;
-import static java.lang.String.*;
-
-import com.oracle.graal.asm.*;
-import com.oracle.graal.compiler.common.calc.*;
-import com.oracle.jvmci.common.*;
-
-/**
- * This class implements an assembler that can encode most SPARC instructions.
- */
-public abstract class SPARCAssembler extends Assembler {
-
-    /**
-     * Constructs an assembler for the SPARC architecture.
-     *
-     * @param registerConfig the register configuration used to bind {@link Register#Frame} and
-     *            {@link Register#CallerFrame} to physical registers. This value can be null if this
-     *            assembler instance will not be used to assemble instructions using these logical
-     *            registers.
-     */
-    public SPARCAssembler(TargetDescription target, RegisterConfig registerConfig) {
-        super(target);
-    }
-
-    public static final int CCR_ICC_SHIFT = 0;
-    public static final int CCR_XCC_SHIFT = 4;
-    public static final int CCR_C_SHIFT = 0;
-    public static final int CCR_V_SHIFT = 1;
-    public static final int CCR_Z_SHIFT = 2;
-    public static final int CCR_N_SHIFT = 3;
-
-    protected static final int OP_SHIFT = 30;
-    protected static final int CBCOND_SHIFT = 28;
-    protected static final int OP2_SHIFT = 22;
-    protected static final int A_SHIFT = 29;
-
-    protected static final int A_MASK = 0b0010_0000_0000_0000_0000_0000_0000_0000;
-    protected static final int OP_MASK = 0b1100_0000_0000_0000_0000_0000_0000_0000;
-    protected static final int CBCOND_MASK = 0b0001_0000_0000_0000_0000_0000_0000_0000;
-    // Used to distinguish CBcond and BPr instructions.
-    protected static final int OP2_MASK = 0b0000_0001_1100_0000_0000_0000_0000_0000;
-
-    protected static final int DISP22_SHIFT = 0;
-    protected static final int DISP22_MASK = 0b00000000001111111111111111111111;
-
-    protected static final int DISP19_SHIFT = 0;
-    protected static final int DISP19_MASK = 0b00000000000001111111111111111111;
-
-    protected static final int D16HI_SHIFT = 20;
-    protected static final int D16HI_MASK = 0b0000_0000_0011_0000_0000_0000_0000_0000;
-    protected static final int D16LO_SHIFT = 0;
-    protected static final int D16LO_MASK = 0b0000_0000_0000_0000_0011_1111_1111_1111;
-
-    protected static final int D10LO_MASK = 0b0000_0000_0000_0000_0001_1111_1110_0000;
-    protected static final int D10HI_MASK = 0b0000_0000_0001_1000_0000_0000_0000_0000;
-    protected static final int D10LO_SHIFT = 5;
-    protected static final int D10HI_SHIFT = 19;
-
-    public enum Ops {
-        // @formatter:off
-
-        BranchOp(0b00),
-        CallOp(0b01),
-        ArithOp(0b10),
-        LdstOp(0b11);
-
-        // @formatter:on
-
-        private final int value;
-
-        private Ops(int value) {
-            this.value = value;
-        }
-
-        public int getValue() {
-            return value;
-        }
-
-        public boolean appliesTo(int instructionWord) {
-            int opShift = 30;
-            return (instructionWord >>> opShift) == value;
-        }
-    }
-
-    public enum Op {
-        Op00(0b00),
-        Op01(0b01),
-        Op10(0b10),
-        Op11(0b11);
-        int op;
-
-        Op(int op) {
-            this.op = op;
-        }
-    }
-
-    public enum Op2s {
-        // @formatter:off
-
-        Illtrap(0b000),
-        Bpr    (0b011),
-        Fb     (0b110),
-        Fbp    (0b101),
-        Br     (0b010),
-        Bp     (0b001),
-        Cb     (0b111),
-        Sethi  (0b100);
-
-
-        // @formatter:on
-
-        private final int value;
-
-        private Op2s(int value) {
-            this.value = value;
-        }
-
-        public int getValue() {
-            return value;
-        }
-
-        public static Op2s byValue(int value) {
-            for (Op2s op : values()) {
-                if (op.getValue() == value) {
-                    return op;
-                }
-            }
-            return null;
-        }
-    }
-
-    public enum Op3s {
-        // @formatter:off
-
-        Add(0x00, "add", Op10),
-        And(0x01, "and", Op10),
-        Or(0x02, "or", Op10),
-        Xor(0x03, "xor", Op10),
-        Sub(0x04, "sub", Op10),
-        Andn(0x05, "andn", Op10),
-        Orn(0x06, "orn", Op10),
-        Xnor(0x07, "xnor", Op10),
-        Addc(0x08, "addc", Op10),
-        Mulx(0x09, "mulx", Op10),
-        Umul(0x0A, "umul", Op10),
-        Smul(0x0B, "smul", Op10),
-        Subc(0x0C, "subc", Op10),
-        Udivx(0x0D, "udivx", Op10),
-        Udiv(0x0E, "udiv", Op10),
-        Sdiv(0x0F, "sdiv", Op10),
-
-        Addcc(0x10, "addcc", Op10),
-        Andcc(0x11, "andcc", Op10),
-        Orcc(0x12, "orcc", Op10),
-        Xorcc(0x13, "xorcc", Op10),
-        Subcc(0x14, "subcc", Op10),
-        Andncc(0x15, "andncc", Op10),
-        Orncc(0x16, "orncc", Op10),
-        Xnorcc(0x17, "xnorcc", Op10),
-        Addccc(0x18, "addccc", Op10),
-
-        Umulcc(0x1A, "umulcc", Op10),
-        Smulcc(0x1B, "smulcc", Op10),
-        Subccc(0x1C, "subccc", Op10),
-        Udivcc(0x1E, "udivcc", Op10),
-        Sdivcc(0x1F, "sdivcc", Op10),
-
-        Taddcc(0x20, "taddcc", Op10),
-        Tsubcc(0x21, "tsubcc", Op10),
-        Taddcctv(0x22, "taddcctv", Op10),
-        Tsubcctv(0x23, "tsubcctv", Op10),
-        Mulscc(0x24, "mulscc", Op10),
-        Sll(0x25, "sll", Op10),
-        Sllx(0x25, "sllx", Op10),
-        Srl(0x26, "srl", Op10),
-        Srlx(0x26, "srlx", Op10),
-        Sra(0x27, "srax", Op10),
-        Srax(0x27, "srax", Op10),
-        Membar(0x28, "membar", Op10),
-
-        Flushw(0x2B, "flushw", Op10),
-        Movcc(0x2C, "movcc", Op10),
-        Sdivx(0x2D, "sdivx", Op10),
-        Popc(0x2E, "popc", Op10),
-        Movr(0x2F, "movr", Op10),
-
-        Fpop1(0b11_0100, "fpop1", Op10),
-        Fpop2(0b11_0101, "fpop2", Op10),
-        Impdep1(0b11_0110, "impdep1", Op10),
-        Impdep2(0b11_0111, "impdep2", Op10),
-        Jmpl(0x38, "jmpl", Op10),
-        Rett(0x39, "rett", Op10),
-        Trap(0x3a, "trap", Op10),
-        Flush(0x3b, "flush", Op10),
-        Save(0x3c, "save", Op10),
-        Restore(0x3d, "restore", Op10),
-        Retry(0x3e, "retry", Op10),
-
-
-        Casa(0b111100, "casa", Op11),
-        Casxa(0b111110, "casxa", Op11),
-        Prefetch(0b101101, "prefetch", Op11),
-        Prefetcha(0b111101, "prefetcha", Op11),
-
-        Lduw  (0b00_0000, "lduw", Op11),
-        Ldub  (0b00_0001, "ldub", Op11),
-        Lduh  (0b00_0010, "lduh", Op11),
-        Stw   (0b00_0100, "stw", Op11),
-        Stb   (0b00_0101, "stb", Op11),
-        Sth   (0b00_0110, "sth", Op11),
-        Ldsw  (0b00_1000, "ldsw", Op11),
-        Ldsb  (0b00_1001, "ldsb", Op11),
-        Ldsh  (0b00_1010, "ldsh", Op11),
-        Ldx   (0b00_1011, "ldx", Op11),
-        Stx   (0b00_1110, "stx", Op11),
-
-        Ldf   (0b10_0000, "ldf", Op11),
-        Ldfsr (0b10_0001, "ldfsr", Op11),
-        Ldaf  (0b10_0010, "ldaf", Op11),
-        Lddf  (0b10_0011, "lddf", Op11),
-        Stf   (0b10_0100, "stf", Op11),
-        Stfsr (0b10_0101, "stfsr", Op11),
-        Staf  (0b10_0110, "staf", Op11),
-        Stdf  (0b10_0111, "stdf", Op11),
-
-        Rd    (0b10_1000, "rd", Op10),
-        Wr    (0b11_0000, "wr", Op10),
-        Fcmp  (0b11_0101, "fcmp", Op10),
-
-        Ldxa  (0b01_1011, "ldxa", Op11),
-        Lduwa (0b01_0000, "lduwa", Op11),
-
-        Tcc(0b11_1010, "tcc", Op10);
-
-        // @formatter:on
-
-        private final int value;
-        private final String operator;
-        private final Op op;
-
-        private Op3s(int value, String name, Op op) {
-            this.value = value;
-            this.operator = name;
-            this.op = op;
-        }
-
-        public int getValue() {
-            return value;
-        }
-
-        public String getOperator() {
-            return operator;
-        }
-
-        public boolean appliesTo(int instructionWord) {
-            return ((instructionWord >>> 19) & 0b1_1111) == value;
-        }
-    }
-
-    public enum Opfs {
-        // @formatter:off
-
-        Fmovs(0b0_0000_0001, "fmovs"),
-        Fmovd(0b0_0000_0010, "fmovd"),
-        Fmovq(0b0_0000_0011, "fmovq"),
-        Fmovscc(0b00_0001, "fmovscc"),
-        Fmovdcc(0b00_0010, "fmovdcc"),
-        Fnegs(0x05, "fnegs"),
-        Fnegd(0x06, "fnegd"),
-        Fnegq(0x07, "fnegq"),
-        Fabss(0x09, "fabss"),
-        Fabsd(0x0A, "fabsd"),
-        Fabsq(0x0B, "fabsq"),
-
-        // start VIS1
-        Edge8cc(0x0, "edge8cc"),
-        Edge8n(0x1, "edge8n"),
-        Edge8lcc(0x2, "edge8lcc"),
-        Edge8ln(0x3, "edge8ln"),
-        Edge16cc(0x4, "edge16cc"),
-        Edge16n(0x5, "edge16n"),
-        Edge16lcc(0x6, "edge16lcc"),
-        Edge16ln(0x7, "edge16ln"),
-        Edge32cc(0x8, "edge32cc"),
-        Edge32n(0x9, "edge32n"),
-        Edge32lcc(0xA, "edge32lcc"),
-        Edge32ln(0xB, "edge32ln"),
-        Array8(0x10, "array8"),
-        Array16(0x12, "array16"),
-        Array32(0x14, "array32"),
-        AlignAddress(0x18, "alignaddress"),
-        AlignAddressLittle(0x1A, "alignaddress_little"),
-        Fpcmple16(0x20, "fpcmple16"),
-        Fpcmpne16(0x22, "fpcmpne16"),
-        Fpcmple32(0x24, "fpcmple32"),
-        Fpcmpne32(0x26, "fpcmpne32"),
-        Fpcmpgt16(0x28, "fpcmpgt16"),
-        Fpcmpeq16(0x2A, "fpcmpeq16"),
-        Fpcmpgt32(0x2C, "fpcmpgt32"),
-        Fpcmpeq32(0x2E, "fpcmpeq32"),
-        Fmul8x16(0x31, "fmul8x16"),
-        Fmul8x16au(0x33, "fmul8x16au"),
-        Fmul8x16al(0x35, "fmul8x16al"),
-        Fmul8sux16(0x36, "fmul8sux16"),
-        Fmul8ulx16(0x37, "fmul8ulx16"),
-        Fmuld8sux16(0x38, "fmuld8sux16"),
-        Fmuld8ulx16(0x39, "fmuld8ulx16"),
-        Fpack32(0x3A, "fpack32"),
-        Fpack16(0x3B, "fpack16"),
-        Fpackfix(0x3D, "fpackfix"),
-        Faligndatag(0x48, "faligndata"),
-        Fpmerge(0x4B, "fpmerge"),
-        Fpadd16(0x50, "fpadd16"),
-        Fpadd16s(0x51, "fpadd16s"),
-        Fpadd32(0x52, "fpadd32"),
-        Fpadd32s(0x53, "fpadd32s"),
-        Fpsub16(0x54, "fpadd16"),
-        Fpsub16s(0x55, "fpadd16s"),
-        Fpsub32(0x56, "fpadd32"),
-        Fpsub32s(0x57, "fpadd32s"),
-        Fzerod(0x60, "fzerod"),
-        Fzeros(0x61, "fzeros"),
-        Fnot2d(0x66, "fnot1d"),
-        Fnot2s(0x67, "fnot1s"),
-        Fnot1d(0x6A, "fnot1d"),
-        Fnot1s(0x6B, "fnot1s"),
-        Fsrc1d(0x74, "fsrc1d"),
-        Fsrc1s(0x75, "fsrc1s"),
-        Fsrc2d(0x78, "fsrc2d"),
-        Fsrc2s(0x79, "fsrc2s"),
-        Foned(0x7E, "foned"),
-        Fones(0x7F, "fones"),
-        Fandd(0b0_0111_0000, "fandd"),
-        Fands(0b0_0111_0001, "fands"),
-        Fxord(0b0_0110_1100, "fxord"),
-        Fxors(0b0_0110_1101, "fxors"),
-        // end VIS1
-
-        // start VIS2
-        Bmask(0x19, "bmask"),
-        Bshuffle(0x4c, "bshuffle"),
-        // end VIS2 only
-
-        // start VIS3
-        Addxc(0x11, "addxc"),
-        Addxccc(0x13, "addxccc"),
-        Cmask8(0x1B, "cmask8"),
-        Cmask16(0x1D, "cmask16"),
-        Cmask32(0x1F, "cmask32"),
-        Fmean16(0x40, "fmean16"),
-        Fnadds(0x51, "fnadds"),
-        Fnaddd(0x52, "fnaddd"),
-        Fnmuls(0x59, "fnmuls"),
-        Fnmuld(0x5A, "fnmuld"),
-        Fnsmuld(0x79, "fnsmuld"),
-        Fnhadds(0x71, "fnhadds"),
-        Fnhaddd(0x72, "fnhaddd"),
-        Movdtox(0x110, "movdtox"),
-        Movstouw(0x111, "movstouw"),
-        Movstosw(0x113, "movstosw"),
-        Movxtod(0x118, "movxtod"),
-        Movwtos(0b1_0001_1001, "movwtos"),
-        UMulxhi(0b0_0001_0110, "umulxhi"),
-        Lzcnt  (0b0_0001_0111, "lzcnt"),
-        // end VIS3
-
-        // start CAMELLIA
-        CammelliaFl(0x13C, "camellia_fl"),
-        CammelliaFli(0x13D, "camellia_fli"),
-        // end CAMELLIA
-
-        // start CRYPTO
-        Crc32c(0x147, "crc32c"),
-        // end CRYPTO
-
-        // start OSA 2011
-        Fpadd64(0x44, "fpadd64"),
-        Fpsub64(0x46, "fpsub64"),
-        Fpadds16(0x58, "fpadds16"),
-        Fpadds16s(0x59, "fpadds16"),
-        Fpadds32(0x5A, "fpadds32"),
-        Fpadds32s(0x5B, "fpadds32s"),
-        Fpsubs16(0x5C, "fpsubs16"),
-        Fpsubs16s(0x5D, "fpsubs16s"),
-        Fpsubs32(0x5E, "fpsubs32"),
-        Fpsubs32s(0x5F, "fpsubs32s"),
-        Fpcmpne8(0x122, "fpcmpne8"),
-        Fpcmpeq8(0x12C, "fpcmpeq8"),
-        // end OSA 2011
-
-        Fadds(0x41, "fadds"),
-        Faddd(0x42, "faddd"),
-        Faddq(0x43, "faddq"),
-        Fsubs(0x45, "fsubs"),
-        Fsubd(0x46, "fsubd"),
-        Fsubq(0x47, "fsubq"),
-        Fmuls(0x49, "fmuls"),
-        Fmuld(0x4A, "fmuld"),
-        Fdivs(0x4D, "fdivs"),
-        Fdivd(0x4E, "fdivd"),
-        Fdivq(0x4F, "fdivq"),
-
-        Fsqrts(0x29, "fsqrts"),
-        Fsqrtd(0x2A, "fsqrtd"),
-        Fsqrtq(0x2B, "fsqrtq"),
-
-        Fsmuld(0x69, "fsmuld"),
-        Fmulq(0x6B, "fmulq"),
-        Fdmuldq(0x6E, "fdmulq"),
-
-        Fstoi(0xD1, "fstoi"),
-        Fdtoi(0xD2, "fdtoi"),
-        Fstox(0x81, "fstox"),
-        Fdtox(0x82, "fdtox"),
-        Fxtos(0x84, "fxtos"),
-        Fxtod(0x88, "fxtod"),
-        Fxtoq(0x8C, "fxtoq"),
-        Fitos(0xC4, "fitos"),
-        Fdtos(0xC6, "fdtos"),
-        Fitod(0xC8, "fitod"),
-        Fstod(0xC9, "fstod"),
-        Fitoq(0xCC, "fitoq"),
-
-
-        Fcmps(0x51, "fcmps"),
-        Fcmpd(0x52, "fcmpd"),
-        Fcmpq(0x53, "fcmpq");
-
-        // @formatter:on
-
-        private final int value;
-        private final String operator;
-
-        private Opfs(int value, String op) {
-            this.value = value;
-            this.operator = op;
-        }
-
-        public int getValue() {
-            return value;
-        }
-
-        public String getOperator() {
-            return operator;
-        }
-    }
-
-    public enum Annul {
-        ANNUL(1),
-        NOT_ANNUL(0);
-        public final int flag;
-
-        Annul(int flag) {
-            this.flag = flag;
-        }
-    }
-
-    public enum BranchPredict {
-        PREDICT_TAKEN(1),
-        PREDICT_NOT_TAKEN(0);
-        public final int flag;
-
-        BranchPredict(int flag) {
-            this.flag = flag;
-        }
-    }
-
-    public enum MembarMask {
-        // @formatter:off
-
-        StoreStore(1 << 3, "storestore"),
-        LoadStore(1 << 2, "loadstore"),
-        StoreLoad(1 << 1, "storeload"),
-        LoadLoad(1 << 0, "loadload"),
-        Sync(1 << 6, "sync"),
-        MemIssue(1 << 5, "memissue"),
-        LookAside(1 << 4, "lookaside");
-
-        // @formatter:on
-
-        private final int value;
-        private final String operator;
-
-        private MembarMask(int value, String op) {
-            this.value = value;
-            this.operator = op;
-        }
-
-        public int getValue() {
-            return value | 0x2000;
-        }
-
-        public String getOperator() {
-            return operator;
-        }
-    }
-
-    /**
-     * Condition Codes to use for instruction.
-     */
-    public enum CC {
-        // @formatter:off
-        /**
-         * Condition is evaluated on the 32-bit (icc) integer condition codes.
-         */
-        Icc(0b00, "icc", false),
-        /**
-         * Condition is evaluated on the 64-bit (xcc) integer condition codes.
-         */
-        Xcc(0b10, "xcc", false),
-        Ptrcc(getHostWordKind() == Kind.Long ? Xcc.getValue() : Icc.getValue(), "ptrcc", false),
-        Fcc0(0b00, "fcc0", true),
-        Fcc1(0b01, "fcc1", true),
-        Fcc2(0b10, "fcc2", true),
-        Fcc3(0b11, "fcc3", true);
-
-        // @formatter:on
-
-        private final int value;
-        private final String operator;
-        private boolean isFloat;
-
-        private CC(int value, String op, boolean isFloat) {
-            this.value = value;
-            this.operator = op;
-            this.isFloat = isFloat;
-        }
-
-        public int getValue() {
-            return value;
-        }
-
-        public String getOperator() {
-            return operator;
-        }
-
-        public static CC forKind(Kind kind) {
-            boolean isInt = kind == Kind.Boolean || kind == Kind.Byte || kind == Kind.Char || kind == Kind.Short || kind == Kind.Int;
-            boolean isFloat = kind == Kind.Float || kind == Kind.Double;
-            boolean isLong = kind == Kind.Long || kind == Kind.Object;
-            assert isInt || isFloat || isLong;
-            if (isLong) {
-                return Xcc;
-            } else if (isInt) {
-                return Icc;
-            } else if (isFloat) {
-                return Fcc0;
-            } else {
-                throw JVMCIError.shouldNotReachHere();
-            }
-        }
-    }
-
-    public enum ConditionFlag {
-        // @formatter:off
-
-        // for FBfcc & FBPfcc instruction
-        F_Never(0, "f_never"),
-        F_NotEqual(1, "f_notEqual"),
-        F_LessOrGreater(2, "f_lessOrGreater"),
-        F_UnorderedOrLess(3, "f_unorderedOrLess"),
-        F_Less(4, "f_less"),
-        F_UnorderedOrGreater(5, "f_unorderedOrGreater"),
-        F_Greater(6, "f_greater"),
-        F_Unordered(7, "f_unordered"),
-        F_Always(8, "f_always"),
-        F_Equal(9, "f_equal"),
-        F_UnorderedOrEqual(10, "f_unorderedOrEqual"),
-        F_GreaterOrEqual(11, "f_greaterOrEqual"),
-        F_UnorderedGreaterOrEqual(12, "f_unorderedGreaterOrEqual"),
-        F_LessOrEqual(13, "f_lessOrEqual"),
-        F_UnorderedOrLessOrEqual(14, "f_unorderedOrLessOrEqual"),
-        F_Ordered(15, "f_ordered"),
-
-        // for integers
-        Never(0, "never"),
-        Equal(1, "equal", true),
-        Zero(1, "zero"),
-        LessEqual(2, "lessEqual", true),
-        Less(3, "less", true),
-        LessEqualUnsigned(4, "lessEqualUnsigned", true),
-        LessUnsigned(5, "lessUnsigned", true),
-        CarrySet(5, "carrySet"),
-        Negative(6, "negative", true),
-        OverflowSet(7, "overflowSet", true),
-        Always(8, "always"),
-        NotEqual(9, "notEqual", true),
-        NotZero(9, "notZero"),
-        Greater(10, "greater", true),
-        GreaterEqual(11, "greaterEqual", true),
-        GreaterUnsigned(12, "greaterUnsigned", true),
-        GreaterEqualUnsigned(13, "greaterEqualUnsigned", true),
-        CarryClear(13, "carryClear"),
-        Positive(14, "positive", true),
-        OverflowClear(15, "overflowClear", true);
-
-        // @formatter:on
-
-        private final int value;
-        private final String operator;
-        private boolean forCBcond = false;
-
-        private ConditionFlag(int value, String op) {
-            this(value, op, false);
-        }
-
-        private ConditionFlag(int value, String op, boolean cbcond) {
-            this.value = value;
-            this.operator = op;
-            this.forCBcond = cbcond;
-        }
-
-        public boolean isCBCond() {
-            return forCBcond;
-        }
-
-        public int getValue() {
-            return value;
-        }
-
-        public String getOperator() {
-            return operator;
-        }
-
-        public ConditionFlag negate() {
-            //@formatter:off
-            switch (this) {
-                case F_Never                  : return F_Always;
-                case F_Always                 : return F_Never;
-                case F_NotEqual               : return F_Equal;
-                case F_Equal                  : return F_NotEqual;
-                case F_LessOrGreater          : return F_UnorderedOrEqual;
-                case F_UnorderedOrEqual       : return F_LessOrGreater;
-                case F_Less                   : return F_UnorderedGreaterOrEqual;
-                case F_UnorderedGreaterOrEqual: return F_Less;
-                case F_LessOrEqual            : return F_UnorderedOrGreater;
-                case F_UnorderedOrGreater     : return F_LessOrEqual;
-                case F_Greater                : return F_UnorderedOrLessOrEqual;
-                case F_UnorderedOrLessOrEqual : return F_Greater;
-                case F_GreaterOrEqual         : return F_UnorderedOrLess;
-                case F_UnorderedOrLess        : return F_GreaterOrEqual;
-                case F_Unordered              : return F_Ordered;
-                case F_Ordered                : return F_Unordered;
-                case Never                    : return Always;
-                case Always                   : return Never;
-                case Equal                    : return NotEqual;
-                case NotEqual                 : return Equal;
-                case Zero                     : return NotZero;
-                case NotZero                  : return Zero;
-                case LessEqual                : return Greater;
-                case Greater                  : return LessEqual;
-                case Less                     : return GreaterEqual;
-                case GreaterEqual             : return Less;
-                case LessEqualUnsigned        : return GreaterUnsigned;
-                case GreaterUnsigned          : return LessEqualUnsigned;
-                case LessUnsigned             : return GreaterEqualUnsigned;
-                case GreaterEqualUnsigned     : return LessUnsigned;
-                case CarrySet                 : return CarryClear;
-                case CarryClear               : return CarrySet;
-                case Negative                 : return Positive;
-                case Positive                 : return Negative;
-                case OverflowSet              : return OverflowClear;
-                case OverflowClear            : return OverflowSet;
-                default:
-                    JVMCIError.unimplemented();
-            }
-            //@formatter:on
-            return null;
-        }
-
-        public ConditionFlag mirror() {
-            switch (this) {
-            //@formatter:off
-                case F_Less                   : return F_Greater;
-                case F_Greater                : return F_Less;
-                case F_LessOrEqual            : return F_GreaterOrEqual;
-                case F_UnorderedGreaterOrEqual: return F_UnorderedOrLessOrEqual;
-                case F_UnorderedOrGreater     : return F_UnorderedOrLess;
-                case F_UnorderedOrLessOrEqual : return F_UnorderedGreaterOrEqual;
-                case F_GreaterOrEqual         : return F_LessOrEqual;
-                case F_UnorderedOrLess        : return F_UnorderedOrGreater;
-                case LessEqual                : return GreaterEqual;
-                case Greater                  : return Less;
-                case Less                     : return Greater;
-                case GreaterEqual             : return LessEqual;
-                case LessEqualUnsigned        : return GreaterEqualUnsigned;
-                case GreaterUnsigned          : return LessUnsigned;
-                case LessUnsigned             : return GreaterUnsigned;
-                case GreaterEqualUnsigned     : return LessEqualUnsigned;
-                default:
-                    return this;
-                //@formatter:on
-            }
-        }
-
-        public static ConditionFlag fromCondtition(CC conditionFlagsRegister, Condition cond, boolean unorderedIsTrue) {
-            switch (conditionFlagsRegister) {
-                case Xcc:
-                case Icc:
-                    switch (cond) {
-                        case EQ:
-                            return Equal;
-                        case NE:
-                            return NotEqual;
-                        case BT:
-                            return LessUnsigned;
-                        case LT:
-                            return Less;
-                        case BE:
-                            return LessEqualUnsigned;
-                        case LE:
-                            return LessEqual;
-                        case AE:
-                            return GreaterEqualUnsigned;
-                        case GE:
-                            return GreaterEqual;
-                        case AT:
-                            return GreaterUnsigned;
-                        case GT:
-                            return Greater;
-                    }
-                    throw JVMCIError.shouldNotReachHere("Unimplemented for: " + cond);
-                case Fcc0:
-                case Fcc1:
-                case Fcc2:
-                case Fcc3:
-                    switch (cond) {
-                        case EQ:
-                            return unorderedIsTrue ? F_UnorderedOrEqual : F_Equal;
-                        case NE:
-                            return ConditionFlag.F_NotEqual;
-                        case LT:
-                            return unorderedIsTrue ? F_UnorderedOrLess : F_Less;
-                        case LE:
-                            return unorderedIsTrue ? F_UnorderedOrLessOrEqual : F_LessOrEqual;
-                        case GE:
-                            return unorderedIsTrue ? F_UnorderedGreaterOrEqual : F_GreaterOrEqual;
-                        case GT:
-                            return unorderedIsTrue ? F_UnorderedOrGreater : F_Greater;
-                    }
-                    throw JVMCIError.shouldNotReachHere("Unkown condition: " + cond);
-            }
-            throw JVMCIError.shouldNotReachHere("Unknown condition flag register " + conditionFlagsRegister);
-        }
-    }
-
-    public enum RCondition {
-        // @formatter:off
-
-        Rc_z(0b001, "rc_z"),
-        Rc_lez(0b010, "rc_lez"),
-        Rc_lz(0b011, "rc_lz"),
-        Rc_nz(0b101, "rc_nz"),
-        Rc_gz(0b110, "rc_gz"),
-        Rc_gez(0b111, "rc_gez"),
-        Rc_last(Rc_gez.getValue(), "rc_last");
-
-        // @formatter:on
-
-        private final int value;
-        private final String operator;
-
-        private RCondition(int value, String op) {
-            this.value = value;
-            this.operator = op;
-        }
-
-        public int getValue() {
-            return value;
-        }
-
-        public String getOperator() {
-            return operator;
-        }
-    }
-
-    /**
-     * Represents the <b>Address Space Identifier</b> defined in the SPARC architecture.
-     */
-    public enum Asi {
-        // @formatter:off
-
-        INVALID(-1),
-        ASI_PRIMARY(0x80),
-        ASI_PRIMARY_NOFAULT(0x82),
-        ASI_PRIMARY_LITTLE(0x88),
-        // Block initializing store
-        ASI_ST_BLKINIT_PRIMARY(0xE2),
-        // Most-Recently-Used (MRU) BIS variant
-        ASI_ST_BLKINIT_MRU_PRIMARY(0xF2);
-
-        // @formatter:on
-
-        private final int value;
-
-        private Asi(int value) {
-            this.value = value;
-        }
-
-        public int getValue() {
-            return value;
-        }
-
-        public boolean isValid() {
-            return value != INVALID.getValue();
-        }
-    }
-
-    public enum Fcn {
-        SeveralWritesAndPossiblyReads(2),
-        SeveralReadsWeak(0),
-        OneRead(1),
-        OneWrite(3),
-        Page(4),
-        NearestUnifiedCache(17),
-        SeveralReadsStrong(20),
-        OneReadStrong(21),
-        SeveralWritesAndPossiblyReadsStrong(22),
-        OneWriteStrong(23);
-
-        private final int value;
-
-        private Fcn(int value) {
-            this.value = value;
-        }
-
-        public int getValue() {
-            return value;
-        }
-    }
-
-    public boolean hasFeature(CPUFeature feature) {
-        return ((SPARC) this.target.arch).features.contains(feature);
-    }
-
-    public static final int simm(int x, int nbits) {
-        // assert_signed_range(x, nbits);
-        return x & ((1 << nbits) - 1);
-    }
-
-    public static final boolean isImm(int x, int nbits) {
-        // assert_signed_range(x, nbits);
-        return simm(x, nbits) == x;
-    }
-
-    /**
-     * Minimum value for signed immediate ranges.
-     */
-    public static long minSimm(long nbits) {
-        return -(1L << (nbits - 1));
-    }
-
-    /**
-     * Maximum value for signed immediate ranges.
-     */
-    public static long maxSimm(long nbits) {
-        return (1L << (nbits - 1)) - 1;
-    }
-
-    /**
-     * Test if imm is within signed immediate range for nbits.
-     */
-    public static boolean isSimm(long imm, int nbits) {
-        return minSimm(nbits) <= imm && imm <= maxSimm(nbits);
-    }
-
-    public static boolean isSimm10(long imm) {
-        return isSimm(imm, 10);
-    }
-
-    public static boolean isSimm11(long imm) {
-        return isSimm(imm, 11);
-    }
-
-    public static boolean isSimm11(JavaConstant constant) {
-        return constant.isNull() || isSimm11(constant.asLong());
-    }
-
-    public static boolean isSimm5(JavaConstant constant) {
-        return constant.isNull() || isSimm(constant.asLong(), 5);
-    }
-
-    public static boolean isSimm13(int imm) {
-        return isSimm(imm, 13);
-    }
-
-    public static boolean isSimm13(JavaConstant constant) {
-        return constant.isNull() || isSimm13(constant.asLong());
-    }
-
-    public static boolean isSimm13(long imm) {
-        return NumUtil.isInt(imm) && isSimm(imm, 13);
-    }
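The signed-immediate helpers above describe an n-bit two's-complement range; for the 13-bit immediates used by most SPARC arithmetic instructions that is -4096 .. 4095. A small, standalone arithmetic check (plain Java):

    public class Simm13Range {
        static long minSimm(long nbits) { return -(1L << (nbits - 1)); }
        static long maxSimm(long nbits) { return (1L << (nbits - 1)) - 1; }

        public static void main(String[] args) {
            System.out.println(minSimm(13) + " .. " + maxSimm(13)); // -4096 .. 4095
            System.out.println(4096 <= maxSimm(13));                // false: needs a register instead
        }
    }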
-
-    public static boolean isWordDisp30(long imm) {
-        return isSimm(imm, 30 + 2);
-    }
-
-    public static final int hi22(int x) {
-        return x >>> 10;
-    }
-
-    public static final int lo10(int x) {
-        return x & ((1 << 10) - 1);
-    }
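hi22 and lo10 implement the classic sethi/or idiom for materializing a 32-bit constant: sethi places the upper 22 bits (left-shifted by 10) and a subsequent or supplies the low 10 bits. A standalone arithmetic check (plain Java, not using the assembler):

    public class SethiSplit {
        static int hi22(int x) { return x >>> 10; }
        static int lo10(int x) { return x & ((1 << 10) - 1); }

        public static void main(String[] args) {
            int x = 0x12345678;
            // sethi writes hi22(x) << 10; the following or fills in lo10(x).
            int rebuilt = (hi22(x) << 10) | lo10(x);
            System.out.println(Integer.toHexString(rebuilt)); // prints: 12345678
        }
    }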
-
-    // @formatter:off
-    /**
-     * Instruction format for Fmt00 instructions. This abstraction is needed because it
-     * makes later patching easier.
-     * <pre>
-     * | 00  |    a   | op2 |               b                         |
-     * |31 30|29    25|24 22|21                                      0|
-     * </pre>
-     */
-    // @formatter:on
-    protected void fmt00(int a, int op2, int b) {
-        assert isImm(a, 5) && isImm(op2, 3) && isImm(b, 22) : String.format("a: 0x%x op2: 0x%x b: 0x%x", a, op2, b);
-        this.emitInt(a << 25 | op2 << 22 | b);
-    }
-
-    private void op3(Op3s op3, Opfs opf, Register rs1, Register rs2, Register rd) {
-        int b = opf.value << 5 | (rs2 == null ? 0 : rs2.encoding);
-        fmt(op3.op.op, rd.encoding, op3.value, rs1 == null ? 0 : rs1.encoding, b);
-    }
-
-    protected void op3(Op3s op3, Register rs1, Register rs2, Register rd) {
-        int b = rs2 == null ? 0 : rs2.encoding;
-        int xBit = getXBit(op3);
-        fmt(op3.op.op, rd.encoding, op3.value, rs1 == null ? 0 : rs1.encoding, b | xBit);
-    }
-
-    protected void op3(Op3s op3, Register rs1, int simm13, Register rd) {
-        assert isSimm13(simm13);
-        int i = 1 << 13;
-        int simm13WithX = simm13 | getXBit(op3);
-        fmt(op3.op.op, rd.encoding, op3.value, rs1.encoding, i | simm13WithX & ((1 << 13) - 1));
-    }
-
-    // @formatter:off
-    /**
-     * Branch on Integer Condition Codes.
-     * <pre>
-     * | 00  |annul| cond| 010 |               disp22                 |
-     * |31 30|29   |28 25|24 22|21                                   0|
-     * </pre>
-     */
-    // @formatter:on
-    public void bicc(ConditionFlag cond, Annul annul, Label l) {
-        bcc(Op2s.Br, cond, annul, l);
-    }
-
-    // @formatter:off
-    /**
-     * Branch on Floating-Point Condition Codes.
-     * <pre>
-     * | 00  |annul| cond| 110 |               disp22                 |
-     * |31 30|29   |28 25|24 22|21                                   0|
-     * </pre>
-     */
-    // @formatter:on
-    public void fbcc(ConditionFlag cond, Annul annul, Label l) {
-        bcc(Op2s.Fb, cond, annul, l);
-    }
-
-    // @formatter:off
-    /**
-     * Branch on (Integer|Floating-Point) Condition Codes.
-     * <pre>
-     * | 00  |annul| cond| op2 |               disp22                 |
-     * |31 30|29   |28 25|24 22|21                                   0|
-     * </pre>
-     */
-    // @formatter:on
-    private void bcc(Op2s op2, ConditionFlag cond, Annul annul, Label l) {
-        int pos = !l.isBound() ? patchUnbound(l) : (l.position() - position()) / 4;
-        final int disp = 22;
-        assert isSimm(pos, disp);
-        pos &= (1 << disp) - 1;
-        int a = (annul.flag << 4) | cond.getValue();
-        fmt00(a, op2.getValue(), pos);
-    }
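bcc above encodes the branch target as a signed word displacement, (target position - current position) / 4, truncated to 22 bits, so a single Bicc/FBfcc can reach roughly +/-8 MiB of code. A rough arithmetic sketch of that reach (plain Java):

    public class Disp22Reach {
        public static void main(String[] args) {
            long words = 1L << 21;     // half of the 22-bit signed displacement range
            long bytes = words * 4;    // SPARC instructions are 4 bytes each
            System.out.println(bytes); // 8388608 bytes, i.e. 8 MiB in each direction
        }
    }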
-
-    // @formatter:off
-    /**
-     * Branch on Integer Condition Codes with Prediction.
-     * <pre>
-     * | 00  |an|cond | 001 |cc1 2|p |           disp19               |
-     * |31 30|29|28 25|24 22|21 20|19|                               0|
-     * </pre>
-     */
-    // @formatter:on
-    public void bpcc(ConditionFlag cond, Annul annul, Label l, CC cc, BranchPredict predictTaken) {
-        bpcc(Op2s.Bp, cond, annul, l, cc, predictTaken);
-    }
-
-    // @formatter:off
-    /**
-     * Branch on Floating-Point Condition Codes with Prediction.
-     * <pre>
-     * | 00  |an|cond | 101 |cc1 2|p |           disp19               |
-     * |31 30|29|28 25|24 22|21 20|19|                               0|
-     * </pre>
-     */
-    // @formatter:on
-    public void fbpcc(ConditionFlag cond, Annul annul, Label l, CC cc, BranchPredict predictTaken) {
-        bpcc(Op2s.Fbp, cond, annul, l, cc, predictTaken);
-    }
-
-    // @formatter:off
-    /**
-     * Used for fbpcc (Float) and bpcc (Integer).
-     * <pre>
-     * | 00  |an|cond | op2 |cc1 2|p |           disp19               |
-     * |31 30|29|28 25|24 22|21 20|19|                               0|
-     * </pre>
-     */
-    // @formatter:on
-    private void bpcc(Op2s op2, ConditionFlag cond, Annul annul, Label l, CC cc, BranchPredict predictTaken) {
-        int pos = !l.isBound() ? patchUnbound(l) : (l.position() - position()) / 4;
-        final int disp = 19;
-        assert isSimm(pos, disp);
-        pos &= (1 << disp) - 1;
-        int a = (annul.flag << 4) | cond.getValue();
-        int b = (cc.getValue() << 20) | ((predictTaken.flag) << 19) | pos;
-        fmt00(a, op2.getValue(), b);
-    }
-
-    // @formatter:off
-    /**
-     * Branch on Integer Register with Prediction.
-     * <pre>
-     * | 00  |an| 0|rcond | 011 |d16hi|p | rs1 |    d16lo             |
-     * |31 30|29|28|27 25 |24 22|21 20|19|18 14|                     0|
-     * </pre>
-     */
-    // @formatter:on
-    public void bpr(RCondition cond, Annul annul, Label l, BranchPredict predictTaken, Register rs1) {
-        int pos = !l.isBound() ? patchUnbound(l) : (l.position() - position()) / 4;
-        final int disp = 16;
-        assert isSimm(pos, disp);
-        pos &= (1 << disp) - 1;
-        int a = (annul.flag << 4) | cond.getValue();
-        int d16hi = (pos >> 13) << 13;
-        int d16lo = d16hi ^ pos;
-        int b = (d16hi << 20) | (predictTaken.flag << 19) | (rs1.encoding() << 14) | d16lo;
-        fmt00(a, Op2s.Bpr.getValue(), b);
-    }
-
-    private int patchUnbound(Label label) {
-        label.addPatchAt(position());
-        return 0;
-    }
-
-    public void cbcondw(ConditionFlag cf, Register rs1, Register rs2, Label lab) {
-        cbcond(0, 0, cf, rs1, rs2.encoding, lab);
-    }
-
-    public void cbcondw(ConditionFlag cf, Register rs1, int rs2, Label lab) {
-        assert isSimm(rs2, 5);
-        cbcond(0, 1, cf, rs1, rs2 & ((1 << 5) - 1), lab);
-    }
-
-    public void cbcondx(ConditionFlag cf, Register rs1, Register rs2, Label lab) {
-        cbcond(1, 0, cf, rs1, rs2.encoding, lab);
-    }
-
-    public void cbcondx(ConditionFlag cf, Register rs1, int rs2, Label lab) {
-        assert isSimm(rs2, 5);
-        cbcond(1, 1, cf, rs1, rs2 & ((1 << 5) - 1), lab);
-    }
-
-    private void cbcond(int cc2, int i, ConditionFlag cf, Register rs1, int rs2, Label l) {
-        int disp10 = !l.isBound() ? patchUnbound(l) : (l.position() - position()) / 4;
-        assert isSimm(disp10, 10) && isImm(rs2, 5);
-        disp10 &= (1 << 10) - 1;
-        final int cLo = cf.value & 0b111;
-        final int cHi = cf.value >> 3;
-        final int d10Lo = disp10 & ((1 << 8) - 1);
-        final int d10Hi = disp10 >> 8;
-        int a = cHi << 4 | 0b1000 | cLo;
-        int b = cc2 << 21 | d10Hi << D10HI_SHIFT | rs1.encoding << 14 | i << 13 | d10Lo << D10LO_SHIFT | rs2;
-        fmt00(a, Op2s.Bpr.value, b);
-    }
-
-    // @formatter:off
-    /**
-     * NOP.
-     * <pre>
-     * | 00  |00000| 100 |                0                    |
-     * |31 30|29 25|24 22|21                                  0|
-     * </pre>
-     */
-    // @formatter:on
-    public void nop() {
-        emitInt(1 << 24);
-    }
-
-    public void sethi(int imm22, Register dst) {
-        fmt00(dst.encoding, Op2s.Sethi.value, imm22);
-    }
-
-    // @formatter:off
-    /**
-     * Instruction format for calls.
-     * <pre>
-     * | 01  |                      disp30                             |
-     * |31 30|29                                                      0|
-     * </pre>
-     */
-    // @formatter:on
-    public void call(int disp30) {
-        assert isImm(disp30, 30);
-        int instr = 1 << 30;
-        instr |= disp30;
-        emitInt(instr);
-    }
-
-    public void add(Register rs1, Register rs2, Register rd) {
-        op3(Add, rs1, rs2, rd);
-    }
-
-    public void add(Register rs1, int simm13, Register rd) {
-        op3(Add, rs1, simm13, rd);
-    }
-
-    public void addc(Register rs1, Register rs2, Register rd) {
-        op3(Addc, rs1, rs2, rd);
-    }
-
-    public void addc(Register rs1, int simm13, Register rd) {
-        op3(Addc, rs1, simm13, rd);
-    }
-
-    public void addcc(Register rs1, Register rs2, Register rd) {
-        op3(Addcc, rs1, rs2, rd);
-    }
-
-    public void addcc(Register rs1, int simm13, Register rd) {
-        op3(Addcc, rs1, simm13, rd);
-    }
-
-    public void and(Register rs1, Register rs2, Register rd) {
-        op3(And, rs1, rs2, rd);
-    }
-
-    public void and(Register rs1, int simm13, Register rd) {
-        op3(And, rs1, simm13, rd);
-    }
-
-    public void andcc(Register rs1, Register rs2, Register rd) {
-        op3(Andcc, rs1, rs2, rd);
-    }
-
-    public void andcc(Register rs1, int simm13, Register rd) {
-        op3(Andcc, rs1, simm13, rd);
-    }
-
-    public void andn(Register rs1, Register rs2, Register rd) {
-        op3(Andn, rs1, rs2, rd);
-    }
-
-    public void andn(Register rs1, int simm13, Register rd) {
-        op3(Andn, rs1, simm13, rd);
-    }
-
-    public void andncc(Register rs1, Register rs2, Register rd) {
-        op3(Andncc, rs1, rs2, rd);
-    }
-
-    public void andncc(Register rs1, int simm13, Register rd) {
-        op3(Andncc, rs1, simm13, rd);
-    }
-
-    public void movwtos(Register rs2, Register rd) {
-        assert isSingleFloatRegister(rd) && isCPURegister(rs2) : String.format("%s %s", rs2, rd);
-        op3(Impdep1, Movwtos, null, rs2, rd);
-    }
-
-    public void umulxhi(Register rs1, Register rs2, Register rd) {
-        op3(Impdep1, UMulxhi, rs1, rs2, rd);
-    }
-
-    public void fdtos(Register rs2, Register rd) {
-        assert isSingleFloatRegister(rd) && isDoubleFloatRegister(rs2) : String.format("%s %s", rs2, rd);
-        op3(Fpop1, Fdtos, null, rs2, rd);
-    }
-
-    public void movstouw(Register rs2, Register rd) {
-        assert isSingleFloatRegister(rs2) && isCPURegister(rd) : String.format("%s %s", rs2, rd);
-        op3(Impdep1, Movstouw, null, rs2, rd);
-    }
-
-    public void movstosw(Register rs2, Register rd) {
-        assert isSingleFloatRegister(rs2) && isCPURegister(rd) : String.format("%s %s", rs2, rd);
-        op3(Impdep1, Movstosw, null, rs2, rd);
-    }
-
-    public void movdtox(Register rs2, Register rd) {
-        assert isDoubleFloatRegister(rs2) && isCPURegister(rd) : String.format("%s %s", rs2, rd);
-        op3(Impdep1, Movdtox, null, rs2, rd);
-    }
-
-    public void movxtod(Register rs2, Register rd) {
-        assert isCPURegister(rs2) && isDoubleFloatRegister(rd) : String.format("%s %s", rs2, rd);
-        op3(Impdep1, Movxtod, null, rs2, rd);
-    }
-
-    public void fadds(Register rs1, Register rs2, Register rd) {
-        op3(Fpop1, Fadds, rs1, rs2, rd);
-    }
-
-    public void faddd(Register rs1, Register rs2, Register rd) {
-        op3(Fpop1, Faddd, rs1, rs2, rd);
-    }
-
-    public void faddq(Register rs1, Register rs2, Register rd) {
-        op3(Fpop1, Faddq, rs1, rs2, rd);
-    }
-
-    public void fdivs(Register rs1, Register rs2, Register rd) {
-        op3(Fpop1, Fdivs, rs1, rs2, rd);
-    }
-
-    public void fdivd(Register rs1, Register rs2, Register rd) {
-        op3(Fpop1, Fdivd, rs1, rs2, rd);
-    }
-
-    public void fmovs(Register rs2, Register rd) {
-        op3(Fpop1, Fmovs, null, rs2, rd);
-    }
-
-    public void fmovd(Register rs2, Register rd) {
-        op3(Fpop1, Fmovd, null, rs2, rd);
-    }
-
-    public void fmuls(Register rs1, Register rs2, Register rd) {
-        op3(Fpop1, Fmuls, rs1, rs2, rd);
-    }
-
-    public void fsmuld(Register rs1, Register rs2, Register rd) {
-        op3(Fpop1, Fsmuld, rs1, rs2, rd);
-    }
-
-    public void fmuld(Register rs1, Register rs2, Register rd) {
-        op3(Fpop1, Fmuld, rs1, rs2, rd);
-    }
-
-    public void fnegs(Register rs2, Register rd) {
-        op3(Fpop1, Fnegs, null, rs2, rd);
-    }
-
-    public void fnegd(Register rs2, Register rd) {
-        op3(Fpop1, Fnegd, null, rs2, rd);
-    }
-
-    /**
-     * Helper method to determine if the instruction needs the X bit set.
-     */
-    private static int getXBit(Op3s op3) {
-        switch (op3) {
-            case Sllx:
-            case Srax:
-            case Srlx:
-                return 1 << 12;
-            default:
-                return 0;
-        }
-    }
-
-    public void fstoi(Register rs2, Register rd) {
-        op3(Fpop1, Fstoi, null, rs2, rd);
-    }
-
-    public void fstox(Register rs2, Register rd) {
-        op3(Fpop1, Fstox, null, rs2, rd);
-    }
-
-    public void fdtox(Register rs2, Register rd) {
-        op3(Fpop1, Fdtox, null, rs2, rd);
-    }
-
-    public void fstod(Register rs2, Register rd) {
-        op3(Fpop1, Fstod, null, rs2, rd);
-    }
-
-    public void fdtoi(Register rs2, Register rd) {
-        op3(Fpop1, Fdtoi, null, rs2, rd);
-    }
-
-    public void fitos(Register rs2, Register rd) {
-        op3(Fpop1, Fitos, null, rs2, rd);
-    }
-
-    public void fitod(Register rs2, Register rd) {
-        op3(Fpop1, Fitod, null, rs2, rd);
-    }
-
-    public void fxtos(Register rs2, Register rd) {
-        op3(Fpop1, Fxtos, null, rs2, rd);
-    }
-
-    public void fxtod(Register rs2, Register rd) {
-        op3(Fpop1, Fxtod, null, rs2, rd);
-    }
-
-    public void fzeros(Register rd) {
-        op3(Impdep1, Fzeros, null, null, rd);
-    }
-
-    public void fzerod(Register rd) {
-        op3(Impdep1, Fzerod, null, null, rd);
-    }
-
-    public void flushw() {
-        op3(Flushw, g0, g0, g0);
-    }
-
-    public void fsqrtd(Register rs2, Register rd) {
-        op3(Fpop1, Fsqrtd, null, rs2, rd);
-    }
-
-    public void fsqrts(Register rs2, Register rd) {
-        op3(Fpop1, Fsqrts, null, rs2, rd);
-    }
-
-    public void fabss(Register rs2, Register rd) {
-        op3(Fpop1, Fabss, null, rs2, rd);
-    }
-
-    public void fabsd(Register rs2, Register rd) {
-        op3(Fpop1, Fabsd, null, rs2, rd);
-    }
-
-    public void fsubs(Register rs1, Register rs2, Register rd) {
-        op3(Fpop1, Fsubs, rs1, rs2, rd);
-    }
-
-    public void fsubd(Register rs1, Register rs2, Register rd) {
-        op3(Fpop1, Fsubd, rs1, rs2, rd);
-    }
-
-    // @formatter:off
-    /**
-     * Instruction format for fcmp.
-     * <pre>
-     * | 10  | --- |cc1|cc0|desc |   rs1   |   opf  | rs2 |
-     * |31 30|29 27|26 |25 |24 19|18     14|13     5|4   0|
-     * </pre>
-     */
-    // @formatter:on
-    public void fcmp(CC cc, Opfs opf, Register rs1, Register rs2) {
-        int a = cc.value;
-        int b = opf.value << 5 | rs2.encoding;
-        fmt10(a, Fcmp.value, rs1.encoding, b);
-    }
-
-    // @formatter:off
-    /**
-     * Instruction format for most arithmetic instructions.
-     * <pre>
-     * |  10 | rd  | op3 | rs1 |   b   |
-     * |31 30|29 25|24 19|18 14|13    0|
-     * </pre>
-     */
-    // @formatter:on
-    protected void fmt10(int rd, int op3, int rs1, int b) {
-        fmt(0b10, rd, op3, rs1, b);
-    }
-
-    // @formatter:off
-    /**
-     * Instruction format for most arithmetic instructions.
-     * <pre>
-     * |  op | rd  | op3 | rs1 |   b   |
-     * |31 30|29 25|24 19|18 14|13    0|
-     * </pre>
-     */
-    // @formatter:on
-    protected void fmt(int op, int rd, int op3, int rs1, int b) {
-        assert isImm(rd, 5) && isImm(op3, 6) && isImm(b, 14) : String.format("rd: 0x%x op3: 0x%x b: 0x%x", rd, op3, b);
-        int instr = op << 30 | rd << 25 | op3 << 19 | rs1 << 14 | b;
-        emitInt(instr);
-    }
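For orientation, a hedged example of the packing fmt performs; the field values below are arbitrary and only show where each field lands in the 32-bit word:

    // Arbitrary example values, not a real instruction.
    int op = 0b10, rd = 1, op3 = 0b000001, rs1 = 2, b = 3;
    int word = op << 30 | rd << 25 | op3 << 19 | rs1 << 14 | b;
    assert (word >>> 30) == op;              // bits 31..30
    assert ((word >>> 25) & 0x1f) == rd;     // bits 29..25
    assert ((word >>> 19) & 0x3f) == op3;    // bits 24..19
    assert ((word >>> 14) & 0x1f) == rs1;    // bits 18..14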
-
-    public void illtrap(int const22) {
-        fmt00(0, Op2s.Illtrap.value, const22);
-    }
-
-    public void jmpl(Register rs1, Register rs2, Register rd) {
-        op3(Jmpl, rs1, rs2, rd);
-    }
-
-    public void jmpl(Register rs1, int simm13, Register rd) {
-        op3(Jmpl, rs1, simm13, rd);
-    }
-
-    public void fmovdcc(ConditionFlag cond, CC cc, Register rs2, Register rd) {
-        fmovcc(cond, cc, rs2, rd, Fmovdcc.value);
-    }
-
-    public void fmovscc(ConditionFlag cond, CC cc, Register rs2, Register rd) {
-        fmovcc(cond, cc, rs2, rd, Fmovscc.value);
-    }
-
-    private void fmovcc(ConditionFlag cond, CC cc, Register rs2, Register rd, int opfLow) {
-        int opfCC = cc.value;
-        int a = opfCC << 11 | opfLow << 5 | rs2.encoding;
-        fmt10(rd.encoding, Fpop2.value, cond.value, a);
-    }
-
-    public void movcc(ConditionFlag conditionFlag, CC cc, Register rs2, Register rd) {
-        movcc(conditionFlag, cc, 0, rs2.encoding, rd);
-    }
-
-    public void movcc(ConditionFlag conditionFlag, CC cc, int simm11, Register rd) {
-        assert isSimm11(simm11);
-        movcc(conditionFlag, cc, 1, simm11 & ((1 << 11) - 1), rd);
-    }
-
-    private void movcc(ConditionFlag conditionFlag, CC cc, int i, int imm, Register rd) {
-        int cc01 = 0b11 & cc.value;
-        int cc2 = cc.isFloat ? 0 : 1;
-        int a = cc2 << 4 | conditionFlag.value;
-        int b = cc01 << 11 | i << 13 | imm;
-        fmt10(rd.encoding, Movcc.value, a, b);
-    }
-
-    public void mulx(Register rs1, Register rs2, Register rd) {
-        op3(Mulx, rs1, rs2, rd);
-    }
-
-    public void mulx(Register rs1, int simm13, Register rd) {
-        op3(Mulx, rs1, simm13, rd);
-    }
-
-    public void or(Register rs1, Register rs2, Register rd) {
-        assert isCPURegister(rs1, rs2, rd) : String.format("%s %s %s", rs1, rs2, rd);
-        op3(Or, rs1, rs2, rd);
-    }
-
-    public void or(Register rs1, int simm13, Register rd) {
-        assert isCPURegister(rs1, rd) : String.format("%s %s", rs1, rd);
-        op3(Or, rs1, simm13, rd);
-    }
-
-    public void popc(Register rs2, Register rd) {
-        op3(Popc, g0, rs2, rd);
-    }
-
-    public void popc(int simm13, Register rd) {
-        op3(Popc, g0, simm13, rd);
-    }
-
-    public void prefetch(SPARCAddress addr, Fcn fcn) {
-        Register rs1 = addr.getBase();
-        if (addr.getIndex().equals(Register.None)) {
-            int dis = addr.getDisplacement();
-            assert isSimm13(dis);
-            fmt(Prefetch.op.op, fcn.value, Prefetch.value, rs1.encoding, 1 << 13 | dis & ((1 << 13) - 1));
-        } else {
-            Register rs2 = addr.getIndex();
-            fmt(Prefetch.op.op, fcn.value, Prefetch.value, rs1.encoding, rs2.encoding);
-        }
-    }
-
-    // A.44 Read State Register
-
-    public void rdpc(Register rd) {
-        op3(Rd, r5, g0, rd);
-    }
-
-    public void restore(Register rs1, Register rs2, Register rd) {
-        op3(Restore, rs1, rs2, rd);
-    }
-
-    public static final int PC_RETURN_OFFSET = 8;
-
-    public void save(Register rs1, Register rs2, Register rd) {
-        op3(Save, rs1, rs2, rd);
-    }
-
-    public void save(Register rs1, int simm13, Register rd) {
-        op3(Save, rs1, simm13, rd);
-    }
-
-    public void sdivx(Register rs1, Register rs2, Register rd) {
-        op3(Sdivx, rs1, rs2, rd);
-    }
-
-    public void sdivx(Register rs1, int simm13, Register rd) {
-        op3(Sdivx, rs1, simm13, rd);
-    }
-
-    public void udivx(Register rs1, Register rs2, Register rd) {
-        op3(Udivx, rs1, rs2, rd);
-    }
-
-    public void udivx(Register rs1, int simm13, Register rd) {
-        op3(Udivx, rs1, simm13, rd);
-    }
-
-    public void sll(Register rs1, Register rs2, Register rd) {
-        op3(Sll, rs1, rs2, rd);
-    }
-
-    public void sll(Register rs1, int shcnt32, Register rd) {
-        assert isImm(shcnt32, 5);
-        op3(Sll, rs1, shcnt32, rd);
-    }
-
-    public void sllx(Register rs1, Register rs2, Register rd) {
-        op3(Sllx, rs1, rs2, rd);
-    }
-
-    public void sllx(Register rs1, int shcnt64, Register rd) {
-        assert isImm(shcnt64, 6);
-        op3(Sllx, rs1, shcnt64, rd);
-    }
-
-    public void sra(Register rs1, Register rs2, Register rd) {
-        op3(Sra, rs1, rs2, rd);
-    }
-
-    public void sra(Register rs1, int simm13, Register rd) {
-        op3(Sra, rs1, simm13, rd);
-    }
-
-    public void srax(Register rs1, Register rs2, Register rd) {
-        op3(Srax, rs1, rs2, rd);
-    }
-
-    public void srax(Register rs1, int shcnt64, Register rd) {
-        assert isImm(shcnt64, 6);
-        op3(Srax, rs1, shcnt64, rd);
-    }
-
-    public void srl(Register rs1, Register rs2, Register rd) {
-        op3(Srl, rs1, rs2, rd);
-    }
-
-    public void srl(Register rs1, int simm13, Register rd) {
-        op3(Srl, rs1, simm13, rd);
-    }
-
-    public void srlx(Register rs1, Register rs2, Register rd) {
-        op3(Srlx, rs1, rs2, rd);
-    }
-
-    public void srlx(Register rs1, int shcnt64, Register rd) {
-        assert isImm(shcnt64, 6);
-        op3(Srlx, rs1, shcnt64, rd);
-    }
-
-    public void fandd(Register rs1, Register rs2, Register rd) {
-        op3(Impdep1, Fandd, rs1, rs2, rd);
-    }
-
-    public void sub(Register rs1, Register rs2, Register rd) {
-        op3(Sub, rs1, rs2, rd);
-    }
-
-    public void sub(Register rs1, int simm13, Register rd) {
-        op3(Sub, rs1, simm13, rd);
-    }
-
-    public void subcc(Register rs1, Register rs2, Register rd) {
-        op3(Subcc, rs1, rs2, rd);
-    }
-
-    public void subcc(Register rs1, int simm13, Register rd) {
-        op3(Subcc, rs1, simm13, rd);
-    }
-
-    public void ta(int trap) {
-        tcc(Icc, Always, trap);
-    }
-
-    public void tcc(CC cc, ConditionFlag flag, int trap) {
-        assert isImm(trap, 8);
-        int b = cc.value << 11;
-        b |= 1 << 13;
-        b |= trap;
-        fmt10(flag.value, Op3s.Tcc.getValue(), 0, b);
-    }
-
-    public void wrccr(Register rs1, Register rs2) {
-        op3(Wr, rs1, rs2, r2);
-    }
-
-    public void wrccr(Register rs1, int simm13) {
-        op3(Wr, rs1, simm13, r2);
-    }
-
-    public void xor(Register rs1, Register rs2, Register rd) {
-        op3(Xor, rs1, rs2, rd);
-    }
-
-    public void xor(Register rs1, int simm13, Register rd) {
-        op3(Xor, rs1, simm13, rd);
-    }
-
-    public void xorcc(Register rs1, Register rs2, Register rd) {
-        op3(Xorcc, rs1, rs2, rd);
-    }
-
-    public void xorcc(Register rs1, int simm13, Register rd) {
-        op3(Xorcc, rs1, simm13, rd);
-    }
-
-    public void xnor(Register rs1, Register rs2, Register rd) {
-        op3(Xnor, rs1, rs2, rd);
-    }
-
-    public void xnor(Register rs1, int simm13, Register rd) {
-        op3(Xnor, rs1, simm13, rd);
-    }
-
-    /*
-     * Load/Store
-     */
-    protected void ld(Op3s op3, SPARCAddress addr, Register rd, Asi asi) {
-        Register rs1 = addr.getBase();
-        if (!addr.getIndex().equals(Register.None)) {
-            Register rs2 = addr.getIndex();
-            if (asi != null) {
-                int b = rs2.encoding;
-                b |= asi.value << 5;
-                fmt(op3.op.op, rd.encoding, op3.value, rs1.encoding, b);
-            } else {
-                op3(op3, rs1, rs2, rd);
-            }
-        } else {
-            int imm = addr.getDisplacement();
-            op3(op3, rs1, imm, rd);
-        }
-    }
-
-    protected void ld(Op3s op3, SPARCAddress addr, Register rd) {
-        ld(op3, addr, rd, null);
-    }
-
-    public void lddf(SPARCAddress src, Register dst) {
-        assert isDoubleFloatRegister(dst) : dst;
-        ld(Lddf, src, dst);
-    }
-
-    public void ldf(SPARCAddress src, Register dst) {
-        assert isSingleFloatRegister(dst) : dst;
-        ld(Ldf, src, dst);
-    }
-
-    public void lduh(SPARCAddress src, Register dst) {
-        assert isCPURegister(dst) : dst;
-        ld(Lduh, src, dst);
-    }
-
-    public void ldsh(SPARCAddress src, Register dst) {
-        assert isCPURegister(dst) : dst;
-        ld(Ldsh, src, dst);
-    }
-
-    public void ldub(SPARCAddress src, Register dst) {
-        assert isCPURegister(dst) : dst;
-        ld(Ldub, src, dst);
-    }
-
-    public void ldsb(SPARCAddress src, Register dst) {
-        assert isCPURegister(dst) : dst;
-        ld(Ldsb, src, dst);
-    }
-
-    public void lduw(SPARCAddress src, Register dst) {
-        assert isCPURegister(dst) : dst;
-        ld(Lduw, src, dst);
-    }
-
-    public void ldsw(SPARCAddress src, Register dst) {
-        assert isCPURegister(dst) : dst;
-        ld(Ldsw, src, dst);
-    }
-
-    public void ldx(SPARCAddress src, Register dst) {
-        assert isCPURegister(dst) : dst;
-        ld(Ldx, src, dst);
-    }
-
-    public void ldxa(Register rs1, Register rs2, Register rd, Asi asi) {
-        assert SPARC.isCPURegister(rs1, rs2, rd) : format("%s %s %s", rs1, rs2, rd);
-        ld(Ldxa, new SPARCAddress(rs1, rs2), rd, asi);
-    }
-
-    public void lduwa(Register rs1, Register rs2, Register rd, Asi asi) {
-        assert SPARC.isCPURegister(rs1, rs2, rd) : format("%s %s %s", rs1, rs2, rd);
-        ld(Lduwa, new SPARCAddress(rs1, rs2), rd, asi);
-    }
-
-    protected void st(Op3s op3, Register rs1, SPARCAddress dest) {
-        ld(op3, dest, rs1);
-    }
-
-    public void stdf(Register rd, SPARCAddress addr) {
-        assert isDoubleFloatRegister(rd) : rd;
-        st(Stdf, rd, addr);
-    }
-
-    public void stf(Register rd, SPARCAddress addr) {
-        assert isSingleFloatRegister(rd) : rd;
-        st(Stf, rd, addr);
-    }
-
-    public void stb(Register rd, SPARCAddress addr) {
-        assert isCPURegister(rd) : rd;
-        st(Stb, rd, addr);
-    }
-
-    public void sth(Register rd, SPARCAddress addr) {
-        assert isCPURegister(rd) : rd;
-        st(Sth, rd, addr);
-    }
-
-    public void stw(Register rd, SPARCAddress addr) {
-        assert isCPURegister(rd) : rd;
-        st(Stw, rd, addr);
-    }
-
-    public void stx(Register rd, SPARCAddress addr) {
-        assert isCPURegister(rd) : rd;
-        st(Stx, rd, addr);
-    }
-
-    public void membar(int barriers) {
-        op3(Membar, r15, barriers, g0);
-    }
-
-    public void casa(Register rs1, Register rs2, Register rd, Asi asi) {
-        ld(Casa, new SPARCAddress(rs1, rs2), rd, asi);
-    }
-
-    public void casxa(Register rs1, Register rs2, Register rd, Asi asi) {
-        ld(Casxa, new SPARCAddress(rs1, rs2), rd, asi);
-    }
-
-    @Override
-    public InstructionCounter getInstructionCounter() {
-        return new SPARCInstructionCounter(this);
-    }
-
-    public void patchAddImmediate(int position, int simm13) {
-        int inst = getInt(position);
-        assert SPARCAssembler.isSimm13(simm13) : simm13;
-        assert (inst >>> 30) == 0b10 : String.format("0x%x", inst);
-        assert ((inst >>> 18) & 0b11_1111) == 0 : String.format("0x%x", inst);
-        assert (inst & (1 << 13)) != 0 : String.format("0x%x", inst);
-        inst = inst & (~((1 << 13) - 1));
-        inst |= simm13 & ((1 << 12) - 1);
-        emitInt(inst, position);
-    }
-
-    public void fpadd32(Register rs1, Register rs2, Register rd) {
-        op3(Impdep1, Fpadd32, rs1, rs2, rd);
-    }
-
-    public boolean isCbcond(int i) {
-        return (i & 0xC1C00000) == 0xC00000;
-    }
-}
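A hedged usage sketch of the load/store helpers above, assuming a SPARCAssembler instance masm and the global register aliases (g1, g3, g4) used throughout this file; an indexed address takes the register form of ld/st, a displacement address the simm13 form:

    SPARCAddress indexed = new SPARCAddress(g1, g3);   // base + index register
    SPARCAddress offset = new SPARCAddress(g1, 16);    // base + simm13 displacement
    masm.ldx(indexed, g4);
    masm.stx(g4, offset);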
--- a/graal/com.oracle.graal.asm.sparc/src/com/oracle/graal/asm/sparc/SPARCInstructionCounter.java	Wed Jun 03 17:12:05 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.asm.sparc;
-
-import java.util.*;
-
-import com.oracle.graal.asm.Assembler.InstructionCounter;
-
-public class SPARCInstructionCounter implements InstructionCounter {
-    // Use a treemap to keep the order in the output
-    private static final TreeMap<String, SPARCInstructionMatch> INSTRUCTION_MATCHER = new TreeMap<>();
-    static {
-        // @formatter:off
-        INSTRUCTION_MATCHER.put("nop", new SPARCInstructionMatch(0xFFFF_FFFF, 0x0100_0000));
-        INSTRUCTION_MATCHER.put("st", new OP3LowBitsMatcher(0b11, 0x4, 0x5, 0x6, 0x7, 0xe, 0xf));
-        INSTRUCTION_MATCHER.put("ld", new OP3LowBitsMatcher(0b11, 0x0, 0x1, 0x2, 0x3, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd));
-        INSTRUCTION_MATCHER.put("all", new SPARCInstructionMatch(0x0, 0x0));
-        // @formatter:on
-    }
-    private final SPARCAssembler asm;
-
-    public SPARCInstructionCounter(SPARCAssembler asm) {
-        super();
-        this.asm = asm;
-    }
-
-    @Override
-    public int[] countInstructions(String[] instructionTypes, int beginPc, int endPc) {
-        SPARCInstructionMatch[] matchers = new SPARCInstructionMatch[instructionTypes.length];
-        for (int i = 0; i < instructionTypes.length; i++) {
-            String typeName = instructionTypes[i];
-            matchers[i] = INSTRUCTION_MATCHER.get(typeName);
-            if (matchers[i] == null) {
-                throw new IllegalArgumentException(String.format("Unknown instruction class %s, supported types are: %s", typeName, INSTRUCTION_MATCHER.keySet()));
-            }
-        }
-        return countBetween(matchers, beginPc, endPc);
-    }
-
-    private int[] countBetween(SPARCInstructionMatch[] matchers, int startPc, int endPc) {
-        int[] counts = new int[matchers.length];
-        for (int p = startPc; p < endPc; p += 4) {
-            int instr = asm.getInt(p);
-            for (int i = 0; i < matchers.length; i++) {
-                SPARCInstructionMatch matcher = matchers[i];
-                if (matcher.matches(instr)) {
-                    counts[i]++;
-                }
-            }
-        }
-        return counts;
-    }
-
-    @Override
-    public String[] getSupportedInstructionTypes() {
-        return INSTRUCTION_MATCHER.keySet().toArray(new String[0]);
-    }
-
-    /**
-     * Tests the lower four bits (bits 0..3) of the op3 field.
-     */
-    private static class OP3LowBitsMatcher extends SPARCInstructionMatch {
-        private final int[] op3b03;
-        private final int op;
-
-        public OP3LowBitsMatcher(int op, int... op3b03) {
-            super(0, 0);
-            this.op = op;
-            this.op3b03 = op3b03;
-        }
-
-        @Override
-        public boolean matches(int instruction) {
-            if (instruction >>> 30 != op) {
-                return false;
-            }
-            int op3lo = (instruction >> 19) & ((1 << 4) - 1);
-            for (int op3Part : op3b03) {
-                if (op3Part == op3lo) {
-                    return true;
-                }
-            }
-            return false;
-        }
-    }
-
-    private static class SPARCInstructionMatch {
-        private final int mask;
-        private final int[] patterns;
-
-        public SPARCInstructionMatch(int mask, int... patterns) {
-            super();
-            this.mask = mask;
-            this.patterns = patterns;
-        }
-
-        public boolean matches(int instruction) {
-            for (int pattern : patterns) {
-                if ((instruction & mask) == pattern) {
-                    return true;
-                }
-            }
-            return false;
-        }
-    }
-}
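A hedged usage sketch of the counter, assuming an assembler masm that has already emitted code; unknown type names raise the IllegalArgumentException shown above:

    SPARCInstructionCounter counter = new SPARCInstructionCounter(masm);
    int[] counts = counter.countInstructions(new String[]{"ld", "st", "all"}, 0, masm.position());
    // counts[0]: loads, counts[1]: stores, counts[2]: every 4-byte word in the range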
--- a/graal/com.oracle.graal.asm.sparc/src/com/oracle/graal/asm/sparc/SPARCMacroAssembler.java	Wed Jun 03 17:12:05 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,421 +0,0 @@
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.asm.sparc;
-
-import com.oracle.jvmci.code.AbstractAddress;
-import com.oracle.jvmci.code.TargetDescription;
-import com.oracle.jvmci.code.RegisterConfig;
-import com.oracle.jvmci.code.Register;
-
-import static com.oracle.graal.asm.sparc.SPARCAssembler.Annul.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.ConditionFlag.*;
-import static com.oracle.jvmci.sparc.SPARC.*;
-
-import java.util.function.*;
-
-import com.oracle.graal.asm.*;
-import com.oracle.jvmci.common.*;
-
-public class SPARCMacroAssembler extends SPARCAssembler {
-
-    /**
-     * A sentinel value used as a placeholder in an instruction stream for an address that will be
-     * patched.
-     */
-    private static final SPARCAddress Placeholder = new SPARCAddress(g0, 0);
-    private final ScratchRegister[] scratchRegister = new ScratchRegister[]{new ScratchRegister(g1), new ScratchRegister(g3)};
-    // Points to the next free scratch register
-    private int nextFreeScratchRegister = 0;
-
-    public SPARCMacroAssembler(TargetDescription target, RegisterConfig registerConfig) {
-        super(target, registerConfig);
-    }
-
-    @Override
-    public void align(int modulus) {
-        while (position() % modulus != 0) {
-            nop();
-        }
-    }
-
-    @Override
-    public void jmp(Label l) {
-        bicc(Always, NOT_ANNUL, l);
-        nop();  // delay slot
-    }
-
-    @Override
-    protected final void patchJumpTarget(int branch, int branchTarget) {
-        final int disp = (branchTarget - branch) / 4;
-        final int inst = getInt(branch);
-        Op2s op2 = Op2s.byValue((inst & OP2_MASK) >> OP2_SHIFT);
-        int maskBits;
-        int setBits;
-        switch (op2) {
-            case Br:
-            case Fb:
-            case Sethi:
-            case Illtrap:
-                // Disp 22 in the lower 22 bits
-                assert isSimm(disp, 22);
-                setBits = disp << DISP22_SHIFT;
-                maskBits = DISP22_MASK;
-                break;
-            case Fbp:
-            case Bp:
-                // Disp 19 in the lower 19 bits
-                assert isSimm(disp, 19);
-                setBits = disp << DISP19_SHIFT;
-                maskBits = DISP19_MASK;
-                break;
-            case Bpr:
-                boolean isCBcond = (inst & CBCOND_MASK) != 0;
-                if (isCBcond) {
-                    assert isSimm10(disp) : String.format("%d: instruction: 0x%x", disp, inst);
-                    int d10Split = 0;
-                    d10Split |= (disp & 0b11_0000_0000) << D10HI_SHIFT - 8;
-                    d10Split |= (disp & 0b00_1111_1111) << D10LO_SHIFT;
-                    setBits = d10Split;
-                    maskBits = D10LO_MASK | D10HI_MASK;
-                } else {
-                    assert isSimm(disp, 16);
-                    int d16Split = 0;
-                    d16Split |= (disp & 0b1100_0000_0000_0000) << D16HI_SHIFT - 14;
-                    d16Split |= (disp & 0b0011_1111_1111_1111) << D16LO_SHIFT;
-                    setBits = d16Split;
-                    maskBits = D16HI_MASK | D16LO_MASK;
-                }
-                break;
-            default:
-                throw JVMCIError.shouldNotReachHere("Unknown op2 " + op2);
-        }
-        int newInst = ~maskBits & inst;
-        newInst |= setBits;
-        emitInt(newInst, branch);
-    }
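As a hedged illustration of the Bp/Fbp case (branchPos and targetPos are made-up positions; the masks and shifts are the constants referenced above):

    int branchPos = 0x100, targetPos = 0x140;   // made-up positions within the code buffer
    int disp = (targetPos - branchPos) / 4;     // 16 instruction words forward
    int oldInst = getInt(branchPos);
    int patched = (oldInst & ~DISP19_MASK) | (disp << DISP19_SHIFT);
    emitInt(patched, branchPos);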
-
-    @Override
-    public AbstractAddress makeAddress(Register base, int displacement) {
-        return new SPARCAddress(base, displacement);
-    }
-
-    @Override
-    public AbstractAddress getPlaceholder() {
-        return Placeholder;
-    }
-
-    @Override
-    public final void ensureUniquePC() {
-        nop();
-    }
-
-    public void cas(Register rs1, Register rs2, Register rd) {
-        casa(rs1, rs2, rd, Asi.ASI_PRIMARY);
-    }
-
-    public void casx(Register rs1, Register rs2, Register rd) {
-        casxa(rs1, rs2, rd, Asi.ASI_PRIMARY);
-    }
-
-    public void clr(Register dst) {
-        or(g0, g0, dst);
-    }
-
-    public void clrb(SPARCAddress addr) {
-        stb(g0, addr);
-    }
-
-    public void clrh(SPARCAddress addr) {
-        sth(g0, addr);
-    }
-
-    public void clrx(SPARCAddress addr) {
-        stx(g0, addr);
-    }
-
-    public void cmp(Register rs1, Register rs2) {
-        subcc(rs1, rs2, g0);
-    }
-
-    public void cmp(Register rs1, int simm13) {
-        subcc(rs1, simm13, g0);
-    }
-
-    public void dec(Register rd) {
-        sub(rd, 1, rd);
-    }
-
-    public void dec(int simm13, Register rd) {
-        sub(rd, simm13, rd);
-    }
-
-    public void jmp(SPARCAddress address) {
-        jmpl(address.getBase(), address.getDisplacement(), g0);
-    }
-
-    public void jmp(Register rd) {
-        jmpl(rd, 0, g0);
-    }
-
-    public void neg(Register rs1, Register rd) {
-        sub(g0, rs1, rd);
-    }
-
-    public void neg(Register rd) {
-        sub(g0, rd, rd);
-    }
-
-    public void mov(Register rs, Register rd) {
-        or(g0, rs, rd);
-    }
-
-    public void mov(int simm13, Register rd) {
-        or(g0, simm13, rd);
-    }
-
-    public void not(Register rs1, Register rd) {
-        xnor(rs1, g0, rd);
-    }
-
-    public void not(Register rd) {
-        xnor(rd, g0, rd);
-    }
-
-    public void restoreWindow() {
-        restore(g0, g0, g0);
-    }
-
-    public void ret() {
-        jmpl(i7, 8, g0);
-    }
-
-    /**
-     * This instruction is like sethi but for 64-bit values.
-     */
-    public static class Sethix {
-
-        private static final int INSTRUCTION_SIZE = 7;
-
-        private long value;
-        private Register dst;
-        private boolean forceRelocatable;
-        private boolean delayed = false;
-        private Consumer<SPARCAssembler> delayedInstructionEmitter;
-
-        public Sethix(long value, Register dst, boolean forceRelocatable, boolean delayed) {
-            this(value, dst, forceRelocatable);
-            assert !(forceRelocatable && delayed) : "Relocatable sethix cannot be delayed";
-            this.delayed = delayed;
-        }
-
-        public Sethix(long value, Register dst, boolean forceRelocatable) {
-            this.value = value;
-            this.dst = dst;
-            this.forceRelocatable = forceRelocatable;
-        }
-
-        public Sethix(long value, Register dst) {
-            this(value, dst, false);
-        }
-
-        private void emitInstruction(Consumer<SPARCAssembler> cb, SPARCMacroAssembler masm) {
-            if (delayed) {
-                if (this.delayedInstructionEmitter != null) {
-                    delayedInstructionEmitter.accept(masm);
-                }
-                delayedInstructionEmitter = cb;
-            } else {
-                cb.accept(masm);
-            }
-        }
-
-        public void emit(SPARCMacroAssembler masm) {
-            final int hi = (int) (value >> 32);
-            final int lo = (int) (value & ~0);
-
-            // This is the same logic as MacroAssembler::internal_set.
-            final int startPc = masm.position();
-
-            if (hi == 0 && lo >= 0) {
-                Consumer<SPARCAssembler> cb = eMasm -> eMasm.sethi(hi22(lo), dst);
-                emitInstruction(cb, masm);
-            } else if (hi == -1) {
-                Consumer<SPARCAssembler> cb = eMasm -> eMasm.sethi(hi22(~lo), dst);
-                emitInstruction(cb, masm);
-                cb = eMasm -> eMasm.xor(dst, ~lo10(~0), dst);
-                emitInstruction(cb, masm);
-            } else {
-                final int shiftcnt;
-                final int shiftcnt2;
-                Consumer<SPARCAssembler> cb = eMasm -> eMasm.sethi(hi22(hi), dst);
-                emitInstruction(cb, masm);
-                if ((hi & 0x3ff) != 0) {                                  // Any bits?
-                    // msb 32-bits are now in lsb 32
-                    cb = eMasm -> eMasm.or(dst, hi & 0x3ff, dst);
-                    emitInstruction(cb, masm);
-                }
-                if ((lo & 0xFFFFFC00) != 0) {                             // done?
-                    if (((lo >> 20) & 0xfff) != 0) {                      // Any bits set?
-                        // Make room for next 12 bits
-                        cb = eMasm -> eMasm.sllx(dst, 12, dst);
-                        emitInstruction(cb, masm);
-                        // Or in next 12
-                        cb = eMasm -> eMasm.or(dst, (lo >> 20) & 0xfff, dst);
-                        emitInstruction(cb, masm);
-                        shiftcnt = 0;                                     // We already shifted
-                    } else {
-                        shiftcnt = 12;
-                    }
-                    if (((lo >> 10) & 0x3ff) != 0) {
-                        // Make room for last 10 bits
-                        cb = eMasm -> eMasm.sllx(dst, shiftcnt + 10, dst);
-                        emitInstruction(cb, masm);
-                        // Or in next 10
-                        cb = eMasm -> eMasm.or(dst, (lo >> 10) & 0x3ff, dst);
-                        emitInstruction(cb, masm);
-                        shiftcnt2 = 0;
-                    } else {
-                        shiftcnt2 = 10;
-                    }
-                    // Shift leaving disp field 0'd
-                    cb = eMasm -> eMasm.sllx(dst, shiftcnt2 + 10, dst);
-                    emitInstruction(cb, masm);
-                } else {
-                    cb = eMasm -> eMasm.sllx(dst, 32, dst);
-                    emitInstruction(cb, masm);
-                }
-            }
-            // Pad out the instruction sequence so it can be patched later.
-            if (forceRelocatable) {
-                while (masm.position() < (startPc + (INSTRUCTION_SIZE * 4))) {
-                    Consumer<SPARCAssembler> cb = eMasm -> eMasm.nop();
-                    emitInstruction(cb, masm);
-                }
-            }
-        }
-
-        public void emitDelayed(SPARCMacroAssembler masm) {
-            assert delayedInstructionEmitter != null;
-            delayedInstructionEmitter.accept(masm);
-        }
-    }
-
-    public static class Setx {
-
-        private long value;
-        private Register dst;
-        private boolean forceRelocatable;
-        private boolean delayed = false;
-        private boolean delayedFirstEmitted = false;
-        private Sethix sethix;
-        private Consumer<SPARCMacroAssembler> delayedAdd;
-
-        public Setx(long value, Register dst, boolean forceRelocatable, boolean delayed) {
-            assert !(forceRelocatable && delayed) : "Cannot use relocatable setx as delayable";
-            this.value = value;
-            this.dst = dst;
-            this.forceRelocatable = forceRelocatable;
-            this.delayed = delayed;
-        }
-
-        public Setx(long value, Register dst, boolean forceRelocatable) {
-            this(value, dst, forceRelocatable, false);
-        }
-
-        public Setx(long value, Register dst) {
-            this(value, dst, false);
-        }
-
-        public void emit(SPARCMacroAssembler masm) {
-            assert !delayed;
-            doEmit(masm);
-        }
-
-        private void doEmit(SPARCMacroAssembler masm) {
-            sethix = new Sethix(value, dst, forceRelocatable, delayed);
-            sethix.emit(masm);
-            int lo = (int) (value & ~0);
-            if (lo10(lo) != 0 || forceRelocatable) {
-                Consumer<SPARCMacroAssembler> add = eMasm -> eMasm.add(dst, lo10(lo), dst);
-                if (delayed) {
-                    sethix.emitDelayed(masm);
-                    sethix = null;
-                    delayedAdd = add;
-                } else {
-                    sethix = null;
-                    add.accept(masm);
-                }
-            }
-        }
-
-        public void emitFirstPartOfDelayed(SPARCMacroAssembler masm) {
-            assert !forceRelocatable : "Cannot use delayed mode with relocatable setx";
-            assert delayed : "Can only be used in delayed mode";
-            doEmit(masm);
-            delayedFirstEmitted = true;
-        }
-
-        public void emitSecondPartOfDelayed(SPARCMacroAssembler masm) {
-            assert !forceRelocatable : "Cannot use delayed mode with relocatable setx";
-            assert delayed : "Can only be used in delayed mode";
-            assert delayedFirstEmitted : "First part has not been emitted so far.";
-            assert delayedAdd == null && sethix != null || delayedAdd != null && sethix == null : "Either add or sethix must be set";
-            if (delayedAdd != null) {
-                delayedAdd.accept(masm);
-            } else {
-                sethix.emitDelayed(masm);
-            }
-
-        }
-    }
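A hedged sketch of materializing 64-bit constants with the classes above, assuming a SPARCMacroAssembler masm and destination register g4:

    new Setx(0x1234_5678_9ABCL, g4).emit(masm);   // sethi/or/sllx chain chosen by the value's shape
    new Setx(0xDEADBEEFL, g4, true).emit(masm);   // forceRelocatable: padded to a fixed length so it can be patched later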
-
-    public void signx(Register rs, Register rd) {
-        sra(rs, g0, rd);
-    }
-
-    public void signx(Register rd) {
-        sra(rd, g0, rd);
-    }
-
-    public ScratchRegister getScratchRegister() {
-        return scratchRegister[nextFreeScratchRegister++];
-    }
-
-    public class ScratchRegister implements AutoCloseable {
-        private final Register register;
-
-        public ScratchRegister(Register register) {
-            super();
-            this.register = register;
-        }
-
-        public Register getRegister() {
-            return register;
-        }
-
-        public void close() {
-            assert nextFreeScratchRegister > 0 : "Close called too often";
-            nextFreeScratchRegister--;
-        }
-    }
-}
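A hedged usage sketch of the scratch register protocol, assuming a SPARCMacroAssembler masm; ScratchRegister is AutoCloseable, so try-with-resources returns it to the pool:

    try (SPARCMacroAssembler.ScratchRegister sr = masm.getScratchRegister()) {
        Register scratch = sr.getRegister();
        masm.mov(42, scratch);    // temporary value lives only inside this block
    }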
--- a/graal/com.oracle.graal.asm/overview.html	Wed Jun 03 17:12:05 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
-<html>
-<head>
-<!--
-
-Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
-DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-
-This code is free software; you can redistribute it and/or modify it
-under the terms of the GNU General Public License version 2 only, as
-published by the Free Software Foundation.  Oracle designates this
-particular file as subject to the "Classpath" exception as provided
-by Oracle in the LICENSE file that accompanied this code.
-
-This code is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-version 2 for more details (a copy is included in the LICENSE file that
-accompanied this code).
-
-You should have received a copy of the GNU General Public License version
-2 along with this work; if not, write to the Free Software Foundation,
-Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-
-Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-or visit www.oracle.com if you need additional information or have any
-questions.
--->
-
-</head>
-<body>
-
-Documentation for the <code>com.oracle.graal.asm</code> project.
-
-</body>
-</html>
--- a/graal/com.oracle.graal.asm/src/com/oracle/graal/asm/AsmOptions.java	Wed Jun 03 17:12:05 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2011, 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.asm;
-
-public class AsmOptions {
-
-    public static int InitialCodeBufferSize = 232;
-}
--- a/graal/com.oracle.graal.asm/src/com/oracle/graal/asm/Assembler.java	Wed Jun 03 17:12:05 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,259 +0,0 @@
-/*
- * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.asm;
-
-import java.nio.*;
-import java.util.*;
-
-import com.oracle.jvmci.code.*;
-
-/**
- * The platform-independent base class for the assembler.
- */
-public abstract class Assembler {
-
-    public final TargetDescription target;
-    private List<LabelHint> jumpDisplacementHints;
-
-    /**
-     * Backing code buffer.
-     */
-    private final Buffer codeBuffer;
-
-    public Assembler(TargetDescription target) {
-        this.target = target;
-        if (target.arch.getByteOrder() == ByteOrder.BIG_ENDIAN) {
-            this.codeBuffer = new Buffer.BigEndian();
-        } else {
-            this.codeBuffer = new Buffer.LittleEndian();
-        }
-    }
-
-    /**
-     * Returns the current position of the underlying code buffer.
-     *
-     * @return current position in code buffer
-     */
-    public int position() {
-        return codeBuffer.position();
-    }
-
-    public final void emitByte(int x) {
-        codeBuffer.emitByte(x);
-    }
-
-    public final void emitShort(int x) {
-        codeBuffer.emitShort(x);
-    }
-
-    public final void emitInt(int x) {
-        codeBuffer.emitInt(x);
-    }
-
-    public final void emitLong(long x) {
-        codeBuffer.emitLong(x);
-    }
-
-    public final void emitByte(int b, int pos) {
-        codeBuffer.emitByte(b, pos);
-    }
-
-    public final void emitShort(int b, int pos) {
-        codeBuffer.emitShort(b, pos);
-    }
-
-    public final void emitInt(int b, int pos) {
-        codeBuffer.emitInt(b, pos);
-    }
-
-    public final void emitLong(long b, int pos) {
-        codeBuffer.emitLong(b, pos);
-    }
-
-    public final int getByte(int pos) {
-        return codeBuffer.getByte(pos);
-    }
-
-    public final int getShort(int pos) {
-        return codeBuffer.getShort(pos);
-    }
-
-    public final int getInt(int pos) {
-        return codeBuffer.getInt(pos);
-    }
-
-    private static final String NEWLINE = System.getProperty("line.separator");
-
-    /**
-     * Some GPU architectures have a text-based encoding.
-     */
-    public final void emitString(String x) {
-        emitString0("\t");  // XXX REMOVE ME pretty-printing
-        emitString0(x);
-        emitString0(NEWLINE);
-    }
-
-    // XXX for pretty-printing
-    public final void emitString0(String x) {
-        codeBuffer.emitBytes(x.getBytes(), 0, x.length());
-    }
-
-    public void emitString(String s, int pos) {
-        codeBuffer.emitBytes(s.getBytes(), pos);
-    }
-
-    /**
-     * Closes this assembler. No extra data can be written to this assembler after this call.
-     *
-     * @param trimmedCopy if {@code true}, then a copy of the underlying byte array up to (but not
-     *            including) {@code position()} is returned
-     * @return the data in this buffer or a trimmed copy if {@code trimmedCopy} is {@code true}
-     */
-    public byte[] close(boolean trimmedCopy) {
-        return codeBuffer.close(trimmedCopy);
-    }
-
-    public void bind(Label l) {
-        assert !l.isBound() : "can bind label only once";
-        l.bind(position());
-        l.patchInstructions(this);
-    }
-
-    public abstract void align(int modulus);
-
-    public abstract void jmp(Label l);
-
-    protected abstract void patchJumpTarget(int branch, int jumpTarget);
-
-    private Map<Label, String> nameMap;
-
-    /**
-     * Creates a name for a label.
-     *
-     * @param l the label for which a name is being created
-     * @param id a label identifier that is unique within the scope of this assembler
-     * @return a label name in the form of "L123"
-     */
-    protected String createLabelName(Label l, int id) {
-        return "L" + id;
-    }
-
-    /**
-     * Gets a name for a label, creating it if it does not yet exist. By default, the returned name
-     * is only unique within the scope of this assembler.
-     */
-    public String nameOf(Label l) {
-        if (nameMap == null) {
-            nameMap = new HashMap<>();
-        }
-        String name = nameMap.get(l);
-        if (name == null) {
-            name = createLabelName(l, nameMap.size());
-            nameMap.put(l, name);
-        }
-        return name;
-    }
-
-    /**
-     * This is used by the CompilationResultBuilder to convert a {@link StackSlot} to an
-     * {@link AbstractAddress}.
-     */
-    public abstract AbstractAddress makeAddress(Register base, int displacement);
-
-    /**
-     * Returns a target specific placeholder address that can be used for code patching.
-     */
-    public abstract AbstractAddress getPlaceholder();
-
-    /**
-     * Emits a NOP instruction to advance the current PC.
-     */
-    public abstract void ensureUniquePC();
-
-    public void reset() {
-        codeBuffer.reset();
-        captureLabelPositions();
-    }
-
-    private void captureLabelPositions() {
-        if (jumpDisplacementHints == null) {
-            return;
-        }
-        for (LabelHint request : this.jumpDisplacementHints) {
-            request.capture();
-        }
-    }
-
-    public LabelHint requestLabelHint(Label label) {
-        if (jumpDisplacementHints == null) {
-            jumpDisplacementHints = new ArrayList<>();
-        }
-        LabelHint hint = new LabelHint(label, position());
-        this.jumpDisplacementHints.add(hint);
-        return hint;
-    }
-
-    public InstructionCounter getInstructionCounter() {
-        throw new UnsupportedOperationException("Instruction counter is not implemented for " + this);
-    }
-
-    public static class LabelHint {
-        private Label label;
-        private int forPosition;
-        private int capturedTarget = -1;
-
-        protected LabelHint(Label label, int lastPosition) {
-            super();
-            this.label = label;
-            this.forPosition = lastPosition;
-        }
-
-        protected void capture() {
-            this.capturedTarget = label.position();
-        }
-
-        public int getTarget() {
-            assert isValid();
-            return capturedTarget;
-        }
-
-        public int getPosition() {
-            assert isValid();
-            return forPosition;
-        }
-
-        public boolean isValid() {
-            return capturedTarget >= 0;
-        }
-    }
-
-    /**
-     * Instruction counter interface that allows the user of the assembler to count different kinds
-     * of instructions in the generated assembler code.
-     */
-    public interface InstructionCounter {
-        String[] getSupportedInstructionTypes();
-
-        int[] countInstructions(String[] instructionTypes, int beginPc, int endPc);
-    }
-}
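A hedged sketch of the raw emit/read/close cycle on a concrete subclass (masm stands in for any Assembler implementation):

    masm.emitInt(0x01000000);                     // one 32-bit word, written in the target's byte order
    int word = masm.getInt(masm.position() - 4);  // read it back
    byte[] code = masm.close(true);               // trimmed copy; nothing may be emitted afterwards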
--- a/graal/com.oracle.graal.asm/src/com/oracle/graal/asm/Buffer.java	Wed Jun 03 17:12:05 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,239 +0,0 @@
-/*
- * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.asm;
-
-import java.util.*;
-
-/**
- * Code buffer management for the assembler. Support for little endian and big endian architectures
- * is implemented using subclasses.
- */
-abstract class Buffer {
-
-    protected byte[] data;
-    protected int position;
-
-    public Buffer() {
-        data = new byte[AsmOptions.InitialCodeBufferSize];
-    }
-
-    public int position() {
-        return position;
-    }
-
-    public void setPosition(int position) {
-        assert position >= 0 && position <= data.length;
-        this.position = position;
-    }
-
-    /**
-     * Closes this buffer. No extra data can be written to this buffer after this call.
-     *
-     * @param trimmedCopy if {@code true}, then a copy of the underlying byte array up to (but not
-     *            including) {@code position()} is returned
-     * @return the data in this buffer or a trimmed copy if {@code trimmedCopy} is {@code true}
-     */
-    public byte[] close(boolean trimmedCopy) {
-        byte[] result = trimmedCopy ? Arrays.copyOf(data, position()) : data;
-        data = null;
-        return result;
-    }
-
-    public byte[] copyData(int start, int end) {
-        if (data == null) {
-            return null;
-        }
-        return Arrays.copyOfRange(data, start, end);
-    }
-
-    /**
-     * Copies the data from this buffer into a given array.
-     *
-     * @param dst the destination array
-     * @param off starting position in {@code dst}
-     * @param len number of bytes to copy
-     */
-    public void copyInto(byte[] dst, int off, int len) {
-        System.arraycopy(data, 0, dst, off, len);
-    }
-
-    protected void ensureSize(int length) {
-        if (length >= data.length) {
-            data = Arrays.copyOf(data, length * 4);
-        }
-    }
-
-    public void emitBytes(byte[] arr, int off, int len) {
-        ensureSize(position + len);
-        System.arraycopy(arr, off, data, position, len);
-        position += len;
-    }
-
-    public void emitByte(int b) {
-        position = emitByte(b, position);
-    }
-
-    public void emitShort(int b) {
-        position = emitShort(b, position);
-    }
-
-    public void emitInt(int b) {
-        position = emitInt(b, position);
-    }
-
-    public void emitLong(long b) {
-        position = emitLong(b, position);
-    }
-
-    public int emitBytes(byte[] arr, int pos) {
-        final int len = arr.length;
-        final int newPos = pos + len;
-        ensureSize(newPos);
-        System.arraycopy(arr, 0, data, pos, len);
-        return newPos;
-    }
-
-    public int emitByte(int b, int pos) {
-        assert NumUtil.isUByte(b) || NumUtil.isByte(b);
-        int newPos = pos + 1;
-        ensureSize(newPos);
-        data[pos] = (byte) (b & 0xFF);
-        return newPos;
-    }
-
-    public abstract int emitShort(int b, int pos);
-
-    public abstract int emitInt(int b, int pos);
-
-    public abstract int emitLong(long b, int pos);
-
-    public int getByte(int pos) {
-        return data[pos] & 0xff;
-    }
-
-    public abstract int getShort(int pos);
-
-    public abstract int getInt(int pos);
-
-    public static final class BigEndian extends Buffer {
-
-        @Override
-        public int emitShort(int b, int pos) {
-            assert NumUtil.isUShort(b) || NumUtil.isShort(b);
-            int newPos = pos + 2;
-            ensureSize(pos + 2);
-            data[pos] = (byte) ((b >> 8) & 0xFF);
-            data[pos + 1] = (byte) (b & 0xFF);
-            return newPos;
-        }
-
-        @Override
-        public int emitInt(int b, int pos) {
-            int newPos = pos + 4;
-            ensureSize(newPos);
-            data[pos] = (byte) ((b >> 24) & 0xFF);
-            data[pos + 1] = (byte) ((b >> 16) & 0xFF);
-            data[pos + 2] = (byte) ((b >> 8) & 0xFF);
-            data[pos + 3] = (byte) (b & 0xFF);
-            return newPos;
-        }
-
-        @Override
-        public int emitLong(long b, int pos) {
-            int newPos = pos + 8;
-            ensureSize(newPos);
-            data[pos] = (byte) ((b >> 56) & 0xFF);
-            data[pos + 1] = (byte) ((b >> 48) & 0xFF);
-            data[pos + 2] = (byte) ((b >> 40) & 0xFF);
-            data[pos + 3] = (byte) ((b >> 32) & 0xFF);
-            data[pos + 4] = (byte) ((b >> 24) & 0xFF);
-            data[pos + 5] = (byte) ((b >> 16) & 0xFF);
-            data[pos + 6] = (byte) ((b >> 8) & 0xFF);
-            data[pos + 7] = (byte) (b & 0xFF);
-            return newPos;
-        }
-
-        @Override
-        public int getShort(int pos) {
-            return (data[pos + 0] & 0xff) << 8 | (data[pos + 1] & 0xff) << 0;
-        }
-
-        @Override
-        public int getInt(int pos) {
-            return (data[pos + 0] & 0xff) << 24 | (data[pos + 1] & 0xff) << 16 | (data[pos + 2] & 0xff) << 8 | (data[pos + 3] & 0xff) << 0;
-        }
-    }
-
-    public static final class LittleEndian extends Buffer {
-
-        @Override
-        public int emitShort(int b, int pos) {
-            assert NumUtil.isUShort(b) || NumUtil.isShort(b);
-            int newPos = pos + 2;
-            ensureSize(newPos);
-            data[pos] = (byte) (b & 0xFF);
-            data[pos + 1] = (byte) ((b >> 8) & 0xFF);
-            return newPos;
-        }
-
-        @Override
-        public int emitInt(int b, int pos) {
-            int newPos = pos + 4;
-            ensureSize(newPos);
-            data[pos] = (byte) (b & 0xFF);
-            data[pos + 1] = (byte) ((b >> 8) & 0xFF);
-            data[pos + 2] = (byte) ((b >> 16) & 0xFF);
-            data[pos + 3] = (byte) ((b >> 24) & 0xFF);
-            return newPos;
-        }
-
-        @Override
-        public int emitLong(long b, int pos) {
-            int newPos = pos + 8;
-            ensureSize(newPos);
-            data[pos] = (byte) (b & 0xFF);
-            data[pos + 1] = (byte) ((b >> 8) & 0xFF);
-            data[pos + 2] = (byte) ((b >> 16) & 0xFF);
-            data[pos + 3] = (byte) ((b >> 24) & 0xFF);
-            data[pos + 4] = (byte) ((b >> 32) & 0xFF);
-            data[pos + 5] = (byte) ((b >> 40) & 0xFF);
-            data[pos + 6] = (byte) ((b >> 48) & 0xFF);
-            data[pos + 7] = (byte) ((b >> 56) & 0xFF);
-            return newPos;
-        }
-
-        @Override
-        public int getShort(int pos) {
-            return (data[pos + 1] & 0xff) << 8 | (data[pos + 0] & 0xff) << 0;
-        }
-
-        @Override
-        public int getInt(int pos) {
-            return (data[pos + 3] & 0xff) << 24 | (data[pos + 2] & 0xff) << 16 | (data[pos + 1] & 0xff) << 8 | (data[pos + 0] & 0xff) << 0;
-        }
-    }
-
-    public void reset() {
-        position = 0;
-    }
-}
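Since Buffer is package-private, the following hedged sketch only applies inside com.oracle.graal.asm; it shows the little-endian byte layout:

    Buffer buf = new Buffer.LittleEndian();
    buf.emitInt(0x11223344);
    assert buf.getByte(0) == 0x44;       // least significant byte comes first
    assert buf.getInt(0) == 0x11223344;  // round-trips regardless of endianness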
--- a/graal/com.oracle.graal.asm/src/com/oracle/graal/asm/Label.java	Wed Jun 03 17:12:05 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,106 +0,0 @@
-/*
- * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.asm;
-
-import java.util.*;
-
-/**
- * This class represents a label within assembly code.
- */
-public final class Label {
-
-    private int position = -1;
-    private int blockId = -1;
-
-    /**
-     * References to instructions that jump to this unresolved label. These instructions need to be
-     * patched when the label is bound using the {@link #patchInstructions(Assembler)} method.
-     */
-    private ArrayList<Integer> patchPositions = null;
-
-    /**
-     * Returns the position of this label in the code buffer.
-     *
-     * @return the position
-     */
-    public int position() {
-        assert position >= 0 : "Unbound label is being referenced";
-        return position;
-    }
-
-    public Label() {
-    }
-
-    public Label(int id) {
-        blockId = id;
-    }
-
-    public int getBlockId() {
-        return blockId;
-    }
-
-    /**
-     * Binds the label to the specified position.
-     *
-     * @param pos the position
-     */
-    protected void bind(int pos) {
-        this.position = pos;
-        assert isBound();
-    }
-
-    public boolean isBound() {
-        return position >= 0;
-    }
-
-    public void addPatchAt(int branchLocation) {
-        assert !isBound() : "Label is already bound " + this + " " + branchLocation + " at position " + position;
-        if (patchPositions == null) {
-            patchPositions = new ArrayList<>(2);
-        }
-        patchPositions.add(branchLocation);
-    }
-
-    protected void patchInstructions(Assembler masm) {
-        assert isBound() : "Label should be bound";
-        if (patchPositions != null) {
-            int target = position;
-            for (int i = 0; i < patchPositions.size(); ++i) {
-                int pos = patchPositions.get(i);
-                masm.patchJumpTarget(pos, target);
-            }
-        }
-    }
-
-    public void reset() {
-        if (this.patchPositions != null) {
-            this.patchPositions.clear();
-        }
-        this.position = -1;
-    }
-
-    @Override
-    public String toString() {
-        return isBound() ? String.valueOf(position()) : "?";
-    }
-}
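A hedged sketch of the forward-branch flow the class supports, assuming an assembler masm:

    Label target = new Label();
    masm.jmp(target);       // label unbound: the branch position is recorded via addPatchAt
    // ... emit intermediate code ...
    masm.bind(target);      // bind() then patchInstructions() rewrites every recorded branch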
--- a/graal/com.oracle.graal.asm/src/com/oracle/graal/asm/NumUtil.java	Wed Jun 03 17:12:05 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.asm;
-
-// JaCoCo Exclude
-
-/**
- * A collection of static utility functions that check ranges of numbers.
- */
-public class NumUtil {
-
-    public static boolean isShiftCount(int x) {
-        return 0 <= x && x < 32;
-    }
-
-    /**
-     * Determines if a given {@code int} value is in the range of unsigned byte values.
-     */
-    public static boolean isUByte(int x) {
-        return (x & 0xff) == x;
-    }
-
-    /**
-     * Determines if a given {@code int} value is in the range of signed byte values.
-     */
-    public static boolean isByte(int x) {
-        return (byte) x == x;
-    }
-
-    /**
-     * Determines if a given {@code long} value is in the range of unsigned byte values.
-     */
-    public static boolean isUByte(long x) {
-        return (x & 0xffL) == x;
-    }
-
-    /**
-     * Determines if a given {@code long} value is in the range of signed byte values.
-     */
-    public static boolean isByte(long l) {
-        return (byte) l == l;
-    }
-
-    /**
-     * Determines if a given {@code long} value is in the range of unsigned int values.
-     */
-    public static boolean isUInt(long x) {
-        return (x & 0xffffffffL) == x;
-    }
-
-    /**
-     * Determines if a given {@code long} value is in the range of signed int values.
-     */
-    public static boolean isInt(long l) {
-        return (int) l == l;
-    }
-
-    /**
-     * Determines if a given {@code int} value is in the range of signed short values.
-     */
-    public static boolean isShort(int x) {
-        return (short) x == x;
-    }
-
-    /**
-     * Determines if a given {@code long} value is in the range of signed short values.
-     */
-    public static boolean isShort(long x) {
-        return (short) x == x;
-    }
-
-    public static boolean isUShort(int s) {
-        return s == (s & 0xFFFF);
-    }
-
-    public static boolean is32bit(long x) {
-        return -0x80000000L <= x && x < 0x80000000L;
-    }
-
-    public static short safeToShort(int v) {
-        assert isShort(v);
-        return (short) v;
-    }
-
-    public static int roundUp(int number, int mod) {
-        return ((number + mod - 1) / mod) * mod;
-    }
-
-    public static long roundUp(long number, long mod) {
-        return ((number + mod - 1L) / mod) * mod;
-    }
-}
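
The NumUtil predicates removed above are simple range checks, typically consulted when choosing between short and long instruction encodings or when aligning sizes. A short illustrative sketch (hypothetical values, not part of this changeset):

    // Illustrative use of the predicates and helpers shown in the removed file.
    int imm = 200;
    if (NumUtil.isByte(imm)) {
        // fits a sign-extended 8-bit immediate; the short encoding would do
    } else {
        // needs the full 32-bit immediate encoding
    }
    assert NumUtil.isUByte(0xff) && !NumUtil.isUByte(0x100);
    assert NumUtil.roundUp(13, 8) == 16; // round a size up to the next 8-byte boundary
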
--- a/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java	Wed Jun 03 18:06:44 2015 +0200
@@ -24,6 +24,9 @@
 package com.oracle.graal.compiler.amd64;
 
 import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.asm.amd64.AMD64Address.*;
+import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.code.RegisterConfig;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.ForeignCallLinkage;
@@ -40,29 +43,17 @@
 import com.oracle.jvmci.meta.AllocatableValue;
 import com.oracle.jvmci.meta.LIRKind;
 
+import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
+import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64MOp.*;
+import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64RMOp.*;
+import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64Shift.*;
+import static com.oracle.jvmci.asm.amd64.AMD64Assembler.OperandSize.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
-import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
-import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MOp.*;
-import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp.*;
-import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64Shift.*;
-import static com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize.*;
 import static com.oracle.graal.lir.amd64.AMD64Arithmetic.*;
 import static com.oracle.graal.lir.amd64.AMD64MathIntrinsicOp.IntrinsicOpcode.*;
 
 import java.util.*;
 
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.amd64.AMD64Address.Scale;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MIOp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MOp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MROp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMIOp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64Shift;
-import com.oracle.graal.asm.amd64.AMD64Assembler.ConditionFlag;
-import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
-import com.oracle.graal.asm.amd64.AMD64Assembler.SSEOp;
 import com.oracle.graal.compiler.common.calc.*;
 import com.oracle.graal.compiler.common.spi.*;
 import com.oracle.graal.compiler.common.util.*;
--- a/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64NodeLIRBuilder.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64NodeLIRBuilder.java	Wed Jun 03 18:06:44 2015 +0200
@@ -24,6 +24,8 @@
 package com.oracle.graal.compiler.amd64;
 
 import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.code.CallingConvention;
 import com.oracle.jvmci.meta.Kind;
 import com.oracle.jvmci.meta.JavaType;
@@ -33,15 +35,10 @@
 import com.oracle.jvmci.meta.AllocatableValue;
 import com.oracle.jvmci.meta.LIRKind;
 
-import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
-import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp.*;
-import static com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize.*;
+import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
+import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64RMOp.*;
+import static com.oracle.jvmci.asm.amd64.AMD64Assembler.OperandSize.*;
 
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MIOp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
-import com.oracle.graal.asm.amd64.AMD64Assembler.SSEOp;
 import com.oracle.graal.compiler.common.calc.*;
 import com.oracle.graal.compiler.gen.*;
 import com.oracle.graal.compiler.match.*;
--- a/graal/com.oracle.graal.compiler.sparc/src/com/oracle/graal/compiler/sparc/SPARCLIRGenerator.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.compiler.sparc/src/com/oracle/graal/compiler/sparc/SPARCLIRGenerator.java	Wed Jun 03 18:06:44 2015 +0200
@@ -23,28 +23,12 @@
 
 package com.oracle.graal.compiler.sparc;
 
-import com.oracle.jvmci.code.ForeignCallLinkage;
-import com.oracle.jvmci.code.CodeUtil;
-import com.oracle.jvmci.code.CallingConvention;
-import com.oracle.jvmci.code.StackSlotValue;
-import com.oracle.jvmci.meta.JavaConstant;
-import com.oracle.jvmci.meta.Value;
-import com.oracle.jvmci.meta.AllocatableValue;
-import com.oracle.jvmci.meta.PlatformKind;
-import com.oracle.jvmci.meta.Kind;
-import com.oracle.jvmci.meta.LIRKind;
-import com.oracle.jvmci.sparc.*;
-import com.oracle.jvmci.sparc.SPARC.*;
-
-import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.sparc.SPARCArithmetic.*;
 import static com.oracle.graal.lir.sparc.SPARCBitManipulationOp.IntrinsicOpcode.*;
 import static com.oracle.graal.lir.sparc.SPARCCompare.*;
 import static com.oracle.graal.lir.sparc.SPARCMathIntrinsicOp.IntrinsicOpcode.*;
+import static com.oracle.jvmci.code.ValueUtil.*;
 
-import com.oracle.graal.asm.sparc.*;
-import com.oracle.graal.asm.sparc.SPARCAssembler.CC;
-import com.oracle.graal.asm.sparc.SPARCAssembler.ConditionFlag;
 import com.oracle.graal.compiler.common.calc.*;
 import com.oracle.graal.compiler.common.spi.*;
 import com.oracle.graal.lir.*;
@@ -74,7 +58,14 @@
 import com.oracle.graal.lir.sparc.SPARCMove.SPARCStackMove;
 import com.oracle.graal.lir.sparc.SPARCMove.StackLoadAddressOp;
 import com.oracle.graal.phases.util.*;
+import com.oracle.jvmci.asm.sparc.*;
+import com.oracle.jvmci.asm.sparc.SPARCAssembler.CC;
+import com.oracle.jvmci.asm.sparc.SPARCAssembler.ConditionFlag;
+import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.common.*;
+import com.oracle.jvmci.meta.*;
+import com.oracle.jvmci.sparc.*;
+import com.oracle.jvmci.sparc.SPARC.CPUFeature;
 
 /**
  * This class implements the SPARC specific portion of the LIR generator.
@@ -356,7 +347,7 @@
                 throw JVMCIError.shouldNotReachHere();
         }
         Variable result = newVariable(trueValue.getLIRKind());
-        ConditionFlag finalCondition = ConditionFlag.fromCondtition(conditionFlags, mirrored ? cond.mirror() : cond, unorderedIsTrue);
+        ConditionFlag finalCondition = SPARCControlFlow.fromCondition(conditionFlags, mirrored ? cond.mirror() : cond, unorderedIsTrue);
         append(new CondMoveOp(result, conditionFlags, finalCondition, actualTrueValue, actualFalseValue));
         return result;
     }
@@ -432,7 +423,7 @@
             default:
                 throw JVMCIError.shouldNotReachHere();
         }
-        ConditionFlag flag = ConditionFlag.fromCondtition(conditionCode, Condition.EQ, false);
+        ConditionFlag flag = SPARCControlFlow.fromCondition(conditionCode, Condition.EQ, false);
         append(new CondMoveOp(result, conditionCode, flag, loadSimm11(trueValue), loadSimm11(falseValue)));
         return result;
     }
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/target/Backend.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/target/Backend.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,7 +22,6 @@
  */
 package com.oracle.graal.compiler.target;
 
-import com.oracle.graal.asm.*;
 import com.oracle.graal.compiler.common.alloc.*;
 import com.oracle.graal.compiler.gen.*;
 import com.oracle.graal.lir.*;
@@ -33,6 +32,7 @@
 import com.oracle.graal.nodes.spi.*;
 import com.oracle.graal.phases.tiers.*;
 import com.oracle.graal.phases.util.*;
+import com.oracle.jvmci.asm.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.code.stack.*;
 import com.oracle.jvmci.common.*;
--- a/graal/com.oracle.graal.hotspot.amd64.test/src/com/oracle/graal/hotspot/amd64/test/AMD64HotSpotFrameOmissionTest.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64.test/src/com/oracle/graal/hotspot/amd64/test/AMD64HotSpotFrameOmissionTest.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,7 @@
  */
 package com.oracle.graal.hotspot.amd64.test;
 
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.InstalledCode;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.CallingConvention;
@@ -36,7 +37,6 @@
 
 import org.junit.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.compiler.test.*;
 
 /**
--- a/graal/com.oracle.graal.hotspot.amd64.test/src/com/oracle/graal/hotspot/amd64/test/DataPatchInConstantsTest.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64.test/src/com/oracle/graal/hotspot/amd64/test/DataPatchInConstantsTest.java	Wed Jun 03 18:06:44 2015 +0200
@@ -32,7 +32,6 @@
 import org.junit.*;
 
 import com.oracle.graal.api.replacements.*;
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.hotspot.nodes.*;
@@ -46,6 +45,7 @@
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.nodes.spi.*;
 import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.hotspot.*;
 
 public class DataPatchInConstantsTest extends HotSpotGraalCompilerTest {
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64DeoptimizeOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64DeoptimizeOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -24,11 +24,11 @@
 
 import static com.oracle.graal.hotspot.HotSpotHostBackend.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.BlockEndOp;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
+import com.oracle.jvmci.asm.amd64.*;
 
 @Opcode("DEOPT")
 final class AMD64DeoptimizeOp extends AMD64LIRInstruction implements BlockEndOp {
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java	Wed Jun 03 18:06:44 2015 +0200
@@ -23,6 +23,9 @@
 package com.oracle.graal.hotspot.amd64;
 
 import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.asm.amd64.*;
+import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.code.CompilationResult;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.RegisterConfig;
@@ -40,9 +43,6 @@
 
 import java.util.*;
 
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.asm.amd64.AMD64Assembler.ConditionFlag;
 import com.oracle.graal.compiler.common.alloc.*;
 import com.oracle.graal.compiler.gen.*;
 import com.oracle.graal.compiler.target.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBinaryConsumer.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBinaryConsumer.java	Wed Jun 03 18:06:44 2015 +0200
@@ -23,13 +23,14 @@
 package com.oracle.graal.hotspot.amd64;
 
 import com.oracle.jvmci.meta.AllocatableValue;
-import static com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize.*;
 
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MIOp;
-import com.oracle.graal.asm.amd64.*;
+import static com.oracle.jvmci.asm.amd64.AMD64Assembler.OperandSize.*;
+
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
+import com.oracle.jvmci.asm.amd64.*;
+import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.hotspot.*;
 
 public class AMD64HotSpotBinaryConsumer {
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCRuntimeCallEpilogueOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCRuntimeCallEpilogueOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,8 +22,8 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.Register;
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCRuntimeCallPrologueOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCRuntimeCallPrologueOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,11 +22,11 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.Register;
 
 import static com.oracle.jvmci.amd64.AMD64.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCardTableAddressOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCardTableAddressOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,11 +22,11 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.ValueUtil;
 import com.oracle.jvmci.meta.JavaConstant;
 import com.oracle.jvmci.meta.AllocatableValue;
 import com.oracle.jvmci.meta.Kind;
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCardTableShiftOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCardTableShiftOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,11 +22,11 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.ValueUtil;
 import com.oracle.jvmci.meta.JavaConstant;
 import com.oracle.jvmci.meta.AllocatableValue;
 import com.oracle.jvmci.meta.Kind;
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCounterOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCounterOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,7 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.TargetDescription;
 import com.oracle.jvmci.code.StackSlotValue;
@@ -31,7 +32,6 @@
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.jvmci.common.JVMCIError.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.hotspot.meta.*;
 import com.oracle.graal.lir.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotDeoptimizeCallerOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotDeoptimizeCallerOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -24,11 +24,11 @@
 
 import static com.oracle.graal.hotspot.HotSpotHostBackend.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.BlockEndOp;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.meta.*;
 
 /**
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotEnterUnpackFramesStackFrameOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotEnterUnpackFramesStackFrameOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,7 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.RegisterConfig;
 import com.oracle.jvmci.code.RegisterSaveLayout;
@@ -32,7 +33,6 @@
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.SaveRegistersOp;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotEpilogueOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotEpilogueOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -26,10 +26,10 @@
 import static com.oracle.jvmci.amd64.AMD64.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.meta.*;
 
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotJumpToExceptionHandlerInCallerOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotJumpToExceptionHandlerInCallerOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,8 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
+import com.oracle.jvmci.asm.amd64.*;
+import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.AllocatableValue;
 
@@ -29,8 +31,6 @@
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.asm.amd64.AMD64Assembler.ConditionFlag;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.BlockEndOp;
 import com.oracle.graal.lir.asm.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,16 +22,14 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
-import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
-import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp.*;
-import static com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize.*;
 import static com.oracle.graal.hotspot.HotSpotBackend.*;
 import static com.oracle.jvmci.amd64.AMD64.*;
+import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
+import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64RMOp.*;
+import static com.oracle.jvmci.asm.amd64.AMD64Assembler.OperandSize.*;
 
 import java.util.*;
 
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MIOp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
 import com.oracle.graal.compiler.amd64.*;
 import com.oracle.graal.compiler.common.*;
 import com.oracle.graal.compiler.common.spi.*;
@@ -50,6 +48,7 @@
 import com.oracle.graal.lir.framemap.*;
 import com.oracle.graal.lir.gen.*;
 import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.common.*;
 import com.oracle.jvmci.debug.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLeaveCurrentStackFrameOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLeaveCurrentStackFrameOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -24,11 +24,11 @@
 
 import static com.oracle.jvmci.amd64.AMD64.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.SaveRegistersOp;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.framemap.*;
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.meta.*;
 
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLeaveDeoptimizedStackFrameOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLeaveDeoptimizedStackFrameOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,7 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.AllocatableValue;
 
@@ -29,7 +30,6 @@
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLeaveUnpackFramesStackFrameOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLeaveUnpackFramesStackFrameOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,11 +22,11 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.RegisterConfig;
 import com.oracle.jvmci.code.RegisterSaveLayout;
 import com.oracle.jvmci.meta.Kind;
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.SaveRegistersOp;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotMove.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotMove.java	Wed Jun 03 18:06:44 2015 +0200
@@ -25,9 +25,6 @@
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
 
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.asm.amd64.AMD64Assembler.ConditionFlag;
 import com.oracle.graal.compiler.common.*;
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.lir.*;
@@ -35,6 +32,9 @@
 import com.oracle.graal.lir.StandardOp.StackStoreOp;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.asm.amd64.*;
+import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.common.*;
 import com.oracle.jvmci.hotspot.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotNodeLIRBuilder.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotNodeLIRBuilder.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,15 +22,11 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
-import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
 import static com.oracle.graal.hotspot.HotSpotBackend.*;
 import static com.oracle.jvmci.amd64.AMD64.*;
+import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
 
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
-import com.oracle.graal.asm.amd64.AMD64Assembler.SSEOp;
 import com.oracle.graal.compiler.amd64.*;
 import com.oracle.graal.compiler.common.calc.*;
 import com.oracle.graal.compiler.common.type.*;
@@ -50,6 +46,8 @@
 import com.oracle.graal.nodes.extended.*;
 import com.oracle.graal.nodes.memory.*;
 import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.debug.*;
 import com.oracle.jvmci.hotspot.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotPatchReturnAddressOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotPatchReturnAddressOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,13 +22,13 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.meta.AllocatableValue;
 
 import static com.oracle.jvmci.amd64.AMD64.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotPushInterpreterFrameOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotPushInterpreterFrameOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,7 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.AllocatableValue;
 
@@ -29,7 +30,6 @@
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotReturnOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotReturnOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -24,10 +24,10 @@
 
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.BlockEndOp;
 import com.oracle.graal.lir.asm.*;
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.hotspot.*;
 import com.oracle.jvmci.meta.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotSafepointOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotSafepointOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,7 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.RegisterValue;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.InfopointReason;
@@ -31,11 +32,10 @@
 import com.oracle.jvmci.meta.AllocatableValue;
 import com.oracle.jvmci.meta.Kind;
 
-import static com.oracle.graal.asm.NumUtil.*;
 import static com.oracle.graal.compiler.common.GraalOptions.*;
 import static com.oracle.jvmci.amd64.AMD64.*;
+import static com.oracle.jvmci.asm.NumUtil.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotUnwindOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotUnwindOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -27,12 +27,12 @@
 import static com.oracle.jvmci.amd64.AMD64.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.hotspot.stubs.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.BlockEndOp;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.meta.*;
 
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotspotDirectStaticCallOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotspotDirectStaticCallOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -24,11 +24,11 @@
 
 import com.oracle.jvmci.meta.Value;
 import com.oracle.jvmci.meta.ResolvedJavaMethod;
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.AMD64Call.DirectCallOp;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.nodes.CallTargetNode.InvokeKind;
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.hotspot.*;
 
 /**
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotspotDirectVirtualCallOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotspotDirectVirtualCallOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -24,12 +24,12 @@
 
 import com.oracle.jvmci.meta.Value;
 import com.oracle.jvmci.meta.ResolvedJavaMethod;
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.AMD64Call.DirectCallOp;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.nodes.CallTargetNode.InvokeKind;
 import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.hotspot.*;
 
 /**
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64IndirectCallOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64IndirectCallOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -23,6 +23,7 @@
 package com.oracle.graal.hotspot.amd64;
 
 import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.Value;
 import com.oracle.jvmci.meta.ResolvedJavaMethod;
@@ -30,7 +31,6 @@
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.amd64.AMD64Call.IndirectCallOp;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64PrefetchOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64PrefetchOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -25,10 +25,10 @@
 
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.common.*;
 
 public final class AMD64PrefetchOp extends AMD64LIRInstruction {
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64TailcallOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64TailcallOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -24,10 +24,10 @@
 
 import static com.oracle.jvmci.code.ValueUtil.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.meta.*;
 
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCDeoptimizeOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCDeoptimizeOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -24,11 +24,11 @@
 
 import static com.oracle.graal.hotspot.HotSpotHostBackend.*;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.BlockEndOp;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.sparc.*;
+import com.oracle.jvmci.asm.sparc.*;
 
 @Opcode("DEOPT")
 final class SPARCDeoptimizeOp extends SPARCLIRInstruction implements BlockEndOp {
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotBackend.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotBackend.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,9 @@
  */
 package com.oracle.graal.hotspot.sparc;
 
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.asm.sparc.*;
+import com.oracle.jvmci.asm.sparc.SPARCMacroAssembler.*;
 import com.oracle.jvmci.code.CompilationResult;
 import com.oracle.jvmci.code.CallingConvention;
 import com.oracle.jvmci.code.StackSlot;
@@ -30,22 +33,18 @@
 import com.oracle.jvmci.meta.ResolvedJavaMethod;
 import com.oracle.jvmci.meta.JavaType;
 
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.Annul.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.BranchPredict.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.CC.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.ConditionFlag.*;
 import static com.oracle.jvmci.code.CallingConvention.Type.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.Annul.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.BranchPredict.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.CC.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.ConditionFlag.*;
 import static com.oracle.graal.compiler.common.GraalOptions.*;
 import static com.oracle.jvmci.common.UnsafeAccess.*;
 import static com.oracle.jvmci.sparc.SPARC.*;
 
 import java.util.*;
 
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.sparc.*;
-import com.oracle.graal.asm.sparc.SPARCMacroAssembler.ScratchRegister;
-import com.oracle.graal.asm.sparc.SPARCMacroAssembler.Setx;
 import com.oracle.graal.compiler.common.alloc.*;
 import com.oracle.graal.compiler.common.cfg.*;
 import com.oracle.graal.hotspot.*;
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotCRuntimeCallEpilogueOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotCRuntimeCallEpilogueOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,7 @@
  */
 package com.oracle.graal.hotspot.sparc;
 
+import com.oracle.jvmci.asm.sparc.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.Kind;
 import com.oracle.jvmci.meta.LIRKind;
@@ -30,7 +31,6 @@
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 import static com.oracle.jvmci.sparc.SPARC.*;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.sparc.*;
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotCRuntimeCallPrologueOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotCRuntimeCallPrologueOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,7 @@
  */
 package com.oracle.graal.hotspot.sparc;
 
+import com.oracle.jvmci.asm.sparc.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.Kind;
 import com.oracle.jvmci.meta.LIRKind;
@@ -30,7 +31,6 @@
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 import static com.oracle.jvmci.sparc.SPARC.*;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.sparc.*;
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotCounterOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotCounterOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,16 +22,16 @@
  */
 package com.oracle.graal.hotspot.sparc;
 
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.asm.sparc.*;
+import com.oracle.jvmci.asm.sparc.SPARCMacroAssembler.*;
 import com.oracle.jvmci.code.TargetDescription;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.Value;
-import static com.oracle.jvmci.code.ValueUtil.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.*;
 
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.sparc.*;
-import com.oracle.graal.asm.sparc.SPARCMacroAssembler.ScratchRegister;
-import com.oracle.graal.asm.sparc.SPARCMacroAssembler.Setx;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.*;
+import static com.oracle.jvmci.code.ValueUtil.*;
+
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.hotspot.meta.*;
 import com.oracle.graal.lir.*;
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotDeoptimizeCallerOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotDeoptimizeCallerOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,11 +22,12 @@
  */
 package com.oracle.graal.hotspot.sparc;
 
+import com.oracle.jvmci.asm.sparc.*;
+import com.oracle.jvmci.asm.sparc.SPARCMacroAssembler.*;
 import com.oracle.jvmci.code.Register;
+
 import static com.oracle.graal.hotspot.HotSpotHostBackend.*;
 
-import com.oracle.graal.asm.sparc.*;
-import com.oracle.graal.asm.sparc.SPARCMacroAssembler.ScratchRegister;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.sparc.*;
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotEnterUnpackFramesStackFrameOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotEnterUnpackFramesStackFrameOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,7 @@
  */
 package com.oracle.graal.hotspot.sparc;
 
+import com.oracle.jvmci.asm.sparc.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.AllocatableValue;
 
@@ -29,7 +30,6 @@
 import static com.oracle.jvmci.sparc.SPARC.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotJumpToExceptionHandlerInCallerOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotJumpToExceptionHandlerInCallerOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,9 @@
  */
 package com.oracle.graal.hotspot.sparc;
 
+import com.oracle.jvmci.asm.sparc.*;
+import com.oracle.jvmci.asm.sparc.SPARCAssembler.*;
+import com.oracle.jvmci.asm.sparc.SPARCMacroAssembler.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.AllocatableValue;
 
@@ -29,10 +32,6 @@
 import static com.oracle.jvmci.sparc.SPARC.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.sparc.*;
-import com.oracle.graal.asm.sparc.SPARCAssembler.CC;
-import com.oracle.graal.asm.sparc.SPARCAssembler.ConditionFlag;
-import com.oracle.graal.asm.sparc.SPARCMacroAssembler.ScratchRegister;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotJumpToExceptionHandlerOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotJumpToExceptionHandlerOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,12 +22,13 @@
  */
 package com.oracle.graal.hotspot.sparc;
 
+import com.oracle.jvmci.asm.sparc.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.AllocatableValue;
+
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.sparc.*;
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLeaveCurrentStackFrameOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLeaveCurrentStackFrameOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -24,10 +24,10 @@
 
 import static com.oracle.jvmci.sparc.SPARC.*;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.sparc.*;
+import com.oracle.jvmci.asm.sparc.*;
 
 /**
  * Pops the current frame off the stack.
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLeaveDeoptimizedStackFrameOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLeaveDeoptimizedStackFrameOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -24,10 +24,10 @@
 
 import static com.oracle.jvmci.sparc.SPARC.*;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.sparc.*;
+import com.oracle.jvmci.asm.sparc.*;
 
 /**
  * Pops the current frame off the stack including the return address.
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLeaveUnpackFramesStackFrameOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLeaveUnpackFramesStackFrameOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,11 +22,11 @@
  */
 package com.oracle.graal.hotspot.sparc;
 
+import com.oracle.jvmci.asm.sparc.*;
 import com.oracle.jvmci.code.Register;
 
 import static com.oracle.jvmci.sparc.SPARC.*;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotMove.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotMove.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,10 @@
  */
 package com.oracle.graal.hotspot.sparc;
 
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.asm.sparc.*;
+import com.oracle.jvmci.asm.sparc.SPARCAssembler.*;
+import com.oracle.jvmci.asm.sparc.SPARCMacroAssembler.*;
 import com.oracle.jvmci.code.StackSlot;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.JavaConstant;
@@ -33,14 +37,6 @@
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.sparc.*;
-import com.oracle.graal.asm.sparc.SPARCAssembler.Annul;
-import com.oracle.graal.asm.sparc.SPARCAssembler.BranchPredict;
-import com.oracle.graal.asm.sparc.SPARCAssembler.CC;
-import com.oracle.graal.asm.sparc.SPARCAssembler.ConditionFlag;
-import com.oracle.graal.asm.sparc.SPARCAssembler.RCondition;
-import com.oracle.graal.asm.sparc.SPARCMacroAssembler.ScratchRegister;
 import com.oracle.graal.compiler.common.*;
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.lir.*;
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotPatchReturnAddressOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotPatchReturnAddressOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,7 @@
  */
 package com.oracle.graal.hotspot.sparc;
 
+import com.oracle.jvmci.asm.sparc.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.AllocatableValue;
 
@@ -29,7 +30,6 @@
 import static com.oracle.jvmci.sparc.SPARC.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.sparc.*;
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotPushInterpreterFrameOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotPushInterpreterFrameOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,7 @@
  */
 package com.oracle.graal.hotspot.sparc;
 
+import com.oracle.jvmci.asm.sparc.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.AllocatableValue;
 
@@ -29,7 +30,6 @@
 import static com.oracle.jvmci.sparc.SPARC.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.sparc.*;
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotReturnOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotReturnOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -27,10 +27,10 @@
 
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.sparc.SPARCControlFlow.ReturnOp;
+import com.oracle.jvmci.asm.sparc.*;
 import com.oracle.jvmci.hotspot.*;
 
 /**
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotSafepointOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotSafepointOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,8 @@
  */
 package com.oracle.graal.hotspot.sparc;
 
+import com.oracle.jvmci.asm.sparc.*;
+import com.oracle.jvmci.asm.sparc.SPARCMacroAssembler.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.InfopointReason;
 import com.oracle.jvmci.code.RegisterValue;
@@ -30,8 +32,6 @@
 
 import static com.oracle.jvmci.sparc.SPARC.*;
 
-import com.oracle.graal.asm.sparc.*;
-import com.oracle.graal.asm.sparc.SPARCMacroAssembler.Setx;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.gen.*;
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotUnwindOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotUnwindOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,7 @@
  */
 package com.oracle.graal.hotspot.sparc;
 
+import com.oracle.jvmci.asm.sparc.*;
 import com.oracle.jvmci.code.ForeignCallLinkage;
 import com.oracle.jvmci.code.CallingConvention;
 import com.oracle.jvmci.code.Register;
@@ -32,7 +33,6 @@
 import static com.oracle.graal.hotspot.HotSpotBackend.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.hotspot.stubs.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotspotDirectStaticCallOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotspotDirectStaticCallOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -24,11 +24,11 @@
 
 import com.oracle.jvmci.meta.ResolvedJavaMethod;
 import com.oracle.jvmci.meta.Value;
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.sparc.SPARCCall.DirectCallOp;
 import com.oracle.graal.nodes.CallTargetNode.InvokeKind;
+import com.oracle.jvmci.asm.sparc.*;
 import com.oracle.jvmci.hotspot.*;
 
 /**
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotspotDirectVirtualCallOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotspotDirectVirtualCallOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,14 +22,14 @@
  */
 package com.oracle.graal.hotspot.sparc;
 
+import com.oracle.jvmci.asm.sparc.*;
+import com.oracle.jvmci.asm.sparc.SPARCMacroAssembler.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.ResolvedJavaMethod;
 import com.oracle.jvmci.meta.Value;
 
 import static com.oracle.jvmci.sparc.SPARC.*;
 
-import com.oracle.graal.asm.sparc.*;
-import com.oracle.graal.asm.sparc.SPARCMacroAssembler.Setx;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.sparc.SPARCCall.DirectCallOp;
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCIndirectCallOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCIndirectCallOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,7 @@
  */
 package com.oracle.graal.hotspot.sparc;
 
+import com.oracle.jvmci.asm.sparc.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.ResolvedJavaMethod;
 import com.oracle.jvmci.meta.Value;
@@ -30,7 +31,6 @@
 import static com.oracle.jvmci.sparc.SPARC.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.sparc.*;
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCPrefetchOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCPrefetchOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -25,10 +25,10 @@
 
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.sparc.*;
+import com.oracle.jvmci.asm.sparc.*;
 
 public final class SPARCPrefetchOp extends SPARCLIRInstruction {
     public static final LIRInstructionClass<SPARCPrefetchOp> TYPE = LIRInstructionClass.create(SPARCPrefetchOp.class);
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotCounterOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotCounterOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,16 +22,17 @@
  */
 package com.oracle.graal.hotspot;
 
+import com.oracle.jvmci.asm.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.TargetDescription;
 import com.oracle.jvmci.meta.JavaConstant;
 import com.oracle.jvmci.meta.Value;
 import com.oracle.jvmci.meta.Kind;
+
 import static com.oracle.jvmci.code.ValueUtil.*;
 
 import java.util.*;
 
-import com.oracle.graal.asm.*;
 import com.oracle.graal.hotspot.debug.*;
 import com.oracle.graal.hotspot.meta.*;
 import com.oracle.graal.lir.*;
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotInstructionProfiling.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotInstructionProfiling.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,13 +22,14 @@
  */
 package com.oracle.graal.hotspot;
 
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.asm.Assembler.*;
 import com.oracle.jvmci.code.TargetDescription;
 import com.oracle.jvmci.meta.JavaConstant;
 import com.oracle.jvmci.meta.Value;
+
 import java.util.*;
 
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.Assembler.InstructionCounter;
 import com.oracle.graal.compiler.common.cfg.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.BlockEndOp;
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/DimensionsNode.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/DimensionsNode.java	Wed Jun 03 18:06:44 2015 +0200
@@ -24,7 +24,8 @@
 
 import com.oracle.jvmci.code.VirtualStackSlot;
 import com.oracle.jvmci.meta.Value;
-import static com.oracle.graal.asm.NumUtil.*;
+
+import static com.oracle.jvmci.asm.NumUtil.*;
 
 import java.util.*;
 
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/arraycopy/UnsafeArrayCopySnippets.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/arraycopy/UnsafeArrayCopySnippets.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,10 +22,12 @@
  */
 package com.oracle.graal.hotspot.replacements.arraycopy;
 
+import com.oracle.jvmci.asm.*;
 import com.oracle.jvmci.code.TargetDescription;
 import com.oracle.jvmci.meta.NamedLocationIdentity;
 import com.oracle.jvmci.meta.LocationIdentity;
 import com.oracle.jvmci.meta.Kind;
+
 import static com.oracle.jvmci.meta.LocationIdentity.*;
 import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*;
 import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*;
@@ -33,7 +35,6 @@
 import static com.oracle.graal.replacements.SnippetTemplate.*;
 
 import com.oracle.graal.api.replacements.*;
-import com.oracle.graal.asm.*;
 import com.oracle.graal.hotspot.meta.*;
 import com.oracle.graal.hotspot.phases.*;
 import com.oracle.graal.nodes.extended.*;
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/DeoptimizationStub.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/DeoptimizationStub.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,9 +22,11 @@
  */
 package com.oracle.graal.hotspot.stubs;
 
+import com.oracle.jvmci.asm.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.TargetDescription;
 import com.oracle.jvmci.meta.ForeignCallDescriptor;
+
 import static com.oracle.graal.hotspot.HotSpotBackend.*;
 import static com.oracle.graal.hotspot.HotSpotBackend.Options.*;
 import static com.oracle.graal.hotspot.nodes.DeoptimizationFetchUnrollInfoCallNode.*;
@@ -32,7 +34,6 @@
 import static com.oracle.graal.hotspot.stubs.UncommonTrapStub.*;
 
 import com.oracle.graal.api.replacements.*;
-import com.oracle.graal.asm.*;
 import com.oracle.graal.graph.Node.ConstantNodeParameter;
 import com.oracle.graal.graph.Node.NodeIntrinsic;
 import com.oracle.graal.hotspot.*;
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/UncommonTrapStub.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/UncommonTrapStub.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,17 +22,18 @@
  */
 package com.oracle.graal.hotspot.stubs;
 
+import com.oracle.jvmci.asm.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.TargetDescription;
 import com.oracle.jvmci.meta.NamedLocationIdentity;
 import com.oracle.jvmci.meta.LocationIdentity;
 import com.oracle.jvmci.meta.ForeignCallDescriptor;
+
 import static com.oracle.graal.hotspot.HotSpotBackend.*;
 import static com.oracle.graal.hotspot.HotSpotBackend.Options.*;
 import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*;
 
 import com.oracle.graal.api.replacements.*;
-import com.oracle.graal.asm.*;
 import com.oracle.graal.graph.Node.ConstantNodeParameter;
 import com.oracle.graal.graph.Node.NodeIntrinsic;
 import com.oracle.graal.hotspot.*;
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64AddressValue.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64AddressValue.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,18 +22,19 @@
  */
 package com.oracle.graal.lir.amd64;
 
+import com.oracle.jvmci.asm.amd64.*;
+import com.oracle.jvmci.asm.amd64.AMD64Address.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.RegisterValue;
 import com.oracle.jvmci.meta.AllocatableValue;
 import com.oracle.jvmci.meta.LIRKind;
 import com.oracle.jvmci.meta.Value;
+
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
 import java.util.*;
 
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.asm.amd64.AMD64Address.Scale;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.LIRInstruction.OperandFlag;
 import com.oracle.graal.lir.LIRInstruction.OperandMode;
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Arithmetic.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Arithmetic.java	Wed Jun 03 18:06:44 2015 +0200
@@ -23,15 +23,15 @@
 package com.oracle.graal.lir.amd64;
 
 import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.asm.amd64.*;
+import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.meta.Kind;
 import com.oracle.jvmci.meta.AllocatableValue;
 import com.oracle.jvmci.meta.LIRKind;
 
 import static com.oracle.jvmci.code.ValueUtil.*;
 
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.asm.amd64.AMD64Assembler.ConditionFlag;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ArrayEqualsOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ArrayEqualsOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -24,6 +24,10 @@
 
 import com.oracle.jvmci.amd64.*;
 import com.oracle.jvmci.amd64.AMD64.*;
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.asm.amd64.*;
+import com.oracle.jvmci.asm.amd64.AMD64Address.*;
+import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.TargetDescription;
 import com.oracle.jvmci.meta.Kind;
@@ -36,10 +40,6 @@
 
 import java.lang.reflect.*;
 
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.asm.amd64.AMD64Address.Scale;
-import com.oracle.graal.asm.amd64.AMD64Assembler.ConditionFlag;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.gen.*;
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Binary.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Binary.java	Wed Jun 03 18:06:44 2015 +0200
@@ -25,17 +25,14 @@
 import com.oracle.jvmci.meta.AllocatableValue;
 import com.oracle.jvmci.meta.Value;
 import com.oracle.jvmci.meta.JavaConstant;
+
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.asm.amd64.*;
+import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.code.CompilationResult.DataSectionReference;
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MIOp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMIOp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.ImplicitNullCheck;
 import com.oracle.graal.lir.asm.*;
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64BinaryConsumer.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64BinaryConsumer.java	Wed Jun 03 18:06:44 2015 +0200
@@ -25,17 +25,14 @@
 import com.oracle.jvmci.meta.Constant;
 import com.oracle.jvmci.meta.AllocatableValue;
 import com.oracle.jvmci.meta.Value;
+
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.asm.amd64.*;
+import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.code.CompilationResult.DataSectionReference;
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MIOp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MROp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.ImplicitNullCheck;
 import com.oracle.graal.lir.asm.*;
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64BreakpointOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64BreakpointOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,10 +22,11 @@
  */
 package com.oracle.graal.lir.amd64;
 
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.meta.Value;
+
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ByteSwapOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ByteSwapOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,9 +22,9 @@
  */
 package com.oracle.graal.lir.amd64;
 
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.ValueUtil;
 import com.oracle.jvmci.meta.Value;
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64CCall.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64CCall.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,12 +22,13 @@
  */
 package com.oracle.graal.lir.amd64;
 
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.ValueUtil;
 import com.oracle.jvmci.meta.Value;
+
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Call.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Call.java	Wed Jun 03 18:06:44 2015 +0200
@@ -23,6 +23,8 @@
 package com.oracle.graal.lir.amd64;
 
 import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.asm.amd64.*;
+import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.RegisterValue;
 import com.oracle.jvmci.code.ValueUtil;
@@ -37,8 +39,6 @@
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.amd64.AMD64Assembler.ConditionFlag;
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ClearRegisterOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ClearRegisterOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,14 +22,14 @@
  */
 package com.oracle.graal.lir.amd64;
 
+import com.oracle.jvmci.asm.amd64.*;
+import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.meta.AllocatableValue;
+
+import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
-import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ControlFlow.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ControlFlow.java	Wed Jun 03 18:06:44 2015 +0200
@@ -23,6 +23,10 @@
 package com.oracle.graal.lir.amd64;
 
 import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.asm.amd64.*;
+import com.oracle.jvmci.asm.amd64.AMD64Address.*;
+import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.Kind;
 import com.oracle.jvmci.meta.AllocatableValue;
@@ -33,10 +37,6 @@
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
 import com.oracle.jvmci.code.CompilationResult.JumpTable;
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.asm.amd64.AMD64Address.Scale;
-import com.oracle.graal.asm.amd64.AMD64Assembler.ConditionFlag;
 import com.oracle.graal.compiler.common.calc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.BlockEndOp;
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64FrameMap.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64FrameMap.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,13 +22,15 @@
  */
 package com.oracle.graal.lir.amd64;
 
+import com.oracle.jvmci.asm.*;
 import com.oracle.jvmci.code.CodeCacheProvider;
 import com.oracle.jvmci.code.StackSlot;
 import com.oracle.jvmci.code.RegisterConfig;
 import com.oracle.jvmci.meta.Kind;
 import com.oracle.jvmci.meta.LIRKind;
+
 import static com.oracle.jvmci.code.ValueUtil.*;
-import com.oracle.graal.asm.*;
+
 import com.oracle.graal.lir.framemap.*;
 
 /**
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64LIRInstruction.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64LIRInstruction.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,9 +22,9 @@
  */
 package com.oracle.graal.lir.amd64;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
+import com.oracle.jvmci.asm.amd64.*;
 
 /**
  * Convenience class to provide AMD64MacroAssembler for the {@link #emitCode} method.
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64MathIntrinsicOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64MathIntrinsicOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -23,11 +23,12 @@
 package com.oracle.graal.lir.amd64;
 
 import com.oracle.jvmci.meta.Value;
+
 import static com.oracle.jvmci.code.ValueUtil.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.common.*;
 
 // @formatter:off
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java	Wed Jun 03 18:06:44 2015 +0200
@@ -23,6 +23,9 @@
 package com.oracle.graal.lir.amd64;
 
 import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.asm.amd64.*;
+import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.StackSlotValue;
 import com.oracle.jvmci.meta.Kind;
@@ -35,10 +38,6 @@
 import static java.lang.Double.*;
 import static java.lang.Float.*;
 
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MIOp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.MoveOp;
 import com.oracle.graal.lir.StandardOp.NullCheck;
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64MulDivOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64MulDivOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -23,17 +23,16 @@
 package com.oracle.graal.lir.amd64;
 
 import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.asm.amd64.*;
+import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.meta.AllocatableValue;
 import com.oracle.jvmci.meta.LIRKind;
 import com.oracle.jvmci.meta.Value;
 
+import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64MOp.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
-import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MOp.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MOp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64RestoreRegistersOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64RestoreRegistersOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,17 +22,18 @@
  */
 package com.oracle.graal.lir.amd64;
 
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.RegisterValue;
 import com.oracle.jvmci.code.ValueUtil;
 import com.oracle.jvmci.code.StackSlotValue;
 import com.oracle.jvmci.code.StackSlot;
+
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
 import java.util.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64SaveRegistersOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64SaveRegistersOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,18 +22,19 @@
  */
 package com.oracle.graal.lir.amd64;
 
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.RegisterValue;
 import com.oracle.jvmci.code.ValueUtil;
 import com.oracle.jvmci.code.RegisterSaveLayout;
 import com.oracle.jvmci.code.StackSlotValue;
 import com.oracle.jvmci.code.StackSlot;
+
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
 import java.util.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.SaveRegistersOp;
 import com.oracle.graal.lir.asm.*;
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ShiftOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ShiftOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -23,14 +23,13 @@
 package com.oracle.graal.lir.amd64;
 
 import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.asm.amd64.*;
+import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.meta.AllocatableValue;
 
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MOp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64SignExtendOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64SignExtendOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -23,15 +23,15 @@
 package com.oracle.graal.lir.amd64;
 
 import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.asm.amd64.*;
+import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.meta.AllocatableValue;
 import com.oracle.jvmci.meta.LIRKind;
 
+import static com.oracle.jvmci.asm.amd64.AMD64Assembler.OperandSize.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
-import static com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Unary.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Unary.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,16 +22,14 @@
  */
 package com.oracle.graal.lir.amd64;
 
+import com.oracle.jvmci.asm.amd64.*;
+import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.meta.AllocatableValue;
 import com.oracle.jvmci.meta.Value;
+
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MOp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MROp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp;
-import com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.ImplicitNullCheck;
 import com.oracle.graal.lir.asm.*;
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ZapRegistersOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ZapRegistersOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,16 +22,17 @@
  */
 package com.oracle.graal.lir.amd64;
 
+import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.RegisterValue;
 import com.oracle.jvmci.code.RegisterSaveLayout;
 import com.oracle.jvmci.meta.JavaConstant;
+
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 import static com.oracle.graal.lir.amd64.AMD64SaveRegistersOp.*;
 
 import java.util.*;
 
-import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.SaveRegistersOp;
 import com.oracle.graal.lir.asm.*;
--- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCAddressValue.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCAddressValue.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,17 +22,18 @@
  */
 package com.oracle.graal.lir.sparc;
 
+import com.oracle.jvmci.asm.sparc.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.RegisterValue;
 import com.oracle.jvmci.meta.LIRKind;
 import com.oracle.jvmci.meta.Value;
 import com.oracle.jvmci.meta.AllocatableValue;
+
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
 import java.util.*;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.LIRInstruction.OperandFlag;
 import com.oracle.graal.lir.LIRInstruction.OperandMode;
--- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCArithmetic.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCArithmetic.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,9 @@
  */
 package com.oracle.graal.lir.sparc;
 
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.asm.sparc.*;
+import com.oracle.jvmci.asm.sparc.SPARCMacroAssembler.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.LIRKind;
 import com.oracle.jvmci.meta.Kind;
@@ -30,21 +33,17 @@
 import com.oracle.jvmci.meta.JavaConstant;
 import com.oracle.jvmci.sparc.*;
 
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.Annul.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.BranchPredict.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.CC.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.ConditionFlag.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.Opfs.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.jvmci.sparc.SPARC.*;
 import static com.oracle.jvmci.sparc.SPARC.CPUFeature.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.Annul.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.BranchPredict.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.CC.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.ConditionFlag.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.Opfs.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.sparc.*;
-import com.oracle.graal.asm.sparc.SPARCMacroAssembler.ScratchRegister;
-import com.oracle.graal.asm.sparc.SPARCMacroAssembler.Setx;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.gen.*;
--- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCArrayEqualsOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCArrayEqualsOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,18 +22,21 @@
  */
 package com.oracle.graal.lir.sparc;
 
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.asm.sparc.*;
+import com.oracle.jvmci.asm.sparc.SPARCAssembler.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.LIRKind;
 import com.oracle.jvmci.meta.Value;
 import com.oracle.jvmci.meta.Kind;
 import com.oracle.jvmci.sparc.SPARC.*;
 
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.Annul.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.BranchPredict.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.CC.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.ConditionFlag.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.RCondition.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.Annul.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.BranchPredict.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.CC.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.ConditionFlag.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.RCondition.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 import static com.oracle.jvmci.common.UnsafeAccess.*;
 import static com.oracle.jvmci.sparc.SPARC.*;
@@ -41,10 +44,6 @@
 
 import java.lang.reflect.*;
 
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.sparc.*;
-import com.oracle.graal.asm.sparc.SPARCAssembler.CC;
-import com.oracle.graal.asm.sparc.SPARCAssembler.ConditionFlag;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.gen.*;
--- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCBitManipulationOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCBitManipulationOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,18 +22,18 @@
  */
 package com.oracle.graal.lir.sparc;
 
+import com.oracle.jvmci.asm.sparc.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.LIRKind;
 import com.oracle.jvmci.meta.Kind;
 import com.oracle.jvmci.meta.Value;
 import com.oracle.jvmci.meta.AllocatableValue;
 
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.jvmci.sparc.SPARC.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.gen.*;
--- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCBreakpointOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCBreakpointOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,10 +22,11 @@
  */
 package com.oracle.graal.lir.sparc;
 
+import com.oracle.jvmci.asm.sparc.*;
 import com.oracle.jvmci.meta.Value;
+
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 
--- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCByteSwapOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCByteSwapOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,17 +22,18 @@
  */
 package com.oracle.graal.lir.sparc;
 
+import com.oracle.jvmci.asm.sparc.*;
+import com.oracle.jvmci.asm.sparc.SPARCAssembler.*;
 import com.oracle.jvmci.code.ValueUtil;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.StackSlotValue;
 import com.oracle.jvmci.meta.LIRKind;
 import com.oracle.jvmci.meta.Kind;
 import com.oracle.jvmci.meta.Value;
+
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.sparc.*;
-import com.oracle.graal.asm.sparc.SPARCAssembler.Asi;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.gen.*;
--- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCCall.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCCall.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,8 @@
  */
 package com.oracle.graal.lir.sparc;
 
+import com.oracle.jvmci.asm.sparc.*;
+import com.oracle.jvmci.asm.sparc.SPARCMacroAssembler.*;
 import com.oracle.jvmci.code.ForeignCallLinkage;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.ResolvedJavaMethod;
@@ -32,8 +34,6 @@
 import static com.oracle.jvmci.sparc.SPARC.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.sparc.*;
-import com.oracle.graal.asm.sparc.SPARCMacroAssembler.Sethix;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.jvmci.common.*;
--- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCCompare.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCCompare.java	Wed Jun 03 18:06:44 2015 +0200
@@ -25,16 +25,17 @@
 import com.oracle.jvmci.meta.Kind;
 import com.oracle.jvmci.meta.Value;
 import com.oracle.jvmci.meta.JavaConstant;
+
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.CC.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.Opfs.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.CC.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.Opfs.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.sparc.SPARCControlFlow.CompareBranchOp;
+import com.oracle.jvmci.asm.sparc.*;
 import com.oracle.jvmci.common.*;
 
 public enum SPARCCompare {
--- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCControlFlow.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCControlFlow.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,31 +22,30 @@
  */
 package com.oracle.graal.lir.sparc;
 
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.asm.Assembler.*;
+import com.oracle.jvmci.asm.sparc.*;
+import com.oracle.jvmci.asm.sparc.SPARCAssembler.CC;
+import com.oracle.jvmci.asm.sparc.SPARCAssembler.ConditionFlag;
+import com.oracle.jvmci.asm.sparc.SPARCAssembler.*;
+import com.oracle.jvmci.asm.sparc.SPARCMacroAssembler.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.Kind;
 import com.oracle.jvmci.meta.Value;
 import com.oracle.jvmci.meta.JavaConstant;
 import com.oracle.jvmci.sparc.SPARC.*;
 
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.Annul.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.BranchPredict.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.CC.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.ConditionFlag.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.jvmci.sparc.SPARC.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.Annul.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.BranchPredict.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.CC.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.ConditionFlag.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
 import java.util.*;
 
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.Assembler.LabelHint;
-import com.oracle.graal.asm.sparc.*;
-import com.oracle.graal.asm.sparc.SPARCAssembler.BranchPredict;
-import com.oracle.graal.asm.sparc.SPARCAssembler.CC;
-import com.oracle.graal.asm.sparc.SPARCAssembler.ConditionFlag;
-import com.oracle.graal.asm.sparc.SPARCMacroAssembler.ScratchRegister;
-import com.oracle.graal.asm.sparc.SPARCMacroAssembler.Setx;
 import com.oracle.graal.compiler.common.calc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.BlockEndOp;
@@ -111,7 +110,7 @@
             this.unorderedIsTrue = unorderedIsTrue;
             this.trueDestinationProbability = trueDestinationProbability;
             CC conditionCodeReg = CC.forKind(kind);
-            conditionFlag = ConditionFlag.fromCondtition(conditionCodeReg, condition, unorderedIsTrue);
+            conditionFlag = fromCondition(conditionCodeReg, condition, unorderedIsTrue);
         }
 
         @Override
@@ -437,14 +436,14 @@
                             break;
                         }
                         case Object: {
-                            conditionCode = CC.Ptrcc;
+                            conditionCode = crb.codeCache.getTarget().wordKind == Kind.Long ? CC.Xcc : CC.Icc;
                             scratchRegister = asObjectReg(scratch);
                             break;
                         }
                         default:
                             throw new JVMCIError("switch only supported for int, long and object");
                     }
-                    ConditionFlag conditionFlag = ConditionFlag.fromCondtition(conditionCode, condition, false);
+                    ConditionFlag conditionFlag = fromCondition(conditionCode, condition, false);
                     masm.cmp(keyRegister, scratchRegister);
                     masm.bpcc(conditionFlag, NOT_ANNUL, target, conditionCode, PREDICT_TAKEN);
                     masm.nop();  // delay slot
@@ -606,4 +605,54 @@
                 throw JVMCIError.shouldNotReachHere();
         }
     }
+
+    public static ConditionFlag fromCondition(CC conditionFlagsRegister, Condition cond, boolean unorderedIsTrue) {
+        switch (conditionFlagsRegister) {
+            case Xcc:
+            case Icc:
+                switch (cond) {
+                    case EQ:
+                        return Equal;
+                    case NE:
+                        return NotEqual;
+                    case BT:
+                        return LessUnsigned;
+                    case LT:
+                        return Less;
+                    case BE:
+                        return LessEqualUnsigned;
+                    case LE:
+                        return LessEqual;
+                    case AE:
+                        return GreaterEqualUnsigned;
+                    case GE:
+                        return GreaterEqual;
+                    case AT:
+                        return GreaterUnsigned;
+                    case GT:
+                        return Greater;
+                }
+                throw JVMCIError.shouldNotReachHere("Unimplemented for: " + cond);
+            case Fcc0:
+            case Fcc1:
+            case Fcc2:
+            case Fcc3:
+                switch (cond) {
+                    case EQ:
+                        return unorderedIsTrue ? F_UnorderedOrEqual : F_Equal;
+                    case NE:
+                        return ConditionFlag.F_NotEqual;
+                    case LT:
+                        return unorderedIsTrue ? F_UnorderedOrLess : F_Less;
+                    case LE:
+                        return unorderedIsTrue ? F_UnorderedOrLessOrEqual : F_LessOrEqual;
+                    case GE:
+                        return unorderedIsTrue ? F_UnorderedGreaterOrEqual : F_GreaterOrEqual;
+                    case GT:
+                        return unorderedIsTrue ? F_UnorderedOrGreater : F_Greater;
+                }
+                throw JVMCIError.shouldNotReachHere("Unkown condition: " + cond);
+        }
+        throw JVMCIError.shouldNotReachHere("Unknown condition flag register " + conditionFlagsRegister);
+    }
 }
--- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCDelayedControlTransfer.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCDelayedControlTransfer.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,9 +22,9 @@
  */
 package com.oracle.graal.lir.sparc;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
+import com.oracle.jvmci.asm.sparc.*;
 
 /**
  * This interface is used for {@link LIRInstruction}s which provide a delay slot for one instruction
--- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCFrameMap.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCFrameMap.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,13 +22,13 @@
  */
 package com.oracle.graal.lir.sparc;
 
+import com.oracle.jvmci.asm.*;
 import com.oracle.jvmci.code.RegisterConfig;
 import com.oracle.jvmci.code.CodeCacheProvider;
 import com.oracle.jvmci.code.StackSlot;
 import com.oracle.jvmci.meta.LIRKind;
 import com.oracle.jvmci.meta.Kind;
 import com.oracle.jvmci.sparc.*;
-import com.oracle.graal.asm.*;
 import com.oracle.graal.lir.framemap.*;
 
 /**
--- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCJumpOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCJumpOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,13 +22,13 @@
  */
 package com.oracle.graal.lir.sparc;
 
-import static com.oracle.graal.asm.sparc.SPARCAssembler.Annul.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.Annul.*;
 
-import com.oracle.graal.asm.sparc.SPARCAssembler.ConditionFlag;
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.JumpOp;
 import com.oracle.graal.lir.asm.*;
+import com.oracle.jvmci.asm.sparc.*;
+import com.oracle.jvmci.asm.sparc.SPARCAssembler.*;
 
 public final class SPARCJumpOp extends JumpOp implements SPARCDelayedControlTransfer {
     public static final LIRInstructionClass<SPARCJumpOp> TYPE = LIRInstructionClass.create(SPARCJumpOp.class);
--- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCLIRInstruction.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCLIRInstruction.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,9 +22,9 @@
  */
 package com.oracle.graal.lir.sparc;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
+import com.oracle.jvmci.asm.sparc.*;
 
 /**
  * Convenience class to provide SPARCMacroAssembler for the {@link #emitCode} method.
--- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCMathIntrinsicOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCMathIntrinsicOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -24,11 +24,12 @@
 
 import com.oracle.jvmci.meta.Kind;
 import com.oracle.jvmci.meta.Value;
+
 import static com.oracle.jvmci.code.ValueUtil.*;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
+import com.oracle.jvmci.asm.sparc.*;
 import com.oracle.jvmci.common.*;
 
 public final class SPARCMathIntrinsicOp extends SPARCLIRInstruction implements SPARCTailDelayedLIRInstruction {
--- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCMove.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCMove.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,8 @@
  */
 package com.oracle.graal.lir.sparc;
 
+import com.oracle.jvmci.asm.sparc.*;
+import com.oracle.jvmci.asm.sparc.SPARCMacroAssembler.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.StackSlotValue;
 import com.oracle.jvmci.code.StackSlot;
@@ -34,15 +36,12 @@
 import com.oracle.jvmci.sparc.*;
 import com.oracle.jvmci.sparc.SPARC.*;
 
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.jvmci.meta.Kind.*;
 import static com.oracle.jvmci.sparc.SPARC.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.sparc.*;
-import com.oracle.graal.asm.sparc.SPARCMacroAssembler.ScratchRegister;
-import com.oracle.graal.asm.sparc.SPARCMacroAssembler.Setx;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.ImplicitNullCheck;
 import com.oracle.graal.lir.StandardOp.MoveOp;
--- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCSaveRegistersOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCSaveRegistersOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,7 @@
  */
 package com.oracle.graal.lir.sparc;
 
+import com.oracle.jvmci.asm.sparc.*;
 import com.oracle.jvmci.code.ValueUtil;
 import com.oracle.jvmci.code.RegisterSaveLayout;
 import com.oracle.jvmci.code.Register;
@@ -35,7 +36,6 @@
 
 import java.util.*;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.SaveRegistersOp;
 import com.oracle.graal.lir.asm.*;
--- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCTailDelayedLIRInstruction.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCTailDelayedLIRInstruction.java	Wed Jun 03 18:06:44 2015 +0200
@@ -23,6 +23,7 @@
 package com.oracle.graal.lir.sparc;
 
 import com.oracle.graal.lir.*;
+import com.oracle.jvmci.asm.sparc.*;
 
 /**
  * Implementors of this interface are able to place its last instruction into the delay slot of a
@@ -33,14 +34,14 @@
  * <ol>
  * <li>Emit everything up to the second last instruction.</li>
  * <li>Call
- * {@link SPARCDelayedControlTransfer#emitControlTransfer(com.oracle.graal.lir.asm.CompilationResultBuilder, com.oracle.graal.asm.sparc.SPARCMacroAssembler)}
+ * {@link SPARCDelayedControlTransfer#emitControlTransfer(com.oracle.graal.lir.asm.CompilationResultBuilder, SPARCMacroAssembler)}
  * to let the DelayedControlTransfer instruction emit its own code (But must not stuff the delay
  * slot with Nop)</li>
  * <li>emit the last instruction for this {@link LIRInstruction}</li>
  * </ol>
  *
  * Note: If this instruction decides not to use the delay slot, it can skip the call of
- * {@link SPARCDelayedControlTransfer#emitControlTransfer(com.oracle.graal.lir.asm.CompilationResultBuilder, com.oracle.graal.asm.sparc.SPARCMacroAssembler)}
+ * {@link SPARCDelayedControlTransfer#emitControlTransfer(com.oracle.graal.lir.asm.CompilationResultBuilder, SPARCMacroAssembler)}
  * . The DelayedControlTransfer instruction will emit the code just with Nop in the delay slot.
  */
 public interface SPARCTailDelayedLIRInstruction {
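The ordered list in the javadoc above prescribes a three-step emission protocol for instructions that can place their last instruction in the delay slot of a preceding control transfer. A sketch of how an implementor might follow it (the emit* helpers and the delaySlotHolder field are placeholders for illustration, not part of this interface):

    // Hypothetical emitCode of an op implementing SPARCTailDelayedLIRInstruction.
    public void emitCode(CompilationResultBuilder crb, SPARCMacroAssembler masm) {
        emitAllButLastInstruction(crb, masm);              // step 1 (placeholder helper)
        delaySlotHolder.emitControlTransfer(crb, masm);    // step 2: transfer emits its own code
        emitLastInstruction(crb, masm);                    // step 3: ends up in the delay slot
    }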
--- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCTestOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCTestOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -28,9 +28,9 @@
 import static com.oracle.jvmci.sparc.SPARC.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
-import com.oracle.graal.asm.sparc.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
+import com.oracle.jvmci.asm.sparc.*;
 import com.oracle.jvmci.common.*;
 
 public class SPARCTestOp extends SPARCLIRInstruction {
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/LabelRef.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/LabelRef.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,10 +22,10 @@
  */
 package com.oracle.graal.lir;
 
-import com.oracle.graal.asm.*;
 import com.oracle.graal.compiler.common.cfg.*;
 import com.oracle.graal.lir.StandardOp.BranchOp;
 import com.oracle.graal.lir.StandardOp.JumpOp;
+import com.oracle.jvmci.asm.*;
 
 /**
  * LIR instructions such as {@link JumpOp} and {@link BranchOp} need to reference their target
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/StandardOp.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/StandardOp.java	Wed Jun 03 18:06:44 2015 +0200
@@ -27,10 +27,10 @@
 
 import java.util.*;
 
-import com.oracle.graal.asm.*;
 import com.oracle.graal.compiler.common.cfg.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.framemap.*;
+import com.oracle.jvmci.asm.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.common.*;
 import com.oracle.jvmci.meta.*;
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/SwitchStrategy.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/SwitchStrategy.java	Wed Jun 03 18:06:44 2015 +0200
@@ -24,9 +24,9 @@
 
 import java.util.*;
 
-import com.oracle.graal.asm.*;
 import com.oracle.graal.compiler.common.calc.*;
 import com.oracle.graal.lir.asm.*;
+import com.oracle.jvmci.asm.*;
 import com.oracle.jvmci.meta.*;
 
 /**
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/asm/CompilationResultBuilder.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/asm/CompilationResultBuilder.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,7 @@
  */
 package com.oracle.graal.lir.asm;
 
+import com.oracle.jvmci.asm.*;
 import com.oracle.jvmci.code.CompilationResult;
 import com.oracle.jvmci.code.StackSlot;
 import com.oracle.jvmci.code.TargetDescription;
@@ -36,6 +37,7 @@
 import com.oracle.jvmci.meta.JavaConstant;
 import com.oracle.jvmci.meta.Value;
 import com.oracle.jvmci.meta.InvokeTarget;
+
 import static com.oracle.jvmci.code.ValueUtil.*;
 
 import java.util.*;
@@ -44,7 +46,6 @@
 import com.oracle.jvmci.code.CompilationResult.DataSectionReference;
 import com.oracle.jvmci.code.DataSection.Data;
 import com.oracle.jvmci.code.DataSection.DataBuilder;
-import com.oracle.graal.asm.*;
 import com.oracle.graal.compiler.common.cfg.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.framemap.*;
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/asm/CompilationResultBuilderFactory.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/asm/CompilationResultBuilderFactory.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,10 +22,10 @@
  */
 package com.oracle.graal.lir.asm;
 
+import com.oracle.jvmci.asm.*;
 import com.oracle.jvmci.code.CompilationResult;
 import com.oracle.jvmci.code.CodeCacheProvider;
 import com.oracle.jvmci.code.ForeignCallsProvider;
-import com.oracle.graal.asm.*;
 import com.oracle.graal.lir.framemap.*;
 
 /**
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/framemap/FrameMap.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/framemap/FrameMap.java	Wed Jun 03 18:06:44 2015 +0200
@@ -26,7 +26,7 @@
 
 import java.util.*;
 
-import com.oracle.graal.asm.*;
+import com.oracle.jvmci.asm.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.meta.*;
 
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/gen/LIRGenerator.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/gen/LIRGenerator.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,6 +22,7 @@
  */
 package com.oracle.graal.lir.gen;
 
+import com.oracle.jvmci.asm.*;
 import com.oracle.jvmci.code.ForeignCallLinkage;
 import com.oracle.jvmci.code.CallingConvention;
 import com.oracle.jvmci.code.RegisterAttributes;
@@ -38,12 +39,12 @@
 import com.oracle.jvmci.meta.LIRKind;
 import com.oracle.jvmci.meta.AllocatableValue;
 import com.oracle.jvmci.meta.MetaAccessProvider;
+
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRValueUtil.*;
 
 import java.util.*;
 
-import com.oracle.graal.asm.*;
 import com.oracle.graal.compiler.common.calc.*;
 import com.oracle.graal.compiler.common.cfg.*;
 import com.oracle.graal.compiler.common.spi.*;
--- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/DefaultJavaLoweringProvider.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/DefaultJavaLoweringProvider.java	Wed Jun 03 18:06:44 2015 +0200
@@ -31,7 +31,6 @@
 import java.util.*;
 
 import com.oracle.graal.api.replacements.*;
-import com.oracle.graal.asm.*;
 import com.oracle.graal.compiler.common.type.*;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.nodes.*;
@@ -46,6 +45,7 @@
 import com.oracle.graal.nodes.util.*;
 import com.oracle.graal.nodes.virtual.*;
 import com.oracle.graal.phases.util.*;
+import com.oracle.jvmci.asm.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.common.*;
 import com.oracle.jvmci.meta.*;
--- a/graal/com.oracle.graal.truffle.hotspot.amd64/src/com/oracle/graal/truffle/hotspot/amd64/AMD64OptimizedCallTargetInstrumentationFactory.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.truffle.hotspot.amd64/src/com/oracle/graal/truffle/hotspot/amd64/AMD64OptimizedCallTargetInstrumentationFactory.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,15 +22,15 @@
  */
 package com.oracle.graal.truffle.hotspot.amd64;
 
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.asm.amd64.AMD64Assembler.ConditionFlag;
 import com.oracle.graal.hotspot.meta.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.framemap.*;
 import com.oracle.graal.truffle.*;
 import com.oracle.graal.truffle.hotspot.*;
 import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.asm.amd64.*;
+import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.code.CallingConvention.Type;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.hotspot.*;
--- a/graal/com.oracle.graal.truffle.hotspot.sparc/src/com/oracle/graal/truffle/hotspot/sparc/SPARCOptimizedCallTargetInstumentationFactory.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.truffle.hotspot.sparc/src/com/oracle/graal/truffle/hotspot/sparc/SPARCOptimizedCallTargetInstumentationFactory.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,22 +22,22 @@
  */
 package com.oracle.graal.truffle.hotspot.sparc;
 
-import static com.oracle.graal.asm.sparc.SPARCAssembler.Annul.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.BranchPredict.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.CC.*;
-import static com.oracle.graal.asm.sparc.SPARCAssembler.ConditionFlag.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.Annul.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.BranchPredict.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.CC.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.ConditionFlag.*;
 import static com.oracle.jvmci.code.CallingConvention.Type.*;
 import static com.oracle.jvmci.meta.Kind.*;
 import static com.oracle.jvmci.sparc.SPARC.CPUFeature.*;
 
-import com.oracle.graal.asm.*;
-import com.oracle.graal.asm.sparc.*;
-import com.oracle.graal.asm.sparc.SPARCMacroAssembler.ScratchRegister;
 import com.oracle.graal.hotspot.meta.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.framemap.*;
 import com.oracle.graal.truffle.*;
 import com.oracle.graal.truffle.hotspot.*;
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.asm.sparc.*;
+import com.oracle.jvmci.asm.sparc.SPARCMacroAssembler.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.hotspot.*;
 import com.oracle.jvmci.meta.*;
--- a/graal/com.oracle.graal.truffle.hotspot/src/com/oracle/graal/truffle/hotspot/OptimizedCallTargetInstrumentation.java	Wed Jun 03 17:12:05 2015 +0200
+++ b/graal/com.oracle.graal.truffle.hotspot/src/com/oracle/graal/truffle/hotspot/OptimizedCallTargetInstrumentation.java	Wed Jun 03 18:06:44 2015 +0200
@@ -22,13 +22,14 @@
  */
 package com.oracle.graal.truffle.hotspot;
 
+import com.oracle.jvmci.asm.*;
 import com.oracle.jvmci.code.CodeCacheProvider;
 import com.oracle.jvmci.code.CompilationResult;
 import com.oracle.jvmci.code.ForeignCallsProvider;
+
 import java.lang.reflect.*;
 
 import com.oracle.jvmci.code.CompilationResult.Mark;
-import com.oracle.graal.asm.*;
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.hotspot.meta.*;
 import com.oracle.graal.lir.asm.*;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.jvmci.asm.amd64/src/com/oracle/jvmci/asm/amd64/AMD64Address.java	Wed Jun 03 18:06:44 2015 +0200
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.jvmci.asm.amd64;
+
+import com.oracle.jvmci.code.Register;
+import com.oracle.jvmci.code.AbstractAddress;
+
+/**
+ * Represents an address in target machine memory, specified via some combination of a base
+ * register, an index register, a displacement and a scale. Note that the base and index registers
+ * may each be a variable that will get a register assigned later by the register allocator.
+ */
+public final class AMD64Address extends AbstractAddress {
+
+    private final Register base;
+    private final Register index;
+    private final Scale scale;
+    private final int displacement;
+
+    /**
+     * Creates an {@link AMD64Address} with given base register, no scaling and no displacement.
+     *
+     * @param base the base register
+     */
+    public AMD64Address(Register base) {
+        this(base, Register.None, Scale.Times1, 0);
+    }
+
+    /**
+     * Creates an {@link AMD64Address} with given base register, no scaling and a given
+     * displacement.
+     *
+     * @param base the base register
+     * @param displacement the displacement
+     */
+    public AMD64Address(Register base, int displacement) {
+        this(base, Register.None, Scale.Times1, displacement);
+    }
+
+    /**
+     * Creates an {@link AMD64Address} with given base and index registers, scaling and
+     * displacement. This is the most general constructor.
+     *
+     * @param base the base register
+     * @param index the index register
+     * @param scale the scaling factor
+     * @param displacement the displacement
+     */
+    public AMD64Address(Register base, Register index, Scale scale, int displacement) {
+        this.base = base;
+        this.index = index;
+        this.scale = scale;
+        this.displacement = displacement;
+
+        assert scale != null;
+    }
+
+    /**
+     * A scaling factor used in the SIB addressing mode.
+     */
+    public enum Scale {
+        Times1(1, 0),
+        Times2(2, 1),
+        Times4(4, 2),
+        Times8(8, 3);
+
+        private Scale(int value, int log2) {
+            this.value = value;
+            this.log2 = log2;
+        }
+
+        /**
+         * The value (or multiplier) of this scale.
+         */
+        public final int value;
+
+        /**
+         * The log base 2 of the {@linkplain #value value} of this scale.
+         */
+        public final int log2;
+
+        public static Scale fromInt(int scale) {
+            switch (scale) {
+                case 1:
+                    return Times1;
+                case 2:
+                    return Times2;
+                case 4:
+                    return Times4;
+                case 8:
+                    return Times8;
+                default:
+                    return null;
+            }
+        }
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder s = new StringBuilder();
+        s.append("[");
+        String sep = "";
+        if (!getBase().equals(Register.None)) {
+            s.append(getBase());
+            sep = " + ";
+        }
+        if (!getIndex().equals(Register.None)) {
+            s.append(sep).append(getIndex()).append(" * ").append(getScale().value);
+            sep = " + ";
+        }
+        if (getDisplacement() < 0) {
+            s.append(" - ").append(-getDisplacement());
+        } else if (getDisplacement() > 0) {
+            s.append(sep).append(getDisplacement());
+        }
+        s.append("]");
+        return s.toString();
+    }
+
+    /**
+     * @return Base register that defines the start of the address computation. If not present, is
+     *         denoted by {@link Register#None}.
+     */
+    public Register getBase() {
+        return base;
+    }
+
+    /**
+     * @return Index register, the value of which (possibly scaled by {@link #getScale}) is added to
+     *         {@link #getBase}. If not present, is denoted by {@link Register#None}.
+     */
+    public Register getIndex() {
+        return index;
+    }
+
+    /**
+     * @return Scaling factor for indexing, dependent on target operand size.
+     */
+    public Scale getScale() {
+        return scale;
+    }
+
+    /**
+     * @return Optional additive displacement.
+     */
+    public int getDisplacement() {
+        return displacement;
+    }
+}
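AMD64Address models the usual x86 base + index * scale + displacement addressing form. A small construction sketch (the register constants are assumed to come from the com.oracle.jvmci.amd64.AMD64 class this package now depends on):

    // [rsp + 16]: base register plus displacement, e.g. a spill-slot access.
    AMD64Address spill = new AMD64Address(AMD64.rsp, 16);
    // [rax + rbx * 8 + 24]: the most general base/index/scale/displacement (SIB) form.
    AMD64Address element = new AMD64Address(AMD64.rax, AMD64.rbx, AMD64Address.Scale.Times8, 24);
    element.toString();    // yields "[rax + rbx * 8 + 24]"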
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.jvmci.asm.amd64/src/com/oracle/jvmci/asm/amd64/AMD64AsmOptions.java	Wed Jun 03 18:06:44 2015 +0200
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.jvmci.asm.amd64;
+
+public class AMD64AsmOptions {
+    public static final boolean UseNormalNop = false;
+    public static final boolean UseAddressNop = true;
+    public static final boolean UseIncDec = true;
+    public static final boolean UseXmmLoadAndClearUpper = true;
+    public static final boolean UseXmmRegToRegMoveAll = true;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.jvmci.asm.amd64/src/com/oracle/jvmci/asm/amd64/AMD64Assembler.java	Wed Jun 03 18:06:44 2015 +0200
@@ -0,0 +1,2445 @@
+/*
+ * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.jvmci.asm.amd64;
+
+import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.amd64.AMD64.*;
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.code.Register;
+import com.oracle.jvmci.code.TargetDescription;
+import com.oracle.jvmci.code.RegisterConfig;
+
+import static com.oracle.jvmci.amd64.AMD64.*;
+import static com.oracle.jvmci.asm.NumUtil.*;
+import static com.oracle.jvmci.asm.amd64.AMD64AsmOptions.*;
+import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
+import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64MOp.*;
+import static com.oracle.jvmci.asm.amd64.AMD64Assembler.OperandSize.*;
+import static com.oracle.jvmci.code.MemoryBarriers.*;
+
+import com.oracle.jvmci.code.Register.RegisterCategory;
+
+/**
+ * This class implements an assembler that can encode most X86 instructions.
+ */
+public class AMD64Assembler extends Assembler {
+
+    private static final int MinEncodingNeedsRex = 8;
+
+    /**
+     * A sentinel value used as a placeholder in an instruction stream for an address that will be
+     * patched.
+     */
+    private static final AMD64Address Placeholder = new AMD64Address(rip);
+
+    /**
+     * The x86 condition codes used for conditional jumps/moves.
+     */
+    public enum ConditionFlag {
+        Zero(0x4, "|zero|"),
+        NotZero(0x5, "|nzero|"),
+        Equal(0x4, "="),
+        NotEqual(0x5, "!="),
+        Less(0xc, "<"),
+        LessEqual(0xe, "<="),
+        Greater(0xf, ">"),
+        GreaterEqual(0xd, ">="),
+        Below(0x2, "|<|"),
+        BelowEqual(0x6, "|<=|"),
+        Above(0x7, "|>|"),
+        AboveEqual(0x3, "|>=|"),
+        Overflow(0x0, "|of|"),
+        NoOverflow(0x1, "|nof|"),
+        CarrySet(0x2, "|carry|"),
+        CarryClear(0x3, "|ncarry|"),
+        Negative(0x8, "|neg|"),
+        Positive(0x9, "|pos|"),
+        Parity(0xa, "|par|"),
+        NoParity(0xb, "|npar|");
+
+        private final int value;
+        private final String operator;
+
+        private ConditionFlag(int value, String operator) {
+            this.value = value;
+            this.operator = operator;
+        }
+
+        public ConditionFlag negate() {
+            switch (this) {
+                case Zero:
+                    return NotZero;
+                case NotZero:
+                    return Zero;
+                case Equal:
+                    return NotEqual;
+                case NotEqual:
+                    return Equal;
+                case Less:
+                    return GreaterEqual;
+                case LessEqual:
+                    return Greater;
+                case Greater:
+                    return LessEqual;
+                case GreaterEqual:
+                    return Less;
+                case Below:
+                    return AboveEqual;
+                case BelowEqual:
+                    return Above;
+                case Above:
+                    return BelowEqual;
+                case AboveEqual:
+                    return Below;
+                case Overflow:
+                    return NoOverflow;
+                case NoOverflow:
+                    return Overflow;
+                case CarrySet:
+                    return CarryClear;
+                case CarryClear:
+                    return CarrySet;
+                case Negative:
+                    return Positive;
+                case Positive:
+                    return Negative;
+                case Parity:
+                    return NoParity;
+                case NoParity:
+                    return Parity;
+            }
+            throw new IllegalArgumentException();
+        }
+
+        public int getValue() {
+            return value;
+        }
+
+        @Override
+        public String toString() {
+            return operator;
+        }
+    }
+
+    /**
+     * Constants for X86 prefix bytes.
+     */
+    private static class Prefix {
+
+        private static final int REX = 0x40;
+        private static final int REXB = 0x41;
+        private static final int REXX = 0x42;
+        private static final int REXXB = 0x43;
+        private static final int REXR = 0x44;
+        private static final int REXRB = 0x45;
+        private static final int REXRX = 0x46;
+        private static final int REXRXB = 0x47;
+        private static final int REXW = 0x48;
+        private static final int REXWB = 0x49;
+        private static final int REXWX = 0x4A;
+        private static final int REXWXB = 0x4B;
+        private static final int REXWR = 0x4C;
+        private static final int REXWRB = 0x4D;
+        private static final int REXWRX = 0x4E;
+        private static final int REXWRXB = 0x4F;
+    }
+
+    /**
+     * The x86 operand sizes.
+     */
+    public static enum OperandSize {
+        BYTE(1) {
+            @Override
+            protected void emitImmediate(AMD64Assembler asm, int imm) {
+                assert imm == (byte) imm;
+                asm.emitByte(imm);
+            }
+        },
+
+        WORD(2, 0x66) {
+            @Override
+            protected void emitImmediate(AMD64Assembler asm, int imm) {
+                assert imm == (short) imm;
+                asm.emitShort(imm);
+            }
+        },
+
+        DWORD(4) {
+            @Override
+            protected void emitImmediate(AMD64Assembler asm, int imm) {
+                asm.emitInt(imm);
+            }
+        },
+
+        QWORD(8) {
+            @Override
+            protected void emitImmediate(AMD64Assembler asm, int imm) {
+                asm.emitInt(imm);
+            }
+        },
+
+        SS(4, 0xF3, true),
+
+        SD(8, 0xF2, true),
+
+        PS(16, true),
+
+        PD(16, 0x66, true);
+
+        private final int sizePrefix;
+
+        private final int bytes;
+        private final boolean xmm;
+
+        private OperandSize(int bytes) {
+            this(bytes, 0);
+        }
+
+        private OperandSize(int bytes, int sizePrefix) {
+            this(bytes, sizePrefix, false);
+        }
+
+        private OperandSize(int bytes, boolean xmm) {
+            this(bytes, 0, xmm);
+        }
+
+        private OperandSize(int bytes, int sizePrefix, boolean xmm) {
+            this.sizePrefix = sizePrefix;
+            this.bytes = bytes;
+            this.xmm = xmm;
+        }
+
+        public int getBytes() {
+            return bytes;
+        }
+
+        public boolean isXmmType() {
+            return xmm;
+        }
+
+        /**
+         * Emit an immediate of this size. Note that immediate {@link #QWORD} operands are encoded
+         * as sign-extended 32-bit values.
+         *
+         * @param asm
+         * @param imm
+         */
+        protected void emitImmediate(AMD64Assembler asm, int imm) {
+            assert false;
+        }
+    }
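+
+    // Editor's note (sketch): the immediate width follows the constants above, e.g.
+    // DWORD.emitImmediate(asm, imm) writes four bytes; QWORD also writes only four bytes
+    // because QWORD immediates are sign-extended to 64 bits by the processor, and WORD
+    // operands additionally carry the 0x66 operand-size prefix recorded in sizePrefix.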
+
+    /**
+     * Operand size and register type constraints.
+     */
+    private static enum OpAssertion {
+        ByteAssertion(CPU, CPU, BYTE),
+        IntegerAssertion(CPU, CPU, WORD, DWORD, QWORD),
+        No16BitAssertion(CPU, CPU, DWORD, QWORD),
+        QwordOnlyAssertion(CPU, CPU, QWORD),
+        FloatingAssertion(XMM, XMM, SS, SD, PS, PD),
+        PackedFloatingAssertion(XMM, XMM, PS, PD),
+        SingleAssertion(XMM, XMM, SS),
+        DoubleAssertion(XMM, XMM, SD),
+        IntToFloatingAssertion(XMM, CPU, DWORD, QWORD),
+        FloatingToIntAssertion(CPU, XMM, DWORD, QWORD);
+
+        private final RegisterCategory resultCategory;
+        private final RegisterCategory inputCategory;
+        private final OperandSize[] allowedSizes;
+
+        private OpAssertion(RegisterCategory resultCategory, RegisterCategory inputCategory, OperandSize... allowedSizes) {
+            this.resultCategory = resultCategory;
+            this.inputCategory = inputCategory;
+            this.allowedSizes = allowedSizes;
+        }
+
+        protected boolean checkOperands(AMD64Op op, OperandSize size, Register resultReg, Register inputReg) {
+            assert resultReg == null || resultCategory.equals(resultReg.getRegisterCategory()) : "invalid result register " + resultReg + " used in " + op;
+            assert inputReg == null || inputCategory.equals(inputReg.getRegisterCategory()) : "invalid input register " + inputReg + " used in " + op;
+
+            for (OperandSize s : allowedSizes) {
+                if (size == s) {
+                    return true;
+                }
+            }
+
+            assert false : "invalid operand size " + size + " used in " + op;
+            return false;
+        }
+    }
+
+    /**
+     * The register to which {@link Register#Frame} and {@link Register#CallerFrame} are bound.
+     */
+    public final Register frameRegister;
+
+    /**
+     * Constructs an assembler for the AMD64 architecture.
+     *
+     * @param registerConfig the register configuration used to bind {@link Register#Frame} and
+     *            {@link Register#CallerFrame} to physical registers. This value can be null if this
+     *            assembler instance will not be used to assemble instructions using these logical
+     *            registers.
+     */
+    public AMD64Assembler(TargetDescription target, RegisterConfig registerConfig) {
+        super(target);
+        this.frameRegister = registerConfig == null ? null : registerConfig.getFrameRegister();
+    }
+
+    private boolean supports(CPUFeature feature) {
+        return ((AMD64) target.arch).getFeatures().contains(feature);
+    }
+
+    private static int encode(Register r) {
+        assert r.encoding < 16 && r.encoding >= 0 : "encoding out of range: " + r.encoding;
+        return r.encoding & 0x7;
+    }
+
+    /**
+     * Get RXB bits for register-register instruction. In that encoding, ModRM.rm contains a
+     * register index. The R bit extends the ModRM.reg field and the B bit extends the ModRM.rm
+     * field. The X bit must be 0.
+     */
+    protected static int getRXB(Register reg, Register rm) {
+        int rxb = (reg == null ? 0 : reg.encoding & 0x08) >> 1;
+        rxb |= (rm == null ? 0 : rm.encoding & 0x08) >> 3;
+        return rxb;
+    }
+
+    /**
+     * Get RXB bits for register-memory instruction. The R bit extends the ModRM.reg field. There
+     * are two cases for the memory operand:<br>
+     * ModRM.rm contains the base register: In that case, B extends the ModRM.rm field and X = 0.<br>
+     * There is an SIB byte: In that case, X extends SIB.index and B extends SIB.base.
+     */
+    protected static int getRXB(Register reg, AMD64Address rm) {
+        int rxb = (reg == null ? 0 : reg.encoding & 0x08) >> 1;
+        if (!rm.getIndex().equals(Register.None)) {
+            rxb |= (rm.getIndex().encoding & 0x08) >> 2;
+        }
+        if (!rm.getBase().equals(Register.None)) {
+            rxb |= (rm.getBase().encoding & 0x08) >> 3;
+        }
+        return rxb;
+    }
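+
+    // Worked example (editor's sketch): in the register-register form with reg = r9 and
+    // rm = r13 (encodings 9 and 13), getRXB returns (8 >> 1) | (8 >> 3) = 0b101, i.e. the
+    // REX.R and REX.B bits are set while REX.X stays 0, as described above.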
+
+    /**
+     * Emit the ModR/M byte for one register operand and an opcode extension in the R field.
+     * <p>
+     * Format: [ 11 reg r/m ]
+     */
+    protected void emitModRM(int reg, Register rm) {
+        assert (reg & 0x07) == reg;
+        emitByte(0xC0 | (reg << 3) | (rm.encoding & 0x07));
+    }
+
+    /**
+     * Emit the ModR/M byte for two register operands.
+     * <p>
+     * Format: [ 11 reg r/m ]
+     */
+    protected void emitModRM(Register reg, Register rm) {
+        emitModRM(reg.encoding & 0x07, rm);
+    }
+
+    /**
+     * Emits the ModR/M byte and optionally the SIB byte for one register and one memory operand.
+     */
+    protected void emitOperandHelper(Register reg, AMD64Address addr) {
+        assert !reg.equals(Register.None);
+        emitOperandHelper(encode(reg), addr);
+    }
+
+    /**
+     * Emits the ModR/M byte and optionally the SIB byte for one memory operand and an opcode
+     * extension in the R field.
+     */
+    protected void emitOperandHelper(int reg, AMD64Address addr) {
+        assert (reg & 0x07) == reg;
+        int regenc = reg << 3;
+
+        Register base = addr.getBase();
+        Register index = addr.getIndex();
+
+        AMD64Address.Scale scale = addr.getScale();
+        int disp = addr.getDisplacement();
+
+        if (base.equals(Register.Frame)) {
+            assert frameRegister != null : "cannot use register " + Register.Frame + " in assembler with null register configuration";
+            base = frameRegister;
+        }
+
+        if (base.equals(AMD64.rip)) { // also matches Placeholder
+            // [00 000 101] disp32
+            assert index.equals(Register.None) : "cannot use RIP relative addressing with index register";
+            emitByte(0x05 | regenc);
+            emitInt(disp);
+        } else if (base.isValid()) {
+            int baseenc = base.isValid() ? encode(base) : 0;
+            if (index.isValid()) {
+                int indexenc = encode(index) << 3;
+                // [base + indexscale + disp]
+                if (disp == 0 && !base.equals(rbp) && !base.equals(r13)) {
+                    // [base + indexscale]
+                    // [00 reg 100][ss index base]
+                    assert !index.equals(rsp) : "illegal addressing mode";
+                    emitByte(0x04 | regenc);
+                    emitByte(scale.log2 << 6 | indexenc | baseenc);
+                } else if (isByte(disp)) {
+                    // [base + indexscale + imm8]
+                    // [01 reg 100][ss index base] imm8
+                    assert !index.equals(rsp) : "illegal addressing mode";
+                    emitByte(0x44 | regenc);
+                    emitByte(scale.log2 << 6 | indexenc | baseenc);
+                    emitByte(disp & 0xFF);
+                } else {
+                    // [base + indexscale + disp32]
+                    // [10 reg 100][ss index base] disp32
+                    assert !index.equals(rsp) : "illegal addressing mode";
+                    emitByte(0x84 | regenc);
+                    emitByte(scale.log2 << 6 | indexenc | baseenc);
+                    emitInt(disp);
+                }
+            } else if (base.equals(rsp) || base.equals(r12)) {
+                // [rsp + disp]
+                if (disp == 0) {
+                    // [rsp]
+                    // [00 reg 100][00 100 100]
+                    emitByte(0x04 | regenc);
+                    emitByte(0x24);
+                } else if (isByte(disp)) {
+                    // [rsp + imm8]
+                    // [01 reg 100][00 100 100] disp8
+                    emitByte(0x44 | regenc);
+                    emitByte(0x24);
+                    emitByte(disp & 0xFF);
+                } else {
+                    // [rsp + imm32]
+                    // [10 reg 100][00 100 100] disp32
+                    emitByte(0x84 | regenc);
+                    emitByte(0x24);
+                    emitInt(disp);
+                }
+            } else {
+                // [base + disp]
+                assert !base.equals(rsp) && !base.equals(r12) : "illegal addressing mode";
+                if (disp == 0 && !base.equals(rbp) && !base.equals(r13)) {
+                    // [base]
+                    // [00 reg base]
+                    emitByte(0x00 | regenc | baseenc);
+                } else if (isByte(disp)) {
+                    // [base + disp8]
+                    // [01 reg base] disp8
+                    emitByte(0x40 | regenc | baseenc);
+                    emitByte(disp & 0xFF);
+                } else {
+                    // [base + disp32]
+                    // [10 reg base] disp32
+                    emitByte(0x80 | regenc | baseenc);
+                    emitInt(disp);
+                }
+            }
+        } else {
+            if (index.isValid()) {
+                int indexenc = encode(index) << 3;
+                // [indexscale + disp]
+                // [00 reg 100][ss index 101] disp32
+                assert !index.equals(rsp) : "illegal addressing mode";
+                emitByte(0x04 | regenc);
+                emitByte(scale.log2 << 6 | indexenc | 0x05);
+                emitInt(disp);
+            } else {
+                // [disp] ABSOLUTE
+                // [00 reg 100][00 100 101] disp32
+                emitByte(0x04 | regenc);
+                emitByte(0x25);
+                emitInt(disp);
+            }
+        }
+    }
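+
+    // Worked example (editor's sketch): with an opcode extension of 0, [rsp] is emitted as
+    // the two bytes 0x04 0x24 (ModRM plus SIB), whereas [rbp] cannot use the displacement-less
+    // form and is emitted as 0x45 0x00 (ModRM with an explicit zero 8-bit displacement).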
+
+    /**
+     * Base class for AMD64 opcodes.
+     */
+    public static class AMD64Op {
+
+        protected static final int P_0F = 0x0F;
+        protected static final int P_0F38 = 0x380F;
+        protected static final int P_0F3A = 0x3A0F;
+
+        private final String opcode;
+
+        private final int prefix1;
+        private final int prefix2;
+        private final int op;
+
+        private final boolean dstIsByte;
+        private final boolean srcIsByte;
+
+        private final OpAssertion assertion;
+        private final CPUFeature feature;
+
+        protected AMD64Op(String opcode, int prefix1, int prefix2, int op, OpAssertion assertion, CPUFeature feature) {
+            this(opcode, prefix1, prefix2, op, assertion == OpAssertion.ByteAssertion, assertion == OpAssertion.ByteAssertion, assertion, feature);
+        }
+
+        protected AMD64Op(String opcode, int prefix1, int prefix2, int op, boolean dstIsByte, boolean srcIsByte, OpAssertion assertion, CPUFeature feature) {
+            this.opcode = opcode;
+            this.prefix1 = prefix1;
+            this.prefix2 = prefix2;
+            this.op = op;
+
+            this.dstIsByte = dstIsByte;
+            this.srcIsByte = srcIsByte;
+
+            this.assertion = assertion;
+            this.feature = feature;
+        }
+
+        protected final void emitOpcode(AMD64Assembler asm, OperandSize size, int rxb, int dstEnc, int srcEnc) {
+            if (prefix1 != 0) {
+                asm.emitByte(prefix1);
+            }
+            if (size.sizePrefix != 0) {
+                asm.emitByte(size.sizePrefix);
+            }
+            int rexPrefix = 0x40 | rxb;
+            if (size == QWORD) {
+                rexPrefix |= 0x08;
+            }
+            if (rexPrefix != 0x40 || (dstIsByte && dstEnc >= 4) || (srcIsByte && srcEnc >= 4)) {
+                asm.emitByte(rexPrefix);
+            }
+            if (prefix2 > 0xFF) {
+                asm.emitShort(prefix2);
+            } else if (prefix2 > 0) {
+                asm.emitByte(prefix2);
+            }
+            asm.emitByte(op);
+        }
+
+        protected final boolean verify(AMD64Assembler asm, OperandSize size, Register resultReg, Register inputReg) {
+            assert feature == null || asm.supports(feature) : String.format("unsupported feature %s required for %s", feature, opcode);
+            assert assertion.checkOperands(this, size, resultReg, inputReg);
+            return true;
+        }
+
+        @Override
+        public String toString() {
+            return opcode;
+        }
+    }
+
+    /**
+     * Base class for AMD64 opcodes with immediate operands.
+     */
+    public static class AMD64ImmOp extends AMD64Op {
+
+        private final boolean immIsByte;
+
+        protected AMD64ImmOp(String opcode, boolean immIsByte, int prefix, int op, OpAssertion assertion) {
+            super(opcode, 0, prefix, op, assertion, null);
+            this.immIsByte = immIsByte;
+        }
+
+        protected final void emitImmediate(AMD64Assembler asm, OperandSize size, int imm) {
+            if (immIsByte) {
+                assert imm == (byte) imm;
+                asm.emitByte(imm);
+            } else {
+                size.emitImmediate(asm, imm);
+            }
+        }
+    }
+
+    /**
+     * Opcode with operand order of either RM or MR.
+     */
+    public abstract static class AMD64RROp extends AMD64Op {
+
+        protected AMD64RROp(String opcode, int prefix1, int prefix2, int op, OpAssertion assertion, CPUFeature feature) {
+            super(opcode, prefix1, prefix2, op, assertion, feature);
+        }
+
+        protected AMD64RROp(String opcode, int prefix1, int prefix2, int op, boolean dstIsByte, boolean srcIsByte, OpAssertion assertion, CPUFeature feature) {
+            super(opcode, prefix1, prefix2, op, dstIsByte, srcIsByte, assertion, feature);
+        }
+
+        public abstract void emit(AMD64Assembler asm, OperandSize size, Register dst, Register src);
+    }
+
+    /**
+     * Opcode with operand order of RM.
+     */
+    public static class AMD64RMOp extends AMD64RROp {
+        // @formatter:off
+        public static final AMD64RMOp IMUL   = new AMD64RMOp("IMUL",         P_0F, 0xAF);
+        public static final AMD64RMOp BSF    = new AMD64RMOp("BSF",          P_0F, 0xBC);
+        public static final AMD64RMOp BSR    = new AMD64RMOp("BSR",          P_0F, 0xBD);
+        public static final AMD64RMOp POPCNT = new AMD64RMOp("POPCNT", 0xF3, P_0F, 0xB8, CPUFeature.POPCNT);
+        public static final AMD64RMOp TZCNT  = new AMD64RMOp("TZCNT",  0xF3, P_0F, 0xBC, CPUFeature.BMI1);
+        public static final AMD64RMOp LZCNT  = new AMD64RMOp("LZCNT",  0xF3, P_0F, 0xBD, CPUFeature.LZCNT);
+        public static final AMD64RMOp MOVZXB = new AMD64RMOp("MOVZXB",       P_0F, 0xB6, false, true, OpAssertion.IntegerAssertion);
+        public static final AMD64RMOp MOVZX  = new AMD64RMOp("MOVZX",        P_0F, 0xB7, OpAssertion.No16BitAssertion);
+        public static final AMD64RMOp MOVSXB = new AMD64RMOp("MOVSXB",       P_0F, 0xBE, false, true, OpAssertion.IntegerAssertion);
+        public static final AMD64RMOp MOVSX  = new AMD64RMOp("MOVSX",        P_0F, 0xBF, OpAssertion.No16BitAssertion);
+        public static final AMD64RMOp MOVSXD = new AMD64RMOp("MOVSXD",             0x63, OpAssertion.QwordOnlyAssertion);
+        public static final AMD64RMOp MOVB   = new AMD64RMOp("MOVB",               0x8A, OpAssertion.ByteAssertion);
+        public static final AMD64RMOp MOV    = new AMD64RMOp("MOV",                0x8B);
+
+        // MOVD/MOVQ and MOVSS/MOVSD are the same opcode, just with different operand size prefix
+        public static final AMD64RMOp MOVD   = new AMD64RMOp("MOVD",   0x66, P_0F, 0x6E, OpAssertion.IntToFloatingAssertion, CPUFeature.SSE2);
+        public static final AMD64RMOp MOVQ   = new AMD64RMOp("MOVQ",   0x66, P_0F, 0x6E, OpAssertion.IntToFloatingAssertion, CPUFeature.SSE2);
+        public static final AMD64RMOp MOVSS  = new AMD64RMOp("MOVSS",        P_0F, 0x10, OpAssertion.FloatingAssertion, CPUFeature.SSE);
+        public static final AMD64RMOp MOVSD  = new AMD64RMOp("MOVSD",        P_0F, 0x10, OpAssertion.FloatingAssertion, CPUFeature.SSE);
+
+        // TEST is documented as an MR operation, but it is symmetric, and using it as an RM operation is more convenient.
+        public static final AMD64RMOp TESTB  = new AMD64RMOp("TEST",               0x84, OpAssertion.ByteAssertion);
+        public static final AMD64RMOp TEST   = new AMD64RMOp("TEST",               0x85);
+        // @formatter:on
+
+        protected AMD64RMOp(String opcode, int op) {
+            this(opcode, 0, op);
+        }
+
+        protected AMD64RMOp(String opcode, int op, OpAssertion assertion) {
+            this(opcode, 0, op, assertion);
+        }
+
+        protected AMD64RMOp(String opcode, int prefix, int op) {
+            this(opcode, 0, prefix, op, null);
+        }
+
+        protected AMD64RMOp(String opcode, int prefix, int op, OpAssertion assertion) {
+            this(opcode, 0, prefix, op, assertion, null);
+        }
+
+        protected AMD64RMOp(String opcode, int prefix, int op, OpAssertion assertion, CPUFeature feature) {
+            this(opcode, 0, prefix, op, assertion, feature);
+        }
+
+        protected AMD64RMOp(String opcode, int prefix, int op, boolean dstIsByte, boolean srcIsByte, OpAssertion assertion) {
+            super(opcode, 0, prefix, op, dstIsByte, srcIsByte, assertion, null);
+        }
+
+        protected AMD64RMOp(String opcode, int prefix1, int prefix2, int op, CPUFeature feature) {
+            this(opcode, prefix1, prefix2, op, OpAssertion.IntegerAssertion, feature);
+        }
+
+        protected AMD64RMOp(String opcode, int prefix1, int prefix2, int op, OpAssertion assertion, CPUFeature feature) {
+            super(opcode, prefix1, prefix2, op, assertion, feature);
+        }
+
+        @Override
+        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, Register src) {
+            assert verify(asm, size, dst, src);
+            emitOpcode(asm, size, getRXB(dst, src), dst.encoding, src.encoding);
+            asm.emitModRM(dst, src);
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, AMD64Address src) {
+            assert verify(asm, size, dst, null);
+            emitOpcode(asm, size, getRXB(dst, src), dst.encoding, 0);
+            asm.emitOperandHelper(dst, src);
+        }
+    }
+
+    /**
+     * Opcode with operand order of MR.
+     */
+    public static class AMD64MROp extends AMD64RROp {
+        // @formatter:off
+        public static final AMD64MROp MOVB   = new AMD64MROp("MOVB",               0x88, OpAssertion.ByteAssertion);
+        public static final AMD64MROp MOV    = new AMD64MROp("MOV",                0x89);
+
+        // MOVD and MOVQ are the same opcode, just with different operand size prefix
+        // Note that as MR opcodes, they have reverse operand order, so the IntToFloatingAssertion must be used.
+        public static final AMD64MROp MOVD   = new AMD64MROp("MOVD",   0x66, P_0F, 0x7E, OpAssertion.IntToFloatingAssertion, CPUFeature.SSE2);
+        public static final AMD64MROp MOVQ   = new AMD64MROp("MOVQ",   0x66, P_0F, 0x7E, OpAssertion.IntToFloatingAssertion, CPUFeature.SSE2);
+
+        // MOVSS and MOVSD are the same opcode, just with different operand size prefix
+        public static final AMD64MROp MOVSS  = new AMD64MROp("MOVSS",        P_0F, 0x11, OpAssertion.FloatingAssertion, CPUFeature.SSE);
+        public static final AMD64MROp MOVSD  = new AMD64MROp("MOVSD",        P_0F, 0x11, OpAssertion.FloatingAssertion, CPUFeature.SSE);
+        // @formatter:on
+
+        protected AMD64MROp(String opcode, int op) {
+            this(opcode, 0, op);
+        }
+
+        protected AMD64MROp(String opcode, int op, OpAssertion assertion) {
+            this(opcode, 0, op, assertion);
+        }
+
+        protected AMD64MROp(String opcode, int prefix, int op) {
+            this(opcode, prefix, op, OpAssertion.IntegerAssertion);
+        }
+
+        protected AMD64MROp(String opcode, int prefix, int op, OpAssertion assertion) {
+            this(opcode, prefix, op, assertion, null);
+        }
+
+        protected AMD64MROp(String opcode, int prefix, int op, OpAssertion assertion, CPUFeature feature) {
+            this(opcode, 0, prefix, op, assertion, feature);
+        }
+
+        protected AMD64MROp(String opcode, int prefix1, int prefix2, int op, OpAssertion assertion, CPUFeature feature) {
+            super(opcode, prefix1, prefix2, op, assertion, feature);
+        }
+
+        @Override
+        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, Register src) {
+            assert verify(asm, size, src, dst);
+            emitOpcode(asm, size, getRXB(src, dst), src.encoding, dst.encoding);
+            asm.emitModRM(src, dst);
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, AMD64Address dst, Register src) {
+            assert verify(asm, size, null, src);
+            emitOpcode(asm, size, getRXB(src, dst), src.encoding, 0);
+            asm.emitOperandHelper(src, dst);
+        }
+    }
+
+    /**
+     * Opcodes with operand order of M.
+     */
+    public static class AMD64MOp extends AMD64Op {
+        // @formatter:off
+        public static final AMD64MOp NOT  = new AMD64MOp("NOT",  0xF7, 2);
+        public static final AMD64MOp NEG  = new AMD64MOp("NEG",  0xF7, 3);
+        public static final AMD64MOp MUL  = new AMD64MOp("MUL",  0xF7, 4);
+        public static final AMD64MOp IMUL = new AMD64MOp("IMUL", 0xF7, 5);
+        public static final AMD64MOp DIV  = new AMD64MOp("DIV",  0xF7, 6);
+        public static final AMD64MOp IDIV = new AMD64MOp("IDIV", 0xF7, 7);
+        public static final AMD64MOp INC  = new AMD64MOp("INC",  0xFF, 0);
+        public static final AMD64MOp DEC  = new AMD64MOp("DEC",  0xFF, 1);
+        // @formatter:on
+
+        private final int ext;
+
+        protected AMD64MOp(String opcode, int op, int ext) {
+            this(opcode, 0, op, ext);
+        }
+
+        protected AMD64MOp(String opcode, int prefix, int op, int ext) {
+            this(opcode, prefix, op, ext, OpAssertion.IntegerAssertion);
+        }
+
+        protected AMD64MOp(String opcode, int prefix, int op, int ext, OpAssertion assertion) {
+            super(opcode, 0, prefix, op, assertion, null);
+            this.ext = ext;
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, Register dst) {
+            assert verify(asm, size, dst, null);
+            emitOpcode(asm, size, getRXB(null, dst), 0, dst.encoding);
+            asm.emitModRM(ext, dst);
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, AMD64Address dst) {
+            assert verify(asm, size, null, null);
+            emitOpcode(asm, size, getRXB(null, dst), 0, 0);
+            asm.emitOperandHelper(ext, dst);
+        }
+    }
+
+    /**
+     * Opcodes with operand order of MI.
+     */
+    public static class AMD64MIOp extends AMD64ImmOp {
+        // @formatter:off
+        public static final AMD64MIOp MOVB = new AMD64MIOp("MOVB", true,  0xC6, 0, OpAssertion.ByteAssertion);
+        public static final AMD64MIOp MOV  = new AMD64MIOp("MOV",  false, 0xC7, 0);
+        public static final AMD64MIOp TEST = new AMD64MIOp("TEST", false, 0xF7, 0);
+        // @formatter:on
+
+        private final int ext;
+
+        protected AMD64MIOp(String opcode, boolean immIsByte, int op, int ext) {
+            this(opcode, immIsByte, op, ext, OpAssertion.IntegerAssertion);
+        }
+
+        protected AMD64MIOp(String opcode, boolean immIsByte, int op, int ext, OpAssertion assertion) {
+            this(opcode, immIsByte, 0, op, ext, assertion);
+        }
+
+        protected AMD64MIOp(String opcode, boolean immIsByte, int prefix, int op, int ext, OpAssertion assertion) {
+            super(opcode, immIsByte, prefix, op, assertion);
+            this.ext = ext;
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, int imm) {
+            assert verify(asm, size, dst, null);
+            emitOpcode(asm, size, getRXB(null, dst), 0, dst.encoding);
+            asm.emitModRM(ext, dst);
+            emitImmediate(asm, size, imm);
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, AMD64Address dst, int imm) {
+            assert verify(asm, size, null, null);
+            emitOpcode(asm, size, getRXB(null, dst), 0, 0);
+            asm.emitOperandHelper(ext, dst);
+            emitImmediate(asm, size, imm);
+        }
+    }
+
+    /**
+     * Opcodes with operand order of RMI.
+     */
+    public static class AMD64RMIOp extends AMD64ImmOp {
+        // @formatter:off
+        public static final AMD64RMIOp IMUL    = new AMD64RMIOp("IMUL", false, 0x69);
+        public static final AMD64RMIOp IMUL_SX = new AMD64RMIOp("IMUL", true,  0x6B);
+        // @formatter:on
+
+        protected AMD64RMIOp(String opcode, boolean immIsByte, int op) {
+            this(opcode, immIsByte, 0, op, OpAssertion.IntegerAssertion);
+        }
+
+        protected AMD64RMIOp(String opcode, boolean immIsByte, int prefix, int op, OpAssertion assertion) {
+            super(opcode, immIsByte, prefix, op, assertion);
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, Register src, int imm) {
+            assert verify(asm, size, dst, src);
+            emitOpcode(asm, size, getRXB(dst, src), dst.encoding, src.encoding);
+            asm.emitModRM(dst, src);
+            emitImmediate(asm, size, imm);
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, AMD64Address src, int imm) {
+            assert verify(asm, size, dst, null);
+            emitOpcode(asm, size, getRXB(dst, src), dst.encoding, 0);
+            asm.emitOperandHelper(dst, src);
+            emitImmediate(asm, size, imm);
+        }
+    }
+
+    public static class SSEOp extends AMD64RMOp {
+        // @formatter:off
+        public static final SSEOp CVTSI2SS  = new SSEOp("CVTSI2SS",  0xF3, P_0F, 0x2A, OpAssertion.IntToFloatingAssertion);
+        public static final SSEOp CVTSI2SD  = new SSEOp("CVTSI2SD",  0xF2, P_0F, 0x2A, OpAssertion.IntToFloatingAssertion);
+        public static final SSEOp CVTTSS2SI = new SSEOp("CVTTSS2SI", 0xF3, P_0F, 0x2C, OpAssertion.FloatingToIntAssertion);
+        public static final SSEOp CVTTSD2SI = new SSEOp("CVTTSD2SI", 0xF2, P_0F, 0x2C, OpAssertion.FloatingToIntAssertion);
+        public static final SSEOp UCOMIS    = new SSEOp("UCOMIS",          P_0F, 0x2E, OpAssertion.PackedFloatingAssertion);
+        public static final SSEOp SQRT      = new SSEOp("SQRT",            P_0F, 0x51);
+        public static final SSEOp AND       = new SSEOp("AND",             P_0F, 0x54, OpAssertion.PackedFloatingAssertion);
+        public static final SSEOp ANDN      = new SSEOp("ANDN",            P_0F, 0x55, OpAssertion.PackedFloatingAssertion);
+        public static final SSEOp OR        = new SSEOp("OR",              P_0F, 0x56, OpAssertion.PackedFloatingAssertion);
+        public static final SSEOp XOR       = new SSEOp("XOR",             P_0F, 0x57, OpAssertion.PackedFloatingAssertion);
+        public static final SSEOp ADD       = new SSEOp("ADD",             P_0F, 0x58);
+        public static final SSEOp MUL       = new SSEOp("MUL",             P_0F, 0x59);
+        public static final SSEOp CVTSS2SD  = new SSEOp("CVTSS2SD",        P_0F, 0x5A, OpAssertion.SingleAssertion);
+        public static final SSEOp CVTSD2SS  = new SSEOp("CVTSD2SS",        P_0F, 0x5A, OpAssertion.DoubleAssertion);
+        public static final SSEOp SUB       = new SSEOp("SUB",             P_0F, 0x5C);
+        public static final SSEOp MIN       = new SSEOp("MIN",             P_0F, 0x5D);
+        public static final SSEOp DIV       = new SSEOp("DIV",             P_0F, 0x5E);
+        public static final SSEOp MAX       = new SSEOp("MAX",             P_0F, 0x5F);
+        // @formatter:on
+
+        protected SSEOp(String opcode, int prefix, int op) {
+            this(opcode, prefix, op, OpAssertion.FloatingAssertion);
+        }
+
+        protected SSEOp(String opcode, int prefix, int op, OpAssertion assertion) {
+            this(opcode, 0, prefix, op, assertion);
+        }
+
+        protected SSEOp(String opcode, int mandatoryPrefix, int prefix, int op, OpAssertion assertion) {
+            super(opcode, mandatoryPrefix, prefix, op, assertion, CPUFeature.SSE2);
+        }
+    }
+
+    /**
+     * Arithmetic operation with operand order of RM, MR or MI.
+     */
+    public static final class AMD64BinaryArithmetic {
+        // @formatter:off
+        public static final AMD64BinaryArithmetic ADD = new AMD64BinaryArithmetic("ADD", 0);
+        public static final AMD64BinaryArithmetic OR  = new AMD64BinaryArithmetic("OR",  1);
+        public static final AMD64BinaryArithmetic ADC = new AMD64BinaryArithmetic("ADC", 2);
+        public static final AMD64BinaryArithmetic SBB = new AMD64BinaryArithmetic("SBB", 3);
+        public static final AMD64BinaryArithmetic AND = new AMD64BinaryArithmetic("AND", 4);
+        public static final AMD64BinaryArithmetic SUB = new AMD64BinaryArithmetic("SUB", 5);
+        public static final AMD64BinaryArithmetic XOR = new AMD64BinaryArithmetic("XOR", 6);
+        public static final AMD64BinaryArithmetic CMP = new AMD64BinaryArithmetic("CMP", 7);
+        // @formatter:on
+
+        private final AMD64MIOp byteImmOp;
+        private final AMD64MROp byteMrOp;
+        private final AMD64RMOp byteRmOp;
+
+        private final AMD64MIOp immOp;
+        private final AMD64MIOp immSxOp;
+        private final AMD64MROp mrOp;
+        private final AMD64RMOp rmOp;
+
+        private AMD64BinaryArithmetic(String opcode, int code) {
+            int baseOp = code << 3;
+
+            byteImmOp = new AMD64MIOp(opcode, true, 0, 0x80, code, OpAssertion.ByteAssertion);
+            byteMrOp = new AMD64MROp(opcode, 0, baseOp, OpAssertion.ByteAssertion);
+            byteRmOp = new AMD64RMOp(opcode, 0, baseOp | 0x02, OpAssertion.ByteAssertion);
+
+            immOp = new AMD64MIOp(opcode, false, 0, 0x81, code, OpAssertion.IntegerAssertion);
+            immSxOp = new AMD64MIOp(opcode, true, 0, 0x83, code, OpAssertion.IntegerAssertion);
+            mrOp = new AMD64MROp(opcode, 0, baseOp | 0x01, OpAssertion.IntegerAssertion);
+            rmOp = new AMD64RMOp(opcode, 0, baseOp | 0x03, OpAssertion.IntegerAssertion);
+        }
+
+        public AMD64MIOp getMIOpcode(OperandSize size, boolean sx) {
+            if (size == BYTE) {
+                return byteImmOp;
+            } else if (sx) {
+                return immSxOp;
+            } else {
+                return immOp;
+            }
+        }
+
+        public AMD64MROp getMROpcode(OperandSize size) {
+            if (size == BYTE) {
+                return byteMrOp;
+            } else {
+                return mrOp;
+            }
+        }
+
+        public AMD64RMOp getRMOpcode(OperandSize size) {
+            if (size == BYTE) {
+                return byteRmOp;
+            } else {
+                return rmOp;
+            }
+        }
+    }
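+
+    // Usage sketch (editor's note): addl(Register, int) below calls
+    // ADD.getMIOpcode(DWORD, isByte(imm32)), which picks the 0x83 /0 form with a
+    // sign-extended 8-bit immediate for small constants and the 0x81 /0 form with a
+    // full 32-bit immediate otherwise.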
+
+    /**
+     * Shift operation with operand order of M1, MC or MI.
+     */
+    public static final class AMD64Shift {
+        // @formatter:off
+        public static final AMD64Shift ROL = new AMD64Shift("ROL", 0);
+        public static final AMD64Shift ROR = new AMD64Shift("ROR", 1);
+        public static final AMD64Shift RCL = new AMD64Shift("RCL", 2);
+        public static final AMD64Shift RCR = new AMD64Shift("RCR", 3);
+        public static final AMD64Shift SHL = new AMD64Shift("SHL", 4);
+        public static final AMD64Shift SHR = new AMD64Shift("SHR", 5);
+        public static final AMD64Shift SAR = new AMD64Shift("SAR", 7);
+        // @formatter:on
+
+        public final AMD64MOp m1Op;
+        public final AMD64MOp mcOp;
+        public final AMD64MIOp miOp;
+
+        private AMD64Shift(String opcode, int code) {
+            m1Op = new AMD64MOp(opcode, 0, 0xD1, code, OpAssertion.IntegerAssertion);
+            mcOp = new AMD64MOp(opcode, 0, 0xD3, code, OpAssertion.IntegerAssertion);
+            miOp = new AMD64MIOp(opcode, true, 0, 0xC1, code, OpAssertion.IntegerAssertion);
+        }
+    }
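+
+    // Illustration (editor's sketch): SHL.m1Op encodes as 0xD1 /4 (shift by one),
+    // SHL.mcOp as 0xD3 /4 (shift by CL) and SHL.miOp as 0xC1 /4 ib (shift by an 8-bit
+    // immediate), following the extension codes defined above.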
+
+    public final void addl(AMD64Address dst, int imm32) {
+        ADD.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
+    }
+
+    public final void addl(Register dst, int imm32) {
+        ADD.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
+    }
+
+    private void addrNop4() {
+        // 4 bytes: NOP DWORD PTR [EAX+0]
+        emitByte(0x0F);
+        emitByte(0x1F);
+        emitByte(0x40); // emitRm(cbuf, 0x1, EAXEnc, EAXEnc);
+        emitByte(0); // 8-bit offset (1 byte)
+    }
+
+    private void addrNop5() {
+        // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bit offset
+        emitByte(0x0F);
+        emitByte(0x1F);
+        emitByte(0x44); // emitRm(cbuf, 0x1, EAXEnc, 0x4);
+        emitByte(0x00); // emitRm(cbuf, 0x0, EAXEnc, EAXEnc);
+        emitByte(0); // 8-bit offset (1 byte)
+    }
+
+    private void addrNop7() {
+        // 7 bytes: NOP DWORD PTR [EAX+0] 32-bit offset
+        emitByte(0x0F);
+        emitByte(0x1F);
+        emitByte(0x80); // emitRm(cbuf, 0x2, EAXEnc, EAXEnc);
+        emitInt(0); // 32-bit offset (4 bytes)
+    }
+
+    private void addrNop8() {
+        // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bit offset
+        emitByte(0x0F);
+        emitByte(0x1F);
+        emitByte(0x84); // emitRm(cbuf, 0x2, EAXEnc, 0x4);
+        emitByte(0x00); // emitRm(cbuf, 0x0, EAXEnc, EAXEnc);
+        emitInt(0); // 32-bit offset (4 bytes)
+    }
+
+    public final void andl(Register dst, int imm32) {
+        AND.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
+    }
+
+    public final void bswapl(Register reg) {
+        int encode = prefixAndEncode(reg.encoding);
+        emitByte(0x0F);
+        emitByte(0xC8 | encode);
+    }
+
+    public final void cdql() {
+        emitByte(0x99);
+    }
+
+    public final void cmovl(ConditionFlag cc, Register dst, Register src) {
+        int encode = prefixAndEncode(dst.encoding, src.encoding);
+        emitByte(0x0F);
+        emitByte(0x40 | cc.getValue());
+        emitByte(0xC0 | encode);
+    }
+
+    public final void cmovl(ConditionFlag cc, Register dst, AMD64Address src) {
+        prefix(src, dst);
+        emitByte(0x0F);
+        emitByte(0x40 | cc.getValue());
+        emitOperandHelper(dst, src);
+    }
+
+    public final void cmpl(Register dst, int imm32) {
+        CMP.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
+    }
+
+    public final void cmpl(Register dst, Register src) {
+        CMP.rmOp.emit(this, DWORD, dst, src);
+    }
+
+    public final void cmpl(Register dst, AMD64Address src) {
+        CMP.rmOp.emit(this, DWORD, dst, src);
+    }
+
+    public final void cmpl(AMD64Address dst, int imm32) {
+        CMP.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
+    }
+
+    // The 32-bit cmpxchg compares the value at adr with the contents of rax:
+    // if they are equal, reg is stored into adr; otherwise, the value at adr is loaded into rax.
+    // The ZF flag is set if the compared values were equal, and cleared otherwise.
+    public final void cmpxchgl(Register reg, AMD64Address adr) { // cmpxchg
+        prefix(adr, reg);
+        emitByte(0x0F);
+        emitByte(0xB1);
+        emitOperandHelper(reg, adr);
+    }
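+
+    // Usage sketch (editor's note, hypothetical register names): a typical atomic
+    // compare-and-swap sequence is
+    //     asm.lock();
+    //     asm.cmpxchgl(newValue, address);
+    // with the expected old value pre-loaded into rax and ZF indicating success.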
+
+    protected final void decl(AMD64Address dst) {
+        prefix(dst);
+        emitByte(0xFF);
+        emitOperandHelper(1, dst);
+    }
+
+    public final void hlt() {
+        emitByte(0xF4);
+    }
+
+    public final void imull(Register dst, Register src, int value) {
+        if (isByte(value)) {
+            AMD64RMIOp.IMUL_SX.emit(this, DWORD, dst, src, value);
+        } else {
+            AMD64RMIOp.IMUL.emit(this, DWORD, dst, src, value);
+        }
+    }
+
+    protected final void incl(AMD64Address dst) {
+        prefix(dst);
+        emitByte(0xFF);
+        emitOperandHelper(0, dst);
+    }
+
+    public void jcc(ConditionFlag cc, int jumpTarget, boolean forceDisp32) {
+        int shortSize = 2;
+        int longSize = 6;
+        long disp = jumpTarget - position();
+        if (!forceDisp32 && isByte(disp - shortSize)) {
+            // 0111 tttn #8-bit disp
+            emitByte(0x70 | cc.getValue());
+            emitByte((int) ((disp - shortSize) & 0xFF));
+        } else {
+            // 0000 1111 1000 tttn #32-bit disp
+            assert isInt(disp - longSize) : "must be 32bit offset (call4)";
+            emitByte(0x0F);
+            emitByte(0x80 | cc.getValue());
+            emitInt((int) (disp - longSize));
+        }
+    }
+
+    public final void jcc(ConditionFlag cc, Label l) {
+        assert (0 <= cc.getValue()) && (cc.getValue() < 16) : "illegal cc";
+        if (l.isBound()) {
+            jcc(cc, l.position(), false);
+        } else {
+            // Note: we could eliminate conditional jumps to this jump if the condition
+            // is the same; however, that seems to be a rather unlikely case.
+            // Note: use jccb() if label to be bound is very close to get
+            // an 8-bit displacement
+            l.addPatchAt(position());
+            emitByte(0x0F);
+            emitByte(0x80 | cc.getValue());
+            emitInt(0);
+        }
+
+    }
+
+    public final void jccb(ConditionFlag cc, Label l) {
+        if (l.isBound()) {
+            int shortSize = 2;
+            int entry = l.position();
+            assert isByte(entry - (position() + shortSize)) : "Displacement too large for a short jmp";
+            long disp = entry - position();
+            // 0111 tttn #8-bit disp
+            emitByte(0x70 | cc.getValue());
+            emitByte((int) ((disp - shortSize) & 0xFF));
+        } else {
+            l.addPatchAt(position());
+            emitByte(0x70 | cc.getValue());
+            emitByte(0);
+        }
+    }
+
+    public final void jmp(int jumpTarget, boolean forceDisp32) {
+        int shortSize = 2;
+        int longSize = 5;
+        long disp = jumpTarget - position();
+        if (!forceDisp32 && isByte(disp - shortSize)) {
+            emitByte(0xEB);
+            emitByte((int) ((disp - shortSize) & 0xFF));
+        } else {
+            emitByte(0xE9);
+            emitInt((int) (disp - longSize));
+        }
+    }
+
+    @Override
+    public final void jmp(Label l) {
+        if (l.isBound()) {
+            jmp(l.position(), false);
+        } else {
+            // By default, forward jumps are always 32-bit displacements, since
+            // we can't yet know where the label will be bound. If you're sure that
+            // the forward jump will not run beyond 256 bytes, use jmpb to
+            // force an 8-bit displacement.
+
+            l.addPatchAt(position());
+            emitByte(0xE9);
+            emitInt(0);
+        }
+    }
+
+    public final void jmp(Register entry) {
+        int encode = prefixAndEncode(entry.encoding);
+        emitByte(0xFF);
+        emitByte(0xE0 | encode);
+    }
+
+    public final void jmpb(Label l) {
+        if (l.isBound()) {
+            int shortSize = 2;
+            int entry = l.position();
+            assert isByte((entry - position()) + shortSize) : "Displacement too large for a short jmp";
+            long offs = entry - position();
+            emitByte(0xEB);
+            emitByte((int) ((offs - shortSize) & 0xFF));
+        } else {
+
+            l.addPatchAt(position());
+            emitByte(0xEB);
+            emitByte(0);
+        }
+    }
+
+    public final void leaq(Register dst, AMD64Address src) {
+        prefixq(src, dst);
+        emitByte(0x8D);
+        emitOperandHelper(dst, src);
+    }
+
+    public final void leave() {
+        emitByte(0xC9);
+    }
+
+    public final void lock() {
+        emitByte(0xF0);
+    }
+
+    public final void movapd(Register dst, Register src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM);
+        assert src.getRegisterCategory().equals(AMD64.XMM);
+        int dstenc = dst.encoding;
+        int srcenc = src.encoding;
+        emitByte(0x66);
+        if (dstenc < 8) {
+            if (srcenc >= 8) {
+                emitByte(Prefix.REXB);
+                srcenc -= 8;
+            }
+        } else {
+            if (srcenc < 8) {
+                emitByte(Prefix.REXR);
+            } else {
+                emitByte(Prefix.REXRB);
+                srcenc -= 8;
+            }
+            dstenc -= 8;
+        }
+        emitByte(0x0F);
+        emitByte(0x28);
+        emitByte(0xC0 | dstenc << 3 | srcenc);
+    }
+
+    public final void movaps(Register dst, Register src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM);
+        assert src.getRegisterCategory().equals(AMD64.XMM);
+        int dstenc = dst.encoding;
+        int srcenc = src.encoding;
+        if (dstenc < 8) {
+            if (srcenc >= 8) {
+                emitByte(Prefix.REXB);
+                srcenc -= 8;
+            }
+        } else {
+            if (srcenc < 8) {
+                emitByte(Prefix.REXR);
+            } else {
+                emitByte(Prefix.REXRB);
+                srcenc -= 8;
+            }
+            dstenc -= 8;
+        }
+        emitByte(0x0F);
+        emitByte(0x28);
+        emitByte(0xC0 | dstenc << 3 | srcenc);
+    }
+
+    public final void movb(AMD64Address dst, int imm8) {
+        prefix(dst);
+        emitByte(0xC6);
+        emitOperandHelper(0, dst);
+        emitByte(imm8);
+    }
+
+    public final void movb(AMD64Address dst, Register src) {
+        assert src.getRegisterCategory().equals(AMD64.CPU) : "must have byte register";
+        prefix(dst, src, true);
+        emitByte(0x88);
+        emitOperandHelper(src, dst);
+    }
+
+    public final void movl(Register dst, int imm32) {
+        int encode = prefixAndEncode(dst.encoding);
+        emitByte(0xB8 | encode);
+        emitInt(imm32);
+    }
+
+    public final void movl(Register dst, Register src) {
+        int encode = prefixAndEncode(dst.encoding, src.encoding);
+        emitByte(0x8B);
+        emitByte(0xC0 | encode);
+    }
+
+    public final void movl(Register dst, AMD64Address src) {
+        prefix(src, dst);
+        emitByte(0x8B);
+        emitOperandHelper(dst, src);
+    }
+
+    public final void movl(AMD64Address dst, int imm32) {
+        prefix(dst);
+        emitByte(0xC7);
+        emitOperandHelper(0, dst);
+        emitInt(imm32);
+    }
+
+    public final void movl(AMD64Address dst, Register src) {
+        prefix(dst, src);
+        emitByte(0x89);
+        emitOperandHelper(src, dst);
+    }
+
+    /**
+     * New CPUs require the use of movsd and movss to avoid a partial register stall when loading
+     * from memory. But for old Opterons, use movlpd instead of movsd. The selection is done in
+     * {@link AMD64MacroAssembler#movdbl(Register, AMD64Address)} and
+     * {@link AMD64MacroAssembler#movflt(Register, Register)}.
+     */
+    public final void movlpd(Register dst, AMD64Address src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM);
+        emitByte(0x66);
+        prefix(src, dst);
+        emitByte(0x0F);
+        emitByte(0x12);
+        emitOperandHelper(dst, src);
+    }
+
+    public final void movq(Register dst, AMD64Address src) {
+        if (dst.getRegisterCategory().equals(AMD64.XMM)) {
+            emitByte(0xF3);
+            prefixq(src, dst);
+            emitByte(0x0F);
+            emitByte(0x7E);
+            emitOperandHelper(dst, src);
+        } else {
+            prefixq(src, dst);
+            emitByte(0x8B);
+            emitOperandHelper(dst, src);
+        }
+    }
+
+    public final void movq(Register dst, Register src) {
+        int encode = prefixqAndEncode(dst.encoding, src.encoding);
+        emitByte(0x8B);
+        emitByte(0xC0 | encode);
+    }
+
+    public final void movq(AMD64Address dst, Register src) {
+        if (src.getRegisterCategory().equals(AMD64.XMM)) {
+            emitByte(0x66);
+            prefixq(dst, src);
+            emitByte(0x0F);
+            emitByte(0xD6);
+            emitOperandHelper(src, dst);
+        } else {
+            prefixq(dst, src);
+            emitByte(0x89);
+            emitOperandHelper(src, dst);
+        }
+    }
+
+    public final void movsbl(Register dst, AMD64Address src) {
+        prefix(src, dst);
+        emitByte(0x0F);
+        emitByte(0xBE);
+        emitOperandHelper(dst, src);
+    }
+
+    public final void movsbl(Register dst, Register src) {
+        int encode = prefixAndEncode(dst.encoding, false, src.encoding, true);
+        emitByte(0x0F);
+        emitByte(0xBE);
+        emitByte(0xC0 | encode);
+    }
+
+    public final void movsbq(Register dst, AMD64Address src) {
+        prefixq(src, dst);
+        emitByte(0x0F);
+        emitByte(0xBE);
+        emitOperandHelper(dst, src);
+    }
+
+    public final void movsbq(Register dst, Register src) {
+        int encode = prefixqAndEncode(dst.encoding, src.encoding);
+        emitByte(0x0F);
+        emitByte(0xBE);
+        emitByte(0xC0 | encode);
+    }
+
+    public final void movsd(Register dst, Register src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM);
+        assert src.getRegisterCategory().equals(AMD64.XMM);
+        emitByte(0xF2);
+        int encode = prefixAndEncode(dst.encoding, src.encoding);
+        emitByte(0x0F);
+        emitByte(0x10);
+        emitByte(0xC0 | encode);
+    }
+
+    public final void movsd(Register dst, AMD64Address src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM);
+        emitByte(0xF2);
+        prefix(src, dst);
+        emitByte(0x0F);
+        emitByte(0x10);
+        emitOperandHelper(dst, src);
+    }
+
+    public final void movsd(AMD64Address dst, Register src) {
+        assert src.getRegisterCategory().equals(AMD64.XMM);
+        emitByte(0xF2);
+        prefix(dst, src);
+        emitByte(0x0F);
+        emitByte(0x11);
+        emitOperandHelper(src, dst);
+    }
+
+    public final void movss(Register dst, Register src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM);
+        assert src.getRegisterCategory().equals(AMD64.XMM);
+        emitByte(0xF3);
+        int encode = prefixAndEncode(dst.encoding, src.encoding);
+        emitByte(0x0F);
+        emitByte(0x10);
+        emitByte(0xC0 | encode);
+    }
+
+    public final void movss(Register dst, AMD64Address src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM);
+        emitByte(0xF3);
+        prefix(src, dst);
+        emitByte(0x0F);
+        emitByte(0x10);
+        emitOperandHelper(dst, src);
+    }
+
+    public final void movss(AMD64Address dst, Register src) {
+        assert src.getRegisterCategory().equals(AMD64.XMM);
+        emitByte(0xF3);
+        prefix(dst, src);
+        emitByte(0x0F);
+        emitByte(0x11);
+        emitOperandHelper(src, dst);
+    }
+
+    public final void movswl(Register dst, AMD64Address src) {
+        prefix(src, dst);
+        emitByte(0x0F);
+        emitByte(0xBF);
+        emitOperandHelper(dst, src);
+    }
+
+    public final void movw(AMD64Address dst, int imm16) {
+        emitByte(0x66); // switch to 16-bit mode
+        prefix(dst);
+        emitByte(0xC7);
+        emitOperandHelper(0, dst);
+        emitShort(imm16);
+    }
+
+    public final void movw(AMD64Address dst, Register src) {
+        emitByte(0x66);
+        prefix(dst, src);
+        emitByte(0x89);
+        emitOperandHelper(src, dst);
+    }
+
+    public final void movzbl(Register dst, AMD64Address src) {
+        prefix(src, dst);
+        emitByte(0x0F);
+        emitByte(0xB6);
+        emitOperandHelper(dst, src);
+    }
+
+    public final void movzwl(Register dst, AMD64Address src) {
+        prefix(src, dst);
+        emitByte(0x0F);
+        emitByte(0xB7);
+        emitOperandHelper(dst, src);
+    }
+
+    @Override
+    public final void ensureUniquePC() {
+        nop();
+    }
+
+    public final void nop() {
+        nop(1);
+    }
+
+    public void nop(int count) {
+        int i = count;
+        if (UseNormalNop) {
+            assert i > 0 : "nop count must be positive";
+            // The fancy NOPs aren't currently recognized by debuggers, making it a
+            // pain to disassemble code while debugging. If asserts are on, speed is
+            // clearly not an issue, so simply use the traditional single-byte NOP
+            // for alignment.
+
+            for (; i > 0; i--) {
+                emitByte(0x90);
+            }
+            return;
+        }
+
+        if (UseAddressNop) {
+            //
+            // Using multi-byte NOPs of the form "0x0F 0x1F [Address]" for AMD.
+            // 1: 0x90
+            // 2: 0x66 0x90
+            // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching-safe padding)
+            // 4: 0x0F 0x1F 0x40 0x00
+            // 5: 0x0F 0x1F 0x44 0x00 0x00
+            // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
+            // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
+            // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+            // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+            // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+            // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+
+            // The remaining encodings are AMD-specific - use consecutive address NOPs
+
+            // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
+            // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
+            // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
+            // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
+            // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+            // Size prefixes (0x66) are added for larger sizes
+
+            while (i >= 22) {
+                i -= 11;
+                emitByte(0x66); // size prefix
+                emitByte(0x66); // size prefix
+                emitByte(0x66); // size prefix
+                addrNop8();
+            }
+            // Generate the first NOP for sizes between 12 and 21
+            switch (i) {
+                case 21:
+                    i -= 1;
+                    emitByte(0x66); // size prefix
+                    // fall through
+                case 20:
+                    // fall through
+                case 19:
+                    i -= 1;
+                    emitByte(0x66); // size prefix
+                    // fall through
+                case 18:
+                    // fall through
+                case 17:
+                    i -= 1;
+                    emitByte(0x66); // size prefix
+                    // fall through
+                case 16:
+                    // fall through
+                case 15:
+                    i -= 8;
+                    addrNop8();
+                    break;
+                case 14:
+                case 13:
+                    i -= 7;
+                    addrNop7();
+                    break;
+                case 12:
+                    i -= 6;
+                    emitByte(0x66); // size prefix
+                    addrNop5();
+                    break;
+                default:
+                    assert i < 12;
+            }
+
+            // Generate the second NOP for sizes between 1 and 11
+            switch (i) {
+                case 11:
+                    emitByte(0x66); // size prefix
+                    emitByte(0x66); // size prefix
+                    emitByte(0x66); // size prefix
+                    addrNop8();
+                    break;
+                case 10:
+                    emitByte(0x66); // size prefix
+                    emitByte(0x66); // size prefix
+                    addrNop8();
+                    break;
+                case 9:
+                    emitByte(0x66); // size prefix
+                    addrNop8();
+                    break;
+                case 8:
+                    addrNop8();
+                    break;
+                case 7:
+                    addrNop7();
+                    break;
+                case 6:
+                    emitByte(0x66); // size prefix
+                    addrNop5();
+                    break;
+                case 5:
+                    addrNop5();
+                    break;
+                case 4:
+                    addrNop4();
+                    break;
+                case 3:
+                    // Don't use "0x0F 0x1F 0x00" - need patching-safe padding
+                    emitByte(0x66); // size prefix
+                    emitByte(0x66); // size prefix
+                    emitByte(0x90); // nop
+                    break;
+                case 2:
+                    emitByte(0x66); // size prefix
+                    emitByte(0x90); // nop
+                    break;
+                case 1:
+                    emitByte(0x90); // nop
+                    break;
+                default:
+                    assert i == 0;
+            }
+            return;
+        }
+
+        // Using NOPs with operand-size prefixes ("0x66 0x90").
+        // From the AMD Optimization Guide:
+        // 1: 0x90
+        // 2: 0x66 0x90
+        // 3: 0x66 0x66 0x90
+        // 4: 0x66 0x66 0x66 0x90
+        // 5: 0x66 0x66 0x90 0x66 0x90
+        // 6: 0x66 0x66 0x90 0x66 0x66 0x90
+        // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
+        // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
+        // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
+        // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
+        //
+        while (i > 12) {
+            i -= 4;
+            emitByte(0x66); // size prefix
+            emitByte(0x66);
+            emitByte(0x66);
+            emitByte(0x90); // nop
+        }
+        // 1 - 12 nops
+        if (i > 8) {
+            if (i > 9) {
+                i -= 1;
+                emitByte(0x66);
+            }
+            i -= 3;
+            emitByte(0x66);
+            emitByte(0x66);
+            emitByte(0x90);
+        }
+        // 1 - 8 nops
+        if (i > 4) {
+            if (i > 6) {
+                i -= 1;
+                emitByte(0x66);
+            }
+            i -= 3;
+            emitByte(0x66);
+            emitByte(0x66);
+            emitByte(0x90);
+        }
+        switch (i) {
+            case 4:
+                emitByte(0x66);
+                emitByte(0x66);
+                emitByte(0x66);
+                emitByte(0x90);
+                break;
+            case 3:
+                emitByte(0x66);
+                emitByte(0x66);
+                emitByte(0x90);
+                break;
+            case 2:
+                emitByte(0x66);
+                emitByte(0x90);
+                break;
+            case 1:
+                emitByte(0x90);
+                break;
+            default:
+                assert i == 0;
+        }
+    }
+
+    public final void pop(Register dst) {
+        int encode = prefixAndEncode(dst.encoding);
+        emitByte(0x58 | encode);
+    }
+
+    public void popfq() {
+        emitByte(0x9D);
+    }
+
+    public final void ptest(Register dst, Register src) {
+        assert supports(CPUFeature.SSE4_1);
+        emitByte(0x66);
+        int encode = prefixAndEncode(dst.encoding, src.encoding);
+        emitByte(0x0F);
+        emitByte(0x38);
+        emitByte(0x17);
+        emitByte(0xC0 | encode);
+    }
+
+    public final void push(Register src) {
+        int encode = prefixAndEncode(src.encoding);
+        emitByte(0x50 | encode);
+    }
+
+    public void pushfq() {
+        emitByte(0x9c);
+    }
+
+    public final void pxor(Register dst, Register src) {
+        emitByte(0x66);
+        int encode = prefixAndEncode(dst.encoding, src.encoding);
+        emitByte(0x0F);
+        emitByte(0xEF);
+        emitByte(0xC0 | encode);
+    }
+
+    public final void ret(int imm16) {
+        if (imm16 == 0) {
+            emitByte(0xC3);
+        } else {
+            emitByte(0xC2);
+            emitShort(imm16);
+        }
+    }
+
+    public final void subl(AMD64Address dst, int imm32) {
+        SUB.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
+    }
+
+    public final void subl(Register dst, int imm32) {
+        SUB.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
+    }
+
+    public final void testl(Register dst, int imm32) {
+        // Not using emitArith because TEST
+        // does not support sign-extension of
+        // 8-bit immediate operands.
+        int encode = dst.encoding;
+        if (encode == 0) {
+            emitByte(0xA9);
+        } else {
+            encode = prefixAndEncode(encode);
+            emitByte(0xF7);
+            emitByte(0xC0 | encode);
+        }
+        emitInt(imm32);
+    }
+
+    public final void testl(Register dst, Register src) {
+        int encode = prefixAndEncode(dst.encoding, src.encoding);
+        emitByte(0x85);
+        emitByte(0xC0 | encode);
+    }
+
+    public final void testl(Register dst, AMD64Address src) {
+        prefix(src, dst);
+        emitByte(0x85);
+        emitOperandHelper(dst, src);
+    }
+
+    public final void xorl(Register dst, Register src) {
+        XOR.rmOp.emit(this, DWORD, dst, src);
+    }
+
+    public final void xorpd(Register dst, Register src) {
+        emitByte(0x66);
+        xorps(dst, src);
+    }
+
+    public final void xorps(Register dst, Register src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
+        int encode = prefixAndEncode(dst.encoding, src.encoding);
+        emitByte(0x0F);
+        emitByte(0x57);
+        emitByte(0xC0 | encode);
+    }
+
+    protected final void decl(Register dst) {
+        // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
+        int encode = prefixAndEncode(dst.encoding);
+        emitByte(0xFF);
+        emitByte(0xC8 | encode);
+    }
+
+    protected final void incl(Register dst) {
+        // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
+        int encode = prefixAndEncode(dst.encoding);
+        emitByte(0xFF);
+        emitByte(0xC0 | encode);
+    }
+
+    private int prefixAndEncode(int regEnc) {
+        return prefixAndEncode(regEnc, false);
+    }
+
+    private int prefixAndEncode(int regEnc, boolean byteinst) {
+        if (regEnc >= 8) {
+            emitByte(Prefix.REXB);
+            return regEnc - 8;
+        } else if (byteinst && regEnc >= 4) {
+            emitByte(Prefix.REX);
+        }
+        return regEnc;
+    }
+
+    private int prefixqAndEncode(int regEnc) {
+        if (regEnc < 8) {
+            emitByte(Prefix.REXW);
+            return regEnc;
+        } else {
+            emitByte(Prefix.REXWB);
+            return regEnc - 8;
+        }
+    }
+
+    private int prefixAndEncode(int dstEnc, int srcEnc) {
+        return prefixAndEncode(dstEnc, false, srcEnc, false);
+    }
+
+    private int prefixAndEncode(int dstEncoding, boolean dstIsByte, int srcEncoding, boolean srcIsByte) {
+        int srcEnc = srcEncoding;
+        int dstEnc = dstEncoding;
+        if (dstEnc < 8) {
+            if (srcEnc >= 8) {
+                emitByte(Prefix.REXB);
+                srcEnc -= 8;
+            } else if ((srcIsByte && srcEnc >= 4) || (dstIsByte && dstEnc >= 4)) {
+                emitByte(Prefix.REX);
+            }
+        } else {
+            if (srcEnc < 8) {
+                emitByte(Prefix.REXR);
+            } else {
+                emitByte(Prefix.REXRB);
+                srcEnc -= 8;
+            }
+            dstEnc -= 8;
+        }
+        return dstEnc << 3 | srcEnc;
+    }
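+
+    // Note on the prefix helpers: in 64-bit mode the REX prefix has the form 0100WRXB, where the
+    // R bit extends the ModRM reg field and the B bit extends the ModRM r/m field, which is why
+    // encodings >= 8 subtract 8 and emit a REX variant. The returned value is the lower 6 bits of
+    // the ModRM byte (reg << 3 | rm); callers OR in the mod bits (e.g. 0xC0 for register forms).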
+
+    /**
+     * Emits the REX.W prefix (with R/B bits as needed) and computes the lower 6 bits of the
+     * ModRM byte. If a register encoding exceeds 3 bits, its 4th bit is encoded in the prefix.
+     *
+     * @param regEncoding the encoding of the register part of the ModRM-Byte
+     * @param rmEncoding the encoding of the r/m part of the ModRM-Byte
+     * @return the lower 6 bits of the ModRM-Byte that should be emitted
+     */
+    private int prefixqAndEncode(int regEncoding, int rmEncoding) {
+        int rmEnc = rmEncoding;
+        int regEnc = regEncoding;
+        if (regEnc < 8) {
+            if (rmEnc < 8) {
+                emitByte(Prefix.REXW);
+            } else {
+                emitByte(Prefix.REXWB);
+                rmEnc -= 8;
+            }
+        } else {
+            if (rmEnc < 8) {
+                emitByte(Prefix.REXWR);
+            } else {
+                emitByte(Prefix.REXWRB);
+                rmEnc -= 8;
+            }
+            regEnc -= 8;
+        }
+        return regEnc << 3 | rmEnc;
+    }
+
+    private static boolean needsRex(Register reg) {
+        return reg.encoding >= MinEncodingNeedsRex;
+    }
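+
+    // Registers r8-r15 (and xmm8-xmm15) have encodings of 8 or more and therefore always need a
+    // REX prefix to carry their 4th encoding bit.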
+
+    private void prefix(AMD64Address adr) {
+        if (needsRex(adr.getBase())) {
+            if (needsRex(adr.getIndex())) {
+                emitByte(Prefix.REXXB);
+            } else {
+                emitByte(Prefix.REXB);
+            }
+        } else {
+            if (needsRex(adr.getIndex())) {
+                emitByte(Prefix.REXX);
+            }
+        }
+    }
+
+    private void prefixq(AMD64Address adr) {
+        if (needsRex(adr.getBase())) {
+            if (needsRex(adr.getIndex())) {
+                emitByte(Prefix.REXWXB);
+            } else {
+                emitByte(Prefix.REXWB);
+            }
+        } else {
+            if (needsRex(adr.getIndex())) {
+                emitByte(Prefix.REXWX);
+            } else {
+                emitByte(Prefix.REXW);
+            }
+        }
+    }
+
+    private void prefix(AMD64Address adr, Register reg) {
+        prefix(adr, reg, false);
+    }
+
+    private void prefix(AMD64Address adr, Register reg, boolean byteinst) {
+        if (reg.encoding < 8) {
+            if (needsRex(adr.getBase())) {
+                if (needsRex(adr.getIndex())) {
+                    emitByte(Prefix.REXXB);
+                } else {
+                    emitByte(Prefix.REXB);
+                }
+            } else {
+                if (needsRex(adr.getIndex())) {
+                    emitByte(Prefix.REXX);
+                } else if (byteinst && reg.encoding >= 4) {
+                    emitByte(Prefix.REX);
+                }
+            }
+        } else {
+            if (needsRex(adr.getBase())) {
+                if (needsRex(adr.getIndex())) {
+                    emitByte(Prefix.REXRXB);
+                } else {
+                    emitByte(Prefix.REXRB);
+                }
+            } else {
+                if (needsRex(adr.getIndex())) {
+                    emitByte(Prefix.REXRX);
+                } else {
+                    emitByte(Prefix.REXR);
+                }
+            }
+        }
+    }
+
+    private void prefixq(AMD64Address adr, Register src) {
+        if (src.encoding < 8) {
+            if (needsRex(adr.getBase())) {
+                if (needsRex(adr.getIndex())) {
+                    emitByte(Prefix.REXWXB);
+                } else {
+                    emitByte(Prefix.REXWB);
+                }
+            } else {
+                if (needsRex(adr.getIndex())) {
+                    emitByte(Prefix.REXWX);
+                } else {
+                    emitByte(Prefix.REXW);
+                }
+            }
+        } else {
+            if (needsRex(adr.getBase())) {
+                if (needsRex(adr.getIndex())) {
+                    emitByte(Prefix.REXWRXB);
+                } else {
+                    emitByte(Prefix.REXWRB);
+                }
+            } else {
+                if (needsRex(adr.getIndex())) {
+                    emitByte(Prefix.REXWRX);
+                } else {
+                    emitByte(Prefix.REXWR);
+                }
+            }
+        }
+    }
+
+    public final void addq(Register dst, int imm32) {
+        ADD.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
+    }
+
+    public final void addq(AMD64Address dst, int imm32) {
+        ADD.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
+    }
+
+    public final void addq(Register dst, Register src) {
+        ADD.rmOp.emit(this, QWORD, dst, src);
+    }
+
+    public final void addq(AMD64Address dst, Register src) {
+        ADD.mrOp.emit(this, QWORD, dst, src);
+    }
+
+    public final void andq(Register dst, int imm32) {
+        AND.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
+    }
+
+    public final void bswapq(Register reg) {
+        int encode = prefixqAndEncode(reg.encoding);
+        emitByte(0x0F);
+        emitByte(0xC8 | encode);
+    }
+
+    public final void cdqq() {
+        emitByte(Prefix.REXW);
+        emitByte(0x99);
+    }
+
+    public final void cmovq(ConditionFlag cc, Register dst, Register src) {
+        int encode = prefixqAndEncode(dst.encoding, src.encoding);
+        emitByte(0x0F);
+        emitByte(0x40 | cc.getValue());
+        emitByte(0xC0 | encode);
+    }
+
+    public final void cmovq(ConditionFlag cc, Register dst, AMD64Address src) {
+        prefixq(src, dst);
+        emitByte(0x0F);
+        emitByte(0x40 | cc.getValue());
+        emitOperandHelper(dst, src);
+    }
+
+    public final void cmpq(Register dst, int imm32) {
+        CMP.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
+    }
+
+    public final void cmpq(Register dst, Register src) {
+        CMP.rmOp.emit(this, QWORD, dst, src);
+    }
+
+    public final void cmpq(Register dst, AMD64Address src) {
+        CMP.rmOp.emit(this, QWORD, dst, src);
+    }
+
+    public final void cmpxchgq(Register reg, AMD64Address adr) {
+        prefixq(adr, reg);
+        emitByte(0x0F);
+        emitByte(0xB1);
+        emitOperandHelper(reg, adr);
+    }
+
+    protected final void decq(Register dst) {
+        // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
+        int encode = prefixqAndEncode(dst.encoding);
+        emitByte(0xFF);
+        emitByte(0xC8 | encode);
+    }
+
+    public final void decq(AMD64Address dst) {
+        DEC.emit(this, QWORD, dst);
+    }
+
+    public final void incq(Register dst) {
+        // Don't use this directly; use the macro assembler's incrementq() instead.
+        // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
+        int encode = prefixqAndEncode(dst.encoding);
+        emitByte(0xFF);
+        emitByte(0xC0 | encode);
+    }
+
+    public final void incq(AMD64Address dst) {
+        INC.emit(this, QWORD, dst);
+    }
+
+    public final void movq(Register dst, long imm64) {
+        int encode = prefixqAndEncode(dst.encoding);
+        emitByte(0xB8 | encode);
+        emitLong(imm64);
+    }
+
+    public final void movslq(Register dst, int imm32) {
+        int encode = prefixqAndEncode(dst.encoding);
+        emitByte(0xC7);
+        emitByte(0xC0 | encode);
+        emitInt(imm32);
+    }
+
+    public final void movdq(Register dst, Register src) {
+
+        // table D-1 says MMX/SSE2
+        emitByte(0x66);
+
+        if (dst.getRegisterCategory().equals(AMD64.XMM)) {
+            int encode = prefixqAndEncode(dst.encoding, src.encoding);
+            emitByte(0x0F);
+            emitByte(0x6E);
+            emitByte(0xC0 | encode);
+        } else if (src.getRegisterCategory().equals(AMD64.XMM)) {
+
+            // swap src/dst to get correct prefix
+            int encode = prefixqAndEncode(src.encoding, dst.encoding);
+            emitByte(0x0F);
+            emitByte(0x7E);
+            emitByte(0xC0 | encode);
+        } else {
+            throw new InternalError("should not reach here");
+        }
+    }
+
+    public final void movdqu(Register dst, AMD64Address src) {
+        emitByte(0xF3);
+        prefix(src, dst);
+        emitByte(0x0F);
+        emitByte(0x6F);
+        emitOperandHelper(dst, src);
+    }
+
+    public final void movslq(AMD64Address dst, int imm32) {
+        prefixq(dst);
+        emitByte(0xC7);
+        emitOperandHelper(0, dst);
+        emitInt(imm32);
+    }
+
+    public final void movslq(Register dst, AMD64Address src) {
+        prefixq(src, dst);
+        emitByte(0x63);
+        emitOperandHelper(dst, src);
+    }
+
+    public final void movslq(Register dst, Register src) {
+        int encode = prefixqAndEncode(dst.encoding, src.encoding);
+        emitByte(0x63);
+        emitByte(0xC0 | encode);
+    }
+
+    public final void negq(Register dst) {
+        int encode = prefixqAndEncode(dst.encoding);
+        emitByte(0xF7);
+        emitByte(0xD8 | encode);
+    }
+
+    public final void shlq(Register dst, int imm8) {
+        assert isShiftCount(imm8 >> 1) : "illegal shift count";
+        int encode = prefixqAndEncode(dst.encoding);
+        if (imm8 == 1) {
+            emitByte(0xD1);
+            emitByte(0xE0 | encode);
+        } else {
+            emitByte(0xC1);
+            emitByte(0xE0 | encode);
+            emitByte(imm8);
+        }
+    }
+
+    public final void shrq(Register dst, int imm8) {
+        assert isShiftCount(imm8 >> 1) : "illegal shift count";
+        int encode = prefixqAndEncode(dst.encoding);
+        if (imm8 == 1) {
+            emitByte(0xD1);
+            emitByte(0xE8 | encode);
+        } else {
+            emitByte(0xC1);
+            emitByte(0xE8 | encode);
+            emitByte(imm8);
+        }
+    }
+
+    public final void subq(Register dst, int imm32) {
+        SUB.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
+    }
+
+    public final void subq(AMD64Address dst, int imm32) {
+        SUB.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
+    }
+
+    public final void subqWide(Register dst, int imm32) {
+        // don't use the sign-extending version, forcing a 32-bit immediate
+        SUB.getMIOpcode(QWORD, false).emit(this, QWORD, dst, imm32);
+    }
+
+    public final void subq(Register dst, Register src) {
+        SUB.rmOp.emit(this, QWORD, dst, src);
+    }
+
+    public final void testq(Register dst, Register src) {
+        int encode = prefixqAndEncode(dst.encoding, src.encoding);
+        emitByte(0x85);
+        emitByte(0xC0 | encode);
+    }
+
+    public final void xaddl(AMD64Address dst, Register src) {
+        prefix(dst, src);
+        emitByte(0x0F);
+        emitByte(0xC1);
+        emitOperandHelper(src, dst);
+    }
+
+    public final void xaddq(AMD64Address dst, Register src) {
+        prefixq(dst, src);
+        emitByte(0x0F);
+        emitByte(0xC1);
+        emitOperandHelper(src, dst);
+    }
+
+    public final void xchgl(Register dst, AMD64Address src) {
+        prefix(src, dst);
+        emitByte(0x87);
+        emitOperandHelper(dst, src);
+    }
+
+    public final void xchgq(Register dst, AMD64Address src) {
+        prefixq(src, dst);
+        emitByte(0x87);
+        emitOperandHelper(dst, src);
+    }
+
+    public final void membar(int barriers) {
+        if (target.isMP) {
+            // We only have to handle StoreLoad
+            if ((barriers & STORE_LOAD) != 0) {
+                // All usable chips support "locked" instructions, which suffice
+                // as barriers and are much faster than the alternative of using
+                // the cpuid instruction. Here we use a locked add of 0 to [rsp],
+                // which is conveniently a no-op apart from clobbering the flags.
+                // Any change to this code may need to revisit other places in
+                // the code where this idiom is used, in particular the
+                // orderAccess code.
+                lock();
+                addl(new AMD64Address(rsp, 0), 0); // Assert the lock# signal here
+            }
+        }
+    }
+
+    @Override
+    protected final void patchJumpTarget(int branch, int branchTarget) {
+        int op = getByte(branch);
+        assert op == 0xE8 // call
+                        || op == 0x00 // jump table entry
+                        || op == 0xE9 // jmp
+                        || op == 0xEB // short jmp
+                        || (op & 0xF0) == 0x70 // short jcc
+                        || op == 0x0F && (getByte(branch + 1) & 0xF0) == 0x80 // jcc
+        : "Invalid opcode at patch point branch=" + branch + ", branchTarget=" + branchTarget + ", op=" + op;
+
+        if (op == 0x00) {
+            int offsetToJumpTableBase = getShort(branch + 1);
+            int jumpTableBase = branch - offsetToJumpTableBase;
+            int imm32 = branchTarget - jumpTableBase;
+            emitInt(imm32, branch);
+        } else if (op == 0xEB || (op & 0xF0) == 0x70) {
+
+            // short offset operators (jmp and jcc)
+            final int imm8 = branchTarget - (branch + 2);
+            /*
+             * Since a wrongly patched short branch can potentially lead to code that works but
+             * behaves badly, we always fail with an exception here instead of relying on an assert.
+             */
+            if (!NumUtil.isByte(imm8)) {
+                throw new InternalError("branch displacement out of range: " + imm8);
+            }
+            emitByte(imm8, branch + 1);
+
+        } else {
+
+            int off = 1;
+            if (op == 0x0F) {
+                off = 2;
+            }
+
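+            // The 32-bit displacement is relative to the end of the instruction, i.e. the opcode
+            // byte(s) plus the 4-byte displacement itself, hence branch + 4 + off.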
+            int imm32 = branchTarget - (branch + 4 + off);
+            emitInt(imm32, branch + off);
+        }
+    }
+
+    public void nullCheck(AMD64Address address) {
+        testl(AMD64.rax, address);
+    }
+
+    @Override
+    public void align(int modulus) {
+        if (position() % modulus != 0) {
+            nop(modulus - (position() % modulus));
+        }
+    }
+
+    /**
+     * Emits a direct call instruction. Note that the actual call target is not specified, because
+     * all calls need patching anyway. Therefore, 0 is emitted as the call target, and the user is
+     * responsible for adding the call address to the appropriate patching tables.
+     */
+    public final void call() {
+        emitByte(0xE8);
+        emitInt(0);
+    }
+
+    public final void call(Register src) {
+        int encode = prefixAndEncode(src.encoding);
+        emitByte(0xFF);
+        emitByte(0xD0 | encode);
+    }
+
+    public final void int3() {
+        emitByte(0xCC);
+    }
+
+    private void emitx87(int b1, int b2, int i) {
+        assert 0 <= i && i < 8 : "illegal stack offset";
+        emitByte(b1);
+        emitByte(b2 + i);
+    }
+
+    public final void fldd(AMD64Address src) {
+        emitByte(0xDD);
+        emitOperandHelper(0, src);
+    }
+
+    public final void flds(AMD64Address src) {
+        emitByte(0xD9);
+        emitOperandHelper(0, src);
+    }
+
+    public final void fldln2() {
+        emitByte(0xD9);
+        emitByte(0xED);
+    }
+
+    public final void fldlg2() {
+        emitByte(0xD9);
+        emitByte(0xEC);
+    }
+
+    public final void fyl2x() {
+        emitByte(0xD9);
+        emitByte(0xF1);
+    }
+
+    public final void fstps(AMD64Address src) {
+        emitByte(0xD9);
+        emitOperandHelper(3, src);
+    }
+
+    public final void fstpd(AMD64Address src) {
+        emitByte(0xDD);
+        emitOperandHelper(3, src);
+    }
+
+    private void emitFPUArith(int b1, int b2, int i) {
+        assert 0 <= i && i < 8 : "illegal FPU register: " + i;
+        emitByte(b1);
+        emitByte(b2 + i);
+    }
+
+    public void ffree(int i) {
+        emitFPUArith(0xDD, 0xC0, i);
+    }
+
+    public void fincstp() {
+        emitByte(0xD9);
+        emitByte(0xF7);
+    }
+
+    public void fxch(int i) {
+        emitFPUArith(0xD9, 0xC8, i);
+    }
+
+    public void fnstswAX() {
+        emitByte(0xDF);
+        emitByte(0xE0);
+    }
+
+    public void fwait() {
+        emitByte(0x9B);
+    }
+
+    public void fprem() {
+        emitByte(0xD9);
+        emitByte(0xF8);
+    }
+
+    public final void fsin() {
+        emitByte(0xD9);
+        emitByte(0xFE);
+    }
+
+    public final void fcos() {
+        emitByte(0xD9);
+        emitByte(0xFF);
+    }
+
+    public final void fptan() {
+        emitByte(0xD9);
+        emitByte(0xF2);
+    }
+
+    public final void fstp(int i) {
+        emitx87(0xDD, 0xD8, i);
+    }
+
+    @Override
+    public AMD64Address makeAddress(Register base, int displacement) {
+        return new AMD64Address(base, displacement);
+    }
+
+    @Override
+    public AMD64Address getPlaceholder() {
+        return Placeholder;
+    }
+
+    private void prefetchPrefix(AMD64Address src) {
+        prefix(src);
+        emitByte(0x0F);
+    }
+
+    public void prefetchnta(AMD64Address src) {
+        prefetchPrefix(src);
+        emitByte(0x18);
+        emitOperandHelper(0, src);
+    }
+
+    void prefetchr(AMD64Address src) {
+        assert supports(CPUFeature.AMD_3DNOW_PREFETCH);
+        prefetchPrefix(src);
+        emitByte(0x0D);
+        emitOperandHelper(0, src);
+    }
+
+    public void prefetcht0(AMD64Address src) {
+        assert supports(CPUFeature.SSE);
+        prefetchPrefix(src);
+        emitByte(0x18);
+        emitOperandHelper(1, src);
+    }
+
+    public void prefetcht1(AMD64Address src) {
+        assert supports(CPUFeature.SSE);
+        prefetchPrefix(src);
+        emitByte(0x18);
+        emitOperandHelper(2, src);
+    }
+
+    public void prefetcht2(AMD64Address src) {
+        assert supports(CPUFeature.SSE);
+        prefix(src);
+        emitByte(0x0f);
+        emitByte(0x18);
+        emitOperandHelper(3, src);
+    }
+
+    public void prefetchw(AMD64Address src) {
+        assert supports(CPUFeature.AMD_3DNOW_PREFETCH);
+        prefix(src);
+        emitByte(0x0f);
+        emitByte(0x0D);
+        emitOperandHelper(1, src);
+    }
+
+    /**
+     * Emits an instruction which is considered to be illegal. This is used when we deliberately
+     * want to crash the program (e.g. for debugging).
+     */
+    public void illegal() {
+        emitByte(0x0f);
+        emitByte(0x0b);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.jvmci.asm.amd64/src/com/oracle/jvmci/asm/amd64/AMD64MacroAssembler.java	Wed Jun 03 18:06:44 2015 +0200
@@ -0,0 +1,344 @@
+/*
+ * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.jvmci.asm.amd64;
+
+import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.code.Register;
+import com.oracle.jvmci.code.CalleeSaveLayout;
+import com.oracle.jvmci.code.TargetDescription;
+import com.oracle.jvmci.code.RegisterConfig;
+import com.oracle.jvmci.meta.Kind;
+
+import static com.oracle.jvmci.asm.amd64.AMD64AsmOptions.*;
+
+/**
+ * This class implements commonly used X86 code patterns.
+ */
+public class AMD64MacroAssembler extends AMD64Assembler {
+
+    public AMD64MacroAssembler(TargetDescription target, RegisterConfig registerConfig) {
+        super(target, registerConfig);
+    }
+
+    public final void decrementq(Register reg, int value) {
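+        // Integer.MIN_VALUE is handled up front because negating it would overflow; the same
+        // guard appears in the other increment/decrement helpers below.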
+        if (value == Integer.MIN_VALUE) {
+            subq(reg, value);
+            return;
+        }
+        if (value < 0) {
+            incrementq(reg, -value);
+            return;
+        }
+        if (value == 0) {
+            return;
+        }
+        if (value == 1 && UseIncDec) {
+            decq(reg);
+        } else {
+            subq(reg, value);
+        }
+    }
+
+    public final void decrementq(AMD64Address dst, int value) {
+        if (value == Integer.MIN_VALUE) {
+            subq(dst, value);
+            return;
+        }
+        if (value < 0) {
+            incrementq(dst, -value);
+            return;
+        }
+        if (value == 0) {
+            return;
+        }
+        if (value == 1 && UseIncDec) {
+            decq(dst);
+        } else {
+            subq(dst, value);
+        }
+    }
+
+    public void incrementq(Register reg, int value) {
+        if (value == Integer.MIN_VALUE) {
+            addq(reg, value);
+            return;
+        }
+        if (value < 0) {
+            decrementq(reg, -value);
+            return;
+        }
+        if (value == 0) {
+            return;
+        }
+        if (value == 1 && UseIncDec) {
+            incq(reg);
+        } else {
+            addq(reg, value);
+        }
+    }
+
+    public final void incrementq(AMD64Address dst, int value) {
+        if (value == Integer.MIN_VALUE) {
+            addq(dst, value);
+            return;
+        }
+        if (value < 0) {
+            decrementq(dst, -value);
+            return;
+        }
+        if (value == 0) {
+            return;
+        }
+        if (value == 1 && UseIncDec) {
+            incq(dst);
+        } else {
+            addq(dst, value);
+        }
+    }
+
+    public final void movptr(Register dst, AMD64Address src) {
+        movq(dst, src);
+    }
+
+    public final void movptr(AMD64Address dst, Register src) {
+        movq(dst, src);
+    }
+
+    public final void movptr(AMD64Address dst, int src) {
+        movslq(dst, src);
+    }
+
+    public final void cmpptr(Register src1, Register src2) {
+        cmpq(src1, src2);
+    }
+
+    public final void cmpptr(Register src1, AMD64Address src2) {
+        cmpq(src1, src2);
+    }
+
+    public final void decrementl(Register reg, int value) {
+        if (value == Integer.MIN_VALUE) {
+            subl(reg, value);
+            return;
+        }
+        if (value < 0) {
+            incrementl(reg, -value);
+            return;
+        }
+        if (value == 0) {
+            return;
+        }
+        if (value == 1 && UseIncDec) {
+            decl(reg);
+        } else {
+            subl(reg, value);
+        }
+    }
+
+    public final void decrementl(AMD64Address dst, int value) {
+        if (value == Integer.MIN_VALUE) {
+            subl(dst, value);
+            return;
+        }
+        if (value < 0) {
+            incrementl(dst, -value);
+            return;
+        }
+        if (value == 0) {
+            return;
+        }
+        if (value == 1 && UseIncDec) {
+            decl(dst);
+        } else {
+            subl(dst, value);
+        }
+    }
+
+    public final void incrementl(Register reg, int value) {
+        if (value == Integer.MIN_VALUE) {
+            addl(reg, value);
+            return;
+        }
+        if (value < 0) {
+            decrementl(reg, -value);
+            return;
+        }
+        if (value == 0) {
+            return;
+        }
+        if (value == 1 && UseIncDec) {
+            incl(reg);
+        } else {
+            addl(reg, value);
+        }
+    }
+
+    public final void incrementl(AMD64Address dst, int value) {
+        if (value == Integer.MIN_VALUE) {
+            addl(dst, value);
+            return;
+        }
+        if (value < 0) {
+            decrementl(dst, -value);
+            return;
+        }
+        if (value == 0) {
+            return;
+        }
+        if (value == 1 && UseIncDec) {
+            incl(dst);
+        } else {
+            addl(dst, value);
+        }
+    }
+
+    public void movflt(Register dst, Register src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
+        if (UseXmmRegToRegMoveAll) {
+            movaps(dst, src);
+        } else {
+            movss(dst, src);
+        }
+    }
+
+    public void movflt(Register dst, AMD64Address src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM);
+        movss(dst, src);
+    }
+
+    public void movflt(AMD64Address dst, Register src) {
+        assert src.getRegisterCategory().equals(AMD64.XMM);
+        movss(dst, src);
+    }
+
+    public void movdbl(Register dst, Register src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
+        if (UseXmmRegToRegMoveAll) {
+            movapd(dst, src);
+        } else {
+            movsd(dst, src);
+        }
+    }
+
+    public void movdbl(Register dst, AMD64Address src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM);
+        if (UseXmmLoadAndClearUpper) {
+            movsd(dst, src);
+        } else {
+            movlpd(dst, src);
+        }
+    }
+
+    public void movdbl(AMD64Address dst, Register src) {
+        assert src.getRegisterCategory().equals(AMD64.XMM);
+        movsd(dst, src);
+    }
+
+    /**
+     * Non-atomic write of a 64-bit constant to memory. Do not use if the address might be a
+     * volatile field!
+     */
+    public final void movlong(AMD64Address dst, long src) {
+        if (NumUtil.isInt(src)) {
+            AMD64MIOp.MOV.emit(this, OperandSize.QWORD, dst, (int) src);
+        } else {
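+            // Split the constant into two 32-bit stores: the low half at dst and the high half at
+            // dst + 4 (little-endian), which is why this write is not atomic.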
+            AMD64Address high = new AMD64Address(dst.getBase(), dst.getIndex(), dst.getScale(), dst.getDisplacement() + 4);
+            movl(dst, (int) (src & 0xFFFFFFFF));
+            movl(high, (int) (src >> 32));
+        }
+
+    }
+
+    public final void flog(Register dest, Register value, boolean base10) {
+        if (base10) {
+            fldlg2();
+        } else {
+            fldln2();
+        }
+        AMD64Address tmp = trigPrologue(value);
+        fyl2x();
+        trigEpilogue(dest, tmp);
+    }
+
+    public final void fsin(Register dest, Register value) {
+        AMD64Address tmp = trigPrologue(value);
+        fsin();
+        trigEpilogue(dest, tmp);
+    }
+
+    public final void fcos(Register dest, Register value) {
+        AMD64Address tmp = trigPrologue(value);
+        fcos();
+        trigEpilogue(dest, tmp);
+    }
+
+    public final void ftan(Register dest, Register value) {
+        AMD64Address tmp = trigPrologue(value);
+        fptan();
+        fstp(0); // fptan pushes 1.0 in addition to the actual result; pop it
+        trigEpilogue(dest, tmp);
+    }
+
+    public final void fpop() {
+        ffree(0);
+        fincstp();
+    }
+
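+    // A note on the helpers below: the XMM argument is spilled to a stack slot, loaded onto the
+    // x87 stack for the transcendental instruction, and the result is stored back to the slot and
+    // reloaded into an XMM register.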
+    private AMD64Address trigPrologue(Register value) {
+        assert value.getRegisterCategory().equals(AMD64.XMM);
+        AMD64Address tmp = new AMD64Address(AMD64.rsp);
+        subq(AMD64.rsp, target.getSizeInBytes(Kind.Double));
+        movdbl(tmp, value);
+        fldd(tmp);
+        return tmp;
+    }
+
+    private void trigEpilogue(Register dest, AMD64Address tmp) {
+        assert dest.getRegisterCategory().equals(AMD64.XMM);
+        fstpd(tmp);
+        movdbl(dest, tmp);
+        addq(AMD64.rsp, target.getSizeInBytes(Kind.Double));
+    }
+
+    /**
+     * Emit code to save a given set of callee save registers in the {@linkplain CalleeSaveLayout
+     * CSA} within the frame.
+     *
+     * @param csl the description of the CSA
+     * @param frameToCSA offset from the frame pointer to the CSA
+     */
+    public final void save(CalleeSaveLayout csl, int frameToCSA) {
+        for (Register r : csl.registers) {
+            int offset = csl.offsetOf(r);
+            movq(new AMD64Address(frameRegister, frameToCSA + offset), r);
+        }
+    }
+
+    public final void restore(CalleeSaveLayout csl, int frameToCSA) {
+        for (Register r : csl.registers) {
+            int offset = csl.offsetOf(r);
+            movq(r, new AMD64Address(frameRegister, frameToCSA + offset));
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.jvmci.asm.sparc/src/com/oracle/jvmci/asm/sparc/SPARCAddress.java	Wed Jun 03 18:06:44 2015 +0200
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.jvmci.asm.sparc;
+
+import static com.oracle.jvmci.sparc.SPARC.*;
+
+import com.oracle.jvmci.code.*;
+import com.oracle.jvmci.sparc.*;
+
+public class SPARCAddress extends AbstractAddress {
+
+    private final Register base;
+    private final Register index;
+    private final int displacement;
+
+    /**
+     * Creates a {@link SPARCAddress} with a given base register, no index register and a given
+     * displacement.
+     *
+     * @param base the base register
+     * @param displacement the displacement
+     */
+    public SPARCAddress(Register base, int displacement) {
+        this.base = base;
+        this.index = Register.None;
+        this.displacement = displacement;
+    }
+
+    /**
+     * Creates a {@link SPARCAddress} with a given base register, a given index register and no
+     * displacement.
+     *
+     * @param base the base register
+     * @param index the index register
+     */
+    public SPARCAddress(Register base, Register index) {
+        this.base = base;
+        this.index = index;
+        this.displacement = 0;
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder s = new StringBuilder();
+        s.append("[");
+        String sep = "";
+        if (!getBase().equals(Register.None)) {
+            s.append(getBase());
+            sep = " + ";
+        }
+        if (!getIndex().equals(Register.None)) {
+            s.append(sep).append(getIndex());
+            sep = " + ";
+        } else {
+            if (getDisplacement() < 0) {
+                s.append(" - ").append(-getDisplacement());
+            } else if (getDisplacement() > 0) {
+                s.append(sep).append(getDisplacement());
+            }
+        }
+        s.append("]");
+        return s.toString();
+    }
+
+    /**
+     * @return the base register that defines the start of the address computation;
+     *         {@link Register#None} if not present
+     */
+    public Register getBase() {
+        return base;
+    }
+
+    /**
+     * @return the index register whose value is added to {@link #getBase};
+     *         {@link Register#None} if not present
+     */
+    public Register getIndex() {
+        return index;
+    }
+
+    /**
+     * @return true if this address has an index register
+     */
+    public boolean hasIndex() {
+        return !getIndex().equals(Register.None);
+    }
+
+    /**
+     * Returns the displacement, adding the stack bias if the base register is either
+     * {@link SPARC#sp} or {@link SPARC#fp}. Must not be called if this address has an index
+     * register.
+     *
+     * @return the displacement, including the stack bias where applicable
+     */
+    public int getDisplacement() {
+        if (hasIndex()) {
+            throw new InternalError("address has index register");
+        }
+        // TODO Should we also hide the register save area size here?
+        if (getBase().equals(sp) || getBase().equals(fp)) {
+            return displacement + STACK_BIAS;
+        }
+        return displacement;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.jvmci.asm.sparc/src/com/oracle/jvmci/asm/sparc/SPARCAssembler.java	Wed Jun 03 18:06:44 2015 +0200
@@ -0,0 +1,1747 @@
+/*
+ * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.jvmci.asm.sparc;
+
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.CC.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.ConditionFlag.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.Op.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.Op3s.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.Opfs.*;
+import static com.oracle.jvmci.sparc.SPARC.*;
+import static java.lang.String.*;
+
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.code.*;
+import com.oracle.jvmci.meta.*;
+import com.oracle.jvmci.sparc.*;
+import com.oracle.jvmci.sparc.SPARC.CPUFeature;
+
+/**
+ * This class implements an assembler that can encode most SPARC instructions.
+ */
+public abstract class SPARCAssembler extends Assembler {
+
+    /**
+     * Constructs an assembler for the SPARC architecture.
+     *
+     * @param registerConfig the register configuration used to bind {@link Register#Frame} and
+     *            {@link Register#CallerFrame} to physical registers. This value can be null if this
+     *            assembler instance will not be used to assemble instructions using these logical
+     *            registers.
+     */
+    public SPARCAssembler(TargetDescription target, RegisterConfig registerConfig) {
+        super(target);
+    }
+
+    public static final int CCR_ICC_SHIFT = 0;
+    public static final int CCR_XCC_SHIFT = 4;
+    public static final int CCR_C_SHIFT = 0;
+    public static final int CCR_V_SHIFT = 1;
+    public static final int CCR_Z_SHIFT = 2;
+    public static final int CCR_N_SHIFT = 3;
+
+    protected static final int OP_SHIFT = 30;
+    protected static final int CBCOND_SHIFT = 28;
+    protected static final int OP2_SHIFT = 22;
+    protected static final int A_SHIFT = 29;
+
+    protected static final int A_MASK = 0b0010_0000_0000_0000_0000_0000_0000_0000;
+    protected static final int OP_MASK = 0b1100_0000_0000_0000_0000_0000_0000_0000;
+    // Used to distinguish CBcond and BPr instructions.
+    protected static final int CBCOND_MASK = 0b0001_0000_0000_0000_0000_0000_0000_0000;
+    protected static final int OP2_MASK = 0b0000_0001_1100_0000_0000_0000_0000_0000;
+
+    protected static final int DISP22_SHIFT = 0;
+    protected static final int DISP22_MASK = 0b00000000001111111111111111111111;
+
+    protected static final int DISP19_SHIFT = 0;
+    protected static final int DISP19_MASK = 0b00000000000001111111111111111111;
+
+    protected static final int D16HI_SHIFT = 20;
+    protected static final int D16HI_MASK = 0b0000_0000_0011_0000_0000_0000_0000_0000;
+    protected static final int D16LO_SHIFT = 0;
+    protected static final int D16LO_MASK = 0b0000_0000_0000_0000_0011_1111_1111_1111;
+
+    protected static final int D10LO_MASK = 0b0000_0000_0000_0000_0001_1111_1110_0000;
+    protected static final int D10HI_MASK = 0b0000_0000_0001_1000_0000_0000_0000_0000;
+    protected static final int D10LO_SHIFT = 5;
+    protected static final int D10HI_SHIFT = 19;
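+
+    // The 16-bit BPr displacement is split across the instruction word: d16hi occupies bits 21:20
+    // and d16lo occupies bits 13:0, which is what the D16HI/D16LO masks and shifts above describe.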
+
+    public enum Ops {
+        // @formatter:off
+
+        BranchOp(0b00),
+        CallOp(0b01),
+        ArithOp(0b10),
+        LdstOp(0b11);
+
+        // @formatter:on
+
+        private final int value;
+
+        private Ops(int value) {
+            this.value = value;
+        }
+
+        public int getValue() {
+            return value;
+        }
+
+        public boolean appliesTo(int instructionWord) {
+            int opShift = 30;
+            return (instructionWord >>> opShift) == value;
+        }
+    }
+
+    public enum Op {
+        Op00(0b00),
+        Op01(0b01),
+        Op10(0b10),
+        Op11(0b11);
+        int op;
+
+        Op(int op) {
+            this.op = op;
+        }
+    }
+
+    public enum Op2s {
+        // @formatter:off
+
+        Illtrap(0b000),
+        Bpr    (0b011),
+        Fb     (0b110),
+        Fbp    (0b101),
+        Br     (0b010),
+        Bp     (0b001),
+        Cb     (0b111),
+        Sethi  (0b100);
+
+
+        // @formatter:on
+
+        private final int value;
+
+        private Op2s(int value) {
+            this.value = value;
+        }
+
+        public int getValue() {
+            return value;
+        }
+
+        public static Op2s byValue(int value) {
+            for (Op2s op : values()) {
+                if (op.getValue() == value) {
+                    return op;
+                }
+            }
+            return null;
+        }
+    }
+
+    public enum Op3s {
+        // @formatter:off
+
+        Add(0x00, "add", Op10),
+        And(0x01, "and", Op10),
+        Or(0x02, "or", Op10),
+        Xor(0x03, "xor", Op10),
+        Sub(0x04, "sub", Op10),
+        Andn(0x05, "andn", Op10),
+        Orn(0x06, "orn", Op10),
+        Xnor(0x07, "xnor", Op10),
+        Addc(0x08, "addc", Op10),
+        Mulx(0x09, "mulx", Op10),
+        Umul(0x0A, "umul", Op10),
+        Smul(0x0B, "smul", Op10),
+        Subc(0x0C, "subc", Op10),
+        Udivx(0x0D, "udivx", Op10),
+        Udiv(0x0E, "udiv", Op10),
+        Sdiv(0x0F, "sdiv", Op10),
+
+        Addcc(0x10, "addcc", Op10),
+        Andcc(0x11, "andcc", Op10),
+        Orcc(0x12, "orcc", Op10),
+        Xorcc(0x13, "xorcc", Op10),
+        Subcc(0x14, "subcc", Op10),
+        Andncc(0x15, "andncc", Op10),
+        Orncc(0x16, "orncc", Op10),
+        Xnorcc(0x17, "xnorcc", Op10),
+        Addccc(0x18, "addccc", Op10),
+
+        Umulcc(0x1A, "umulcc", Op10),
+        Smulcc(0x1B, "smulcc", Op10),
+        Subccc(0x1C, "subccc", Op10),
+        Udivcc(0x1E, "udivcc", Op10),
+        Sdivcc(0x1F, "sdivcc", Op10),
+
+        Taddcc(0x20, "taddcc", Op10),
+        Tsubcc(0x21, "tsubcc", Op10),
+        Taddcctv(0x22, "taddcctv", Op10),
+        Tsubcctv(0x23, "tsubcctv", Op10),
+        Mulscc(0x24, "mulscc", Op10),
+        Sll(0x25, "sll", Op10),
+        Sllx(0x25, "sllx", Op10),
+        Srl(0x26, "srl", Op10),
+        Srlx(0x26, "srlx", Op10),
+        Sra(0x27, "sra", Op10),
+        Srax(0x27, "srax", Op10),
+        Membar(0x28, "membar", Op10),
+
+        Flushw(0x2B, "flushw", Op10),
+        Movcc(0x2C, "movcc", Op10),
+        Sdivx(0x2D, "sdivx", Op10),
+        Popc(0x2E, "popc", Op10),
+        Movr(0x2F, "movr", Op10),
+
+        Fpop1(0b11_0100, "fpop1", Op10),
+        Fpop2(0b11_0101, "fpop2", Op10),
+        Impdep1(0b11_0110, "impdep1", Op10),
+        Impdep2(0b11_0111, "impdep2", Op10),
+        Jmpl(0x38, "jmpl", Op10),
+        Rett(0x39, "rett", Op10),
+        Trap(0x3a, "trap", Op10),
+        Flush(0x3b, "flush", Op10),
+        Save(0x3c, "save", Op10),
+        Restore(0x3d, "restore", Op10),
+        Retry(0x3e, "retry", Op10),
+
+
+        Casa(0b111100, "casa", Op11),
+        Casxa(0b111110, "casxa", Op11),
+        Prefetch(0b101101, "prefetch", Op11),
+        Prefetcha(0b111101, "prefetcha", Op11),
+
+        Lduw  (0b00_0000, "lduw", Op11),
+        Ldub  (0b00_0001, "ldub", Op11),
+        Lduh  (0b00_0010, "lduh", Op11),
+        Stw   (0b00_0100, "stw", Op11),
+        Stb   (0b00_0101, "stb", Op11),
+        Sth   (0b00_0110, "sth", Op11),
+        Ldsw  (0b00_1000, "ldsw", Op11),
+        Ldsb  (0b00_1001, "ldsb", Op11),
+        Ldsh  (0b00_1010, "ldsh", Op11),
+        Ldx   (0b00_1011, "ldx", Op11),
+        Stx   (0b00_1110, "stx", Op11),
+
+        Ldf   (0b10_0000, "ldf", Op11),
+        Ldfsr (0b10_0001, "ldfsr", Op11),
+        Ldaf  (0b10_0010, "ldaf", Op11),
+        Lddf  (0b10_0011, "lddf", Op11),
+        Stf   (0b10_0100, "stf", Op11),
+        Stfsr (0b10_0101, "stfsr", Op11),
+        Staf  (0b10_0110, "staf", Op11),
+        Stdf  (0b10_0111, "stdf", Op11),
+
+        Rd    (0b10_1000, "rd", Op10),
+        Wr    (0b11_0000, "wr", Op10),
+        Fcmp  (0b11_0101, "fcmp", Op10),
+
+        Ldxa  (0b01_1011, "ldxa", Op11),
+        Lduwa (0b01_0000, "lduwa", Op11),
+
+        Tcc(0b11_1010, "tcc", Op10);
+
+        // @formatter:on
+
+        private final int value;
+        private final String operator;
+        private final Op op;
+
+        private Op3s(int value, String name, Op op) {
+            this.value = value;
+            this.operator = name;
+            this.op = op;
+        }
+
+        public int getValue() {
+            return value;
+        }
+
+        public String getOperator() {
+            return operator;
+        }
+
+        public boolean appliesTo(int instructionWord) {
+            return ((instructionWord >>> 19) & 0b11_1111) == value;
+        }
+    }
+
+    public enum Opfs {
+        // @formatter:off
+
+        Fmovs(0b0_0000_0001, "fmovs"),
+        Fmovd(0b0_0000_0010, "fmovd"),
+        Fmovq(0b0_0000_0011, "fmovq"),
+        Fmovscc(0b00_0001, "fmovscc"),
+        Fmovdcc(0b00_0010, "fmovdcc"),
+        Fnegs(0x05, "fnegs"),
+        Fnegd(0x06, "fnegd"),
+        Fnegq(0x07, "fnegq"),
+        Fabss(0x09, "fabss"),
+        Fabsd(0x0A, "fabsd"),
+        Fabsq(0x0B, "fabsq"),
+
+        // start VIS1
+        Edge8cc(0x0, "edge8cc"),
+        Edge8n(0x1, "edge8n"),
+        Edge8lcc(0x2, "edge8lcc"),
+        Edge8ln(0x3, "edge8ln"),
+        Edge16cc(0x4, "edge16cc"),
+        Edge16n(0x5, "edge16n"),
+        Edge16lcc(0x6, "edge16lcc"),
+        Edge16ln(0x7, "edge16ln"),
+        Edge32cc(0x8, "edge32cc"),
+        Edge32n(0x9, "edge32n"),
+        Edge32lcc(0xA, "edge32lcc"),
+        Edge32ln(0xB, "edge32ln"),
+        Array8(0x10, "array8"),
+        Array16(0x12, "array16"),
+        Array32(0x14, "array32"),
+        AlignAddress(0x18, "alignaddress"),
+        AlignAddressLittle(0x1A, "alignaddress_little"),
+        Fpcmple16(0x20, "fpcmple16"),
+        Fpcmpne16(0x22, "fpcmpne16"),
+        Fpcmple32(0x24, "fpcmple32"),
+        Fpcmpne32(0x26, "fpcmpne32"),
+        Fpcmpgt16(0x28, "fpcmpgt16"),
+        Fpcmpeq16(0x2A, "fpcmpeq16"),
+        Fpcmpgt32(0x2C, "fpcmpgt32"),
+        Fpcmpeq32(0x2E, "fpcmpeq32"),
+        Fmul8x16(0x31, "fmul8x16"),
+        Fmul8x16au(0x33, "fmul8x16au"),
+        Fmul8x16al(0x35, "fmul8x16al"),
+        Fmul8sux16(0x36, "fmul8sux16"),
+        Fmul8ulx16(0x37, "fmul8ulx16"),
+        Fmuld8sux16(0x38, "fmuld8sux16"),
+        Fmuld8ulx16(0x39, "fmuld8ulx16"),
+        Fpack32(0x3A, "fpack32"),
+        Fpack16(0x3B, "fpack16"),
+        Fpackfix(0x3D, "fpackfix"),
+        Faligndatag(0x48, "faligndata"),
+        Fpmerge(0x4B, "fpmerge"),
+        Fpadd16(0x50, "fpadd16"),
+        Fpadd16s(0x51, "fpadd16s"),
+        Fpadd32(0x52, "fpadd32"),
+        Fpadd32s(0x53, "fpadd32s"),
+        Fpsub16(0x54, "fpsub16"),
+        Fpsub16s(0x55, "fpsub16s"),
+        Fpsub32(0x56, "fpsub32"),
+        Fpsub32s(0x57, "fpsub32s"),
+        Fzerod(0x60, "fzerod"),
+        Fzeros(0x61, "fzeros"),
+        Fnot2d(0x66, "fnot2d"),
+        Fnot2s(0x67, "fnot2s"),
+        Fnot1d(0x6A, "fnot1d"),
+        Fnot1s(0x6B, "fnot1s"),
+        Fsrc1d(0x74, "fsrc1d"),
+        Fsrc1s(0x75, "fsrc1s"),
+        Fsrc2d(0x78, "fsrc2d"),
+        Fsrc2s(0x79, "fsrc2s"),
+        Foned(0x7E, "foned"),
+        Fones(0x7F, "fones"),
+        Fandd(0b0_0111_0000, "fandd"),
+        Fands(0b0_0111_0001, "fands"),
+        Fxord(0b0_0110_1100, "fxord"),
+        Fxors(0b0_0110_1101, "fxors"),
+        // end VIS1
+
+        // start VIS2
+        Bmask(0x19, "bmask"),
+        Bshuffle(0x4c, "bshuffle"),
+        // end VIS2
+
+        // start VIS3
+        Addxc(0x11, "addxc"),
+        Addxccc(0x13, "addxccc"),
+        Cmask8(0x1B, "cmask8"),
+        Cmask16(0x1D, "cmask16"),
+        Cmask32(0x1F, "cmask32"),
+        Fmean16(0x40, "fmean16"),
+        Fnadds(0x51, "fnadds"),
+        Fnaddd(0x52, "fnaddd"),
+        Fnmuls(0x59, "fnmuls"),
+        Fnmuld(0x5A, "fnmuld"),
+        Fnsmuld(0x79, "fnsmuld"),
+        Fnhadds(0x71, "fnhadds"),
+        Fnhaddd(0x72, "fnhaddd"),
+        Movdtox(0x110, "movdtox"),
+        Movstouw(0x111, "movstouw"),
+        Movstosw(0x113, "movstosw"),
+        Movxtod(0x118, "movxtod"),
+        Movwtos(0b1_0001_1001, "movwtos"),
+        UMulxhi(0b0_0001_0110, "umulxhi"),
+        Lzcnt  (0b0_0001_0111, "lzcnt"),
+        // end VIS3
+
+        // start CAMELLIA
+        CammelliaFl(0x13C, "camellia_fl"),
+        CammelliaFli(0x13D, "camellia_fli"),
+        // end CAMELLIA
+
+        // start CRYPTO
+        Crc32c(0x147, "crc32c"),
+        // end CRYPTO
+
+        // start OSA 2011
+        Fpadd64(0x44, "fpadd64"),
+        Fpsub64(0x46, "fpsub64"),
+        Fpadds16(0x58, "fpadds16"),
+        Fpadds16s(0x59, "fpadds16s"),
+        Fpadds32(0x5A, "fpadds32"),
+        Fpadds32s(0x5B, "fpadds32s"),
+        Fpsubs16(0x5C, "fpsubs16"),
+        Fpsubs16s(0x5D, "fpsubs16s"),
+        Fpsubs32(0x5E, "fpsubs32"),
+        Fpsubs32s(0x5F, "fpsubs32s"),
+        Fpcmpne8(0x122, "fpcmpne8"),
+        Fpcmpeq8(0x12C, "fpcmpeq8"),
+        // end OSA 2011
+
+        Fadds(0x41, "fadds"),
+        Faddd(0x42, "faddd"),
+        Faddq(0x43, "faddq"),
+        Fsubs(0x45, "fsubs"),
+        Fsubd(0x46, "fsubd"),
+        Fsubq(0x47, "fsubq"),
+        Fmuls(0x49, "fmuls"),
+        Fmuld(0x4A, "fmuld"),
+        Fdivs(0x4D, "fdivs"),
+        Fdivd(0x4E, "fdivd"),
+        Fdivq(0x4F, "fdivq"),
+
+        Fsqrts(0x29, "fsqrts"),
+        Fsqrtd(0x2A, "fsqrtd"),
+        Fsqrtq(0x2B, "fsqrtq"),
+
+        Fsmuld(0x69, "fsmuld"),
+        Fmulq(0x6B, "fmulq"),
+        Fdmuldq(0x6E, "fdmulq"),
+
+        Fstoi(0xD1, "fstoi"),
+        Fdtoi(0xD2, "fdtoi"),
+        Fstox(0x81, "fstox"),
+        Fdtox(0x82, "fdtox"),
+        Fxtos(0x84, "fxtos"),
+        Fxtod(0x88, "fxtod"),
+        Fxtoq(0x8C, "fxtoq"),
+        Fitos(0xC4, "fitos"),
+        Fdtos(0xC6, "fdtos"),
+        Fitod(0xC8, "fitod"),
+        Fstod(0xC9, "fstod"),
+        Fitoq(0xCC, "fitoq"),
+
+        Fcmps(0x51, "fcmps"),
+        Fcmpd(0x52, "fcmpd"),
+        Fcmpq(0x53, "fcmpq");
+
+        // @formatter:on
+
+        private final int value;
+        private final String operator;
+
+        private Opfs(int value, String op) {
+            this.value = value;
+            this.operator = op;
+        }
+
+        public int getValue() {
+            return value;
+        }
+
+        public String getOperator() {
+            return operator;
+        }
+    }
+
+    public enum Annul {
+        ANNUL(1),
+        NOT_ANNUL(0);
+        public final int flag;
+
+        Annul(int flag) {
+            this.flag = flag;
+        }
+    }
+
+    public enum BranchPredict {
+        PREDICT_TAKEN(1),
+        PREDICT_NOT_TAKEN(0);
+        public final int flag;
+
+        BranchPredict(int flag) {
+            this.flag = flag;
+        }
+    }
+
+    public enum MembarMask {
+        // @formatter:off
+
+        StoreStore(1 << 3, "storestore"),
+        LoadStore(1 << 2, "loadstore"),
+        StoreLoad(1 << 1, "storeload"),
+        LoadLoad(1 << 0, "loadload"),
+        Sync(1 << 6, "sync"),
+        MemIssue(1 << 5, "memissue"),
+        LookAside(1 << 4, "lookaside");
+
+        // @formatter:on
+
+        private final int value;
+        private final String operator;
+
+        private MembarMask(int value, String op) {
+            this.value = value;
+            this.operator = op;
+        }
+
+        public int getValue() {
+            return value | 0x2000;
+        }
+
+        public String getOperator() {
+            return operator;
+        }
+    }
+
+    /**
+     * Condition codes to be used by an instruction.
+     */
+    public enum CC {
+        // @formatter:off
+        /**
+         * The condition is evaluated on the 32-bit integer condition codes (icc).
+         */
+        Icc(0b00, "icc", false),
+        /**
+         * The condition is evaluated on the 64-bit integer condition codes (xcc).
+         */
+        Xcc(0b10, "xcc", false),
+        Fcc0(0b00, "fcc0", true),
+        Fcc1(0b01, "fcc1", true),
+        Fcc2(0b10, "fcc2", true),
+        Fcc3(0b11, "fcc3", true);
+
+        // @formatter:on
+
+        private final int value;
+        private final String operator;
+        private boolean isFloat;
+
+        private CC(int value, String op, boolean isFloat) {
+            this.value = value;
+            this.operator = op;
+            this.isFloat = isFloat;
+        }
+
+        public int getValue() {
+            return value;
+        }
+
+        public String getOperator() {
+            return operator;
+        }
+
+        public static CC forKind(Kind kind) {
+            boolean isInt = kind == Kind.Boolean || kind == Kind.Byte || kind == Kind.Char || kind == Kind.Short || kind == Kind.Int;
+            boolean isFloat = kind == Kind.Float || kind == Kind.Double;
+            boolean isLong = kind == Kind.Long || kind == Kind.Object;
+            assert isInt || isFloat || isLong;
+            if (isLong) {
+                return Xcc;
+            } else if (isInt) {
+                return Icc;
+            } else if (isFloat) {
+                return Fcc0;
+            } else {
+                throw new InternalError();
+            }
+        }
+    }
+
+    public enum ConditionFlag {
+        // @formatter:off
+
+        // for FBfcc & FBPfcc instruction
+        F_Never(0, "f_never"),
+        F_NotEqual(1, "f_notEqual"),
+        F_LessOrGreater(2, "f_lessOrGreater"),
+        F_UnorderedOrLess(3, "f_unorderedOrLess"),
+        F_Less(4, "f_less"),
+        F_UnorderedOrGreater(5, "f_unorderedOrGreater"),
+        F_Greater(6, "f_greater"),
+        F_Unordered(7, "f_unordered"),
+        F_Always(8, "f_always"),
+        F_Equal(9, "f_equal"),
+        F_UnorderedOrEqual(10, "f_unorderedOrEqual"),
+        F_GreaterOrEqual(11, "f_greaterOrEqual"),
+        F_UnorderedGreaterOrEqual(12, "f_unorderedGreaterOrEqual"),
+        F_LessOrEqual(13, "f_lessOrEqual"),
+        F_UnorderedOrLessOrEqual(14, "f_unorderedOrLessOrEqual"),
+        F_Ordered(15, "f_ordered"),
+
+        // for integers
+        Never(0, "never"),
+        Equal(1, "equal", true),
+        Zero(1, "zero"),
+        LessEqual(2, "lessEqual", true),
+        Less(3, "less", true),
+        LessEqualUnsigned(4, "lessEqualUnsigned", true),
+        LessUnsigned(5, "lessUnsigned", true),
+        CarrySet(5, "carrySet"),
+        Negative(6, "negative", true),
+        OverflowSet(7, "overflowSet", true),
+        Always(8, "always"),
+        NotEqual(9, "notEqual", true),
+        NotZero(9, "notZero"),
+        Greater(10, "greater", true),
+        GreaterEqual(11, "greaterEqual", true),
+        GreaterUnsigned(12, "greaterUnsigned", true),
+        GreaterEqualUnsigned(13, "greaterEqualUnsigned", true),
+        CarryClear(13, "carryClear"),
+        Positive(14, "positive", true),
+        OverflowClear(15, "overflowClear", true);
+
+        // @formatter:on
+
+        private final int value;
+        private final String operator;
+        private boolean forCBcond = false;
+
+        private ConditionFlag(int value, String op) {
+            this(value, op, false);
+        }
+
+        private ConditionFlag(int value, String op, boolean cbcond) {
+            this.value = value;
+            this.operator = op;
+            this.forCBcond = cbcond;
+        }
+
+        public boolean isCBCond() {
+            return forCBcond;
+        }
+
+        public int getValue() {
+            return value;
+        }
+
+        public String getOperator() {
+            return operator;
+        }
+
+        public ConditionFlag negate() {
+            //@formatter:off
+            switch (this) {
+                case F_Never                  : return F_Always;
+                case F_Always                 : return F_Never;
+                case F_NotEqual               : return F_Equal;
+                case F_Equal                  : return F_NotEqual;
+                case F_LessOrGreater          : return F_UnorderedOrEqual;
+                case F_UnorderedOrEqual       : return F_LessOrGreater;
+                case F_Less                   : return F_UnorderedGreaterOrEqual;
+                case F_UnorderedGreaterOrEqual: return F_Less;
+                case F_LessOrEqual            : return F_UnorderedOrGreater;
+                case F_UnorderedOrGreater     : return F_LessOrEqual;
+                case F_Greater                : return F_UnorderedOrLessOrEqual;
+                case F_UnorderedOrLessOrEqual : return F_Greater;
+                case F_GreaterOrEqual         : return F_UnorderedOrLess;
+                case F_UnorderedOrLess        : return F_GreaterOrEqual;
+                case F_Unordered              : return F_Ordered;
+                case F_Ordered                : return F_Unordered;
+                case Never                    : return Always;
+                case Always                   : return Never;
+                case Equal                    : return NotEqual;
+                case NotEqual                 : return Equal;
+                case Zero                     : return NotZero;
+                case NotZero                  : return Zero;
+                case LessEqual                : return Greater;
+                case Greater                  : return LessEqual;
+                case Less                     : return GreaterEqual;
+                case GreaterEqual             : return Less;
+                case LessEqualUnsigned        : return GreaterUnsigned;
+                case GreaterUnsigned          : return LessEqualUnsigned;
+                case LessUnsigned             : return GreaterEqualUnsigned;
+                case GreaterEqualUnsigned     : return LessUnsigned;
+                case CarrySet                 : return CarryClear;
+                case CarryClear               : return CarrySet;
+                case Negative                 : return Positive;
+                case Positive                 : return Negative;
+                case OverflowSet              : return OverflowClear;
+                case OverflowClear            : return OverflowSet;
+                default:
+                    throw new InternalError();
+            }
+            //@formatter:on
+        }
+
+        public ConditionFlag mirror() {
+            switch (this) {
+            //@formatter:off
+                case F_Less                   : return F_Greater;
+                case F_Greater                : return F_Less;
+                case F_LessOrEqual            : return F_GreaterOrEqual;
+                case F_UnorderedGreaterOrEqual: return F_UnorderedOrLessOrEqual;
+                case F_UnorderedOrGreater     : return F_UnorderedOrLess;
+                case F_UnorderedOrLessOrEqual : return F_UnorderedGreaterOrEqual;
+                case F_GreaterOrEqual         : return F_LessOrEqual;
+                case F_UnorderedOrLess        : return F_UnorderedOrGreater;
+                case LessEqual                : return GreaterEqual;
+                case Greater                  : return Less;
+                case Less                     : return Greater;
+                case GreaterEqual             : return LessEqual;
+                case LessEqualUnsigned        : return GreaterEqualUnsigned;
+                case GreaterUnsigned          : return LessUnsigned;
+                case LessUnsigned             : return GreaterUnsigned;
+                case GreaterEqualUnsigned     : return LessEqualUnsigned;
+                default:
+                    return this;
+                //@formatter:on
+            }
+        }
+
+    }
+
+    public enum RCondition {
+        // @formatter:off
+
+        Rc_z(0b001, "rc_z"),
+        Rc_lez(0b010, "rc_lez"),
+        Rc_lz(0b011, "rc_lz"),
+        Rc_nz(0b101, "rc_nz"),
+        Rc_gz(0b110, "rc_gz"),
+        Rc_gez(0b111, "rc_gez"),
+        Rc_last(Rc_gez.getValue(), "rc_last");
+
+        // @formatter:on
+
+        private final int value;
+        private final String operator;
+
+        private RCondition(int value, String op) {
+            this.value = value;
+            this.operator = op;
+        }
+
+        public int getValue() {
+            return value;
+        }
+
+        public String getOperator() {
+            return operator;
+        }
+    }
+
+    /**
+     * Represents the <b>Address Space Identifier</b> defined in the SPARC architecture.
+     */
+    public enum Asi {
+        // @formatter:off
+
+        INVALID(-1),
+        ASI_PRIMARY(0x80),
+        ASI_PRIMARY_NOFAULT(0x82),
+        ASI_PRIMARY_LITTLE(0x88),
+        // Block initializing store
+        ASI_ST_BLKINIT_PRIMARY(0xE2),
+        // Most-Recently-Used (MRU) BIS variant
+        ASI_ST_BLKINIT_MRU_PRIMARY(0xF2);
+
+        // @formatter:on
+
+        private final int value;
+
+        private Asi(int value) {
+            this.value = value;
+        }
+
+        public int getValue() {
+            return value;
+        }
+
+        public boolean isValid() {
+            return value != INVALID.getValue();
+        }
+    }
+
+    public enum Fcn {
+        SeveralWritesAndPossiblyReads(2),
+        SeveralReadsWeak(0),
+        OneRead(1),
+        OneWrite(3),
+        Page(4),
+        NearestUnifiedCache(17),
+        SeveralReadsStrong(20),
+        OneReadStrong(21),
+        SeveralWritesAndPossiblyReadsStrong(22),
+        OneWriteStrong(23);
+
+        private final int value;
+
+        private Fcn(int value) {
+            this.value = value;
+        }
+
+        public int getValue() {
+            return value;
+        }
+    }
+
+    public boolean hasFeature(CPUFeature feature) {
+        return ((SPARC) this.target.arch).features.contains(feature);
+    }
+
+    public static final int simm(int x, int nbits) {
+        // assert_signed_range(x, nbits);
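+        // Truncate x to its low nbits bits, i.e. the two's-complement encoding of the signed immediate.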
+        return x & ((1 << nbits) - 1);
+    }
+
+    public static final boolean isImm(int x, int nbits) {
+        // assert_signed_range(x, nbits);
+        return simm(x, nbits) == x;
+    }
+
+    /**
+     * Minimum value for signed immediate ranges.
+     */
+    public static long minSimm(long nbits) {
+        return -(1L << (nbits - 1));
+    }
+
+    /**
+     * Maximum value for signed immediate ranges.
+     */
+    public static long maxSimm(long nbits) {
+        return (1L << (nbits - 1)) - 1;
+    }
+
+    /**
+     * Test if imm is within signed immediate range for nbits.
+     */
+    public static boolean isSimm(long imm, int nbits) {
+        return minSimm(nbits) <= imm && imm <= maxSimm(nbits);
+    }
+
+    public static boolean isSimm10(long imm) {
+        return isSimm(imm, 10);
+    }
+
+    public static boolean isSimm11(long imm) {
+        return isSimm(imm, 11);
+    }
+
+    public static boolean isSimm11(JavaConstant constant) {
+        return constant.isNull() || isSimm11(constant.asLong());
+    }
+
+    public static boolean isSimm5(JavaConstant constant) {
+        return constant.isNull() || isSimm(constant.asLong(), 5);
+    }
+
+    public static boolean isSimm13(int imm) {
+        return isSimm(imm, 13);
+    }
+
+    public static boolean isSimm13(JavaConstant constant) {
+        return constant.isNull() || isSimm13(constant.asLong());
+    }
+
+    public static boolean isSimm13(long imm) {
+        return NumUtil.isInt(imm) && isSimm(imm, 13);
+    }
+
+    public static boolean isWordDisp30(long imm) {
+        return isSimm(imm, 30 + 2);
+    }
+
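+    /**
+     * Returns the high 22 bits of {@code x}, i.e. the imm22 operand used by sethi to rebuild {@code x}.
+     */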
+    public static final int hi22(int x) {
+        return x >>> 10;
+    }
+
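+    /**
+     * Returns the low 10 bits of {@code x}, the part of a 32-bit constant that sethi cannot set.
+     */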
+    public static final int lo10(int x) {
+        return x & ((1 << 10) - 1);
+    }
+
+    // @formatter:off
+    /**
+     * Instruction format for Fmt00 instructions. This abstraction is needed as it
+     * simplifies patching of these instructions later on.
+     * <pre>
+     * | 00  |    a   | op2 |               b                         |
+     * |31 30|29    25|24 22|21                                      0|
+     * </pre>
+     */
+    // @formatter:on
+    protected void fmt00(int a, int op2, int b) {
+        assert isImm(a, 5) && isImm(op2, 3) && isImm(b, 22) : String.format("a: 0x%x op2: 0x%x b: 0x%x", a, op2, b);
+        this.emitInt(a << 25 | op2 << 22 | b);
+    }
+
+    private void op3(Op3s op3, Opfs opf, Register rs1, Register rs2, Register rd) {
+        int b = opf.value << 5 | (rs2 == null ? 0 : rs2.encoding);
+        fmt(op3.op.op, rd.encoding, op3.value, rs1 == null ? 0 : rs1.encoding, b);
+    }
+
+    protected void op3(Op3s op3, Register rs1, Register rs2, Register rd) {
+        int b = rs2 == null ? 0 : rs2.encoding;
+        int xBit = getXBit(op3);
+        fmt(op3.op.op, rd.encoding, op3.value, rs1 == null ? 0 : rs1.encoding, b | xBit);
+    }
+
+    protected void op3(Op3s op3, Register rs1, int simm13, Register rd) {
+        assert isSimm13(simm13);
+        int i = 1 << 13;
+        int simm13WithX = simm13 | getXBit(op3);
+        fmt(op3.op.op, rd.encoding, op3.value, rs1.encoding, i | simm13WithX & ((1 << 13) - 1));
+    }
+
+    // @formatter:off
+    /**
+     * Branch on Integer Condition Codes.
+     * <pre>
+     * | 00  |annul| cond| 010 |               disp22                 |
+     * |31 30|29   |28 25|24 22|21                                   0|
+     * </pre>
+     */
+    // @formatter:on
+    public void bicc(ConditionFlag cond, Annul annul, Label l) {
+        bcc(Op2s.Br, cond, annul, l);
+    }
+
+    // @formatter:off
+    /**
+     * Branch on Floating-Point Condition Codes.
+     * <pre>
+     * | 00  |annul| cond| 110 |               disp22                 |
+     * |31 30|29   |28 25|24 22|21                                   0|
+     * </pre>
+     */
+    // @formatter:on
+    public void fbcc(ConditionFlag cond, Annul annul, Label l) {
+        bcc(Op2s.Fb, cond, annul, l);
+    }
+
+    // @formatter:off
+    /**
+     * Branch on (Integer|Floating-Point) Condition Codes.
+     * <pre>
+     * | 00  |annul| cond| op2 |               disp22                 |
+     * |31 30|29   |28 25|24 22|21                                   0|
+     * </pre>
+     */
+    // @formatter:on
+    private void bcc(Op2s op2, ConditionFlag cond, Annul annul, Label l) {
+        int pos = !l.isBound() ? patchUnbound(l) : (l.position() - position()) / 4;
+        final int disp = 22;
+        assert isSimm(pos, disp);
+        pos &= (1 << disp) - 1;
+        int a = (annul.flag << 4) | cond.getValue();
+        fmt00(a, op2.getValue(), pos);
+    }
+
+    // @formatter:off
+    /**
+     * Branch on Integer Condition Codes with Prediction.
+     * <pre>
+     * | 00  |an|cond | 001 |cc1 2|p |           disp19               |
+     * |31 30|29|28 25|24 22|21 20|19|                               0|
+     * </pre>
+     */
+    // @formatter:on
+    public void bpcc(ConditionFlag cond, Annul annul, Label l, CC cc, BranchPredict predictTaken) {
+        bpcc(Op2s.Bp, cond, annul, l, cc, predictTaken);
+    }
+
+    // @formatter:off
+    /**
+     * Branch on Floating-Point Condition Codes with Prediction.
+     * <pre>
+     * | 00  |an|cond | 101 |cc1 2|p |           disp19               |
+     * |31 30|29|28 25|24 22|21 20|19|                               0|
+     * </pre>
+     */
+    // @formatter:on
+    public void fbpcc(ConditionFlag cond, Annul annul, Label l, CC cc, BranchPredict predictTaken) {
+        bpcc(Op2s.Fbp, cond, annul, l, cc, predictTaken);
+    }
+
+    // @formatter:off
+    /**
+     * Used for fbpcc (Float) and bpcc (Integer).
+     * <pre>
+     * | 00  |an|cond | op2 |cc1 2|p |           disp19               |
+     * |31 30|29|28 25|24 22|21 20|19|                               0|
+     * </pre>
+     */
+    // @formatter:on
+    private void bpcc(Op2s op2, ConditionFlag cond, Annul annul, Label l, CC cc, BranchPredict predictTaken) {
+        int pos = !l.isBound() ? patchUnbound(l) : (l.position() - position()) / 4;
+        final int disp = 19;
+        assert isSimm(pos, disp);
+        pos &= (1 << disp) - 1;
+        int a = (annul.flag << 4) | cond.getValue();
+        int b = (cc.getValue() << 20) | ((predictTaken.flag) << 19) | pos;
+        fmt00(a, op2.getValue(), b);
+    }
+
+    // @formatter:off
+    /**
+     * Branch on Integer Register with Prediction.
+     * <pre>
+     * | 00  |an| 0|rcond | 011 |d16hi|p | rs1 |    d16lo             |
+     * |31 30|29|28|27 25 |24 22|21 20|19|18 14|                     0|
+     * </pre>
+     */
+    // @formatter:on
+    public void bpr(RCondition cond, Annul annul, Label l, BranchPredict predictTaken, Register rs1) {
+        int pos = !l.isBound() ? patchUnbound(l) : (l.position() - position()) / 4;
+        final int disp = 16;
+        assert isSimm(pos, disp);
+        pos &= (1 << disp) - 1;
+        int a = (annul.flag << 4) | cond.getValue();
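+        // Split the masked 16-bit displacement into the parts destined for the d16hi and d16lo fields.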
+        int d16hi = (pos >> 13) << 13;
+        int d16lo = d16hi ^ pos;
+        int b = (d16hi << 20) | (predictTaken.flag << 19) | (rs1.encoding() << 14) | d16lo;
+        fmt00(a, Op2s.Bpr.getValue(), b);
+    }
+
+    private int patchUnbound(Label label) {
+        label.addPatchAt(position());
+        return 0;
+    }
+
+    public void cbcondw(ConditionFlag cf, Register rs1, Register rs2, Label lab) {
+        cbcond(0, 0, cf, rs1, rs2.encoding, lab);
+    }
+
+    public void cbcondw(ConditionFlag cf, Register rs1, int rs2, Label lab) {
+        assert isSimm(rs2, 5);
+        cbcond(0, 1, cf, rs1, rs2 & ((1 << 5) - 1), lab);
+    }
+
+    public void cbcondx(ConditionFlag cf, Register rs1, Register rs2, Label lab) {
+        cbcond(1, 0, cf, rs1, rs2.encoding, lab);
+    }
+
+    public void cbcondx(ConditionFlag cf, Register rs1, int rs2, Label lab) {
+        assert isSimm(rs2, 5);
+        cbcond(1, 1, cf, rs1, rs2 & ((1 << 5) - 1), lab);
+    }
+
+    private void cbcond(int cc2, int i, ConditionFlag cf, Register rs1, int rs2, Label l) {
+        int disp10 = !l.isBound() ? patchUnbound(l) : (l.position() - position()) / 4;
+        assert isSimm(disp10, 10) && isImm(rs2, 5);
+        disp10 &= (1 << 10) - 1;
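+        // CBcond spreads both the condition (cHi/cLo) and the 10-bit displacement (d10Hi/d10Lo) across the word.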
+        final int cLo = cf.value & 0b111;
+        final int cHi = cf.value >> 3;
+        final int d10Lo = disp10 & ((1 << 8) - 1);
+        final int d10Hi = disp10 >> 8;
+        int a = cHi << 4 | 0b1000 | cLo;
+        int b = cc2 << 21 | d10Hi << D10HI_SHIFT | rs1.encoding << 14 | i << 13 | d10Lo << D10LO_SHIFT | rs2;
+        fmt00(a, Op2s.Bpr.value, b);
+    }
+
+    // @formatter:off
+    /**
+     * NOP.
+     * <pre>
+     * | 00  |00000| 100 |                0                    |
+     * |31 30|29 25|24 22|21                                  0|
+     * </pre>
+     */
+    // @formatter:on
+    public void nop() {
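+        // Encoded as "sethi 0, %g0": only op2 = 0b100 (bits 24..22) is non-zero.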
+        emitInt(1 << 24);
+    }
+
+    public void sethi(int imm22, Register dst) {
+        fmt00(dst.encoding, Op2s.Sethi.value, imm22);
+    }
+
+    // @formatter:off
+    /**
+     * Instruction format for calls.
+     * <pre>
+     * | 01  |                      disp30                             |
+     * |31 30|29                                                      0|
+     * </pre>
+     */
+    // @formatter:on
+    public void call(int disp30) {
+        assert isImm(disp30, 30);
+        int instr = 1 << 30;
+        instr |= disp30;
+        emitInt(instr);
+    }
+
+    public void add(Register rs1, Register rs2, Register rd) {
+        op3(Add, rs1, rs2, rd);
+    }
+
+    public void add(Register rs1, int simm13, Register rd) {
+        op3(Add, rs1, simm13, rd);
+    }
+
+    public void addc(Register rs1, Register rs2, Register rd) {
+        op3(Addc, rs1, rs2, rd);
+    }
+
+    public void addc(Register rs1, int simm13, Register rd) {
+        op3(Addc, rs1, simm13, rd);
+    }
+
+    public void addcc(Register rs1, Register rs2, Register rd) {
+        op3(Addcc, rs1, rs2, rd);
+    }
+
+    public void addcc(Register rs1, int simm13, Register rd) {
+        op3(Addcc, rs1, simm13, rd);
+    }
+
+    public void and(Register rs1, Register rs2, Register rd) {
+        op3(And, rs1, rs2, rd);
+    }
+
+    public void and(Register rs1, int simm13, Register rd) {
+        op3(And, rs1, simm13, rd);
+    }
+
+    public void andcc(Register rs1, Register rs2, Register rd) {
+        op3(Andcc, rs1, rs2, rd);
+    }
+
+    public void andcc(Register rs1, int simm13, Register rd) {
+        op3(Andcc, rs1, simm13, rd);
+    }
+
+    public void andn(Register rs1, Register rs2, Register rd) {
+        op3(Andn, rs1, rs2, rd);
+    }
+
+    public void andn(Register rs1, int simm13, Register rd) {
+        op3(Andn, rs1, simm13, rd);
+    }
+
+    public void andncc(Register rs1, Register rs2, Register rd) {
+        op3(Andncc, rs1, rs2, rd);
+    }
+
+    public void andncc(Register rs1, int simm13, Register rd) {
+        op3(Andncc, rs1, simm13, rd);
+    }
+
+    public void movwtos(Register rs2, Register rd) {
+        assert isSingleFloatRegister(rd) && isCPURegister(rs2) : String.format("%s %s", rs2, rd);
+        op3(Impdep1, Movwtos, null, rs2, rd);
+    }
+
+    public void umulxhi(Register rs1, Register rs2, Register rd) {
+        op3(Impdep1, UMulxhi, rs1, rs2, rd);
+    }
+
+    public void fdtos(Register rs2, Register rd) {
+        assert isSingleFloatRegister(rd) && isDoubleFloatRegister(rs2) : String.format("%s %s", rs2, rd);
+        op3(Fpop1, Fdtos, null, rs2, rd);
+    }
+
+    public void movstouw(Register rs2, Register rd) {
+        assert isSingleFloatRegister(rs2) && isCPURegister(rd) : String.format("%s %s", rs2, rd);
+        op3(Impdep1, Movstouw, null, rs2, rd);
+    }
+
+    public void movstosw(Register rs2, Register rd) {
+        assert isSingleFloatRegister(rs2) && isCPURegister(rd) : String.format("%s %s", rs2, rd);
+        op3(Impdep1, Movstosw, null, rs2, rd);
+    }
+
+    public void movdtox(Register rs2, Register rd) {
+        assert isDoubleFloatRegister(rs2) && isCPURegister(rd) : String.format("%s %s", rs2, rd);
+        op3(Impdep1, Movdtox, null, rs2, rd);
+    }
+
+    public void movxtod(Register rs2, Register rd) {
+        assert isCPURegister(rs2) && isDoubleFloatRegister(rd) : String.format("%s %s", rs2, rd);
+        op3(Impdep1, Movxtod, null, rs2, rd);
+    }
+
+    public void fadds(Register rs1, Register rs2, Register rd) {
+        op3(Fpop1, Fadds, rs1, rs2, rd);
+    }
+
+    public void faddd(Register rs1, Register rs2, Register rd) {
+        op3(Fpop1, Faddd, rs1, rs2, rd);
+    }
+
+    public void faddq(Register rs1, Register rs2, Register rd) {
+        op3(Fpop1, Faddq, rs1, rs2, rd);
+    }
+
+    public void fdivs(Register rs1, Register rs2, Register rd) {
+        op3(Fpop1, Fdivs, rs1, rs2, rd);
+    }
+
+    public void fdivd(Register rs1, Register rs2, Register rd) {
+        op3(Fpop1, Fdivd, rs1, rs2, rd);
+    }
+
+    public void fmovs(Register rs2, Register rd) {
+        op3(Fpop1, Fmovs, null, rs2, rd);
+    }
+
+    public void fmovd(Register rs2, Register rd) {
+        op3(Fpop1, Fmovd, null, rs2, rd);
+    }
+
+    public void fmuls(Register rs1, Register rs2, Register rd) {
+        op3(Fpop1, Fmuls, rs1, rs2, rd);
+    }
+
+    public void fsmuld(Register rs1, Register rs2, Register rd) {
+        op3(Fpop1, Fsmuld, rs1, rs2, rd);
+    }
+
+    public void fmuld(Register rs1, Register rs2, Register rd) {
+        op3(Fpop1, Fmuld, rs1, rs2, rd);
+    }
+
+    public void fnegs(Register rs2, Register rd) {
+        op3(Fpop1, Fnegs, null, rs2, rd);
+    }
+
+    public void fnegd(Register rs2, Register rd) {
+        op3(Fpop1, Fnegd, null, rs2, rd);
+    }
+
+    /**
+     * Helper method to determine if the instruction needs the X bit set.
+     */
+    private static int getXBit(Op3s op3) {
+        switch (op3) {
+            case Sllx:
+            case Srax:
+            case Srlx:
+                return 1 << 12;
+            default:
+                return 0;
+        }
+    }
+
+    public void fstoi(Register rs2, Register rd) {
+        op3(Fpop1, Fstoi, null, rs2, rd);
+    }
+
+    public void fstox(Register rs2, Register rd) {
+        op3(Fpop1, Fstox, null, rs2, rd);
+    }
+
+    public void fdtox(Register rs2, Register rd) {
+        op3(Fpop1, Fdtox, null, rs2, rd);
+    }
+
+    public void fstod(Register rs2, Register rd) {
+        op3(Fpop1, Fstod, null, rs2, rd);
+    }
+
+    public void fdtoi(Register rs2, Register rd) {
+        op3(Fpop1, Fdtoi, null, rs2, rd);
+    }
+
+    public void fitos(Register rs2, Register rd) {
+        op3(Fpop1, Fitos, null, rs2, rd);
+    }
+
+    public void fitod(Register rs2, Register rd) {
+        op3(Fpop1, Fitod, null, rs2, rd);
+    }
+
+    public void fxtos(Register rs2, Register rd) {
+        op3(Fpop1, Fxtos, null, rs2, rd);
+    }
+
+    public void fxtod(Register rs2, Register rd) {
+        op3(Fpop1, Fxtod, null, rs2, rd);
+    }
+
+    public void fzeros(Register rd) {
+        op3(Impdep1, Fzeros, null, null, rd);
+    }
+
+    public void fzerod(Register rd) {
+        op3(Impdep1, Fzerod, null, null, rd);
+    }
+
+    public void flushw() {
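+        // FLUSHW takes no operands, so all register fields are g0.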
+        op3(Flushw, g0, g0, g0);
+    }
+
+    public void fsqrtd(Register rs2, Register rd) {
+        op3(Fpop1, Fsqrtd, null, rs2, rd);
+    }
+
+    public void fsqrts(Register rs2, Register rd) {
+        op3(Fpop1, Fsqrts, null, rs2, rd);
+    }
+
+    public void fabss(Register rs2, Register rd) {
+        op3(Fpop1, Fabss, null, rs2, rd);
+    }
+
+    public void fabsd(Register rs2, Register rd) {
+        op3(Fpop1, Fabsd, null, rs2, rd);
+    }
+
+    public void fsubs(Register rs1, Register rs2, Register rd) {
+        op3(Fpop1, Fsubs, rs1, rs2, rd);
+    }
+
+    public void fsubd(Register rs1, Register rs2, Register rd) {
+        op3(Fpop1, Fsubd, rs1, rs2, rd);
+    }
+
+    // @formatter:off
+    /**
+     * Instruction format for fcmp.
+     * <pre>
+     * | 10  | --- |cc1|cc0|desc |   rs1   |   opf  | rs2 |
+     * |31 30|29 27|26 |25 |24 19|18     14|13     5|4   0|
+     * </pre>
+     */
+    // @formatter:on
+    public void fcmp(CC cc, Opfs opf, Register rs1, Register rs2) {
+        int a = cc.value;
+        int b = opf.value << 5 | rs2.encoding;
+        fmt10(a, Fcmp.value, rs1.encoding, b);
+    }
+
+    // @formatter:off
+    /**
+     * Instruction format for most arithmetic operations, with op = 0b10.
+     * <pre>
+     * |  10 | rd  | op3 | rs1 |   b   |
+     * |31 30|29 25|24 19|18 14|13    0|
+     * </pre>
+     */
+    // @formatter:on
+    protected void fmt10(int rd, int op3, int rs1, int b) {
+        fmt(0b10, rd, op3, rs1, b);
+    }
+
+    // @formatter:off
+    /**
+     * Generic instruction format for most arithmetic operations.
+     * <pre>
+     * |  op | rd  | op3 | rs1 |   b   |
+     * |31 30|29 25|24 19|18 14|13    0|
+     * </pre>
+     */
+    // @formatter:on
+    protected void fmt(int op, int rd, int op3, int rs1, int b) {
+        assert isImm(rd, 5) && isImm(op3, 6) && isImm(b, 14) : String.format("rd: 0x%x op3: 0x%x b: 0x%x", rd, op3, b);
+        int instr = op << 30 | rd << 25 | op3 << 19 | rs1 << 14 | b;
+        emitInt(instr);
+    }
+
+    public void illtrap(int const22) {
+        fmt00(0, Op2s.Illtrap.value, const22);
+    }
+
+    public void jmpl(Register rs1, Register rs2, Register rd) {
+        op3(Jmpl, rs1, rs2, rd);
+    }
+
+    public void jmpl(Register rs1, int simm13, Register rd) {
+        op3(Jmpl, rs1, simm13, rd);
+    }
+
+    public void fmovdcc(ConditionFlag cond, CC cc, Register rs2, Register rd) {
+        fmovcc(cond, cc, rs2, rd, Fmovdcc.value);
+    }
+
+    public void fmovscc(ConditionFlag cond, CC cc, Register rs2, Register rd) {
+        fmovcc(cond, cc, rs2, rd, Fmovscc.value);
+    }
+
+    private void fmovcc(ConditionFlag cond, CC cc, Register rs2, Register rd, int opfLow) {
+        int opfCC = cc.value;
+        int a = opfCC << 11 | opfLow << 5 | rs2.encoding;
+        fmt10(rd.encoding, Fpop2.value, cond.value, a);
+    }
+
+    public void movcc(ConditionFlag conditionFlag, CC cc, Register rs2, Register rd) {
+        movcc(conditionFlag, cc, 0, rs2.encoding, rd);
+    }
+
+    public void movcc(ConditionFlag conditionFlag, CC cc, int simm11, Register rd) {
+        assert isSimm11(simm11);
+        movcc(conditionFlag, cc, 1, simm11 & ((1 << 11) - 1), rd);
+    }
+
+    private void movcc(ConditionFlag conditionFlag, CC cc, int i, int imm, Register rd) {
+        int cc01 = 0b11 & cc.value;
+        int cc2 = cc.isFloat ? 0 : 1;
+        int a = cc2 << 4 | conditionFlag.value;
+        int b = cc01 << 11 | i << 13 | imm;
+        fmt10(rd.encoding, Movcc.value, a, b);
+    }
+
+    public void mulx(Register rs1, Register rs2, Register rd) {
+        op3(Mulx, rs1, rs2, rd);
+    }
+
+    public void mulx(Register rs1, int simm13, Register rd) {
+        op3(Mulx, rs1, simm13, rd);
+    }
+
+    public void or(Register rs1, Register rs2, Register rd) {
+        assert isCPURegister(rs1, rs2, rd) : String.format("%s %s %s", rs1, rs2, rd);
+        op3(Or, rs1, rs2, rd);
+    }
+
+    public void or(Register rs1, int simm13, Register rd) {
+        assert isCPURegister(rs1, rd) : String.format("%s %s", rs1, rd);
+        op3(Or, rs1, simm13, rd);
+    }
+
+    public void popc(Register rs2, Register rd) {
+        op3(Popc, g0, rs2, rd);
+    }
+
+    public void popc(int simm13, Register rd) {
+        op3(Popc, g0, simm13, rd);
+    }
+
+    public void prefetch(SPARCAddress addr, Fcn fcn) {
+        Register rs1 = addr.getBase();
+        if (addr.getIndex().equals(Register.None)) {
+            int dis = addr.getDisplacement();
+            assert isSimm13(dis);
+            fmt(Prefetch.op.op, fcn.value, Prefetch.value, rs1.encoding, 1 << 13 | dis & ((1 << 13) - 1));
+        } else {
+            Register rs2 = addr.getIndex();
+            fmt(Prefetch.op.op, fcn.value, Prefetch.value, rs1.encoding, rs2.encoding);
+        }
+    }
+
+    // A.44 Read State Register
+
+    public void rdpc(Register rd) {
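+        // The rs1 field value 5 (r5) selects the %pc state register.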
+        op3(Rd, r5, g0, rd);
+    }
+
+    public void restore(Register rs1, Register rs2, Register rd) {
+        op3(Restore, rs1, rs2, rd);
+    }
+
+    public static final int PC_RETURN_OFFSET = 8;
+
+    public void save(Register rs1, Register rs2, Register rd) {
+        op3(Save, rs1, rs2, rd);
+    }
+
+    public void save(Register rs1, int simm13, Register rd) {
+        op3(Save, rs1, simm13, rd);
+    }
+
+    public void sdivx(Register rs1, Register rs2, Register rd) {
+        op3(Sdivx, rs1, rs2, rd);
+    }
+
+    public void sdivx(Register rs1, int simm13, Register rd) {
+        op3(Sdivx, rs1, simm13, rd);
+    }
+
+    public void udivx(Register rs1, Register rs2, Register rd) {
+        op3(Udivx, rs1, rs2, rd);
+    }
+
+    public void udivx(Register rs1, int simm13, Register rd) {
+        op3(Udivx, rs1, simm13, rd);
+    }
+
+    public void sll(Register rs1, Register rs2, Register rd) {
+        op3(Sll, rs1, rs2, rd);
+    }
+
+    public void sll(Register rs1, int shcnt32, Register rd) {
+        assert isImm(shcnt32, 5);
+        op3(Sll, rs1, shcnt32, rd);
+    }
+
+    public void sllx(Register rs1, Register rs2, Register rd) {
+        op3(Sllx, rs1, rs2, rd);
+    }
+
+    public void sllx(Register rs1, int shcnt64, Register rd) {
+        assert isImm(shcnt64, 6);
+        op3(Sllx, rs1, shcnt64, rd);
+    }
+
+    public void sra(Register rs1, Register rs2, Register rd) {
+        op3(Sra, rs1, rs2, rd);
+    }
+
+    public void sra(Register rs1, int simm13, Register rd) {
+        op3(Sra, rs1, simm13, rd);
+    }
+
+    public void srax(Register rs1, Register rs2, Register rd) {
+        op3(Srax, rs1, rs2, rd);
+    }
+
+    public void srax(Register rs1, int shcnt64, Register rd) {
+        assert isImm(shcnt64, 6);
+        op3(Srax, rs1, shcnt64, rd);
+    }
+
+    public void srl(Register rs1, Register rs2, Register rd) {
+        op3(Srl, rs1, rs2, rd);
+    }
+
+    public void srl(Register rs1, int simm13, Register rd) {
+        op3(Srl, rs1, simm13, rd);
+    }
+
+    public void srlx(Register rs1, Register rs2, Register rd) {
+        op3(Srlx, rs1, rs2, rd);
+    }
+
+    public void srlx(Register rs1, int shcnt64, Register rd) {
+        assert isImm(shcnt64, 6);
+        op3(Srlx, rs1, shcnt64, rd);
+    }
+
+    public void fandd(Register rs1, Register rs2, Register rd) {
+        op3(Impdep1, Fandd, rs1, rs2, rd);
+    }
+
+    public void sub(Register rs1, Register rs2, Register rd) {
+        op3(Sub, rs1, rs2, rd);
+    }
+
+    public void sub(Register rs1, int simm13, Register rd) {
+        op3(Sub, rs1, simm13, rd);
+    }
+
+    public void subcc(Register rs1, Register rs2, Register rd) {
+        op3(Subcc, rs1, rs2, rd);
+    }
+
+    public void subcc(Register rs1, int simm13, Register rd) {
+        op3(Subcc, rs1, simm13, rd);
+    }
+
+    public void ta(int trap) {
+        tcc(Icc, Always, trap);
+    }
+
+    public void tcc(CC cc, ConditionFlag flag, int trap) {
+        assert isImm(trap, 8);
+        int b = cc.value << 11;
+        b |= 1 << 13;
+        b |= trap;
+        fmt10(flag.value, Op3s.Tcc.getValue(), 0, b);
+    }
+
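+    // For wr, the rd field selects the target state register; rd = 2 (r2) selects %ccr.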
+    public void wrccr(Register rs1, Register rs2) {
+        op3(Wr, rs1, rs2, r2);
+    }
+
+    public void wrccr(Register rs1, int simm13) {
+        op3(Wr, rs1, simm13, r2);
+    }
+
+    public void xor(Register rs1, Register rs2, Register rd) {
+        op3(Xor, rs1, rs2, rd);
+    }
+
+    public void xor(Register rs1, int simm13, Register rd) {
+        op3(Xor, rs1, simm13, rd);
+    }
+
+    public void xorcc(Register rs1, Register rs2, Register rd) {
+        op3(Xorcc, rs1, rs2, rd);
+    }
+
+    public void xorcc(Register rs1, int simm13, Register rd) {
+        op3(Xorcc, rs1, simm13, rd);
+    }
+
+    public void xnor(Register rs1, Register rs2, Register rd) {
+        op3(Xnor, rs1, rs2, rd);
+    }
+
+    public void xnor(Register rs1, int simm13, Register rd) {
+        op3(Xnor, rs1, simm13, rd);
+    }
+
+    /*
+     * Load/Store
+     */
+    protected void ld(Op3s op3, SPARCAddress addr, Register rd, Asi asi) {
+        Register rs1 = addr.getBase();
+        if (!addr.getIndex().equals(Register.None)) {
+            Register rs2 = addr.getIndex();
+            if (asi != null) {
+                int b = rs2.encoding;
+                b |= asi.value << 5;
+                fmt(op3.op.op, rd.encoding, op3.value, rs1.encoding, b);
+            } else {
+                op3(op3, rs1, rs2, rd);
+            }
+        } else {
+            int imm = addr.getDisplacement();
+            op3(op3, rs1, imm, rd);
+        }
+    }
+
+    protected void ld(Op3s op3, SPARCAddress addr, Register rd) {
+        ld(op3, addr, rd, null);
+    }
+
+    public void lddf(SPARCAddress src, Register dst) {
+        assert isDoubleFloatRegister(dst) : dst;
+        ld(Lddf, src, dst);
+    }
+
+    public void ldf(SPARCAddress src, Register dst) {
+        assert isSingleFloatRegister(dst) : dst;
+        ld(Ldf, src, dst);
+    }
+
+    public void lduh(SPARCAddress src, Register dst) {
+        assert isCPURegister(dst) : dst;
+        ld(Lduh, src, dst);
+    }
+
+    public void ldsh(SPARCAddress src, Register dst) {
+        assert isCPURegister(dst) : dst;
+        ld(Ldsh, src, dst);
+    }
+
+    public void ldub(SPARCAddress src, Register dst) {
+        assert isCPURegister(dst) : dst;
+        ld(Ldub, src, dst);
+    }
+
+    public void ldsb(SPARCAddress src, Register dst) {
+        assert isCPURegister(dst) : dst;
+        ld(Ldsb, src, dst);
+    }
+
+    public void lduw(SPARCAddress src, Register dst) {
+        assert isCPURegister(dst) : dst;
+        ld(Lduw, src, dst);
+    }
+
+    public void ldsw(SPARCAddress src, Register dst) {
+        assert isCPURegister(dst) : dst;
+        ld(Ldsw, src, dst);
+    }
+
+    public void ldx(SPARCAddress src, Register dst) {
+        assert isCPURegister(dst) : dst;
+        ld(Ldx, src, dst);
+    }
+
+    public void ldxa(Register rs1, Register rs2, Register rd, Asi asi) {
+        assert SPARC.isCPURegister(rs1, rs2, rd) : format("%s %s %s", rs1, rs2, rd);
+        ld(Ldxa, new SPARCAddress(rs1, rs2), rd, asi);
+    }
+
+    public void lduwa(Register rs1, Register rs2, Register rd, Asi asi) {
+        assert SPARC.isCPURegister(rs1, rs2, rd) : format("%s %s %s", rs1, rs2, rd);
+        ld(Lduwa, new SPARCAddress(rs1, rs2), rd, asi);
+    }
+
+    protected void st(Op3s op3, Register rs1, SPARCAddress dest) {
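+        // Stores reuse the load encoding path; the value register simply occupies the rd field.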
+        ld(op3, dest, rs1);
+    }
+
+    public void stdf(Register rd, SPARCAddress addr) {
+        assert isDoubleFloatRegister(rd) : rd;
+        st(Stdf, rd, addr);
+    }
+
+    public void stf(Register rd, SPARCAddress addr) {
+        assert isSingleFloatRegister(rd) : rd;
+        st(Stf, rd, addr);
+    }
+
+    public void stb(Register rd, SPARCAddress addr) {
+        assert isCPURegister(rd) : rd;
+        st(Stb, rd, addr);
+    }
+
+    public void sth(Register rd, SPARCAddress addr) {
+        assert isCPURegister(rd) : rd;
+        st(Sth, rd, addr);
+    }
+
+    public void stw(Register rd, SPARCAddress addr) {
+        assert isCPURegister(rd) : rd;
+        st(Stw, rd, addr);
+    }
+
+    public void stx(Register rd, SPARCAddress addr) {
+        assert isCPURegister(rd) : rd;
+        st(Stx, rd, addr);
+    }
+
+    public void membar(int barriers) {
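+        // MEMBAR is encoded with rs1 = 15 and the barrier mask in the simm13 field.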
+        op3(Membar, r15, barriers, g0);
+    }
+
+    public void casa(Register rs1, Register rs2, Register rd, Asi asi) {
+        ld(Casa, new SPARCAddress(rs1, rs2), rd, asi);
+    }
+
+    public void casxa(Register rs1, Register rs2, Register rd, Asi asi) {
+        ld(Casxa, new SPARCAddress(rs1, rs2), rd, asi);
+    }
+
+    @Override
+    public InstructionCounter getInstructionCounter() {
+        return new SPARCInstructionCounter(this);
+    }
+
+    public void patchAddImmediate(int position, int simm13) {
+        int inst = getInt(position);
+        assert SPARCAssembler.isSimm13(simm13) : simm13;
+        assert (inst >>> 30) == 0b10 : String.format("0x%x", inst);
+        assert ((inst >>> 18) & 0b11_1111) == 0 : String.format("0x%x", inst);
+        assert (inst & (1 << 13)) != 0 : String.format("0x%x", inst);
+        inst = inst & (~((1 << 13) - 1));
+        inst |= simm13 & ((1 << 13) - 1);
+        emitInt(inst, position);
+    }
+
+    public void fpadd32(Register rs1, Register rs2, Register rd) {
+        op3(Impdep1, Fpadd32, rs1, rs2, rd);
+    }
+
+    public boolean isCbcond(int i) {
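+        // Matches op == 0b00 and op2 == 0b011, i.e. the branch-on-register/CBcond group.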
+        return (i & 0xC1C00000) == 0xC00000;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.jvmci.asm.sparc/src/com/oracle/jvmci/asm/sparc/SPARCInstructionCounter.java	Wed Jun 03 18:06:44 2015 +0200
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.jvmci.asm.sparc;
+
+import java.util.*;
+
+import com.oracle.jvmci.asm.Assembler.*;
+
+public class SPARCInstructionCounter implements InstructionCounter {
+    // Use a TreeMap so the supported instruction classes are reported in a stable, sorted order
+    private static final TreeMap<String, SPARCInstructionMatch> INSTRUCTION_MATCHER = new TreeMap<>();
+    static {
+        // @formatter:off
+        INSTRUCTION_MATCHER.put("nop", new SPARCInstructionMatch(0xFFFF_FFFF, 0x0100_0000));
+        INSTRUCTION_MATCHER.put("st", new OP3LowBitsMatcher(0b11, 0x4, 0x5, 0x6, 0x7, 0xe, 0xf));
+        INSTRUCTION_MATCHER.put("ld", new OP3LowBitsMatcher(0b11, 0x0, 0x1, 0x2, 0x3, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd));
+        INSTRUCTION_MATCHER.put("all", new SPARCInstructionMatch(0x0, 0x0));
+        // @formatter:on
+    }
+    private final SPARCAssembler asm;
+
+    public SPARCInstructionCounter(SPARCAssembler asm) {
+        super();
+        this.asm = asm;
+    }
+
+    @Override
+    public int[] countInstructions(String[] instructionTypes, int beginPc, int endPc) {
+        SPARCInstructionMatch[] matchers = new SPARCInstructionMatch[instructionTypes.length];
+        for (int i = 0; i < instructionTypes.length; i++) {
+            String typeName = instructionTypes[i];
+            matchers[i] = INSTRUCTION_MATCHER.get(typeName);
+            if (matchers[i] == null) {
+                throw new IllegalArgumentException(String.format("Unknown instruction class %s, supported types are: %s", typeName, INSTRUCTION_MATCHER.keySet()));
+            }
+        }
+        return countBetween(matchers, beginPc, endPc);
+    }
+
+    private int[] countBetween(SPARCInstructionMatch[] matchers, int startPc, int endPc) {
+        int[] counts = new int[matchers.length];
+        for (int p = startPc; p < endPc; p += 4) {
+            int instr = asm.getInt(p);
+            for (int i = 0; i < matchers.length; i++) {
+                SPARCInstructionMatch matcher = matchers[i];
+                if (matcher.matches(instr)) {
+                    counts[i]++;
+                }
+            }
+        }
+        return counts;
+    }
+
+    @Override
+    public String[] getSupportedInstructionTypes() {
+        return INSTRUCTION_MATCHER.keySet().toArray(new String[0]);
+    }
+
+    /**
+     * Tests the lower 4 bits of the op3 field.
+     */
+    private static class OP3LowBitsMatcher extends SPARCInstructionMatch {
+        private final int[] op3b03;
+        private final int op;
+
+        public OP3LowBitsMatcher(int op, int... op3b03) {
+            super(0, 0);
+            this.op = op;
+            this.op3b03 = op3b03;
+        }
+
+        @Override
+        public boolean matches(int instruction) {
+            if (instruction >>> 30 != op) {
+                return false;
+            }
+            int op3lo = (instruction >> 19) & ((1 << 4) - 1);
+            for (int op3Part : op3b03) {
+                if (op3Part == op3lo) {
+                    return true;
+                }
+            }
+            return false;
+        }
+    }
+
+    private static class SPARCInstructionMatch {
+        private final int mask;
+        private final int[] patterns;
+
+        public SPARCInstructionMatch(int mask, int... patterns) {
+            super();
+            this.mask = mask;
+            this.patterns = patterns;
+        }
+
+        public boolean matches(int instruction) {
+            for (int pattern : patterns) {
+                if ((instruction & mask) == pattern) {
+                    return true;
+                }
+            }
+            return false;
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.jvmci.asm.sparc/src/com/oracle/jvmci/asm/sparc/SPARCMacroAssembler.java	Wed Jun 03 18:06:44 2015 +0200
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.jvmci.asm.sparc;
+
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.Annul.*;
+import static com.oracle.jvmci.asm.sparc.SPARCAssembler.ConditionFlag.*;
+import static com.oracle.jvmci.sparc.SPARC.*;
+
+import java.util.function.*;
+
+import com.oracle.jvmci.asm.*;
+import com.oracle.jvmci.code.*;
+
+public class SPARCMacroAssembler extends SPARCAssembler {
+
+    /**
+     * A sentinel value used as a placeholder in an instruction stream for an address that will be
+     * patched.
+     */
+    private static final SPARCAddress Placeholder = new SPARCAddress(g0, 0);
+    private final ScratchRegister[] scratchRegister = new ScratchRegister[]{new ScratchRegister(g1), new ScratchRegister(g3)};
+    // Points to the next free scratch register
+    private int nextFreeScratchRegister = 0;
+
+    public SPARCMacroAssembler(TargetDescription target, RegisterConfig registerConfig) {
+        super(target, registerConfig);
+    }
+
+    @Override
+    public void align(int modulus) {
+        while (position() % modulus != 0) {
+            nop();
+        }
+    }
+
+    @Override
+    public void jmp(Label l) {
+        bicc(Always, NOT_ANNUL, l);
+        nop();  // delay slot
+    }
+
+    @Override
+    protected final void patchJumpTarget(int branch, int branchTarget) {
+        final int disp = (branchTarget - branch) / 4;
+        final int inst = getInt(branch);
+        Op2s op2 = Op2s.byValue((inst & OP2_MASK) >> OP2_SHIFT);
+        int maskBits;
+        int setBits;
+        switch (op2) {
+            case Br:
+            case Fb:
+            case Sethi:
+            case Illtrap:
+                // Disp 22 in the lower 22 bits
+                assert isSimm(disp, 22);
+                setBits = disp << DISP22_SHIFT;
+                maskBits = DISP22_MASK;
+                break;
+            case Fbp:
+            case Bp:
+                // Disp 19 in the lower 19 bits
+                assert isSimm(disp, 19);
+                setBits = disp << DISP19_SHIFT;
+                maskBits = DISP19_MASK;
+                break;
+            case Bpr:
+                boolean isCBcond = (inst & CBCOND_MASK) != 0;
+                if (isCBcond) {
+                    assert isSimm10(disp) : String.format("%d: instruction: 0x%x", disp, inst);
+                    int d10Split = 0;
+                    d10Split |= (disp & 0b11_0000_0000) << D10HI_SHIFT - 8;
+                    d10Split |= (disp & 0b00_1111_1111) << D10LO_SHIFT;
+                    setBits = d10Split;
+                    maskBits = D10LO_MASK | D10HI_MASK;
+                } else {
+                    assert isSimm(disp, 16);
+                    int d16Split = 0;
+                    d16Split |= (disp & 0b1100_0000_0000_0000) << D16HI_SHIFT - 14;
+                    d16Split |= (disp & 0b0011_1111_1111_1111) << D16LO_SHIFT;
+                    setBits = d16Split;
+                    maskBits = D16HI_MASK | D16LO_MASK;
+                }
+                break;
+            default:
+                throw new InternalError("Unknown op2 " + op2);
+        }
+        int newInst = ~maskBits & inst;
+        newInst |= setBits;
+        emitInt(newInst, branch);
+    }
+
+    @Override
+    public AbstractAddress makeAddress(Register base, int displacement) {
+        return new SPARCAddress(base, displacement);
+    }
+
+    @Override
+    public AbstractAddress getPlaceholder() {
+        return Placeholder;
+    }
+
+    @Override
+    public final void ensureUniquePC() {
+        nop();
+    }
+
+    public void cas(Register rs1, Register rs2, Register rd) {
+        casa(rs1, rs2, rd, Asi.ASI_PRIMARY);
+    }
+
+    public void casx(Register rs1, Register rs2, Register rd) {
+        casxa(rs1, rs2, rd, Asi.ASI_PRIMARY);
+    }
+
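+    // The methods below implement SPARC synthetic instructions in terms of the real ones.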
+    public void clr(Register dst) {
+        or(g0, g0, dst);
+    }
+
+    public void clrb(SPARCAddress addr) {
+        stb(g0, addr);
+    }
+
+    public void clrh(SPARCAddress addr) {
+        sth(g0, addr);
+    }
+
+    public void clrx(SPARCAddress addr) {
+        stx(g0, addr);
+    }
+
+    public void cmp(Register rs1, Register rs2) {
+        subcc(rs1, rs2, g0);
+    }
+
+    public void cmp(Register rs1, int simm13) {
+        subcc(rs1, simm13, g0);
+    }
+
+    public void dec(Register rd) {
+        sub(rd, 1, rd);
+    }
+
+    public void dec(int simm13, Register rd) {
+        sub(rd, simm13, rd);
+    }
+
+    public void jmp(SPARCAddress address) {
+        jmpl(address.getBase(), address.getDisplacement(), g0);
+    }
+
+    public void jmp(Register rd) {
+        jmpl(rd, 0, g0);
+    }
+
+    public void neg(Register rs1, Register rd) {
+        sub(g0, rs1, rd);
+    }
+
+    public void neg(Register rd) {
+        sub(g0, rd, rd);
+    }
+
+    public void mov(Register rs, Register rd) {
+        or(g0, rs, rd);
+    }
+
+    public void mov(int simm13, Register rd) {
+        or(g0, simm13, rd);
+    }
+
+    public void not(Register rs1, Register rd) {
+        xnor(rs1, g0, rd);
+    }
+
+    public void not(Register rd) {
+        xnor(rd, g0, rd);
+    }
+
+    public void restoreWindow() {
+        restore(g0, g0, g0);
+    }
+
+    public void ret() {
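+        // Synthetic ret: jmpl %i7 + 8, %g0 (PC_RETURN_OFFSET past the call instruction).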
+        jmpl(i7, 8, g0);
+    }
+
+    /**
+     * Like sethi, but for 64-bit values: emits a sequence of instructions that materializes the constant in a register.
+     */
+    public static class Sethix {
+
+        private static final int INSTRUCTION_SIZE = 7;
+
+        private long value;
+        private Register dst;
+        private boolean forceRelocatable;
+        private boolean delayed = false;
+        private Consumer<SPARCAssembler> delayedInstructionEmitter;
+
+        public Sethix(long value, Register dst, boolean forceRelocatable, boolean delayed) {
+            this(value, dst, forceRelocatable);
+            assert !(forceRelocatable && delayed) : "Relocatable sethix cannot be delayed";
+            this.delayed = delayed;
+        }
+
+        public Sethix(long value, Register dst, boolean forceRelocatable) {
+            this.value = value;
+            this.dst = dst;
+            this.forceRelocatable = forceRelocatable;
+        }
+
+        public Sethix(long value, Register dst) {
+            this(value, dst, false);
+        }
+
+        private void emitInstruction(Consumer<SPARCAssembler> cb, SPARCMacroAssembler masm) {
+            if (delayed) {
+                if (this.delayedInstructionEmitter != null) {
+                    delayedInstructionEmitter.accept(masm);
+                }
+                delayedInstructionEmitter = cb;
+            } else {
+                cb.accept(masm);
+            }
+        }
+
+        public void emit(SPARCMacroAssembler masm) {
+            final int hi = (int) (value >> 32);
+            final int lo = (int) (value & ~0);
+
+            // This is the same logic as MacroAssembler::internal_set.
+            final int startPc = masm.position();
+
+            if (hi == 0 && lo >= 0) {
+                Consumer<SPARCAssembler> cb = eMasm -> eMasm.sethi(hi22(lo), dst);
+                emitInstruction(cb, masm);
+            } else if (hi == -1) {
+                Consumer<SPARCAssembler> cb = eMasm -> eMasm.sethi(hi22(~lo), dst);
+                emitInstruction(cb, masm);
+                cb = eMasm -> eMasm.xor(dst, ~lo10(~0), dst);
+                emitInstruction(cb, masm);
+            } else {
+                final int shiftcnt;
+                final int shiftcnt2;
+                Consumer<SPARCAssembler> cb = eMasm -> eMasm.sethi(hi22(hi), dst);
+                emitInstruction(cb, masm);
+                if ((hi & 0x3ff) != 0) {                                  // Any bits?
+                    // msb 32-bits are now in lsb 32
+                    cb = eMasm -> eMasm.or(dst, hi & 0x3ff, dst);
+                    emitInstruction(cb, masm);
+                }
+                if ((lo & 0xFFFFFC00) != 0) {                             // done?
+                    if (((lo >> 20) & 0xfff) != 0) {                      // Any bits set?
+                        // Make room for next 12 bits
+                        cb = eMasm -> eMasm.sllx(dst, 12, dst);
+                        emitInstruction(cb, masm);
+                        // Or in next 12
+                        cb = eMasm -> eMasm.or(dst, (lo >> 20) & 0xfff, dst);
+                        emitInstruction(cb, masm);
+                        shiftcnt = 0;                                     // We already shifted
+                    } else {
+                        shiftcnt = 12;
+                    }
+                    if (((lo >> 10) & 0x3ff) != 0) {
+                        // Make room for last 10 bits
+                        cb = eMasm -> eMasm.sllx(dst, shiftcnt + 10, dst);
+                        emitInstruction(cb, masm);
+                        // Or in next 10
+                        cb = eMasm -> eMasm.or(dst, (lo >> 10) & 0x3ff, dst);
+                        emitInstruction(cb, masm);
+                        shiftcnt2 = 0;
+                    } else {
+                        shiftcnt2 = 10;
+                    }
+                    // Shift leaving disp field 0'd
+                    cb = eMasm -> eMasm.sllx(dst, shiftcnt2 + 10, dst);
+                    emitInstruction(cb, masm);
+                } else {
+                    cb = eMasm -> eMasm.sllx(dst, 32, dst);
+                    emitInstruction(cb, masm);
+                }
+            }
+            // Pad out the instruction sequence so it can be patched later.
+            if (forceRelocatable) {
+                while (masm.position() < (startPc + (INSTRUCTION_SIZE * 4))) {
+                    Consumer<SPARCAssembler> cb = eMasm -> eMasm.nop();
+                    emitInstruction(cb, masm);
+                }
+            }
+        }
+
+        public void emitDelayed(SPARCMacroAssembler masm) {
+            assert delayedInstructionEmitter != null;
+            delayedInstructionEmitter.accept(masm);
+        }
+    }
+
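+    /**
+     * Loads a 64-bit constant into a register: a {@link Sethix} sequence followed by an
+     * {@code add} of the low 10 bits when those bits are non-zero (or when the sequence must
+     * remain patchable, i.e. {@code forceRelocatable} is set).
+     */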
+    public static class Setx {
+
+        private long value;
+        private Register dst;
+        private boolean forceRelocatable;
+        private boolean delayed = false;
+        private boolean delayedFirstEmitted = false;
+        private Sethix sethix;
+        private Consumer<SPARCMacroAssembler> delayedAdd;
+
+        public Setx(long value, Register dst, boolean forceRelocatable, boolean delayed) {
+            assert !(forceRelocatable && delayed) : "Cannot use relocatable setx as delayable";
+            this.value = value;
+            this.dst = dst;
+            this.forceRelocatable = forceRelocatable;
+            this.delayed = delayed;
+        }
+
+        public Setx(long value, Register dst, boolean forceRelocatable) {
+            this(value, dst, forceRelocatable, false);
+        }
+
+        public Setx(long value, Register dst) {
+            this(value, dst, false);
+        }
+
+        public void emit(SPARCMacroAssembler masm) {
+            assert !delayed;
+            doEmit(masm);
+        }
+
+        private void doEmit(SPARCMacroAssembler masm) {
+            sethix = new Sethix(value, dst, forceRelocatable, delayed);
+            sethix.emit(masm);
+            int lo = (int) (value & ~0);
+            if (lo10(lo) != 0 || forceRelocatable) {
+                Consumer<SPARCMacroAssembler> add = eMasm -> eMasm.add(dst, lo10(lo), dst);
+                if (delayed) {
+                    sethix.emitDelayed(masm);
+                    sethix = null;
+                    delayedAdd = add;
+                } else {
+                    sethix = null;
+                    add.accept(masm);
+                }
+            }
+        }
+
+        public void emitFirstPartOfDelayed(SPARCMacroAssembler masm) {
+            assert !forceRelocatable : "Cannot use delayed mode with relocatable setx";
+            assert delayed : "Can only be used in delayed mode";
+            doEmit(masm);
+            delayedFirstEmitted = true;
+        }
+
+        public void emitSecondPartOfDelayed(SPARCMacroAssembler masm) {
+            assert !forceRelocatable : "Cannot use delayed mode with relocatable setx";
+            assert delayed : "Can only be used in delayed mode";
+            assert delayedFirstEmitted : "First part has not been emitted yet.";
+            assert delayedAdd == null && sethix != null || delayedAdd != null && sethix == null : "Either add or sethix must be set";
+            if (delayedAdd != null) {
+                delayedAdd.accept(masm);
+            } else {
+                sethix.emitDelayed(masm);
+            }
+
+        }
+    }
+
+    public void signx(Register rs, Register rd) {
+        sra(rs, g0, rd);
+    }
+
+    public void signx(Register rd) {
+        sra(rd, g0, rd);
+    }
+
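+    /**
+     * Acquires one of the scratch registers. Typically used in a try-with-resources statement so
+     * that {@link ScratchRegister#close()} releases the register again, for example (with
+     * {@code masm} denoting this macro assembler):
+     *
+     * <pre>
+     * try (ScratchRegister sc = masm.getScratchRegister()) {
+     *     Register scratch = sc.getRegister();
+     *     // ... use scratch as a temporary ...
+     * }
+     * </pre>
+     */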
+    public ScratchRegister getScratchRegister() {
+        return scratchRegister[nextFreeScratchRegister++];
+    }
+
+    public class ScratchRegister implements AutoCloseable {
+        private final Register register;
+
+        public ScratchRegister(Register register) {
+            super();
+            this.register = register;
+        }
+
+        public Register getRegister() {
+            return register;
+        }
+
+        public void close() {
+            assert nextFreeScratchRegister > 0 : "Close called too often";
+            nextFreeScratchRegister--;
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.jvmci.asm/overview.html	Wed Jun 03 18:06:44 2015 +0200
@@ -0,0 +1,36 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
+<html>
+<head>
+<!--
+
+Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+This code is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License version 2 only, as
+published by the Free Software Foundation.  Oracle designates this
+particular file as subject to the "Classpath" exception as provided
+by Oracle in the LICENSE file that accompanied this code.
+
+This code is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+version 2 for more details (a copy is included in the LICENSE file that
+accompanied this code).
+
+You should have received a copy of the GNU General Public License version
+2 along with this work; if not, write to the Free Software Foundation,
+Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+or visit www.oracle.com if you need additional information or have any
+questions.
+-->
+
+</head>
+<body>
+
+Documentation for the <code>com.oracle.jvmci.asm</code> project.
+
+</body>
+</html>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.jvmci.asm/src/com/oracle/jvmci/asm/AsmOptions.java	Wed Jun 03 18:06:44 2015 +0200
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2011, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.jvmci.asm;
+
+public class AsmOptions {
+
+    public static int InitialCodeBufferSize = 232;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.jvmci.asm/src/com/oracle/jvmci/asm/Assembler.java	Wed Jun 03 18:06:44 2015 +0200
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.jvmci.asm;
+
+import java.nio.*;
+import java.util.*;
+
+import com.oracle.jvmci.code.*;
+
+/**
+ * The platform-independent base class for the assembler.
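+ *
+ * <p>
+ * Target-specific subclasses (e.g. the SPARC and AMD64 assemblers) supply the actual instruction
+ * encodings and append them to the backing {@link Buffer} via the {@code emit*} methods.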
+ */
+public abstract class Assembler {
+
+    public final TargetDescription target;
+    private List<LabelHint> jumpDisplacementHints;
+
+    /**
+     * Backing code buffer.
+     */
+    private final Buffer codeBuffer;
+
+    public Assembler(TargetDescription target) {
+        this.target = target;
+        if (target.arch.getByteOrder() == ByteOrder.BIG_ENDIAN) {
+            this.codeBuffer = new Buffer.BigEndian();
+        } else {
+            this.codeBuffer = new Buffer.LittleEndian();
+        }
+    }
+
+    /**
+     * Returns the current position of the underlying code buffer.
+     *
+     * @return current position in code buffer
+     */
+    public int position() {
+        return codeBuffer.position();
+    }
+
+    public final void emitByte(int x) {
+        codeBuffer.emitByte(x);
+    }
+
+    public final void emitShort(int x) {
+        codeBuffer.emitShort(x);
+    }
+
+    public final void emitInt(int x) {
+        codeBuffer.emitInt(x);
+    }
+
+    public final void emitLong(long x) {
+        codeBuffer.emitLong(x);
+    }
+
+    public final void emitByte(int b, int pos) {
+        codeBuffer.emitByte(b, pos);
+    }
+
+    public final void emitShort(int b, int pos) {
+        codeBuffer.emitShort(b, pos);
+    }
+
+    public final void emitInt(int b, int pos) {
+        codeBuffer.emitInt(b, pos);
+    }
+
+    public final void emitLong(long b, int pos) {
+        codeBuffer.emitLong(b, pos);
+    }
+
+    public final int getByte(int pos) {
+        return codeBuffer.getByte(pos);
+    }
+
+    public final int getShort(int pos) {
+        return codeBuffer.getShort(pos);
+    }
+
+    public final int getInt(int pos) {
+        return codeBuffer.getInt(pos);
+    }
+
+    private static final String NEWLINE = System.getProperty("line.separator");
+
+    /**
+     * Some GPU architectures have a text-based encoding.
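+     * For example, {@code emitString("add $r1, $r2, $r3")} appends the (illustrative) instruction
+     * text to the code buffer, preceded by a tab and followed by a newline.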
+     */
+    public final void emitString(String x) {
+        emitString0("\t");  // XXX REMOVE ME pretty-printing
+        emitString0(x);
+        emitString0(NEWLINE);
+    }
+
+    // XXX for pretty-printing
+    public final void emitString0(String x) {
+        codeBuffer.emitBytes(x.getBytes(), 0, x.length());
+    }
+
+    public void emitString(String s, int pos) {
+        codeBuffer.emitBytes(s.getBytes(), pos);
+    }
+
+    /**
+     * Closes this assembler. No extra data can be written to this assembler after this call.
+     *
+     * @param trimmedCopy if {@code true}, then a copy of the underlying byte array up to (but not
+     *            including) {@code position()} is returned
+     * @return the data in this buffer or a trimmed copy if {@code trimmedCopy} is {@code true}
+     */
+    public byte[] close(boolean trimmedCopy) {
+        return codeBuffer.close(trimmedCopy);
+    }
+
+    public void bind(Label l) {
+        assert !l.isBound() : "can bind label only once";
+        l.bind(position());
+        l.patchInstructions(this);
+    }
+
+    public abstract void align(int modulus);
+
+    public abstract void jmp(Label l);
+
+    protected abstract void patchJumpTarget(int branch, int jumpTarget);
+
+    private Map<Label, String> nameMap;
+
+    /**
+     * Creates a name for a label.
+     *
+     * @param l the label for which a name is being created
+     * @param id a label identifier that is unique within the scope of this assembler
+     * @return a label name in the form of "L123"
+     */
+    protected String createLabelName(Label l, int id) {
+        return "L" + id;
+    }
+
+    /**
+     * Gets a name for a label, creating it if it does not yet exist. By default, the returned name
+     * is only unique within the scope of this assembler.
+     */
+    public String nameOf(Label l) {
+        if (nameMap == null) {
+            nameMap = new HashMap<>();
+        }
+        String name = nameMap.get(l);
+        if (name == null) {
+            name = createLabelName(l, nameMap.size());
+            nameMap.put(l, name);
+        }
+        return name;
+    }
+
+    /**
+     * This is used by the CompilationResultBuilder to convert a {@link StackSlot} to an
+     * {@link AbstractAddress}.
+     */
+    public abstract AbstractAddress makeAddress(Register base, int displacement);
+
+    /**
+     * Returns a target-specific placeholder address that can be used for code patching.
+     */
+    public abstract AbstractAddress getPlaceholder();
+
+    /**
+     * Emits a NOP instruction to advance the current PC.
+     */
+    public abstract void ensureUniquePC();
+
+    public void reset() {
+        codeBuffer.reset();
+        captureLabelPositions();
+    }
+
+    private void captureLabelPositions() {
+        if (jumpDisplacementHints == null) {
+            return;
+        }
+        for (LabelHint request : this.jumpDisplacementHints) {
+            request.capture();
+        }
+    }
+
+    public LabelHint requestLabelHint(Label label) {
+        if (jumpDisplacementHints == null) {
+            jumpDisplacementHints = new ArrayList<>();
+        }
+        LabelHint hint = new LabelHint(label, position());
+        this.jumpDisplacementHints.add(hint);
+        return hint;
+    }
+
+    public InstructionCounter getInstructionCounter() {
+        throw new UnsupportedOperationException("Instruction counter is not implemented for " + this);
+    }
+
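+    /**
+     * Records the position of a jump whose displacement to {@code label} is of interest. The
+     * label's target is captured when {@link Assembler#reset()} is called and can then be queried
+     * via {@link #getTarget()}.
+     */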
+    public static class LabelHint {
+        private Label label;
+        private int forPosition;
+        private int capturedTarget = -1;
+
+        protected LabelHint(Label label, int lastPosition) {
+            super();
+            this.label = label;
+            this.forPosition = lastPosition;
+        }
+
+        protected void capture() {
+            this.capturedTarget = label.position();
+        }
+
+        public int getTarget() {
+            assert isValid();
+            return capturedTarget;
+        }
+
+        public int getPosition() {
+            assert isValid();
+            return forPosition;
+        }
+
+        public boolean isValid() {
+            return capturedTarget >= 0;
+        }
+    }
+
+    /**
+     * Instruction counter which allows the user of the assembler to count different kinds of
+     * instructions in the generated assembler code.
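+     *
+     * <p>
+     * For example, {@code countInstructions(getSupportedInstructionTypes(), beginPc, endPc)}
+     * returns one count per requested instruction type for the code between the two positions.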
+     */
+    public interface InstructionCounter {
+        String[] getSupportedInstructionTypes();
+
+        int[] countInstructions(String[] instructionTypes, int beginPc, int endPc);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.jvmci.asm/src/com/oracle/jvmci/asm/Buffer.java	Wed Jun 03 18:06:44 2015 +0200
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.jvmci.asm;
+
+import java.util.*;
+
+/**
+ * Code buffer management for the assembler. Support for little-endian and big-endian architectures
+ * is implemented using subclasses.
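+ *
+ * <p>
+ * For example, {@code emitInt(0x11223344, 0)} stores the bytes {@code 44 33 22 11} in a
+ * {@link LittleEndian} buffer and {@code 11 22 33 44} in a {@link BigEndian} buffer.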
+ */
+abstract class Buffer {
+
+    protected byte[] data;
+    protected int position;
+
+    public Buffer() {
+        data = new byte[AsmOptions.InitialCodeBufferSize];
+    }
+
+    public int position() {
+        return position;
+    }
+
+    public void setPosition(int position) {
+        assert position >= 0 && position <= data.length;
+        this.position = position;
+    }
+
+    /**
+     * Closes this buffer. No extra data can be written to this buffer after this call.
+     *
+     * @param trimmedCopy if {@code true}, then a copy of the underlying byte array up to (but not
+     *            including) {@code position()} is returned
+     * @return the data in this buffer or a trimmed copy if {@code trimmedCopy} is {@code true}
+     */
+    public byte[] close(boolean trimmedCopy) {
+        byte[] result = trimmedCopy ? Arrays.copyOf(data, position()) : data;
+        data = null;
+        return result;
+    }
+
+    public byte[] copyData(int start, int end) {
+        if (data == null) {
+            return null;
+        }
+        return Arrays.copyOfRange(data, start, end);
+    }
+
+    /**
+     * Copies the data from this buffer into a given array.
+     *
+     * @param dst the destination array
+     * @param off starting position in {@code dst}
+     * @param len number of bytes to copy
+     */
+    public void copyInto(byte[] dst, int off, int len) {
+        System.arraycopy(data, 0, dst, off, len);
+    }
+
+    protected void ensureSize(int length) {
+        if (length >= data.length) {
+            data = Arrays.copyOf(data, length * 4);
+        }
+    }
+
+    public void emitBytes(byte[] arr, int off, int len) {
+        ensureSize(position + len);
+        System.arraycopy(arr, off, data, position, len);
+        position += len;
+    }
+
+    public void emitByte(int b) {
+        position = emitByte(b, position);
+    }
+
+    public void emitShort(int b) {
+        position = emitShort(b, position);
+    }
+
+    public void emitInt(int b) {
+        position = emitInt(b, position);
+    }
+
+    public void emitLong(long b) {
+        position = emitLong(b, position);
+    }
+
+    public int emitBytes(byte[] arr, int pos) {
+        final int len = arr.length;
+        final int newPos = pos + len;
+        ensureSize(newPos);
+        System.arraycopy(arr, 0, data, pos, len);
+        return newPos;
+    }
+
+    public int emitByte(int b, int pos) {
+        assert NumUtil.isUByte(b) || NumUtil.isByte(b);
+        int newPos = pos + 1;
+        ensureSize(newPos);
+        data[pos] = (byte) (b & 0xFF);
+        return newPos;
+    }
+
+    public abstract int emitShort(int b, int pos);
+
+    public abstract int emitInt(int b, int pos);
+
+    public abstract int emitLong(long b, int pos);
+
+    public int getByte(int pos) {
+        return data[pos] & 0xff;
+    }
+
+    public abstract int getShort(int pos);
+
+    public abstract int getInt(int pos);
+
+    public static final class BigEndian extends Buffer {
+
+        @Override
+        public int emitShort(int b, int pos) {
+            assert NumUtil.isUShort(b) || NumUtil.isShort(b);
+            int newPos = pos + 2;
+            ensureSize(newPos);
+            data[pos] = (byte) ((b >> 8) & 0xFF);
+            data[pos + 1] = (byte) (b & 0xFF);
+            return newPos;
+        }
+
+        @Override
+        public int emitInt(int b, int pos) {
+            int newPos = pos + 4;
+            ensureSize(newPos);
+            data[pos] = (byte) ((b >> 24) & 0xFF);
+            data[pos + 1] = (byte) ((b >> 16) & 0xFF);
+            data[pos + 2] = (byte) ((b >> 8) & 0xFF);
+            data[pos + 3] = (byte) (b & 0xFF);
+            return newPos;
+        }
+
+        @Override
+        public int emitLong(long b, int pos) {
+            int newPos = pos + 8;
+            ensureSize(newPos);
+            data[pos] = (byte) ((b >> 56) & 0xFF);
+            data[pos + 1] = (byte) ((b >> 48) & 0xFF);
+            data[pos + 2] = (byte) ((b >> 40) & 0xFF);
+            data[pos + 3] = (byte) ((b >> 32) & 0xFF);
+            data[pos + 4] = (byte) ((b >> 24) & 0xFF);
+            data[pos + 5] = (byte) ((b >> 16) & 0xFF);
+            data[pos + 6] = (byte) ((b >> 8) & 0xFF);
+            data[pos + 7] = (byte) (b & 0xFF);
+            return newPos;
+        }
+
+        @Override
+        public int getShort(int pos) {
+            return (data[pos + 0] & 0xff) << 8 | (data[pos + 1] & 0xff) << 0;
+        }
+
+        @Override
+        public int getInt(int pos) {
+            return (data[pos + 0] & 0xff) << 24 | (data[pos + 1] & 0xff) << 16 | (data[pos + 2] & 0xff) << 8 | (data[pos + 3] & 0xff) << 0;
+        }
+    }
+
+    public static final class LittleEndian extends Buffer {
+
+        @Override
+        public int emitShort(int b, int pos) {
+            assert NumUtil.isUShort(b) || NumUtil.isShort(b);
+            int newPos = pos + 2;
+            ensureSize(newPos);
+            data[pos] = (byte) (b & 0xFF);
+            data[pos + 1] = (byte) ((b >> 8) & 0xFF);
+            return newPos;
+        }
+
+        @Override
+        public int emitInt(int b, int pos) {
+            int newPos = pos + 4;
+            ensureSize(newPos);
+            data[pos] = (byte) (b & 0xFF);
+            data[pos + 1] = (byte) ((b >> 8) & 0xFF);
+            data[pos + 2] = (byte) ((b >> 16) & 0xFF);
+            data[pos + 3] = (byte) ((b >> 24) & 0xFF);
+            return newPos;
+        }
+
+        @Override
+        public int emitLong(long b, int pos) {
+            int newPos = pos + 8;
+            ensureSize(newPos);
+            data[pos] = (byte) (b & 0xFF);
+            data[pos + 1] = (byte) ((b >> 8) & 0xFF);
+            data[pos + 2] = (byte) ((b >> 16) & 0xFF);
+            data[pos + 3] = (byte) ((b >> 24) & 0xFF);
+            data[pos + 4] = (byte) ((b >> 32) & 0xFF);
+            data[pos + 5] = (byte) ((b >> 40) & 0xFF);
+            data[pos + 6] = (byte) ((b >> 48) & 0xFF);
+            data[pos + 7] = (byte) ((b >> 56) & 0xFF);
+            return newPos;
+        }
+
+        @Override
+        public int getShort(int pos) {
+            return (data[pos + 1] & 0xff) << 8 | (data[pos + 0] & 0xff) << 0;
+        }
+
+        @Override
+        public int getInt(int pos) {
+            return (data[pos + 3] & 0xff) << 24 | (data[pos + 2] & 0xff) << 16 | (data[pos + 1] & 0xff) << 8 | (data[pos + 0] & 0xff) << 0;
+        }
+    }
+
+    public void reset() {
+        position = 0;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.jvmci.asm/src/com/oracle/jvmci/asm/Label.java	Wed Jun 03 18:06:44 2015 +0200
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.jvmci.asm;
+
+import java.util.*;
+
+/**
+ * This class represents a label within assembly code.
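+ *
+ * <p>
+ * Forward references to an unbound label are recorded via {@link #addPatchAt(int)}; once the label
+ * is bound to a position (see {@link Assembler#bind(Label)}), {@link #patchInstructions(Assembler)}
+ * rewrites all recorded branch instructions to the final target.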
+ */
+public final class Label {
+
+    private int position = -1;
+    private int blockId = -1;
+
+    /**
+     * References to instructions that jump to this unresolved label. These instructions need to be
+     * patched when the label is bound using the {@link #patchInstructions(Assembler)} method.
+     */
+    private ArrayList<Integer> patchPositions = null;
+
+    /**
+     * Returns the position of this label in the code buffer.
+     *
+     * @return the position
+     */
+    public int position() {
+        assert position >= 0 : "Unbound label is being referenced";
+        return position;
+    }
+
+    public Label() {
+    }
+
+    public Label(int id) {
+        blockId = id;
+    }
+
+    public int getBlockId() {
+        return blockId;
+    }
+
+    /**
+     * Binds the label to the specified position.
+     *
+     * @param pos the position
+     */
+    protected void bind(int pos) {
+        this.position = pos;
+        assert isBound();
+    }
+
+    public boolean isBound() {
+        return position >= 0;
+    }
+
+    public void addPatchAt(int branchLocation) {
+        assert !isBound() : "Label is already bound " + this + " " + branchLocation + " at position " + position;
+        if (patchPositions == null) {
+            patchPositions = new ArrayList<>(2);
+        }
+        patchPositions.add(branchLocation);
+    }
+
+    protected void patchInstructions(Assembler masm) {
+        assert isBound() : "Label should be bound";
+        if (patchPositions != null) {
+            int target = position;
+            for (int i = 0; i < patchPositions.size(); ++i) {
+                int pos = patchPositions.get(i);
+                masm.patchJumpTarget(pos, target);
+            }
+        }
+    }
+
+    public void reset() {
+        if (this.patchPositions != null) {
+            this.patchPositions.clear();
+        }
+        this.position = -1;
+    }
+
+    @Override
+    public String toString() {
+        return isBound() ? String.valueOf(position()) : "?";
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.jvmci.asm/src/com/oracle/jvmci/asm/NumUtil.java	Wed Jun 03 18:06:44 2015 +0200
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.jvmci.asm;
+
+// JaCoCo Exclude
+
+/**
+ * A collection of static utility functions that check ranges of numbers.
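+ *
+ * <p>
+ * For example, {@code isByte(127)} and {@code isUByte(255)} are {@code true}, while
+ * {@code isByte(128)} is {@code false}.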
+ */
+public class NumUtil {
+
+    public static boolean isShiftCount(int x) {
+        return 0 <= x && x < 32;
+    }
+
+    /**
+     * Determines if a given {@code int} value is in the range of unsigned byte values.
+     */
+    public static boolean isUByte(int x) {
+        return (x & 0xff) == x;
+    }
+
+    /**
+     * Determines if a given {@code int} value is in the range of signed byte values.
+     */
+    public static boolean isByte(int x) {
+        return (byte) x == x;
+    }
+
+    /**
+     * Determines if a given {@code long} value is in the range of unsigned byte values.
+     */
+    public static boolean isUByte(long x) {
+        return (x & 0xffL) == x;
+    }
+
+    /**
+     * Determines if a given {@code long} value is in the range of signed byte values.
+     */
+    public static boolean isByte(long l) {
+        return (byte) l == l;
+    }
+
+    /**
+     * Determines if a given {@code long} value is in the range of unsigned int values.
+     */
+    public static boolean isUInt(long x) {
+        return (x & 0xffffffffL) == x;
+    }
+
+    /**
+     * Determines if a given {@code long} value is in the range of signed int values.
+     */
+    public static boolean isInt(long l) {
+        return (int) l == l;
+    }
+
+    /**
+     * Determines if a given {@code int} value is in the range of signed short values.
+     */
+    public static boolean isShort(int x) {
+        return (short) x == x;
+    }
+
+    /**
+     * Determines if a given {@code long} value is in the range of signed short values.
+     */
+    public static boolean isShort(long x) {
+        return (short) x == x;
+    }
+
+    public static boolean isUShort(int s) {
+        return s == (s & 0xFFFF);
+    }
+
+    public static boolean is32bit(long x) {
+        return -0x80000000L <= x && x < 0x80000000L;
+    }
+
+    public static short safeToShort(int v) {
+        assert isShort(v);
+        return (short) v;
+    }
+
+    public static int roundUp(int number, int mod) {
+        return ((number + mod - 1) / mod) * mod;
+    }
+
+    public static long roundUp(long number, long mod) {
+        return ((number + mod - 1L) / mod) * mod;
+    }
+}
--- a/make/defs.make	Wed Jun 03 17:12:05 2015 +0200
+++ b/make/defs.make	Wed Jun 03 18:06:44 2015 +0200
@@ -364,6 +364,7 @@
 EXPORT_LIST += $(EXPORT_JRE_LIB_JVMCI_DIR)/graal.jar
 EXPORT_LIST += $(EXPORT_JRE_LIB_JVMCI_DIR)/graal-truffle.jar
 EXPORT_LIST += $(EXPORT_JRE_LIB_JVMCI_DIR)/jvmci-api.jar
+EXPORT_LIST += $(EXPORT_JRE_LIB_JVMCI_DIR)/jvmci-util.jar
 EXPORT_LIST += $(EXPORT_JRE_LIB_JVMCI_DIR)/jvmci-hotspot.jar
 
 EXPORT_LIST += $(EXPORT_JRE_LIB_JVMCI_SERVICES_DIR)/com.oracle.jvmci.hotspot.HotSpotJVMCIBackendFactory
--- a/mx/mx_graal.py	Wed Jun 03 17:12:05 2015 +0200
+++ b/mx/mx_graal.py	Wed Jun 03 18:06:44 2015 +0200
@@ -626,7 +626,11 @@
     """
     zf = zipfile.ZipFile(dist.path, 'r')
     graalClassfilePath = 'com/oracle/graal/api/runtime/Graal.class'
-    graalClassfile = zf.read(graalClassfilePath)
+    try:
+        graalClassfile = zf.read(graalClassfilePath)
+    except KeyError:
+        mx.log(graalClassfilePath + ' is not present in ' + dist.path)
+        return
     placeholder = '@@@@@@@@@@@@@@@@graal.version@@@@@@@@@@@@@@@@'
     placeholderLen = len(placeholder)
     versionSpec = '{:' + str(placeholderLen) + '}'
--- a/mx/suite.py	Wed Jun 03 17:12:05 2015 +0200
+++ b/mx/suite.py	Wed Jun 03 18:06:44 2015 +0200
@@ -304,6 +304,39 @@
       "workingSets" : "JVMCI,SPARC",
     },
 
+    "com.oracle.jvmci.asm" : {
+      "subDir" : "graal",
+      "sourceDirs" : ["src"],
+      "dependencies" : ["com.oracle.jvmci.code"],
+      "checkstyle" : "com.oracle.graal.graph",
+      "javaCompliance" : "1.8",
+      "workingSets" : "Graal,Assembler",
+    },
+
+    "com.oracle.jvmci.asm.amd64" : {
+      "subDir" : "graal",
+      "sourceDirs" : ["src"],
+      "dependencies" : [
+        "com.oracle.jvmci.asm",
+        "com.oracle.jvmci.amd64",
+      ],
+      "checkstyle" : "com.oracle.graal.graph",
+      "javaCompliance" : "1.8",
+      "workingSets" : "Graal,Assembler,AMD64",
+    },
+
+    "com.oracle.jvmci.asm.sparc" : {
+      "subDir" : "graal",
+      "sourceDirs" : ["src"],
+      "dependencies" : [
+        "com.oracle.jvmci.asm",
+        "com.oracle.jvmci.sparc",
+      ],
+      "checkstyle" : "com.oracle.graal.graph",
+      "javaCompliance" : "1.8",
+      "workingSets" : "Graal,Assembler,SPARC",
+    },
+
     # ------------- JVMCI:HotSpot -------------
 
     "com.oracle.jvmci.hotspot" : {
@@ -503,6 +536,7 @@
       "subDir" : "graal",
       "sourceDirs" : ["src"],
       "dependencies" : [
+        "com.oracle.graal.hotspot",
         "com.oracle.jvmci.hotspot.sparc",
         "com.oracle.graal.compiler.sparc",
         "com.oracle.graal.replacements.sparc",
@@ -529,7 +563,7 @@
       "subDir" : "graal",
       "sourceDirs" : ["src"],
       "dependencies" : [
-        "com.oracle.graal.asm.amd64",
+        "com.oracle.jvmci.asm.amd64",
         "com.oracle.graal.hotspot.test",
       ],
       "checkstyle" : "com.oracle.graal.graph",
@@ -588,7 +622,7 @@
       "sourceDirs" : ["src"],
       "dependencies" : [
         "com.oracle.graal.compiler.common",
-        "com.oracle.graal.asm",
+        "com.oracle.jvmci.asm",
       ],
       "checkstyle" : "com.oracle.graal.graph",
       "javaCompliance" : "1.8",
@@ -623,7 +657,7 @@
       "sourceDirs" : ["src"],
       "dependencies" : [
         "com.oracle.graal.lir",
-        "com.oracle.graal.asm.amd64",
+        "com.oracle.jvmci.asm.amd64",
       ],
       "checkstyle" : "com.oracle.graal.graph",
       "javaCompliance" : "1.8",
@@ -633,7 +667,10 @@
     "com.oracle.graal.lir.sparc" : {
       "subDir" : "graal",
       "sourceDirs" : ["src"],
-      "dependencies" : ["com.oracle.graal.asm.sparc"],
+      "dependencies" : [
+        "com.oracle.jvmci.asm.sparc",
+        "com.oracle.graal.compiler",
+      ],
       "checkstyle" : "com.oracle.graal.graph",
       "javaCompliance" : "1.8",
       "workingSets" : "Graal,LIR,SPARC",
@@ -987,15 +1024,6 @@
       "jacoco" : "exclude",
     },
 
-    "com.oracle.graal.asm" : {
-      "subDir" : "graal",
-      "sourceDirs" : ["src"],
-      "dependencies" : ["com.oracle.jvmci.code"],
-      "checkstyle" : "com.oracle.graal.graph",
-      "javaCompliance" : "1.8",
-      "workingSets" : "Graal,Assembler",
-    },
-
     "com.oracle.graal.asm.test" : {
       "subDir" : "graal",
       "sourceDirs" : ["src"],
@@ -1008,42 +1036,18 @@
       "workingSets" : "Graal,Assembler,Test",
     },
 
-    "com.oracle.graal.asm.amd64" : {
-      "subDir" : "graal",
-      "sourceDirs" : ["src"],
-      "dependencies" : [
-        "com.oracle.graal.asm",
-        "com.oracle.jvmci.amd64",
-      ],
-      "checkstyle" : "com.oracle.graal.graph",
-      "javaCompliance" : "1.8",
-      "workingSets" : "Graal,Assembler,AMD64",
-    },
-
     "com.oracle.graal.asm.amd64.test" : {
       "subDir" : "graal",
       "sourceDirs" : ["src"],
       "dependencies" : [
         "com.oracle.graal.asm.test",
-        "com.oracle.graal.asm.amd64",
+        "com.oracle.jvmci.asm.amd64",
       ],
       "checkstyle" : "com.oracle.graal.graph",
       "javaCompliance" : "1.8",
       "workingSets" : "Graal,Assembler,AMD64,Test",
     },
 
-    "com.oracle.graal.asm.sparc" : {
-      "subDir" : "graal",
-      "sourceDirs" : ["src"],
-      "dependencies" : [
-        "com.oracle.graal.hotspot",
-        "com.oracle.jvmci.sparc",
-      ],
-      "checkstyle" : "com.oracle.graal.graph",
-      "javaCompliance" : "1.8",
-      "workingSets" : "Graal,Assembler,SPARC",
-    },
-
     # ------------- Truffle -------------
 
     "com.oracle.truffle.api" : {
@@ -1288,7 +1292,7 @@
       "sourceDirs" : ["src"],
       "dependencies" : [
         "com.oracle.graal.truffle.hotspot",
-        "com.oracle.graal.asm.sparc",
+        "com.oracle.jvmci.asm.sparc",
       ],
       "checkstyle" : "com.oracle.graal.graph",
       "javaCompliance" : "1.8",
@@ -1330,8 +1334,8 @@
       "subDir" : "graal",
       "sourcesPath" : "build/jvmci-util.src.zip",
       "dependencies" : [
-        "com.oracle.jvmci.amd64",
-        "com.oracle.jvmci.sparc",
+        "com.oracle.jvmci.asm.amd64",
+        "com.oracle.jvmci.asm.sparc",
       ],
       "distDependencies" : [
         "JVMCI_API",