changeset 23217:a1bfeec72458

AArch64 Graal Port
author twisti
date Thu, 24 Dec 2015 11:43:35 -1000
parents 16b472b9ca55
children 75a807751aa6
files graal/com.oracle.graal.asm.aarch64.test/src/com/oracle/graal/asm/aarch64/test/AArch64MacroAssemblerTest.java graal/com.oracle.graal.asm.aarch64.test/src/com/oracle/graal/asm/aarch64/test/TestProtectedAssembler.java graal/com.oracle.graal.asm.aarch64/src/com/oracle/graal/asm/aarch64/AArch64Address.java graal/com.oracle.graal.asm.aarch64/src/com/oracle/graal/asm/aarch64/AArch64Assembler.java graal/com.oracle.graal.asm.aarch64/src/com/oracle/graal/asm/aarch64/AArch64MacroAssembler.java graal/com.oracle.graal.asm/src/com/oracle/graal/asm/NumUtil.java graal/com.oracle.graal.compiler.aarch64/src/com/oracle/graal/compiler/aarch64/AArch64AddressLowering.java graal/com.oracle.graal.compiler.aarch64/src/com/oracle/graal/compiler/aarch64/AArch64ArithmeticLIRGenerator.java graal/com.oracle.graal.compiler.aarch64/src/com/oracle/graal/compiler/aarch64/AArch64FloatConvertOp.java graal/com.oracle.graal.compiler.aarch64/src/com/oracle/graal/compiler/aarch64/AArch64LIRGenerator.java graal/com.oracle.graal.compiler.aarch64/src/com/oracle/graal/compiler/aarch64/AArch64LIRKindTool.java graal/com.oracle.graal.compiler.aarch64/src/com/oracle/graal/compiler/aarch64/AArch64MoveFactory.java graal/com.oracle.graal.compiler.aarch64/src/com/oracle/graal/compiler/aarch64/AArch64NodeLIRBuilder.java graal/com.oracle.graal.compiler.aarch64/src/com/oracle/graal/compiler/aarch64/AArch64NodeMatchRules.java graal/com.oracle.graal.compiler.aarch64/src/com/oracle/graal/compiler/aarch64/AArch64SuitesProvider.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotBackend.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotBackendFactory.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotCRuntimeCallEpilogueOp.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotCRuntimeCallPrologueOp.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotDeoptimizeCallerOp.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotDeoptimizeOp.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotDirectStaticCallOp.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotDirectVirtualCallOp.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotEpilogueOp.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotForeignCallsProvider.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotJumpToExceptionHandlerInCallerOp.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotLIRGenerationResult.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotLIRGenerator.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotLIRKindTool.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotLoweringProvider.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotMove.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotMoveFactory.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotNodeLIRBuilder.java 
graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotRegisterAllocationConfig.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotReturnOp.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotSafepointOp.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotUnwindOp.java graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64IndirectCallOp.java graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64AddressValue.java graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64ArithmeticLIRGeneratorTool.java graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64ArithmeticOp.java graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64BitManipulationOp.java graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64BlockEndOp.java graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64BreakpointOp.java graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64Call.java graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64Compare.java graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64ControlFlow.java graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64FrameMap.java graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64FrameMapBuilder.java graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64LIRInstruction.java graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64Move.java graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64PauseOp.java graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64ReinterpretOp.java graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64SignExtendOp.java graal/com.oracle.graal.replacements.aarch64/src/com/oracle/graal/replacements/aarch64/AArch64CountLeadingZerosNode.java graal/com.oracle.graal.replacements.aarch64/src/com/oracle/graal/replacements/aarch64/AArch64FloatArithmeticSnippets.java graal/com.oracle.graal.replacements.aarch64/src/com/oracle/graal/replacements/aarch64/AArch64GraphBuilderPlugins.java graal/com.oracle.graal.replacements.aarch64/src/com/oracle/graal/replacements/aarch64/AArch64IntegerArithmeticSnippets.java graal/com.oracle.graal.replacements.aarch64/src/com/oracle/graal/replacements/aarch64/AArch64IntegerSubstitutions.java graal/com.oracle.graal.replacements.aarch64/src/com/oracle/graal/replacements/aarch64/AArch64LongSubstitutions.java graal/com.oracle.graal.replacements.aarch64/src/com/oracle/graal/replacements/aarch64/AArch64MathSubstitutions.java mx.graal/suite.py
diffstat 62 files changed, 12195 insertions(+), 1 deletions(-) [+]
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm.aarch64.test/src/com/oracle/graal/asm/aarch64/test/AArch64MacroAssemblerTest.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.graal.asm.aarch64.test;
+
+import static org.junit.Assert.assertArrayEquals;
+
+import java.util.EnumSet;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.oracle.graal.asm.NumUtil;
+import com.oracle.graal.asm.aarch64.AArch64Address;
+import com.oracle.graal.asm.aarch64.AArch64Assembler;
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler.AddressGenerationPlan;
+import com.oracle.graal.test.GraalTest;
+
+import jdk.vm.ci.aarch64.AArch64;
+import jdk.vm.ci.aarch64.AArch64.CPUFeature;
+import jdk.vm.ci.code.Architecture;
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.code.TargetDescription;
+
+public class AArch64MacroAssemblerTest extends GraalTest {
+
+    private AArch64MacroAssembler masm;
+    private TestProtectedAssembler asm;
+    private Register base;
+    private Register index;
+    private Register scratch;
+
+    private static EnumSet<AArch64.CPUFeature> computeFeatures() {
+        EnumSet<AArch64.CPUFeature> features = EnumSet.noneOf(AArch64.CPUFeature.class);
+        features.add(CPUFeature.FP);
+        return features;
+    }
+
+    private static EnumSet<AArch64.Flag> computeFlags() {
+        EnumSet<AArch64.Flag> flags = EnumSet.noneOf(AArch64.Flag.class);
+        return flags;
+    }
+
+    private static TargetDescription createTarget() {
+        final int stackFrameAlignment = 16;
+        final int implicitNullCheckLimit = 4096;
+        final boolean inlineObjects = true;
+        Architecture arch = new AArch64(computeFeatures(), computeFlags());
+        return new TargetDescription(arch, true, stackFrameAlignment, implicitNullCheckLimit, inlineObjects);
+    }
+
+    @Before
+    public void setupEnvironment() {
+        TargetDescription target = createTarget();
+        masm = new AArch64MacroAssembler(target);
+        asm = new TestProtectedAssembler(target);
+        base = AArch64.r10;
+        index = AArch64.r13;
+        scratch = AArch64.r15;
+    }
+
+    @Test
+    public void testGenerateAddressPlan() {
+        AddressGenerationPlan plan = AArch64MacroAssembler.generateAddressPlan(NumUtil.getNbitNumberInt(8), false, 0);
+        Assert.assertTrue(plan.workPlan == AddressGenerationPlan.WorkPlan.NO_WORK && !plan.needsScratch &&
+                        (plan.addressingMode == AArch64Address.AddressingMode.IMMEDIATE_SCALED || plan.addressingMode == AArch64Address.AddressingMode.IMMEDIATE_UNSCALED));
+
+        plan = AArch64MacroAssembler.generateAddressPlan(NumUtil.getNbitNumberInt(8), false, 1);
+        Assert.assertTrue(plan.workPlan == AddressGenerationPlan.WorkPlan.NO_WORK && !plan.needsScratch &&
+                        (plan.addressingMode == AArch64Address.AddressingMode.IMMEDIATE_SCALED || plan.addressingMode == AArch64Address.AddressingMode.IMMEDIATE_UNSCALED));
+
+        plan = AArch64MacroAssembler.generateAddressPlan(-NumUtil.getNbitNumberInt(8) - 1, false, 0);
+        Assert.assertTrue(plan.workPlan == AddressGenerationPlan.WorkPlan.NO_WORK && !plan.needsScratch && plan.addressingMode == AArch64Address.AddressingMode.IMMEDIATE_UNSCALED);
+
+        plan = AArch64MacroAssembler.generateAddressPlan(NumUtil.getNbitNumberInt(12), false, 1);
+        Assert.assertTrue(plan.workPlan == AddressGenerationPlan.WorkPlan.NO_WORK && !plan.needsScratch && plan.addressingMode == AArch64Address.AddressingMode.IMMEDIATE_SCALED);
+
+        plan = AArch64MacroAssembler.generateAddressPlan(NumUtil.getNbitNumberInt(12) << 2, false, 4);
+        Assert.assertTrue(plan.workPlan == AddressGenerationPlan.WorkPlan.NO_WORK && !plan.needsScratch && plan.addressingMode == AArch64Address.AddressingMode.IMMEDIATE_SCALED);
+
+        plan = AArch64MacroAssembler.generateAddressPlan(0, false, 8);
+        Assert.assertTrue(plan.workPlan == AddressGenerationPlan.WorkPlan.NO_WORK && !plan.needsScratch && plan.addressingMode == AArch64Address.AddressingMode.REGISTER_OFFSET);
+
+        plan = AArch64MacroAssembler.generateAddressPlan(0, false, 0);
+        Assert.assertTrue(plan.workPlan == AddressGenerationPlan.WorkPlan.NO_WORK && !plan.needsScratch && plan.addressingMode == AArch64Address.AddressingMode.REGISTER_OFFSET);
+
+        plan = AArch64MacroAssembler.generateAddressPlan(NumUtil.getNbitNumberInt(9), false, 0);
+        Assert.assertTrue(plan.workPlan == AddressGenerationPlan.WorkPlan.ADD_TO_BASE && !plan.needsScratch && plan.addressingMode == AArch64Address.AddressingMode.REGISTER_OFFSET);
+
+        plan = AArch64MacroAssembler.generateAddressPlan(NumUtil.getNbitNumberInt(12), false, 8);
+        Assert.assertTrue(plan.workPlan == AddressGenerationPlan.WorkPlan.ADD_TO_BASE && !plan.needsScratch && plan.addressingMode == AArch64Address.AddressingMode.REGISTER_OFFSET);
+
+        plan = AArch64MacroAssembler.generateAddressPlan(NumUtil.getNbitNumberInt(13), false, 8);
+        Assert.assertTrue(plan.workPlan == AddressGenerationPlan.WorkPlan.ADD_TO_BASE && plan.needsScratch && plan.addressingMode == AArch64Address.AddressingMode.REGISTER_OFFSET);
+
+        plan = AArch64MacroAssembler.generateAddressPlan(-NumUtil.getNbitNumberInt(12), false, 8);
+        Assert.assertTrue(plan.workPlan == AddressGenerationPlan.WorkPlan.ADD_TO_BASE && !plan.needsScratch && plan.addressingMode == AArch64Address.AddressingMode.REGISTER_OFFSET);
+
+        plan = AArch64MacroAssembler.generateAddressPlan(-(NumUtil.getNbitNumberInt(12) << 12), false, 8);
+        Assert.assertTrue(plan.workPlan == AddressGenerationPlan.WorkPlan.ADD_TO_BASE && !plan.needsScratch && plan.addressingMode == AArch64Address.AddressingMode.REGISTER_OFFSET);
+
+        plan = AArch64MacroAssembler.generateAddressPlan(NumUtil.getNbitNumberInt(12), true, 8);
+        Assert.assertTrue(plan.workPlan == AddressGenerationPlan.WorkPlan.ADD_TO_BASE && !plan.needsScratch && plan.addressingMode == AArch64Address.AddressingMode.REGISTER_OFFSET);
+
+        plan = AArch64MacroAssembler.generateAddressPlan(NumUtil.getNbitNumberInt(12) << 3, true, 8);
+        Assert.assertTrue(plan.workPlan == AddressGenerationPlan.WorkPlan.ADD_TO_INDEX && !plan.needsScratch && plan.addressingMode == AArch64Address.AddressingMode.REGISTER_OFFSET);
+
+        plan = AArch64MacroAssembler.generateAddressPlan(NumUtil.getNbitNumberInt(13) << 3, true, 8);
+        Assert.assertTrue(plan.workPlan == AddressGenerationPlan.WorkPlan.ADD_TO_INDEX && plan.needsScratch && plan.addressingMode == AArch64Address.AddressingMode.REGISTER_OFFSET);
+    }
+
+    @Test
+    public void testMakeAddressNoAction() {
+        AArch64Address address = masm.makeAddress(base, NumUtil.getNbitNumberInt(12) << 3, AArch64.zr, false, 8, null, false);
+        Assert.assertTrue(address.isScaled() && address.getAddressingMode() == AArch64Address.AddressingMode.IMMEDIATE_SCALED && address.getBase().equals(base) &&
+                        address.getOffset().equals(AArch64.zr) && address.getImmediateRaw() == NumUtil.getNbitNumberInt(12));
+        // No code generated.
+        compareAssembly();
+    }
+
+    @Test
+    public void testMakeAddressAddIndex() {
+        AArch64Address address = masm.makeAddress(base, NumUtil.getNbitNumberInt(8) << 5, index, false, 8, null, true);
+        Assert.assertTrue(address.isScaled() && address.getAddressingMode() == AArch64Address.AddressingMode.REGISTER_OFFSET && address.getBase().equals(base) && address.getOffset().equals(index));
+        asm.add(64, index, index, NumUtil.getNbitNumberInt(8) << 2);
+        compareAssembly();
+    }
+
+    @Test
+    public void testMakeAddressAddIndexNoOverwrite() {
+        AArch64Address address = masm.makeAddress(base, NumUtil.getNbitNumberInt(8) << 5, index, false, 8, scratch, false);
+        Assert.assertTrue(address.isScaled() && address.getAddressingMode() == AArch64Address.AddressingMode.REGISTER_OFFSET && address.getBase().equals(base) && address.getOffset().equals(scratch));
+        asm.add(64, scratch, index, NumUtil.getNbitNumberInt(8) << 2);
+        compareAssembly();
+    }
+
+    @Test
+    public void testMakeAddressAddBaseNoOverwrite() {
+        AArch64Address address = masm.makeAddress(base, NumUtil.getNbitNumberInt(12), index, false, 8, scratch, false);
+        Assert.assertTrue(address.isScaled() && address.getAddressingMode() == AArch64Address.AddressingMode.REGISTER_OFFSET && address.getBase().equals(scratch) && address.getOffset().equals(index));
+        asm.add(64, scratch, base, NumUtil.getNbitNumberInt(12));
+        compareAssembly();
+    }
+
+    @Test
+    public void testMakeAddressAddBase() {
+        AArch64Address address = masm.makeAddress(base, NumUtil.getNbitNumberInt(12), index, false, 8, null, true);
+        Assert.assertTrue(address.isScaled() && address.getAddressingMode() == AArch64Address.AddressingMode.REGISTER_OFFSET && address.getBase().equals(base) && address.getOffset().equals(index));
+        asm.add(64, base, base, NumUtil.getNbitNumberInt(12));
+        compareAssembly();
+    }
+
+    @Test
+    public void testMakeAddressAddIndexNoOverwriteExtend() {
+        AArch64Address address = masm.makeAddress(base, NumUtil.getNbitNumberInt(8) << 5, index, true, 8, scratch, false);
+        Assert.assertTrue(address.isScaled() && address.getAddressingMode() == AArch64Address.AddressingMode.EXTENDED_REGISTER_OFFSET && address.getBase().equals(base) &&
+                        address.getOffset().equals(scratch) && address.getExtendType() == AArch64Assembler.ExtendType.SXTW);
+        asm.add(32, scratch, index, NumUtil.getNbitNumberInt(8) << 2);
+        compareAssembly();
+    }
+
+    @Test
+    public void testMakeAddressAddIndexExtend() {
+        AArch64Address address = masm.makeAddress(base, NumUtil.getNbitNumberInt(8) << 5, index, true, 8, scratch, true);
+        Assert.assertTrue(address.isScaled() && address.getAddressingMode() == AArch64Address.AddressingMode.EXTENDED_REGISTER_OFFSET && address.getBase().equals(base) &&
+                        address.getOffset().equals(index) && address.getExtendType() == AArch64Assembler.ExtendType.SXTW);
+        asm.add(32, index, index, NumUtil.getNbitNumberInt(8) << 2);
+        compareAssembly();
+    }
+
+    @Test
+    public void testLoadAddressUnscaled() {
+        Register dst = AArch64.r26;
+        AArch64Address address = AArch64Address.createUnscaledImmediateAddress(base, NumUtil.getNbitNumberInt(8));
+        masm.loadAddress(dst, address, 8);
+        asm.add(64, dst, base, NumUtil.getNbitNumberInt(8));
+        compareAssembly();
+    }
+
+    @Test
+    public void testLoadAddressUnscaled2() {
+        Register dst = AArch64.r26;
+        AArch64Address address = AArch64Address.createUnscaledImmediateAddress(base, -NumUtil.getNbitNumberInt(8));
+        masm.loadAddress(dst, address, 8);
+        asm.sub(64, dst, base, NumUtil.getNbitNumberInt(8));
+        compareAssembly();
+    }
+
+    @Test
+    public void testLoadAddressScaled() {
+        Register dst = AArch64.r26;
+        AArch64Address address = AArch64Address.createScaledImmediateAddress(base, NumUtil.getNbitNumberInt(12));
+        masm.loadAddress(dst, address, 8);
+        asm.add(64, dst, base, NumUtil.getNbitNumberInt(9) << 3);
+        asm.add(64, dst, dst, NumUtil.getNbitNumberInt(3) << 12);
+        compareAssembly();
+    }
+
+    @Test
+    public void testLoadAddressScaledLowerOnly() {
+        Register dst = AArch64.r26;
+        AArch64Address address = AArch64Address.createScaledImmediateAddress(base, NumUtil.getNbitNumberInt(5));
+        masm.loadAddress(dst, address, 8);
+        asm.add(64, dst, base, NumUtil.getNbitNumberInt(5) << 3);
+        compareAssembly();
+    }
+
+    @Test
+    public void testLoadAddressScaledHigherOnly() {
+        Register dst = AArch64.r26;
+        AArch64Address address = AArch64Address.createScaledImmediateAddress(base, 1 << 11);
+        masm.loadAddress(dst, address, 8);
+        asm.add(64, dst, base, 1 << 11 << 3);
+        compareAssembly();
+    }
+
+    @Test
+    public void testLoadAddressRegisterOffsetUnscaled() {
+        Register dst = AArch64.r26;
+        AArch64Address address = AArch64Address.createRegisterOffsetAddress(base, index, false);
+        masm.loadAddress(dst, address, 4);
+        asm.add(64, dst, base, index, AArch64Assembler.ShiftType.LSL, 0);
+        compareAssembly();
+    }
+
+    @Test
+    public void testLoadAddressRegisterOffsetScaled() {
+        Register dst = AArch64.r26;
+        AArch64Address address = AArch64Address.createRegisterOffsetAddress(base, index, true);
+        masm.loadAddress(dst, address, 4);
+        asm.add(64, dst, base, index, AArch64Assembler.ShiftType.LSL, 2);
+        compareAssembly();
+    }
+
+    @Test
+    public void testLoadAddressExtendedRegisterOffsetUnscaled() {
+        Register dst = AArch64.r26;
+        AArch64Address address = AArch64Address.createExtendedRegisterOffsetAddress(base, index, false, AArch64Assembler.ExtendType.SXTW);
+        masm.loadAddress(dst, address, 4);
+        asm.add(64, dst, base, index, AArch64Assembler.ExtendType.SXTW, 0);
+        compareAssembly();
+    }
+
+    @Test
+    public void testLoadAddressExtendedRegisterOffsetScaled() {
+        Register dst = AArch64.r26;
+        AArch64Address address = AArch64Address.createExtendedRegisterOffsetAddress(base, index, true, AArch64Assembler.ExtendType.SXTW);
+        masm.loadAddress(dst, address, 4);
+        asm.add(64, dst, base, index, AArch64Assembler.ExtendType.SXTW, 2);
+        compareAssembly();
+    }
+
+    /**
+     * Compares assembly generated by the macro assembler to the hand-generated assembly.
+     */
+    private void compareAssembly() {
+        byte[] expected = asm.close(true);
+        byte[] actual = masm.close(true);
+        assertArrayEquals(expected, actual);
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm.aarch64.test/src/com/oracle/graal/asm/aarch64/test/TestProtectedAssembler.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,550 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.graal.asm.aarch64.test;
+
+import com.oracle.graal.asm.AbstractAddress;
+import com.oracle.graal.asm.Label;
+import com.oracle.graal.asm.aarch64.AArch64Address;
+import com.oracle.graal.asm.aarch64.AArch64Assembler;
+
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.code.TargetDescription;
+
+/**
+ * Cheat so that we can test the protected methods of the assembler.
+ */
+class TestProtectedAssembler extends AArch64Assembler {
+
+    public TestProtectedAssembler(TargetDescription target) {
+        super(target);
+    }
+
+    @Override
+    protected void cbnz(int size, Register reg, int imm21, int pos) {
+        super.cbnz(size, reg, imm21, pos);
+    }
+
+    @Override
+    protected void cbz(int size, Register reg, int imm21, int pos) {
+        super.cbz(size, reg, imm21, pos);
+    }
+
+    @Override
+    public void ands(int size, Register dst, Register src, long bimm) {
+        super.ands(size, dst, src, bimm);
+    }
+
+    @Override
+    protected void b(ConditionFlag condition, int imm21) {
+        super.b(condition, imm21);
+    }
+
+    @Override
+    protected void b(ConditionFlag condition, int imm21, int pos) {
+        super.b(condition, imm21, pos);
+    }
+
+    @Override
+    protected void cbnz(int size, Register reg, int imm21) {
+        super.cbnz(size, reg, imm21);
+    }
+
+    @Override
+    protected void cbz(int size, Register reg, int imm21) {
+        super.cbz(size, reg, imm21);
+    }
+
+    @Override
+    protected void b(int imm28) {
+        super.b(imm28);
+    }
+
+    @Override
+    protected void b(int imm28, int pos) {
+        super.b(imm28, pos);
+    }
+
+    @Override
+    public void bl(int imm28) {
+        super.bl(imm28);
+    }
+
+    @Override
+    public void blr(Register reg) {
+        super.blr(reg);
+    }
+
+    @Override
+    protected void br(Register reg) {
+        super.br(reg);
+    }
+
+    @Override
+    public void ret(Register reg) {
+        super.ret(reg);
+    }
+
+    @Override
+    public void ldr(int srcSize, Register rt, AArch64Address address) {
+        super.ldr(srcSize, rt, address);
+    }
+
+    @Override
+    public void ldrs(int targetSize, int srcSize, Register rt, AArch64Address address) {
+        super.ldrs(targetSize, srcSize, rt, address);
+    }
+
+    @Override
+    public void str(int destSize, Register rt, AArch64Address address) {
+        super.str(destSize, rt, address);
+    }
+
+    @Override
+    protected void ldxr(int size, Register rt, AArch64Address address) {
+        super.ldxr(size, rt, address);
+    }
+
+    @Override
+    protected void stxr(int size, Register rs, Register rt, AArch64Address address) {
+        super.stxr(size, rs, rt, address);
+    }
+
+    @Override
+    protected void ldar(int size, Register rt, AArch64Address address) {
+        super.ldar(size, rt, address);
+    }
+
+    @Override
+    protected void stlr(int size, Register rt, AArch64Address address) {
+        super.stlr(size, rt, address);
+    }
+
+    @Override
+    public void ldaxr(int size, Register rt, AArch64Address address) {
+        super.ldaxr(size, rt, address);
+    }
+
+    @Override
+    public void stlxr(int size, Register rs, Register rt, AArch64Address address) {
+        super.stlxr(size, rs, rt, address);
+    }
+
+    @Override
+    public void adr(Register dst, int imm21) {
+        super.adr(dst, imm21);
+    }
+
+    @Override
+    protected void add(int size, Register dst, Register src, int aimm) {
+        super.add(size, dst, src, aimm);
+    }
+
+    @Override
+    protected void adds(int size, Register dst, Register src, int aimm) {
+        super.adds(size, dst, src, aimm);
+    }
+
+    @Override
+    protected void sub(int size, Register dst, Register src, int aimm) {
+        super.sub(size, dst, src, aimm);
+    }
+
+    @Override
+    protected void subs(int size, Register dst, Register src, int aimm) {
+        super.subs(size, dst, src, aimm);
+    }
+
+    @Override
+    public void and(int size, Register dst, Register src, long bimm) {
+        super.and(size, dst, src, bimm);
+    }
+
+    @Override
+    public void eor(int size, Register dst, Register src, long bimm) {
+        super.eor(size, dst, src, bimm);
+    }
+
+    @Override
+    protected void orr(int size, Register dst, Register src, long bimm) {
+        super.orr(size, dst, src, bimm);
+    }
+
+    @Override
+    protected void movz(int size, Register dst, int uimm16, int shiftAmt) {
+        super.movz(size, dst, uimm16, shiftAmt);
+    }
+
+    @Override
+    protected void movn(int size, Register dst, int uimm16, int shiftAmt) {
+        super.movn(size, dst, uimm16, shiftAmt);
+    }
+
+    @Override
+    protected void movk(int size, Register dst, int uimm16, int pos) {
+        super.movk(size, dst, uimm16, pos);
+    }
+
+    @Override
+    protected void bfm(int size, Register dst, Register src, int r, int s) {
+        super.bfm(size, dst, src, r, s);
+    }
+
+    @Override
+    protected void ubfm(int size, Register dst, Register src, int r, int s) {
+        super.ubfm(size, dst, src, r, s);
+    }
+
+    @Override
+    protected void sbfm(int size, Register dst, Register src, int r, int s) {
+        super.sbfm(size, dst, src, r, s);
+    }
+
+    @Override
+    protected void extr(int size, Register dst, Register src1, Register src2, int lsb) {
+        super.extr(size, dst, src1, src2, lsb);
+    }
+
+    @Override
+    protected void adds(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int imm) {
+        super.adds(size, dst, src1, src2, shiftType, imm);
+    }
+
+    @Override
+    protected void subs(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int imm) {
+        super.subs(size, dst, src1, src2, shiftType, imm);
+    }
+
+    @Override
+    protected void add(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int imm) {
+        super.add(size, dst, src1, src2, shiftType, imm);
+    }
+
+    @Override
+    protected void sub(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int imm) {
+        super.sub(size, dst, src1, src2, shiftType, imm);
+    }
+
+    @Override
+    public void add(int size, Register dst, Register src1, Register src2, ExtendType extendType, int shiftAmt) {
+        super.add(size, dst, src1, src2, extendType, shiftAmt);
+    }
+
+    @Override
+    protected void adds(int size, Register dst, Register src1, Register src2, ExtendType extendType, int shiftAmt) {
+        super.adds(size, dst, src1, src2, extendType, shiftAmt);
+    }
+
+    @Override
+    protected void sub(int size, Register dst, Register src1, Register src2, ExtendType extendType, int shiftAmt) {
+        super.sub(size, dst, src1, src2, extendType, shiftAmt);
+    }
+
+    @Override
+    protected void subs(int size, Register dst, Register src1, Register src2, ExtendType extendType, int shiftAmt) {
+        super.subs(size, dst, src1, src2, extendType, shiftAmt);
+    }
+
+    @Override
+    protected void and(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
+        super.and(size, dst, src1, src2, shiftType, shiftAmt);
+    }
+
+    @Override
+    protected void ands(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
+        super.ands(size, dst, src1, src2, shiftType, shiftAmt);
+    }
+
+    @Override
+    protected void bic(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
+        super.bic(size, dst, src1, src2, shiftType, shiftAmt);
+    }
+
+    @Override
+    protected void bics(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
+        super.bics(size, dst, src1, src2, shiftType, shiftAmt);
+    }
+
+    @Override
+    protected void eon(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
+        super.eon(size, dst, src1, src2, shiftType, shiftAmt);
+    }
+
+    @Override
+    protected void eor(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
+        super.eor(size, dst, src1, src2, shiftType, shiftAmt);
+    }
+
+    @Override
+    protected void orr(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
+        super.orr(size, dst, src1, src2, shiftType, shiftAmt);
+    }
+
+    @Override
+    protected void orn(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
+        super.orn(size, dst, src1, src2, shiftType, shiftAmt);
+    }
+
+    @Override
+    protected void asr(int size, Register dst, Register src1, Register src2) {
+        super.asr(size, dst, src1, src2);
+    }
+
+    @Override
+    protected void lsl(int size, Register dst, Register src1, Register src2) {
+        super.lsl(size, dst, src1, src2);
+    }
+
+    @Override
+    protected void lsr(int size, Register dst, Register src1, Register src2) {
+        super.lsr(size, dst, src1, src2);
+    }
+
+    @Override
+    protected void ror(int size, Register dst, Register src1, Register src2) {
+        super.ror(size, dst, src1, src2);
+    }
+
+    @Override
+    protected void cls(int size, Register dst, Register src) {
+        super.cls(size, dst, src);
+    }
+
+    @Override
+    public void clz(int size, Register dst, Register src) {
+        super.clz(size, dst, src);
+    }
+
+    @Override
+    protected void rbit(int size, Register dst, Register src) {
+        super.rbit(size, dst, src);
+    }
+
+    @Override
+    public void rev(int size, Register dst, Register src) {
+        super.rev(size, dst, src);
+    }
+
+    @Override
+    protected void csel(int size, Register dst, Register src1, Register src2, ConditionFlag condition) {
+        super.csel(size, dst, src1, src2, condition);
+    }
+
+    @Override
+    protected void csneg(int size, Register dst, Register src1, Register src2, ConditionFlag condition) {
+        super.csneg(size, dst, src1, src2, condition);
+    }
+
+    @Override
+    protected void csinc(int size, Register dst, Register src1, Register src2, ConditionFlag condition) {
+        super.csinc(size, dst, src1, src2, condition);
+    }
+
+    @Override
+    protected void madd(int size, Register dst, Register src1, Register src2, Register src3) {
+        super.madd(size, dst, src1, src2, src3);
+    }
+
+    @Override
+    protected void msub(int size, Register dst, Register src1, Register src2, Register src3) {
+        super.msub(size, dst, src1, src2, src3);
+    }
+
+    @Override
+    public void sdiv(int size, Register dst, Register src1, Register src2) {
+        super.sdiv(size, dst, src1, src2);
+    }
+
+    @Override
+    public void udiv(int size, Register dst, Register src1, Register src2) {
+        super.udiv(size, dst, src1, src2);
+    }
+
+    @Override
+    public void fldr(int size, Register rt, AArch64Address address) {
+        super.fldr(size, rt, address);
+    }
+
+    @Override
+    public void fstr(int size, Register rt, AArch64Address address) {
+        super.fstr(size, rt, address);
+    }
+
+    @Override
+    protected void fmov(int size, Register dst, Register src) {
+        super.fmov(size, dst, src);
+    }
+
+    @Override
+    protected void fmovFpu2Cpu(int size, Register dst, Register src) {
+        super.fmovFpu2Cpu(size, dst, src);
+    }
+
+    @Override
+    protected void fmovCpu2Fpu(int size, Register dst, Register src) {
+        super.fmovCpu2Fpu(size, dst, src);
+    }
+
+    @Override
+    protected void fmov(int size, Register dst, double imm) {
+        super.fmov(size, dst, imm);
+    }
+
+    @Override
+    public void fcvt(int srcSize, Register dst, Register src) {
+        super.fcvt(srcSize, dst, src);
+    }
+
+    @Override
+    public void fcvtzs(int targetSize, int srcSize, Register dst, Register src) {
+        super.fcvtzs(targetSize, srcSize, dst, src);
+    }
+
+    @Override
+    public void scvtf(int targetSize, int srcSize, Register dst, Register src) {
+        super.scvtf(targetSize, srcSize, dst, src);
+    }
+
+    @Override
+    protected void frintz(int size, Register dst, Register src) {
+        super.frintz(size, dst, src);
+    }
+
+    @Override
+    public void fabs(int size, Register dst, Register src) {
+        super.fabs(size, dst, src);
+    }
+
+    @Override
+    public void fneg(int size, Register dst, Register src) {
+        super.fneg(size, dst, src);
+    }
+
+    @Override
+    public void fsqrt(int size, Register dst, Register src) {
+        super.fsqrt(size, dst, src);
+    }
+
+    @Override
+    public void fadd(int size, Register dst, Register src1, Register src2) {
+        super.fadd(size, dst, src1, src2);
+    }
+
+    @Override
+    public void fsub(int size, Register dst, Register src1, Register src2) {
+        super.fsub(size, dst, src1, src2);
+    }
+
+    @Override
+    public void fmul(int size, Register dst, Register src1, Register src2) {
+        super.fmul(size, dst, src1, src2);
+    }
+
+    @Override
+    public void fdiv(int size, Register dst, Register src1, Register src2) {
+        super.fdiv(size, dst, src1, src2);
+    }
+
+    @Override
+    protected void fmadd(int size, Register dst, Register src1, Register src2, Register src3) {
+        super.fmadd(size, dst, src1, src2, src3);
+    }
+
+    @Override
+    protected void fmsub(int size, Register dst, Register src1, Register src2, Register src3) {
+        super.fmsub(size, dst, src1, src2, src3);
+    }
+
+    @Override
+    public void fcmp(int size, Register src1, Register src2) {
+        super.fcmp(size, src1, src2);
+    }
+
+    @Override
+    public void fccmp(int size, Register src1, Register src2, int uimm4, ConditionFlag condition) {
+        super.fccmp(size, src1, src2, uimm4, condition);
+    }
+
+    @Override
+    public void fcmpZero(int size, Register src) {
+        super.fcmpZero(size, src);
+    }
+
+    @Override
+    protected void fcsel(int size, Register dst, Register src1, Register src2, ConditionFlag condition) {
+        super.fcsel(size, dst, src1, src2, condition);
+    }
+
+    @Override
+    protected void hlt(int uimm16) {
+        super.hlt(uimm16);
+    }
+
+    @Override
+    protected void brk(int uimm16) {
+        super.brk(uimm16);
+    }
+
+    @Override
+    protected void hint(SystemHint hint) {
+        super.hint(hint);
+    }
+
+    @Override
+    protected void clrex() {
+        super.clrex();
+    }
+
+    @Override
+    public void dmb(BarrierKind barrierKind) {
+        super.dmb(barrierKind);
+    }
+
+    @Override
+    public void align(int modulus) {
+    }
+
+    @Override
+    public void jmp(Label l) {
+    }
+
+    @Override
+    protected void patchJumpTarget(int branch, int jumpTarget) {
+
+    }
+
+    @Override
+    public AbstractAddress makeAddress(Register base, int displacement) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public AbstractAddress getPlaceholder() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void ensureUniquePC() {
+        throw new UnsupportedOperationException();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm.aarch64/src/com/oracle/graal/asm/aarch64/AArch64Address.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.asm.aarch64;
+
+import static jdk.vm.ci.aarch64.AArch64.zr;
+
+import jdk.vm.ci.aarch64.AArch64;
+import jdk.vm.ci.code.Register;
+import com.oracle.graal.asm.NumUtil;
+import com.oracle.graal.asm.AbstractAddress;
+
+import jdk.vm.ci.common.JVMCIError;
+
+/**
+ * Represents an address in target machine memory, specified using one of the different addressing
+ * modes of the AArch64 ISA:
+ * <ul>
+ * <li>Base register only</li>
+ * <li>Base register + immediate or register with shifted offset</li>
+ * <li>Pre-indexed: base + immediate offset are written back to the base register, the value used
+ * in the instruction is base + offset</li>
+ * <li>Post-indexed: base + offset (immediate or register) are written back to the base register,
+ * the value used in the instruction is base only</li>
+ * <li>Literal: PC + 19-bit signed word-aligned offset</li>
+ * </ul>
+ * <p>
+ * Not all addressing modes are supported for all instructions.
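+ * <p>
+ * For illustration, a sketch of typical uses via the factory methods defined below (the register
+ * choices are arbitrary):
+ *
+ * <pre>
+ * // [x10]: base register only
+ * AArch64Address plain = AArch64Address.createBaseRegisterOnlyAddress(AArch64.r10);
+ * // [x10,16]!: pre-indexed, x10 is updated to x10 + 16 before the access
+ * AArch64Address pre = AArch64Address.createPreIndexedImmediateAddress(AArch64.r10, 16);
+ * // [x10, w13, SXTW]: base plus sign-extended word register offset
+ * AArch64Address ext = AArch64Address.createExtendedRegisterOffsetAddress(AArch64.r10, AArch64.r13, false, AArch64Assembler.ExtendType.SXTW);
+ * </pre>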
+ */
+public final class AArch64Address extends AbstractAddress {
+    // Placeholder for addresses that get patched later.
+    public static final AArch64Address PLACEHOLDER = createPcLiteralAddress(0);
+
+    public enum AddressingMode {
+        /**
+         * base + uimm12 << log2(memory_transfer_size).
+         */
+        IMMEDIATE_SCALED,
+        /**
+         * base + imm9.
+         */
+        IMMEDIATE_UNSCALED,
+        /**
+         * base.
+         */
+        BASE_REGISTER_ONLY,
+        /**
+         * base + offset [<< log2(memory_transfer_size)].
+         */
+        REGISTER_OFFSET,
+        /**
+         * base + extend(offset) [<< log2(memory_transfer_size)].
+         */
+        EXTENDED_REGISTER_OFFSET,
+        /**
+         * PC + imm21 (word aligned).
+         */
+        PC_LITERAL,
+        /**
+         * address = base. base is updated to base + imm9
+         */
+        IMMEDIATE_POST_INDEXED,
+        /**
+         * address = base + imm9. base is updated to base + imm9
+         */
+        IMMEDIATE_PRE_INDEXED,
+    }
+
+    private final Register base;
+    private final Register offset;
+    private final int immediate;
+    /**
+     * Should register offset be scaled or not.
+     */
+    private final boolean scaled;
+    private final AArch64Assembler.ExtendType extendType;
+    private final AddressingMode addressingMode;
+
+    /**
+     * General address generation mechanism. Accepted values for all parameters depend on the
+     * addressingMode. Null is never accepted for a register; if an addressingMode does not use a
+     * register, the register has to be the zero-register. extendType has to be null for every
+     * addressingMode except EXTENDED_REGISTER_OFFSET.
+     */
+    public static AArch64Address createAddress(AddressingMode addressingMode, Register base, Register offset, int immediate, boolean isScaled, AArch64Assembler.ExtendType extendType) {
+        return new AArch64Address(base, offset, immediate, isScaled, extendType, addressingMode);
+    }
+
+    /**
+     * @param base may not be null or the zero-register.
+     * @param imm9 Signed 9 bit immediate value.
+     * @return AArch64Address specifying a post-indexed immediate address pointing to base. After
+     *         ldr/str instruction, base is updated to point to base + imm9
+     */
+    public static AArch64Address createPostIndexedImmediateAddress(Register base, int imm9) {
+        return new AArch64Address(base, zr, imm9, false, null, AddressingMode.IMMEDIATE_POST_INDEXED);
+    }
+
+    /**
+     * @param base may not be null or the zero-register.
+     * @param imm9 Signed 9 bit immediate value.
+     * @return AArch64Address specifying a pre-indexed immediate address pointing to base + imm9.
+     *         After ldr/str instruction, base is updated to point to base + imm9
+     */
+    public static AArch64Address createPreIndexedImmediateAddress(Register base, int imm9) {
+        return new AArch64Address(base, zr, imm9, false, null, AddressingMode.IMMEDIATE_PRE_INDEXED);
+    }
+
+    /**
+     * @param base may not be null or the zero-register.
+     * @param imm12 Unsigned 12 bit immediate value. This is scaled by the word access size. This
+     *            means if this address is used to load/store a word, the immediate is shifted by 2
+     *            (log2Ceil(4)).
+     * @return AArch64Address specifying a scaled address of the form base + imm12 <<
+     *         log2(memory_transfer_size).
+     */
+    public static AArch64Address createScaledImmediateAddress(Register base, int imm12) {
+        return new AArch64Address(base, zr, imm12, true, null, AddressingMode.IMMEDIATE_SCALED);
+    }
+
+    /**
+     * @param base may not be null or the zero-register.
+     * @param imm9 Signed 9 bit immediate value.
+     * @return AArch64Address specifying an unscaled immediate address of the form base + imm9
+     */
+    public static AArch64Address createUnscaledImmediateAddress(Register base, int imm9) {
+        return new AArch64Address(base, zr, imm9, false, null, AddressingMode.IMMEDIATE_UNSCALED);
+    }
+
+    /**
+     * @param base May not be null or the zero register.
+     * @return AArch64Address specifying the address pointed to by base.
+     */
+    public static AArch64Address createBaseRegisterOnlyAddress(Register base) {
+        return createRegisterOffsetAddress(base, zr, false);
+    }
+
+    /**
+     * @param base may not be null or the zero-register.
+     * @param offset Register specifying some offset, optionally scaled by the memory_transfer_size.
+     *            May not be null or the stack pointer.
+     * @param scaled Specifies whether offset should be scaled by memory_transfer_size or not.
+     * @return AArch64Address specifying a register offset address of the form base + offset [<<
+     *         log2(memory_transfer_size)]
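+     *         (for example, with {@code scaled == true} and an 8-byte transfer size the effective
+     *         address is base + (offset << 3))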
+     */
+    public static AArch64Address createRegisterOffsetAddress(Register base, Register offset, boolean scaled) {
+        return new AArch64Address(base, offset, 0, scaled, null, AddressingMode.REGISTER_OFFSET);
+    }
+
+    /**
+     * @param base may not be null or the zero-register.
+     * @param offset Word register specifying some offset, optionally scaled by the
+     *            memory_transfer_size. May not be null or the stack pointer.
+     * @param scaled Specifies whether offset should be scaled by memory_transfer_size or not.
+     * @param extendType Describes whether register is zero- or sign-extended. May not be null.
+     * @return AArch64Address specifying an extended register offset of the form base +
+     *         extendType(offset) [<< log2(memory_transfer_size)]
+     */
+    public static AArch64Address createExtendedRegisterOffsetAddress(Register base, Register offset, boolean scaled, AArch64Assembler.ExtendType extendType) {
+        return new AArch64Address(base, offset, 0, scaled, extendType, AddressingMode.EXTENDED_REGISTER_OFFSET);
+    }
+
+    /**
+     * @param imm21 Signed 21-bit offset, word aligned.
+     * @return AArch64Address specifying a PC-literal address of the form PC + offset
+     */
+    public static AArch64Address createPcLiteralAddress(int imm21) {
+        return new AArch64Address(zr, zr, imm21, false, null, AddressingMode.PC_LITERAL);
+    }
+
+    private AArch64Address(Register base, Register offset, int immediate, boolean scaled, AArch64Assembler.ExtendType extendType, AddressingMode addressingMode) {
+        this.base = base;
+        this.offset = offset;
+        if ((addressingMode == AddressingMode.REGISTER_OFFSET || addressingMode == AddressingMode.EXTENDED_REGISTER_OFFSET) && offset.equals(zr)) {
+            this.addressingMode = AddressingMode.BASE_REGISTER_ONLY;
+        } else {
+            this.addressingMode = addressingMode;
+        }
+        this.immediate = immediate;
+        this.scaled = scaled;
+        this.extendType = extendType;
+        assert verify();
+    }
+
+    private boolean verify() {
+        assert addressingMode != null;
+        assert base.getRegisterCategory().equals(AArch64.CPU) && offset.getRegisterCategory().equals(AArch64.CPU);
+
+        switch (addressingMode) {
+            case IMMEDIATE_SCALED:
+                return !base.equals(zr) && offset.equals(zr) && extendType == null && NumUtil.isUnsignedNbit(12, immediate);
+            case IMMEDIATE_UNSCALED:
+                return !base.equals(zr) && offset.equals(zr) && extendType == null && NumUtil.isSignedNbit(9, immediate);
+            case BASE_REGISTER_ONLY:
+                return !base.equals(zr) && offset.equals(zr) && extendType == null && immediate == 0;
+            case REGISTER_OFFSET:
+                return !base.equals(zr) && offset.getRegisterCategory().equals(AArch64.CPU) && extendType == null && immediate == 0;
+            case EXTENDED_REGISTER_OFFSET:
+                return !base.equals(zr) && offset.getRegisterCategory().equals(AArch64.CPU) && (extendType == AArch64Assembler.ExtendType.SXTW || extendType == AArch64Assembler.ExtendType.UXTW) &&
+                                immediate == 0;
+            case PC_LITERAL:
+                return base.equals(zr) && offset.equals(zr) && extendType == null && NumUtil.isSignedNbit(21, immediate) && ((immediate & 0x3) == 0);
+            case IMMEDIATE_POST_INDEXED:
+            case IMMEDIATE_PRE_INDEXED:
+                return !base.equals(zr) && offset.equals(zr) && extendType == null && NumUtil.isSignedNbit(9, immediate);
+            default:
+                throw JVMCIError.shouldNotReachHere();
+        }
+    }
+
+    public Register getBase() {
+        return base;
+    }
+
+    public Register getOffset() {
+        return offset;
+    }
+
+    /**
+     * @return immediate in the correct representation for the given addressing mode. For example,
+     *         in case of <code>addressingMode == IMMEDIATE_UNSCALED</code> the value will be
+     *         returned as the 9-bit signed representation.
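+     *         For {@code PC_LITERAL} the lower two bits are shifted out, so a raw immediate of 8
+     *         is returned as 2.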
+     */
+    public int getImmediate() {
+        switch (addressingMode) {
+            case IMMEDIATE_UNSCALED:
+            case IMMEDIATE_POST_INDEXED:
+            case IMMEDIATE_PRE_INDEXED:
+                // 9-bit signed value
+                return immediate & NumUtil.getNbitNumberInt(9);
+            case IMMEDIATE_SCALED:
+                // Unsigned value can be returned as-is.
+                return immediate;
+            case PC_LITERAL:
+                // 21-bit signed value, but lower 2 bits are always 0 and are shifted out.
+                return (immediate >> 2) & NumUtil.getNbitNumberInt(19);
+            default:
+                throw JVMCIError.shouldNotReachHere("Should only be called for addressing modes that use immediate values.");
+        }
+    }
+
+    /**
+     * @return Raw immediate as a 32-bit signed value.
+     */
+    public int getImmediateRaw() {
+        switch (addressingMode) {
+            case IMMEDIATE_UNSCALED:
+            case IMMEDIATE_SCALED:
+            case IMMEDIATE_POST_INDEXED:
+            case IMMEDIATE_PRE_INDEXED:
+            case PC_LITERAL:
+                return immediate;
+            default:
+                throw JVMCIError.shouldNotReachHere("Should only be called for addressing modes that use immediate values.");
+        }
+    }
+
+    public boolean isScaled() {
+        return scaled;
+    }
+
+    public AArch64Assembler.ExtendType getExtendType() {
+        return extendType;
+    }
+
+    public AddressingMode getAddressingMode() {
+        return addressingMode;
+    }
+
+    public String toString(int log2TransferSize) {
+        int shiftVal = scaled ? log2TransferSize : 0;
+        switch (addressingMode) {
+            case IMMEDIATE_SCALED:
+                return String.format("[X%d, %d]", base.encoding, immediate << log2TransferSize);
+            case IMMEDIATE_UNSCALED:
+                return String.format("[X%d, %d]", base.encoding, immediate);
+            case BASE_REGISTER_ONLY:
+                return String.format("[X%d]", base.encoding);
+            case EXTENDED_REGISTER_OFFSET:
+                if (shiftVal != 0) {
+                    return String.format("[X%d, W%d, %s %d]", base.encoding, offset.encoding, extendType.name(), shiftVal);
+                } else {
+                    return String.format("[X%d, W%d, %s]", base.encoding, offset.encoding, extendType.name());
+                }
+            case REGISTER_OFFSET:
+                if (shiftVal != 0) {
+                    return String.format("[X%d, X%d, LSL %d]", base.encoding, offset.encoding, shiftVal);
+                } else {
+                    // LSL 0 may be optional, but it is still encoded differently, so we always
+                    // leave it off.
+                    return String.format("[X%d, X%d]", base.encoding, offset.encoding);
+                }
+            case PC_LITERAL:
+                return String.format(".%s%d", immediate >= 0 ? "+" : "", immediate);
+            case IMMEDIATE_POST_INDEXED:
+                return String.format("[X%d],%d", base.encoding, immediate);
+            case IMMEDIATE_PRE_INDEXED:
+                return String.format("[X%d,%d]!", base.encoding, immediate);
+            default:
+                throw JVMCIError.shouldNotReachHere();
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm.aarch64/src/com/oracle/graal/asm/aarch64/AArch64Assembler.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,2490 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.asm.aarch64;
+
+import static com.oracle.graal.asm.aarch64.AArch64Assembler.InstructionType.floatFromSize;
+import static com.oracle.graal.asm.aarch64.AArch64Assembler.InstructionType.generalFromSize;
+import static jdk.vm.ci.aarch64.AArch64.CPU;
+import static jdk.vm.ci.aarch64.AArch64.SIMD;
+import static jdk.vm.ci.aarch64.AArch64.sp;
+import static jdk.vm.ci.aarch64.AArch64.zr;
+
+import java.util.Arrays;
+
+import com.oracle.graal.asm.Assembler;
+import com.oracle.graal.asm.NumUtil;
+import com.oracle.graal.asm.aarch64.AArch64Address.AddressingMode;
+
+import jdk.vm.ci.aarch64.AArch64;
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.code.TargetDescription;
+import jdk.vm.ci.common.JVMCIError;
+import jdk.vm.ci.meta.JavaKind;
+
+public abstract class AArch64Assembler extends Assembler {
+
+    public static class LogicalImmediateTable {
+
+        private static final Immediate[] IMMEDIATE_TABLE = buildImmediateTable();
+
+        private static final int ImmediateOffset = 10;
+        private static final int ImmediateRotateOffset = 16;
+        private static final int ImmediateSizeOffset = 22;
+
+        /**
+         * Specifies whether an immediate can be represented in all cases (YES), only as a 64-bit
+         * instruction (SIXTY_FOUR_BIT_ONLY), or not at all (NO).
+         */
+        enum Representable {
+            YES,
+            SIXTY_FOUR_BIT_ONLY,
+            NO
+        }
+
+        /**
+         * Tests whether an immediate can be encoded for logical instructions.
+         *
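+         * For example, {@code 0xffffffffL} fails as a 32-bit immediate but can be matched as a
+         * 64-bit pattern, so {@code isRepresentable(false, 0xffffffffL)} returns
+         * {@code SIXTY_FOUR_BIT_ONLY}.
+         *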
+         * @param is64bit if true, the immediate is considered a 64-bit pattern. If false, we may
+         *            use a 64-bit instruction to load the 32-bit pattern into a register.
+         * @return enum specifying whether the immediate can be used for 32- and 64-bit logical
+         *         instructions ({@code Representable.YES}), for 64-bit instructions only
+         *         ({@code Representable.SIXTY_FOUR_BIT_ONLY}) or not at all
+         *         ({@code Representable.NO}).
+         */
+        public static Representable isRepresentable(boolean is64bit, long immediate) {
+            int pos = getLogicalImmTablePos(is64bit, immediate);
+            if (pos < 0) {
+                // if 32bit instruction we can try again as 64bit immediate which may succeed.
+                // i.e. 0xffffffff fails as a 32bit immediate but works as 64bit one.
+                if (!is64bit) {
+                    assert NumUtil.isUnsignedNbit(32, immediate);
+                    pos = getLogicalImmTablePos(true, immediate);
+                    return pos >= 0 ? Representable.SIXTY_FOUR_BIT_ONLY : Representable.NO;
+                }
+                return Representable.NO;
+            }
+            Immediate imm = IMMEDIATE_TABLE[pos];
+            return imm.only64bit() ? Representable.SIXTY_FOUR_BIT_ONLY : Representable.YES;
+        }
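+
+        // Worked examples (illustrative): isRepresentable(false, 0xffL) yields YES, since 0xff
+        // is a valid bitmask immediate for both operand sizes, whereas
+        // isRepresentable(false, 0xffffffffL) yields SIXTY_FOUR_BIT_ONLY: all-ones is not
+        // encodable as a 32-bit pattern, but 0x00000000ffffffffL is encodable as a 64-bit one.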
+
+        public static Representable isRepresentable(int immediate) {
+            return isRepresentable(false, immediate & 0xFFFF_FFFFL);
+        }
+
+        public static int getLogicalImmEncoding(boolean is64bit, long value) {
+            int pos = getLogicalImmTablePos(is64bit, value);
+            assert pos >= 0 : "Value cannot be represented as logical immediate";
+            Immediate imm = IMMEDIATE_TABLE[pos];
+            assert is64bit || !imm.only64bit() : "Immediate can only be represented for 64bit, but 32bit instruction specified";
+            return IMMEDIATE_TABLE[pos].encoding;
+        }
+
+        /**
+         * @param is64bit if true also allow 64-bit only encodings to be returned.
+         * @return If positive the return value is the position into the IMMEDIATE_TABLE for the
+         *         given immediate, if negative the immediate cannot be encoded.
+         */
+        private static int getLogicalImmTablePos(boolean is64bit, long value) {
+            Immediate imm;
+            if (!is64bit) {
+                // 32bit instructions can only have 32bit immediates.
+                if (!NumUtil.isUnsignedNbit(32, value)) {
+                    return -1;
+                }
+                // If we have a 32bit instruction (and therefore immediate) we have to duplicate it
+                // across 64bit to find it in the table.
+                imm = new Immediate(value << 32 | value);
+            } else {
+                imm = new Immediate(value);
+            }
+            int pos = Arrays.binarySearch(IMMEDIATE_TABLE, imm);
+            if (pos < 0) {
+                return -1;
+            }
+            if (!is64bit && IMMEDIATE_TABLE[pos].only64bit()) {
+                return -1;
+            }
+            return pos;
+        }
+
+        /**
+         * To quote 5.4.2: [..] an immediate is a 32 or 64 bit pattern viewed as a vector of
+         * identical elements of size e = 2, 4, 8, 16, 32 or (in the case of bimm64) 64 bits. Each
+         * element contains the same sub-pattern: a single run of 1 to e-1 non-zero bits, rotated by
+         * 0 to e-1 bits.
+         *
+         * <pre>
+         * The encoding within the instruction word is:
+         * bits 10..15: a combination of the number of set bits and the pattern size, where
+         *              x stores the number of 1 bits - 1:
+         *              e   pattern
+         *              2   1111xx
+         *              4   1110xx
+         *              8   110xxx
+         *              16  10xxxx
+         *              32  0xxxxx
+         *              64  xxxxxx
+         * bits 16..21: rotation amount (6 bit), rotating right, starting from 1s in the LSB
+         *              (i.e. 0111 -> 1011 -> 1101 -> 1110)
+         * bit 22:      set if the immediate is a 64-bit pattern
+         * </pre>
+         */
+        private static final class Immediate implements Comparable<Immediate> {
+            public final long imm;
+            public final int encoding;
+
+            public Immediate(long imm, boolean is64, int s, int r) {
+                this.imm = imm;
+                this.encoding = computeEncoding(is64, s, r);
+            }
+
+            // Used to be able to binary search for an immediate in the table.
+            public Immediate(long imm) {
+                this(imm, false, 0, 0);
+            }
+
+            /**
+             * Returns true if this pattern is only representable as 64bit.
+             */
+            public boolean only64bit() {
+                return (encoding & (1 << ImmediateSizeOffset)) != 0;
+            }
+
+            private static int computeEncoding(boolean is64, int s, int r) {
+                int sf = is64 ? 1 : 0;
+                return sf << ImmediateSizeOffset | r << ImmediateRotateOffset | s << ImmediateOffset;
+            }
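+
+            // Worked example (illustrative): 0x00000000ffffffffL is the e = 64 element with 32
+            // set bits and rotation 0, so s = 0b011111 (31), r = 0 and the 64-bit size bit at
+            // ImmediateSizeOffset is set.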
+
+            @Override
+            public int compareTo(Immediate o) {
+                return Long.compare(imm, o.imm);
+            }
+        }
+
+        private static Immediate[] buildImmediateTable() {
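+            // Table size: each element size e in {2, 4, 8, 16, 32, 64} contributes e - 1 run
+            // lengths times e rotations, i.e. e * (e - 1) patterns, and
+            // 2 * 1 + 4 * 3 + 8 * 7 + 16 * 15 + 32 * 31 + 64 * 63 = 5334.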
+            final int nrImmediates = 5334;
+            final Immediate[] table = new Immediate[nrImmediates];
+            int nrImms = 0;
+            for (int logE = 1; logE <= 6; logE++) {
+                int e = 1 << logE;
+                long mask = NumUtil.getNbitNumberLong(e);
+                for (int nrOnes = 1; nrOnes < e; nrOnes++) {
+                    long val = (1L << nrOnes) - 1;
+                    // r specifies how much we rotate the value
+                    for (int r = 0; r < e; r++) {
+                        long immediate = (val >>> r | val << (e - r)) & mask;
+                        // Duplicate pattern to fill whole 64bit range.
+                        switch (logE) {
+                            case 1:
+                                immediate |= immediate << 2;
+                                immediate |= immediate << 4;
+                                immediate |= immediate << 8;
+                                immediate |= immediate << 16;
+                                immediate |= immediate << 32;
+                                break;
+                            case 2:
+                                immediate |= immediate << 4;
+                                immediate |= immediate << 8;
+                                immediate |= immediate << 16;
+                                immediate |= immediate << 32;
+                                break;
+                            case 3:
+                                immediate |= immediate << 8;
+                                immediate |= immediate << 16;
+                                immediate |= immediate << 32;
+                                break;
+                            case 4:
+                                immediate |= immediate << 16;
+                                immediate |= immediate << 32;
+                                break;
+                            case 5:
+                                immediate |= immediate << 32;
+                                break;
+                        }
+                        // For logE == 6, 5 - logE is -1 and (1 << -1) is 1 << 31 in Java, but the
+                        // bogus high bits are shifted out of the 6-bit masked area below.
+                        int sizeEncoding = (1 << (5 - logE)) - 1;
+                        int s = ((sizeEncoding << (logE + 1)) & 0x3f) | (nrOnes - 1);
+                        table[nrImms++] = new Immediate(immediate, /* is64bit */e == 64, s, r);
+                    }
+                }
+            }
+            Arrays.sort(table);
+            assert nrImms == nrImmediates : nrImms + " instead of " + nrImmediates + " in table.";
+            assert checkDuplicates(table) : "Duplicate values in table.";
+            return table;
+        }
+
+        private static boolean checkDuplicates(Immediate[] table) {
+            for (int i = 0; i < table.length - 1; i++) {
+                if (table[i].imm >= table[i + 1].imm) {
+                    return false;
+                }
+            }
+            return true;
+        }
+    }
+
+    private static final int RdOffset = 0;
+    private static final int Rs1Offset = 5;
+    private static final int Rs2Offset = 16;
+    private static final int Rs3Offset = 10;
+    private static final int RtOffset = 0;
+
+    /**
+     * Enumeration of all different instruction kinds: General32/64 are the general instructions
+     * (integer, branch, etc.) for 32-bit and 64-bit operands, respectively. FP32/64 are the
+     * encodings for the 32-bit and 64-bit floating-point operations.
+     */
+    protected enum InstructionType {
+        General32(0x00000000, 32, true),
+        General64(0x80000000, 64, true),
+        FP32(0x00000000, 32, false),
+        FP64(0x00400000, 64, false);
+
+        public final int encoding;
+        public final int width;
+        public final boolean isGeneral;
+
+        private InstructionType(int encoding, int width, boolean isGeneral) {
+            this.encoding = encoding;
+            this.width = width;
+            this.isGeneral = isGeneral;
+        }
+
+        public static InstructionType generalFromSize(int size) {
+            assert size == 32 || size == 64;
+            return size == 32 ? General32 : General64;
+        }
+
+        public static InstructionType floatFromSize(int size) {
+            assert size == 32 || size == 64;
+            return size == 32 ? FP32 : FP64;
+        }
+
+    }
+
+    private static final int ImmediateOffset = 10;
+    private static final int ImmediateRotateOffset = 16;
+    private static final int ImmediateSizeOffset = 22;
+    private static final int ExtendTypeOffset = 13;
+
+    private static final int AddSubImmOp = 0x11000000;
+    // If 1 the immediate is interpreted as being left-shifted by 12 bits.
+    private static final int AddSubShiftOffset = 22;
+    private static final int AddSubSetFlag = 0x20000000;
+
+    private static final int LogicalImmOp = 0x12000000;
+
+    private static final int MoveWideImmOp = 0x12800000;
+    private static final int MoveWideImmOffset = 5;
+    private static final int MoveWideShiftOffset = 21;
+
+    private static final int BitfieldImmOp = 0x13000000;
+
+    private static final int AddSubShiftedOp = 0x0B000000;
+    private static final int ShiftTypeOffset = 22;
+
+    private static final int AddSubExtendedOp = 0x0B200000;
+
+    private static final int MulOp = 0x1B000000;
+    private static final int DataProcessing1SourceOp = 0x5AC00000;
+    private static final int DataProcessing2SourceOp = 0x1AC00000;
+
+    private static final int Fp1SourceOp = 0x1E204000;
+    private static final int Fp2SourceOp = 0x1E200800;
+    private static final int Fp3SourceOp = 0x1F000000;
+
+    private static final int FpConvertOp = 0x1E200000;
+    private static final int FpImmOp = 0x1E201000;
+    private static final int FpImmOffset = 13;
+
+    private static final int FpCmpOp = 0x1E202000;
+
+    private static final int PcRelImmHiOffset = 5;
+    private static final int PcRelImmLoOffset = 29;
+
+    private static final int PcRelImmOp = 0x10000000;
+
+    private static final int UnconditionalBranchImmOp = 0x14000000;
+    private static final int UnconditionalBranchRegOp = 0xD6000000;
+    private static final int CompareBranchOp = 0x34000000;
+
+    private static final int ConditionalBranchImmOffset = 5;
+
+    private static final int ConditionalSelectOp = 0x1A800000;
+    private static final int ConditionalConditionOffset = 12;
+
+    private static final int LoadStoreScaledOp = 0x39000000;
+    private static final int LoadStoreUnscaledOp = 0x38000000;
+    private static final int LoadStoreRegisterOp = 0x38200800;
+    private static final int LoadLiteralOp = 0x18000000;
+    private static final int LoadStorePostIndexedOp = 0x38000400;
+    private static final int LoadStorePreIndexedOp = 0x38000C00;
+
+    private static final int LoadStoreUnscaledImmOffset = 12;
+    private static final int LoadStoreScaledImmOffset = 10;
+    private static final int LoadStoreScaledRegOffset = 12;
+    private static final int LoadStoreIndexedImmOffset = 12;
+    private static final int LoadStoreTransferSizeOffset = 30;
+    private static final int LoadStoreFpFlagOffset = 26;
+    private static final int LoadLiteralImmeOffset = 5;
+
+    private static final int LogicalShiftOp = 0x0A000000;
+
+    private static final int ExceptionOp = 0xD4000000;
+    private static final int SystemImmediateOffset = 5;
+
+    @SuppressWarnings("unused") private static final int SimdImmediateOffset = 16;
+
+    private static final int BarrierOp = 0xD503301F;
+    private static final int BarrierKindOffset = 8;
+
+    /**
+     * Encoding for all instructions.
+     */
+    private enum Instruction {
+        BCOND(0x54000000),
+        CBNZ(0x01000000),
+        CBZ(0x00000000),
+
+        B(0x00000000),
+        BL(0x80000000),
+        BR(0x001F0000),
+        BLR(0x003F0000),
+        RET(0x005F0000),
+
+        LDR(0x00000000),
+        LDRS(0x00800000),
+        LDXR(0x081f7c00),
+        LDAR(0x8dffc00),
+        LDAXR(0x85ffc00),
+
+        STR(0x00000000),
+        STXR(0x08007c00),
+        STLR(0x089ffc00),
+        STLXR(0x0800fc00),
+
+        ADR(0x00000000),
+        ADRP(0x80000000),
+
+        ADD(0x00000000),
+        ADDS(ADD.encoding | AddSubSetFlag),
+        SUB(0x40000000),
+        SUBS(SUB.encoding | AddSubSetFlag),
+
+        NOT(0x00200000),
+        AND(0x00000000),
+        BIC(AND.encoding | NOT.encoding),
+        ORR(0x20000000),
+        ORN(ORR.encoding | NOT.encoding),
+        EOR(0x40000000),
+        EON(EOR.encoding | NOT.encoding),
+        ANDS(0x60000000),
+        BICS(ANDS.encoding | NOT.encoding),
+
+        ASRV(0x00002800),
+        RORV(0x00002C00),
+        LSRV(0x00002400),
+        LSLV(0x00002000),
+
+        CLS(0x00001400),
+        CLZ(0x00001000),
+        RBIT(0x00000000),
+        REVX(0x00000C00),
+        REVW(0x00000800),
+
+        MOVN(0x00000000),
+        MOVZ(0x40000000),
+        MOVK(0x60000000),
+
+        CSEL(0x00000000),
+        CSNEG(0x40000400),
+        CSINC(0x00000400),
+
+        BFM(0x20000000),
+        SBFM(0x00000000),
+        UBFM(0x40000000),
+        EXTR(0x13800000),
+
+        MADD(0x00000000),
+        MSUB(0x00008000),
+        SDIV(0x00000C00),
+        UDIV(0x00000800),
+
+        FMOV(0x00000000),
+        FMOVCPU2FPU(0x00070000),
+        FMOVFPU2CPU(0x00060000),
+
+        FCVTDS(0x00028000),
+        FCVTSD(0x00020000),
+
+        FCVTZS(0x00180000),
+        SCVTF(0x00020000),
+
+        FABS(0x00008000),
+        FSQRT(0x00018000),
+        FNEG(0x00010000),
+
+        FRINTZ(0x00058000),
+
+        FADD(0x00002000),
+        FSUB(0x00003000),
+        FMUL(0x00000000),
+        FDIV(0x00001000),
+        FMAX(0x00004000),
+        FMIN(0x00005000),
+
+        FMADD(0x00000000),
+        FMSUB(0x00008000),
+
+        FCMP(0x00000000),
+        FCMPZERO(0x00000008),
+        FCCMP(0x1E200400),
+        FCSEL(0x1E200C00),
+
+        INS(0x4e081c00),
+        UMOV(0x4e083c00),
+
+        CNT(0xe205800),
+        USRA(0x6f001400),
+
+        HLT(0x00400000),
+        BRK(0x00200000),
+
+        CLREX(0xd5033f5f),
+        HINT(0xD503201F),
+        DMB(0x000000A0),
+
+        BLR_NATIVE(0xc0000000);
+
+        public final int encoding;
+
+        private Instruction(int encoding) {
+            this.encoding = encoding;
+        }
+
+    }
+
+    public enum ShiftType {
+        LSL(0),
+        LSR(1),
+        ASR(2),
+        ROR(3);
+
+        public final int encoding;
+
+        private ShiftType(int encoding) {
+            this.encoding = encoding;
+        }
+    }
+
+    public enum ExtendType {
+        UXTB(0),
+        UXTH(1),
+        UXTW(2),
+        UXTX(3),
+        SXTB(4),
+        SXTH(5),
+        SXTW(6),
+        SXTX(7);
+
+        public final int encoding;
+
+        private ExtendType(int encoding) {
+            this.encoding = encoding;
+        }
+    }
+
+    /**
+     * Condition flags for branches. See 4.3.
+     */
+    public enum ConditionFlag {
+        // Integer | Floating-point meanings
+        /**
+         * Equal | Equal.
+         */
+        EQ(0x0),
+        /**
+         * Not equal | Not equal or unordered.
+         */
+        NE(0x1),
+        /**
+         * Unsigned higher or same | Greater than, equal or unordered.
+         */
+        HS(0x2),
+        /**
+         * Unsigned lower | Less than.
+         */
+        LO(0x3),
+        /**
+         * Minus (negative) | Less than.
+         */
+        MI(0x4),
+        /**
+         * Plus (positive or zero) | Greater than, equal or unordered.
+         */
+        PL(0x5),
+        /**
+         * Overflow set | Unordered.
+         */
+        VS(0x6),
+        /**
+         * Overflow clear | Ordered.
+         */
+        VC(0x7),
+        /**
+         * Unsigned higher | Greater than or unordered.
+         */
+        HI(0x8),
+        /**
+         * Unsigned lower or same | Less than or equal.
+         */
+        LS(0x9),
+        /**
+         * Signed greater than or equal | Greater than or equal.
+         */
+        GE(0xA),
+        /**
+         * Signed less than | Less than or unordered.
+         */
+        LT(0xB),
+        /**
+         * Signed greater than | Greater than.
+         */
+        GT(0xC),
+        /**
+         * Signed less than or equal | Less than, equal or unordered.
+         */
+        LE(0xD),
+        /**
+         * Always | Always.
+         */
+        AL(0xE),
+        /**
+         * Always | Always (identical to AL, just to have a valid 0b1111 encoding).
+         */
+        NV(0xF);
+
+        public final int encoding;
+
+        private ConditionFlag(int encoding) {
+            this.encoding = encoding;
+        }
+
+        /**
+         * @return the ConditionFlag specified by encoding.
+         */
+        public static ConditionFlag fromEncoding(int encoding) {
+            return values()[encoding];
+        }
+
+        public ConditionFlag negate() {
+            switch (this) {
+                case EQ:
+                    return NE;
+                case NE:
+                    return EQ;
+                case HS:
+                    return LO;
+                case LO:
+                    return HS;
+                case MI:
+                    return PL;
+                case PL:
+                    return MI;
+                case VS:
+                    return VC;
+                case VC:
+                    return VS;
+                case HI:
+                    return LS;
+                case LS:
+                    return HI;
+                case GE:
+                    return LT;
+                case LT:
+                    return GE;
+                case GT:
+                    return LE;
+                case LE:
+                    return GT;
+                case AL:
+                case NV:
+                default:
+                    throw JVMCIError.shouldNotReachHere();
+            }
+        }
+    }
+
+    public AArch64Assembler(TargetDescription target) {
+        super(target);
+    }
+
+    /* Conditional Branch (5.2.1) */
+
+    /**
+     * Branch conditionally.
+     *
+     * @param condition may not be null.
+     * @param imm21 Signed 21-bit offset, has to be word aligned.
+     */
+    protected void b(ConditionFlag condition, int imm21) {
+        b(condition, imm21, -1);
+    }
+
+    /**
+     * Branch conditionally. Inserts instruction into code buffer at pos.
+     *
+     * @param condition may not be null.
+     * @param imm21 Signed 21-bit offset, has to be word aligned.
+     * @param pos Position at which instruction is inserted into buffer. -1 means insert at end.
+     */
+    protected void b(ConditionFlag condition, int imm21, int pos) {
+        if (pos == -1) {
+            emitInt(Instruction.BCOND.encoding | getConditionalBranchImm(imm21) | condition.encoding);
+        } else {
+            emitInt(Instruction.BCOND.encoding | getConditionalBranchImm(imm21) | condition.encoding, pos);
+        }
+    }
+
+    /**
+     * Compare register and branch if non-zero.
+     *
+     * @param size Instruction size in bits. Should be either 32 or 64.
+     * @param reg general purpose register. May not be null, zero-register or stackpointer.
+     * @param imm21 Signed 21-bit offset, has to be word aligned.
+     */
+    protected void cbnz(int size, Register reg, int imm21) {
+        conditionalBranchInstruction(reg, imm21, generalFromSize(size), Instruction.CBNZ, -1);
+    }
+
+    /**
+     * Compare register and branch if non-zero.
+     *
+     * @param size Instruction size in bits. Should be either 32 or 64.
+     * @param reg general purpose register. May not be null, zero-register or stackpointer.
+     * @param imm21 Signed 21-bit offset, has to be word aligned.
+     * @param pos Position at which instruction is inserted into buffer. -1 means insert at end.
+     */
+    protected void cbnz(int size, Register reg, int imm21, int pos) {
+        conditionalBranchInstruction(reg, imm21, generalFromSize(size), Instruction.CBNZ, pos);
+    }
+
+    /**
+     * Compare register and branch if zero.
+     *
+     * @param size Instruction size in bits. Should be either 32 or 64.
+     * @param reg general purpose register. May not be null, zero-register or stackpointer.
+     * @param imm21 Signed 21-bit offset, has to be word aligned.
+     */
+    protected void cbz(int size, Register reg, int imm21) {
+        conditionalBranchInstruction(reg, imm21, generalFromSize(size), Instruction.CBZ, -1);
+    }
+
+    /**
+     * Compare register and branch if zero.
+     *
+     * @param size Instruction size in bits. Should be either 32 or 64.
+     * @param reg general purpose register. May not be null, zero-register or stackpointer.
+     * @param imm21 Signed 21-bit offset, has to be word aligned.
+     * @param pos Position at which instruction is inserted into buffer. -1 means insert at end.
+     */
+    protected void cbz(int size, Register reg, int imm21, int pos) {
+        conditionalBranchInstruction(reg, imm21, generalFromSize(size), Instruction.CBZ, pos);
+    }
+
+    private void conditionalBranchInstruction(Register reg, int imm21, InstructionType type, Instruction instr, int pos) {
+        assert reg.getRegisterCategory().equals(CPU);
+        int instrEncoding = instr.encoding | CompareBranchOp;
+        if (pos == -1) {
+            emitInt(type.encoding | instrEncoding | getConditionalBranchImm(imm21) | rd(reg));
+        } else {
+            emitInt(type.encoding | instrEncoding | getConditionalBranchImm(imm21) | rd(reg), pos);
+        }
+    }
+
+    private static int getConditionalBranchImm(int imm21) {
+        assert NumUtil.isSignedNbit(21, imm21) && (imm21 & 0x3) == 0 : "Immediate has to be 21bit signed number and word aligned";
+        int imm = (imm21 & NumUtil.getNbitNumberInt(21)) >> 2;
+        return imm << ConditionalBranchImmOffset;
+    }
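+
+    // Example (illustrative): a conditional branch to PC + 8 uses imm21 = 8, encoded as
+    // (8 >> 2) << ConditionalBranchImmOffset = 0x40.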
+
+    /* Unconditional Branch (immediate) (5.2.2) */
+
+    /**
+     * @param imm28 Signed 28-bit offset, has to be word aligned.
+     */
+    protected void b(int imm28) {
+        unconditionalBranchImmInstruction(imm28, Instruction.B, -1);
+    }
+
+    /**
+     *
+     * @param imm28 Signed 28-bit offset, has to be word aligned.
+     * @param pos Position where instruction is inserted into code buffer.
+     */
+    protected void b(int imm28, int pos) {
+        unconditionalBranchImmInstruction(imm28, Instruction.B, pos);
+    }
+
+    /**
+     * Branch and link. The return address is written to register X30.
+     *
+     * @param imm28 Signed 28-bit offset, has to be word aligned.
+     */
+    public void bl(int imm28) {
+        unconditionalBranchImmInstruction(imm28, Instruction.BL, -1);
+    }
+
+    private void unconditionalBranchImmInstruction(int imm28, Instruction instr, int pos) {
+        assert NumUtil.isSignedNbit(28, imm28) && (imm28 & 0x3) == 0 : "Immediate has to be 28bit signed number and word aligned";
+        int imm = (imm28 & NumUtil.getNbitNumberInt(28)) >> 2;
+        int instrEncoding = instr.encoding | UnconditionalBranchImmOp;
+        if (pos == -1) {
+            emitInt(instrEncoding | imm);
+        } else {
+            emitInt(instrEncoding | imm, pos);
+        }
+    }
+
+    /* Unconditional Branch (register) (5.2.3) */
+
+    /**
+     * Branches to address in register and writes return address into register X30.
+     *
+     * @param reg general purpose register. May not be null, zero-register or stackpointer.
+     */
+    public void blr(Register reg) {
+        unconditionalBranchRegInstruction(reg, Instruction.BLR);
+    }
+
+    /**
+     * Branches to address in register.
+     *
+     * @param reg general purpose register. May not be null, zero-register or stackpointer.
+     */
+    protected void br(Register reg) {
+        unconditionalBranchRegInstruction(reg, Instruction.BR);
+    }
+
+    /**
+     * Return to address in register.
+     *
+     * @param reg general purpose register. May not be null, zero-register or stackpointer.
+     */
+    public void ret(Register reg) {
+        unconditionalBranchRegInstruction(reg, Instruction.RET);
+    }
+
+    private void unconditionalBranchRegInstruction(Register reg, Instruction instr) {
+        assert reg.getRegisterCategory().equals(CPU) && !reg.equals(zr) && !reg.equals(sp);
+        final int instrEncoding = instr.encoding | UnconditionalBranchRegOp;
+        emitInt(instrEncoding | rs1(reg));
+    }
+
+    /* Load-Store Single Register (5.3.1) */
+
+    /**
+     * Loads a srcSize value from address into rt, zero-extending it.
+     *
+     * @param srcSize size of memory read in bits. Must be 8, 16, 32 or 64.
+     * @param rt general purpose register. May not be null or stackpointer.
+     * @param address all addressing modes allowed. May not be null.
+     */
+    public void ldr(int srcSize, Register rt, AArch64Address address) {
+        assert rt.getRegisterCategory().equals(CPU);
+        assert srcSize == 8 || srcSize == 16 || srcSize == 32 || srcSize == 64;
+        int transferSize = NumUtil.log2Ceil(srcSize / 8);
+        loadStoreInstruction(rt, address, InstructionType.General32, Instruction.LDR, transferSize);
+    }
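+
+    // Usage sketch (illustrative, assuming the createBaseRegisterOnlyAddress factory in
+    // AArch64Address and registers r0/r1 from jdk.vm.ci.aarch64.AArch64):
+    // ldr(64, AArch64.r0, AArch64Address.createBaseRegisterOnlyAddress(AArch64.r1))
+    // emits "ldr x0, [x1]".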
+
+    /**
+     * Loads a srcSize value from address into rt, sign-extending it.
+     *
+     * @param targetSize size of target register in bits. Must be 32 or 64.
+     * @param srcSize size of memory read in bits. Must be 8, 16 or 32, but may not be equal to
+     *            targetSize.
+     * @param rt general purpose register. May not be null or stackpointer.
+     * @param address all addressing modes allowed. May not be null.
+     */
+    protected void ldrs(int targetSize, int srcSize, Register rt, AArch64Address address) {
+        assert rt.getRegisterCategory().equals(CPU);
+        assert (srcSize == 8 || srcSize == 16 || srcSize == 32) && srcSize != targetSize;
+        int transferSize = NumUtil.log2Ceil(srcSize / 8);
+        loadStoreInstruction(rt, address, generalFromSize(targetSize), Instruction.LDRS, transferSize);
+    }
+
+    /**
+     * Stores register rt into memory pointed to by address.
+     *
+     * @param destSize number of bits written to memory. Must be 8, 16, 32 or 64.
+     * @param rt general purpose register. May not be null or stackpointer.
+     * @param address all addressing modes allowed. May not be null.
+     */
+    public void str(int destSize, Register rt, AArch64Address address) {
+        assert rt.getRegisterCategory().equals(CPU);
+        assert destSize == 8 || destSize == 16 || destSize == 32 || destSize == 64;
+        int transferSize = NumUtil.log2Ceil(destSize / 8);
+        loadStoreInstruction(rt, address, InstructionType.General64, Instruction.STR, transferSize);
+    }
+
+    private void loadStoreInstruction(Register reg, AArch64Address address, InstructionType type, Instruction instr, int log2TransferSize) {
+        assert log2TransferSize >= 0 && log2TransferSize < 4;
+        int transferSizeEncoding = log2TransferSize << LoadStoreTransferSizeOffset;
+        int is32Bit = type.width == 32 ? 1 << ImmediateSizeOffset : 0;
+        int isFloat = !type.isGeneral ? 1 << LoadStoreFpFlagOffset : 0;
+        int memop = instr.encoding | transferSizeEncoding | is32Bit | isFloat | rt(reg);
+        switch (address.getAddressingMode()) {
+            case IMMEDIATE_SCALED:
+                emitInt(memop | LoadStoreScaledOp | address.getImmediate() << LoadStoreScaledImmOffset | rs1(address.getBase()));
+                break;
+            case IMMEDIATE_UNSCALED:
+                emitInt(memop | LoadStoreUnscaledOp | address.getImmediate() << LoadStoreUnscaledImmOffset | rs1(address.getBase()));
+                break;
+            case BASE_REGISTER_ONLY:
+                emitInt(memop | LoadStoreScaledOp | rs1(address.getBase()));
+                break;
+            case EXTENDED_REGISTER_OFFSET:
+            case REGISTER_OFFSET:
+                ExtendType extendType = address.getAddressingMode() == AddressingMode.EXTENDED_REGISTER_OFFSET ? address.getExtendType() : ExtendType.UXTX;
+                boolean shouldScale = address.isScaled() && log2TransferSize != 0;
+                emitInt(memop | LoadStoreRegisterOp | rs2(address.getOffset()) | extendType.encoding << ExtendTypeOffset | (shouldScale ? 1 : 0) << LoadStoreScaledRegOffset | rs1(address.getBase()));
+                break;
+            case PC_LITERAL:
+                assert log2TransferSize >= 2 : "PC-literal loads only work for loads/stores of 32 bits or larger";
+                transferSizeEncoding = (log2TransferSize - 2) << LoadStoreTransferSizeOffset;
+                emitInt(transferSizeEncoding | isFloat | LoadLiteralOp | rd(reg) | address.getImmediate() << LoadLiteralImmeOffset);
+                break;
+            case IMMEDIATE_POST_INDEXED:
+                emitInt(memop | LoadStorePostIndexedOp | rs1(address.getBase()) | address.getImmediate() << LoadStoreIndexedImmOffset);
+                break;
+            case IMMEDIATE_PRE_INDEXED:
+                emitInt(memop | LoadStorePreIndexedOp | rs1(address.getBase()) | address.getImmediate() << LoadStoreIndexedImmOffset);
+                break;
+            default:
+                throw JVMCIError.shouldNotReachHere();
+        }
+    }
+
+    /* Load-Store Exclusive (5.3.6) */
+
+    /**
+     * Load exclusive. Natural alignment of address is required.
+     *
+     * @param size size of memory read in bits. Must be 8, 16, 32 or 64.
+     * @param rt general purpose register. May not be null or stackpointer.
+     * @param address Has to be {@link AddressingMode#BASE_REGISTER_ONLY BASE_REGISTER_ONLY}. May
+     *            not be null.
+     */
+    protected void ldxr(int size, Register rt, AArch64Address address) {
+        assert size == 8 || size == 16 || size == 32 || size == 64;
+        int transferSize = NumUtil.log2Ceil(size / 8);
+        exclusiveLoadInstruction(rt, address, transferSize, Instruction.LDXR);
+    }
+
+    /**
+     * Store exclusive. Natural alignment of address is required. rs and rt may not point to
+     * the same register.
+     *
+     * @param size size of bits written to memory. Must be 8, 16, 32 or 64.
+     * @param rs general purpose register. Set to exclusive access status. 0 means success,
+     *            everything else failure. May not be null, or stackpointer.
+     * @param rt general purpose register. May not be null or stackpointer.
+     * @param address Has to be {@link AddressingMode#BASE_REGISTER_ONLY BASE_REGISTER_ONLY}. May
+     *            not be null.
+     */
+    protected void stxr(int size, Register rs, Register rt, AArch64Address address) {
+        assert size == 8 || size == 16 || size == 32 || size == 64;
+        int transferSize = NumUtil.log2Ceil(size / 8);
+        exclusiveStoreInstruction(rs, rt, address, transferSize, Instruction.STXR);
+    }
+
+    /* Load-Acquire/Store-Release (5.3.7) */
+
+    /* non exclusive access */
+    /**
+     * Load acquire. Natural alignment of address is required.
+     *
+     * @param size size of memory read in bits. Must be 8, 16, 32 or 64.
+     * @param rt general purpose register. May not be null or stackpointer.
+     * @param address Has to be {@link AddressingMode#BASE_REGISTER_ONLY BASE_REGISTER_ONLY}. May
+     *            not be null.
+     */
+    protected void ldar(int size, Register rt, AArch64Address address) {
+        assert size == 8 || size == 16 || size == 32 || size == 64;
+        int transferSize = NumUtil.log2Ceil(size / 8);
+        exclusiveLoadInstruction(rt, address, transferSize, Instruction.LDAR);
+    }
+
+    /**
+     * Store-release. Natural alignment of address is required.
+     *
+     * @param size size of bits written to memory. Must be 8, 16, 32 or 64.
+     * @param rt general purpose register. May not be null or stackpointer.
+     * @param address Has to be {@link AddressingMode#BASE_REGISTER_ONLY BASE_REGISTER_ONLY}. May
+     *            not be null.
+     */
+    protected void stlr(int size, Register rt, AArch64Address address) {
+        assert size == 8 || size == 16 || size == 32 || size == 64;
+        int transferSize = NumUtil.log2Ceil(size / 8);
+        // Hack: the rs field is ignored in the STLR encoding (its bits are already set in the
+        // instruction constant), so the register passed here is irrelevant.
+        exclusiveStoreInstruction(AArch64.r0, rt, address, transferSize, Instruction.STLR);
+    }
+
+    /* exclusive access */
+    /**
+     * Load acquire exclusive. Natural alignment of address is required.
+     *
+     * @param size size of memory read in bits. Must be 8, 16, 32 or 64.
+     * @param rt general purpose register. May not be null or stackpointer.
+     * @param address Has to be {@link AddressingMode#BASE_REGISTER_ONLY BASE_REGISTER_ONLY}. May
+     *            not be null.
+     */
+    public void ldaxr(int size, Register rt, AArch64Address address) {
+        assert size == 8 || size == 16 || size == 32 || size == 64;
+        int transferSize = NumUtil.log2Ceil(size / 8);
+        exclusiveLoadInstruction(rt, address, transferSize, Instruction.LDAXR);
+    }
+
+    /**
+     * Store-release exclusive. Natural alignment of address is required. rs and rt may not point to
+     * the same register.
+     *
+     * @param size size of bits written to memory. Must be 8, 16, 32 or 64.
+     * @param rs general purpose register. Set to exclusive access status. 0 means success,
+     *            everything else failure. May not be null, or stackpointer.
+     * @param rt general purpose register. May not be null or stackpointer.
+     * @param address Has to be {@link AddressingMode#BASE_REGISTER_ONLY BASE_REGISTER_ONLY}. May
+     *            not be null.
+     */
+    public void stlxr(int size, Register rs, Register rt, AArch64Address address) {
+        assert size == 8 || size == 16 || size == 32 || size == 64;
+        int transferSize = NumUtil.log2Ceil(size / 8);
+        exclusiveStoreInstruction(rs, rt, address, transferSize, Instruction.STLXR);
+    }
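+
+    // Usage sketch (illustrative): a compare-and-swap loop pairs ldaxr with stlxr and retries
+    // while the status register written by stlxr is non-zero (0 means success).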
+
+    private void exclusiveLoadInstruction(Register reg, AArch64Address address, int log2TransferSize, Instruction instr) {
+        assert address.getAddressingMode() == AddressingMode.BASE_REGISTER_ONLY;
+        assert log2TransferSize >= 0 && log2TransferSize < 4;
+        assert reg.getRegisterCategory().equals(CPU);
+        int transferSizeEncoding = log2TransferSize << LoadStoreTransferSizeOffset;
+        int instrEncoding = instr.encoding;
+        emitInt(transferSizeEncoding | instrEncoding | 1 << ImmediateSizeOffset | rt(reg) | rs1(address.getBase()));
+    }
+
+    /**
+     * Stores data from rt into address and sets rs to the returned exclusive access status.
+     *
+     * @param rs general purpose register into which the exclusive access status is written. May not
+     *            be null.
+     * @param rt general purpose register containing data to be written to memory at address. May
+     *            not be null.
+     * @param address Address in base register without offset form specifying where rt is written
+     *            to.
+     * @param log2TransferSize log2Ceil of memory transfer size.
+     * @param instr the (exclusive) store instruction to emit.
+     */
+    private void exclusiveStoreInstruction(Register rs, Register rt, AArch64Address address, int log2TransferSize, Instruction instr) {
+        assert address.getAddressingMode() == AddressingMode.BASE_REGISTER_ONLY;
+        assert log2TransferSize >= 0 && log2TransferSize < 4;
+        assert rt.getRegisterCategory().equals(CPU) && rs.getRegisterCategory().equals(CPU) && !rs.equals(rt);
+        int transferSizeEncoding = log2TransferSize << LoadStoreTransferSizeOffset;
+        int instrEncoding = instr.encoding;
+        emitInt(transferSizeEncoding | instrEncoding | rs2(rs) | rt(rt) | rs1(address.getBase()));
+    }
+
+    /* PC-relative Address Calculation (5.4.4) */
+
+    /**
+     * Address of page: sign extends the 21-bit offset, shifts it left by 12 and adds it to the
+     * value of the PC with its bottom 12 bits cleared, writing the result to dst.
+     *
+     * @param dst general purpose register. May not be null, zero-register or stackpointer.
+     * @param imm Signed 33-bit offset with lower 12bits clear.
+     */
+    // protected void adrp(Register dst, long imm) {
+    // assert (imm & NumUtil.getNbitNumberInt(12)) == 0 : "Lower 12-bit of immediate must be zero.";
+    // assert NumUtil.isSignedNbit(33, imm);
+    // addressCalculationInstruction(dst, (int) (imm >>> 12), Instruction.ADRP);
+    // }
+
+    /**
+     * Adds a 21-bit signed offset to the program counter and writes the result to dst.
+     *
+     * @param dst general purpose register. May not be null, zero-register or stackpointer.
+     * @param imm21 Signed 21-bit offset.
+     */
+    public void adr(Register dst, int imm21) {
+        addressCalculationInstruction(dst, imm21, Instruction.ADR);
+    }
+
+    private void addressCalculationInstruction(Register dst, int imm21, Instruction instr) {
+        assert dst.getRegisterCategory().equals(CPU);
+        int instrEncoding = instr.encoding | PcRelImmOp;
+        emitInt(instrEncoding | rd(dst) | getPcRelativeImmEncoding(imm21));
+    }
+
+    private static int getPcRelativeImmEncoding(int imm21) {
+        assert NumUtil.isSignedNbit(21, imm21);
+        int imm = imm21 & NumUtil.getNbitNumberInt(21);
+        // upper 19 bits
+        int immHi = (imm >> 2) << PcRelImmHiOffset;
+        // lower 2 bits
+        int immLo = (imm & 0x3) << PcRelImmLoOffset;
+        return immHi | immLo;
+    }
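+
+    // Example (illustrative): imm21 = 6 (0b110) is split into immHi = 1, emitted at bit
+    // PcRelImmHiOffset (5), and immLo = 2, emitted at bit PcRelImmLoOffset (29).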
+
+    /* Arithmetic (Immediate) (5.4.1) */
+
+    /**
+     * dst = src + aimm.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or zero-register.
+     * @param src general purpose register. May not be null or zero-register.
+     * @param aimm arithmetic immediate. Either unsigned 12-bit value or unsigned 24-bit value with
+     *            the lower 12 bits cleared.
+     */
+    protected void add(int size, Register dst, Register src, int aimm) {
+        assert !dst.equals(zr);
+        assert !src.equals(zr);
+        addSubImmInstruction(dst, src, aimm, generalFromSize(size), Instruction.ADD);
+    }
+
+    /**
+     * dst = src + aimm and sets condition flags.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src general purpose register. May not be null or zero-register.
+     * @param aimm arithmetic immediate. Either unsigned 12-bit value or unsigned 24-bit value with
+     *            the lower 12 bits cleared.
+     */
+    protected void adds(int size, Register dst, Register src, int aimm) {
+        assert !dst.equals(sp);
+        assert !src.equals(zr);
+        addSubImmInstruction(dst, src, aimm, generalFromSize(size), Instruction.ADDS);
+    }
+
+    /**
+     * dst = src - aimm.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or zero-register.
+     * @param src general purpose register. May not be null or zero-register.
+     * @param aimm arithmetic immediate. Either unsigned 12-bit value or unsigned 24-bit value with
+     *            the lower 12 bits cleared.
+     */
+    protected void sub(int size, Register dst, Register src, int aimm) {
+        assert !dst.equals(zr);
+        assert !src.equals(zr);
+        addSubImmInstruction(dst, src, aimm, generalFromSize(size), Instruction.SUB);
+    }
+
+    /**
+     * dst = src - aimm and sets condition flags.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src general purpose register. May not be null or zero-register.
+     * @param aimm arithmetic immediate. Either unsigned 12-bit value or unsigned 24-bit value with
+     *            the lower 12 bits cleared.
+     */
+    protected void subs(int size, Register dst, Register src, int aimm) {
+        assert !dst.equals(sp);
+        assert !src.equals(zr);
+        addSubImmInstruction(dst, src, aimm, generalFromSize(size), Instruction.SUBS);
+    }
+
+    private void addSubImmInstruction(Register dst, Register src, int aimm, InstructionType type, Instruction instr) {
+        int instrEncoding = instr.encoding | AddSubImmOp;
+        emitInt(type.encoding | instrEncoding | encodeAimm(aimm) | rd(dst) | rs1(src));
+    }
+
+    /**
+     * Encodes arithmetic immediate.
+     *
+     * @param imm Immediate has to be either an unsigned 12-bit value or an unsigned 24-bit value with
+     *            the lower 12 bits 0.
+     * @return Representation of immediate for use with arithmetic instructions.
+     */
+    private static int encodeAimm(int imm) {
+        assert isAimm(imm) : "Immediate has to be legal arithmetic immediate value " + imm;
+        if (NumUtil.isUnsignedNbit(12, imm)) {
+            return imm << ImmediateOffset;
+        } else {
+            // The lower 12 bits are 0, so shift the immediate right by 12 and set the
+            // flag to indicate a shifted immediate value.
+            return (imm >>> 12 << ImmediateOffset) | (1 << AddSubShiftOffset);
+        }
+    }
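+
+    // Examples (illustrative): encodeAimm(1) yields 1 << ImmediateOffset, while
+    // encodeAimm(0x123000) yields (0x123 << ImmediateOffset) | (1 << AddSubShiftOffset),
+    // marking the immediate as left-shifted by 12.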
+
+    /**
+     * Checks whether immediate can be encoded as an arithmetic immediate.
+     *
+     * @param imm Immediate has to be either an unsigned 12-bit value or an unsigned 24-bit value with
+     *            the lower 12 bits 0.
+     * @return true if valid arithmetic immediate, false otherwise.
+     */
+    protected static boolean isAimm(int imm) {
+        return NumUtil.isUnsignedNbit(12, imm) || NumUtil.isUnsignedNbit(12, imm >>> 12) && (imm & 0xfff) == 0;
+    }
+
+    /* Logical (immediate) (5.4.2) */
+
+    /**
+     * dst = src & bimm.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or zero-register.
+     * @param src general purpose register. May not be null or stack-pointer.
+     * @param bimm logical immediate. See {@link LogicalImmediateTable} for exact definition.
+     */
+    public void and(int size, Register dst, Register src, long bimm) {
+        assert !dst.equals(zr);
+        assert !src.equals(sp);
+        logicalImmInstruction(dst, src, bimm, generalFromSize(size), Instruction.AND);
+    }
+
+    /**
+     * dst = src & bimm and sets condition flags.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stack-pointer.
+     * @param src general purpose register. May not be null or stack-pointer.
+     * @param bimm logical immediate. See {@link LogicalImmediateTable} for exact definition.
+     */
+    public void ands(int size, Register dst, Register src, long bimm) {
+        assert !dst.equals(sp);
+        assert !src.equals(sp);
+        logicalImmInstruction(dst, src, bimm, generalFromSize(size), Instruction.ANDS);
+    }
+
+    /**
+     * dst = src ^ bimm.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or zero-register.
+     * @param src general purpose register. May not be null or stack-pointer.
+     * @param bimm logical immediate. See {@link LogicalImmediateTable} for exact definition.
+     */
+    public void eor(int size, Register dst, Register src, long bimm) {
+        assert !dst.equals(zr);
+        assert !src.equals(sp);
+        logicalImmInstruction(dst, src, bimm, generalFromSize(size), Instruction.EOR);
+    }
+
+    /**
+     * dst = src | bimm.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or zero-register.
+     * @param src general purpose register. May not be null or stack-pointer.
+     * @param bimm logical immediate. See {@link LogicalImmediateTable} for exact definition.
+     */
+    protected void orr(int size, Register dst, Register src, long bimm) {
+        assert !dst.equals(zr);
+        assert !src.equals(sp);
+        logicalImmInstruction(dst, src, bimm, generalFromSize(size), Instruction.ORR);
+    }
+
+    protected void logicalImmInstruction(Register dst, Register src, long bimm, InstructionType type, Instruction instr) {
+        // Mask higher bits off, since we always pass longs around even for the 32-bit instruction.
+        long bimmValue;
+        if (type == InstructionType.General32) {
+            assert (bimm >> 32) == 0 || (bimm >> 32) == -1L : "Higher order bits for 32-bit instruction must either all be 0 or 1.";
+            bimmValue = bimm & NumUtil.getNbitNumberLong(32);
+        } else {
+            bimmValue = bimm;
+        }
+        int immEncoding = LogicalImmediateTable.getLogicalImmEncoding(type == InstructionType.General64, bimmValue);
+        int instrEncoding = instr.encoding | LogicalImmOp;
+        emitInt(type.encoding | instrEncoding | immEncoding | rd(dst) | rs1(src));
+    }
+
+    /* Move (wide immediate) (5.4.3) */
+
+    /**
+     * dst = uimm16 << shiftAmt.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null, stackpointer or zero-register.
+     * @param uimm16 16-bit unsigned immediate
+     * @param shiftAmt amount by which uimm16 is left shifted. Can be any multiple of 16 smaller
+     *            than size.
+     */
+    protected void movz(int size, Register dst, int uimm16, int shiftAmt) {
+        moveWideImmInstruction(dst, uimm16, shiftAmt, generalFromSize(size), Instruction.MOVZ);
+    }
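+
+    // Example (illustrative): movz(64, dst, 0x1234, 16) writes 0x12340000L to dst.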
+
+    /**
+     * dst = ~(uimm16 << shiftAmt).
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null, stackpointer or zero-register.
+     * @param uimm16 16-bit unsigned immediate
+     * @param shiftAmt amount by which uimm16 is left shifted. Can be any multiple of 16 smaller
+     *            than size.
+     */
+    protected void movn(int size, Register dst, int uimm16, int shiftAmt) {
+        moveWideImmInstruction(dst, uimm16, shiftAmt, generalFromSize(size), Instruction.MOVN);
+    }
+
+    /**
+     * dst<pos+15:pos> = uimm16.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null, stackpointer or zero-register.
+     * @param uimm16 16-bit unsigned immediate
+     * @param pos position into which uimm16 is inserted. Can be any multiple of 16 smaller than
+     *            size.
+     */
+    protected void movk(int size, Register dst, int uimm16, int pos) {
+        moveWideImmInstruction(dst, uimm16, pos, generalFromSize(size), Instruction.MOVK);
+    }
+
+    private void moveWideImmInstruction(Register dst, int uimm16, int shiftAmt, InstructionType type, Instruction instr) {
+        assert dst.getRegisterCategory().equals(CPU);
+        assert NumUtil.isUnsignedNbit(16, uimm16) : "Immediate has to be unsigned 16bit";
+        assert shiftAmt == 0 || shiftAmt == 16 || (type == InstructionType.General64 && (shiftAmt == 32 || shiftAmt == 48)) : "Invalid shift amount: " + shiftAmt;
+        int shiftValue = shiftAmt >> 4;
+        int instrEncoding = instr.encoding | MoveWideImmOp;
+        emitInt(type.encoding | instrEncoding | rd(dst) | uimm16 << MoveWideImmOffset | shiftValue << MoveWideShiftOffset);
+    }
+
+    /* Bitfield Operations (5.4.5) */
+
+    /**
+     * Bitfield move.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null, stackpointer or zero-register.
+     * @param src general purpose register. May not be null, stackpointer or zero-register.
+     * @param r must be in the range 0 to size - 1
+     * @param s must be in the range 0 to size - 1
+     */
+    protected void bfm(int size, Register dst, Register src, int r, int s) {
+        bitfieldInstruction(dst, src, r, s, generalFromSize(size), Instruction.BFM);
+    }
+
+    /**
+     * Unsigned bitfield move.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null, stackpointer or zero-register.
+     * @param src general purpose register. May not be null, stackpointer or zero-register.
+     * @param r must be in the range 0 to size - 1
+     * @param s must be in the range 0 to size - 1
+     */
+    protected void ubfm(int size, Register dst, Register src, int r, int s) {
+        bitfieldInstruction(dst, src, r, s, generalFromSize(size), Instruction.UBFM);
+    }
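+
+    // Note (illustrative): the immediate shifts are UBFM aliases, e.g. a logical shift right
+    // by shift is ubfm(size, dst, src, shift, size - 1).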
+
+    /**
+     * Signed bitfield move.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null, stackpointer or zero-register.
+     * @param src general purpose register. May not be null, stackpointer or zero-register.
+     * @param r must be in the range 0 to size - 1
+     * @param s must be in the range 0 to size - 1
+     */
+    protected void sbfm(int size, Register dst, Register src, int r, int s) {
+        bitfieldInstruction(dst, src, r, s, generalFromSize(size), Instruction.SBFM);
+    }
+
+    private void bitfieldInstruction(Register dst, Register src, int r, int s, InstructionType type, Instruction instr) {
+        assert !dst.equals(sp) && !dst.equals(zr);
+        assert !src.equals(sp) && !src.equals(zr);
+        assert s >= 0 && s < type.width && r >= 0 && r < type.width;
+        int instrEncoding = instr.encoding | BitfieldImmOp;
+        int sf = type == InstructionType.General64 ? 1 << ImmediateSizeOffset : 0;
+        emitInt(type.encoding | instrEncoding | sf | r << ImmediateRotateOffset | s << ImmediateOffset | rd(dst) | rs1(src));
+    }
+
+    /* Extract (Immediate) (5.4.6) */
+
+    /**
+     * Extract. dst = src1:src2<lsb+size-1:lsb>.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     * @param lsb must be in range 0 to size - 1.
+     */
+    protected void extr(int size, Register dst, Register src1, Register src2, int lsb) {
+        extractInstruction(dst, src1, src2, lsb, generalFromSize(size));
+    }
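+
+    // Note (illustrative): a rotate right by immediate is the special case
+    // extr(size, dst, src, src, lsb) with both source registers equal.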
+
+    private void extractInstruction(Register dst, Register src1, Register src2, int lsb, InstructionType type) {
+        assert !dst.equals(sp);
+        assert !src1.equals(sp);
+        assert !src2.equals(sp);
+        assert lsb >= 0 && lsb < type.width;
+        int sf = type == InstructionType.General64 ? 1 << ImmediateSizeOffset : 0;
+        emitInt(type.encoding | Instruction.EXTR.encoding | sf | lsb << ImmediateOffset | rd(dst) | rs1(src1) | rs2(src2));
+    }
+
+    /* Arithmetic (shifted register) (5.5.1) */
+
+    /**
+     * dst = src1 + shiftType(src2, imm).
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     * @param shiftType any type but ROR.
+     * @param imm must be in range 0 to size - 1.
+     */
+    protected void add(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int imm) {
+        addSubShiftedInstruction(dst, src1, src2, shiftType, imm, generalFromSize(size), Instruction.ADD);
+    }
+
+    /**
+     * dst = src1 + shiftType(src2, imm) and sets condition flags.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     * @param shiftType any type but ROR.
+     * @param imm must be in range 0 to size - 1.
+     */
+    protected void adds(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int imm) {
+        addSubShiftedInstruction(dst, src1, src2, shiftType, imm, generalFromSize(size), Instruction.ADDS);
+    }
+
+    /**
+     * dst = src1 - shiftType(src2, imm).
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     * @param shiftType any type but ROR.
+     * @param imm must be in range 0 to size - 1.
+     */
+    protected void sub(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int imm) {
+        addSubShiftedInstruction(dst, src1, src2, shiftType, imm, generalFromSize(size), Instruction.SUB);
+    }
+
+    /**
+     * dst = src1 - shiftType(src2, imm) and sets condition flags.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     * @param shiftType any type but ROR.
+     * @param imm must be in range 0 to size - 1.
+     */
+    protected void subs(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int imm) {
+        addSubShiftedInstruction(dst, src1, src2, shiftType, imm, generalFromSize(size), Instruction.SUBS);
+    }
+
+    private void addSubShiftedInstruction(Register dst, Register src1, Register src2, ShiftType shiftType, int imm, InstructionType type, Instruction instr) {
+        assert shiftType != ShiftType.ROR;
+        assert imm >= 0 && imm < type.width;
+        int instrEncoding = instr.encoding | AddSubShiftedOp;
+        emitInt(type.encoding | instrEncoding | imm << ImmediateOffset | shiftType.encoding << ShiftTypeOffset | rd(dst) | rs1(src1) | rs2(src2));
+    }
+
+    /* Arithmetic (extended register) (5.5.2) */
+    /**
+     * dst = src1 + extendType(src2) << shiftAmt.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or zero-register.
+     * @param src1 general purpose register. May not be null or zero-register.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     * @param extendType defines how src2 is extended to the same size as src1.
+     * @param shiftAmt must be in range 0 to 4.
+     */
+    public void add(int size, Register dst, Register src1, Register src2, ExtendType extendType, int shiftAmt) {
+        assert !dst.equals(zr);
+        assert !src1.equals(zr);
+        assert !src2.equals(sp);
+        addSubExtendedInstruction(dst, src1, src2, extendType, shiftAmt, generalFromSize(size), Instruction.ADD);
+    }
+
+    /**
+     * dst = src1 + extendType(src2) << shiftAmt and sets condition flags.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or zero-register.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     * @param extendType defines how src2 is extended to the same size as src1.
+     * @param shiftAmt must be in range 0 to 4.
+     */
+    protected void adds(int size, Register dst, Register src1, Register src2, ExtendType extendType, int shiftAmt) {
+        assert !dst.equals(sp);
+        assert !src1.equals(zr);
+        assert !src2.equals(sp);
+        addSubExtendedInstruction(dst, src1, src2, extendType, shiftAmt, generalFromSize(size), Instruction.ADDS);
+    }
+
+    /**
+     * dst = src1 - extendType(src2) << shiftAmt.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or zero-register.
+     * @param src1 general purpose register. May not be null or zero-register.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     * @param extendType defines how src2 is extended to the same size as src1.
+     * @param shiftAmt must be in range 0 to 4.
+     */
+    protected void sub(int size, Register dst, Register src1, Register src2, ExtendType extendType, int shiftAmt) {
+        assert !dst.equals(zr);
+        assert !src1.equals(zr);
+        assert !src2.equals(sp);
+        addSubExtendedInstruction(dst, src1, src2, extendType, shiftAmt, generalFromSize(size), Instruction.SUB);
+    }
+
+    /**
+     * dst = src1 - extendType(src2) << shiftAmt and sets condition flags.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or zero-register.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     * @param extendType defines how src2 is extended to the same size as src1.
+     * @param shiftAmt must be in range 0 to 4.
+     */
+    protected void subs(int size, Register dst, Register src1, Register src2, ExtendType extendType, int shiftAmt) {
+        assert !dst.equals(sp);
+        assert !src1.equals(zr);
+        assert !src2.equals(sp);
+        addSubExtendedInstruction(dst, src1, src2, extendType, shiftAmt, generalFromSize(size), Instruction.SUBS);
+    }
+
+    private void addSubExtendedInstruction(Register dst, Register src1, Register src2, ExtendType extendType, int shiftAmt, InstructionType type, Instruction instr) {
+        assert shiftAmt >= 0 && shiftAmt <= 4;
+        int instrEncoding = instr.encoding | AddSubExtendedOp;
+        emitInt(type.encoding | instrEncoding | shiftAmt << ImmediateOffset | extendType.encoding << ExtendTypeOffset | rd(dst) | rs1(src1) | rs2(src2));
+    }
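+
+    // Illustrative aside: the extended-register form enables scaled 32-bit array indexing in
+    // one instruction, e.g. add(64, dst, base, wIndex, ExtendType.UXTW, 3) computes
+    // dst = base + (zeroExtend(wIndex) << 3).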
+
+    /* Logical (shifted register) (5.5.3) */
+    /**
+     * dst = src1 & shiftType(src2, shiftAmt).
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     * @param shiftType all types allowed, may not be null.
+     * @param shiftAmt must be in range 0 to size - 1.
+     */
+    protected void and(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
+        logicalRegInstruction(dst, src1, src2, shiftType, shiftAmt, generalFromSize(size), Instruction.AND);
+    }
+
+    /**
+     * dst = src1 & shiftType(src2, shiftAmt) and sets condition flags.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     * @param shiftType all types allowed, may not be null.
+     * @param shiftAmt must be in range 0 to size - 1.
+     */
+    protected void ands(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
+        logicalRegInstruction(dst, src1, src2, shiftType, shiftAmt, generalFromSize(size), Instruction.ANDS);
+    }
+
+    /**
+     * dst = src1 & ~(shiftType(src2, shiftAmt)).
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     * @param shiftType all types allowed, may not be null.
+     * @param shiftAmt must be in range 0 to size - 1.
+     */
+    protected void bic(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
+        logicalRegInstruction(dst, src1, src2, shiftType, shiftAmt, generalFromSize(size), Instruction.BIC);
+    }
+
+    /**
+     * dst = src1 & ~(shiftType(src2, shiftAmt)) and sets condition flags.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     * @param shiftType all types allowed, may not be null.
+     * @param shiftAmt must be in range 0 to size - 1.
+     */
+    protected void bics(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
+        logicalRegInstruction(dst, src1, src2, shiftType, shiftAmt, generalFromSize(size), Instruction.BICS);
+    }
+
+    /**
+     * dst = src1 ^ ~(shiftType(src2, shiftAmt)).
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     * @param shiftType all types allowed, may not be null.
+     * @param shiftAmt must be in range 0 to size - 1.
+     */
+    protected void eon(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
+        logicalRegInstruction(dst, src1, src2, shiftType, shiftAmt, generalFromSize(size), Instruction.EON);
+    }
+
+    /**
+     * dst = src1 ^ shiftType(src2, shiftAmt).
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     * @param shiftType all types allowed, may not be null.
+     * @param shiftAmt must be in range 0 to size - 1.
+     */
+    protected void eor(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
+        logicalRegInstruction(dst, src1, src2, shiftType, shiftAmt, generalFromSize(size), Instruction.EOR);
+    }
+
+    /**
+     * dst = src1 | shiftType(src2, shiftAmt).
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     * @param shiftType all types allowed, may not be null.
+     * @param shiftAmt must be in range 0 to size - 1.
+     */
+    protected void orr(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
+        logicalRegInstruction(dst, src1, src2, shiftType, shiftAmt, generalFromSize(size), Instruction.ORR);
+    }
+
+    /**
+     * dst = src1 | ~(shiftType(src2, shiftAmt)).
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     * @param shiftType all types allowed, may not be null.
+     * @param shiftAmt must be in range 0 to size - 1.
+     */
+    protected void orn(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
+        logicalRegInstruction(dst, src1, src2, shiftType, shiftAmt, generalFromSize(size), Instruction.ORN);
+    }
+
+    private void logicalRegInstruction(Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt, InstructionType type, Instruction instr) {
+        assert !dst.equals(sp);
+        assert !src1.equals(sp);
+        assert !src2.equals(sp);
+        assert shiftAmt >= 0 && shiftAmt < type.width;
+        int instrEncoding = instr.encoding | LogicalShiftOp;
+        emitInt(type.encoding | instrEncoding | shiftAmt << ImmediateOffset | shiftType.encoding << ShiftTypeOffset | rd(dst) | rs1(src1) | rs2(src2));
+    }
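+
+    // Illustrative aside: common aliases fall out of the logical (shifted register) forms,
+    // e.g. tst x1, x2 is ands(64, zr, x1, x2, ShiftType.LSL, 0) and mvn x0, x1 is
+    // orn(64, x0, zr, x1, ShiftType.LSL, 0).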
+
+    /* Variable Shift (5.5.4) */
+    /**
+     * dst = src1 >> (src2 & (size - 1)).
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     */
+    protected void asr(int size, Register dst, Register src1, Register src2) {
+        dataProcessing2SourceOp(dst, src1, src2, generalFromSize(size), Instruction.ASRV);
+    }
+
+    /**
+     * dst = src1 << (src2 & (size - 1)).
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     */
+    protected void lsl(int size, Register dst, Register src1, Register src2) {
+        dataProcessing2SourceOp(dst, src1, src2, generalFromSize(size), Instruction.LSLV);
+    }
+
+    /**
+     * dst = src1 >>> (src2 & (size - 1)).
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     */
+    protected void lsr(int size, Register dst, Register src1, Register src2) {
+        dataProcessing2SourceOp(dst, src1, src2, generalFromSize(size), Instruction.LSRV);
+    }
+
+    /**
+     * dst = rotateRight(src1, src2 & (size - 1)).
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     */
+    protected void ror(int size, Register dst, Register src1, Register src2) {
+        dataProcessing2SourceOp(dst, src1, src2, generalFromSize(size), Instruction.RORV);
+    }
+
+    /* Bit Operations (5.5.5) */
+
+    /**
+     * Counts leading sign bits. Sets dst to the number of consecutive bits following the topmost
+     * bit in src that are the same as the topmost bit. The count does not include the topmost bit
+     * itself, so the result will be in the range 0 to size - 1 inclusive.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null, zero-register or the stackpointer.
+     * @param src source register. May not be null, zero-register or the stackpointer.
+     */
+    protected void cls(int size, Register dst, Register src) {
+        dataProcessing1SourceOp(dst, src, generalFromSize(size), Instruction.CLS);
+    }
+
+    /**
+     * Counts leading zeros.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null, zero-register or the stackpointer.
+     * @param src source register. May not be null, zero-register or the stackpointer.
+     */
+    public void clz(int size, Register dst, Register src) {
+        dataProcessing1SourceOp(dst, src, generalFromSize(size), Instruction.CLZ);
+    }
+
+    /**
+     * Reverses bits.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null, zero-register or the stackpointer.
+     * @param src source register. May not be null, zero-register or the stackpointer.
+     */
+    protected void rbit(int size, Register dst, Register src) {
+        dataProcessing1SourceOp(dst, src, generalFromSize(size), Instruction.RBIT);
+    }
+
+    /**
+     * Reverses bytes.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or the stackpointer.
+     * @param src source register. May not be null or the stackpointer.
+     */
+    public void rev(int size, Register dst, Register src) {
+        if (size == 64) {
+            dataProcessing1SourceOp(dst, src, generalFromSize(size), Instruction.REVX);
+        } else {
+            assert size == 32;
+            dataProcessing1SourceOp(dst, src, generalFromSize(size), Instruction.REVW);
+        }
+    }
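+
+    // Illustrative aside: rev(64, dst, src) performs the same byte reversal as Java's
+    // Long.reverseBytes(long); rev(32, dst, src) matches Integer.reverseBytes(int).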
+
+    /* Conditional Data Processing (5.5.6) */
+
+    /**
+     * Conditional select. dst = src1 if condition else src2.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or the stackpointer.
+     * @param src1 general purpose register. May not be null or the stackpointer.
+     * @param src2 general purpose register. May not be null or the stackpointer.
+     * @param condition any condition flag. May not be null.
+     */
+    protected void csel(int size, Register dst, Register src1, Register src2, ConditionFlag condition) {
+        conditionalSelectInstruction(dst, src1, src2, condition, generalFromSize(size), Instruction.CSEL);
+    }
+
+    /**
+     * Conditional select negate. dst = src1 if condition else -src2.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or the stackpointer.
+     * @param src1 general purpose register. May not be null or the stackpointer.
+     * @param src2 general purpose register. May not be null or the stackpointer.
+     * @param condition any condition flag. May not be null.
+     */
+    protected void csneg(int size, Register dst, Register src1, Register src2, ConditionFlag condition) {
+        conditionalSelectInstruction(dst, src1, src2, condition, generalFromSize(size), Instruction.CSNEG);
+    }
+
+    /**
+     * Conditional select increment. dst = src1 if condition else src2 + 1.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or the stackpointer.
+     * @param src1 general purpose register. May not be null or the stackpointer.
+     * @param src2 general purpose register. May not be null or the stackpointer.
+     * @param condition any condition flag. May not be null.
+     */
+    protected void csinc(int size, Register dst, Register src1, Register src2, ConditionFlag condition) {
+        conditionalSelectInstruction(dst, src1, src2, condition, generalFromSize(size), Instruction.CSINC);
+    }
+
+    private void conditionalSelectInstruction(Register dst, Register src1, Register src2, ConditionFlag condition, InstructionType type, Instruction instr) {
+        assert !dst.equals(sp);
+        assert !src1.equals(sp);
+        assert !src2.equals(sp);
+        int instrEncoding = instr.encoding | ConditionalSelectOp;
+        emitInt(type.encoding | instrEncoding | rd(dst) | rs1(src1) | rs2(src2) | condition.encoding << ConditionalConditionOffset);
+    }
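+
+    // Illustrative aside: the conditional-select family encodes further aliases, e.g.
+    // cset dst, cond is csinc(size, dst, zr, zr, invert(cond)) (with invert denoting the
+    // inverse condition), which materializes a condition flag as 0 or 1.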
+
+    /* Integer Multiply/Divide (5.6) */
+
+    /**
+     * dst = src1 * src2 + src3.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or the stackpointer.
+     * @param src1 general purpose register. May not be null or the stackpointer.
+     * @param src2 general purpose register. May not be null or the stackpointer.
+     * @param src3 general purpose register. May not be null or the stackpointer.
+     */
+    protected void madd(int size, Register dst, Register src1, Register src2, Register src3) {
+        mulInstruction(dst, src1, src2, src3, generalFromSize(size), Instruction.MADD);
+    }
+
+    /**
+     * dst = src3 - src1 * src2.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or the stackpointer.
+     * @param src1 general purpose register. May not be null or the stackpointer.
+     * @param src2 general purpose register. May not be null or the stackpointer.
+     * @param src3 general purpose register. May not be null or the stackpointer.
+     */
+    protected void msub(int size, Register dst, Register src1, Register src2, Register src3) {
+        mulInstruction(dst, src1, src2, src3, generalFromSize(size), Instruction.MSUB);
+    }
+
+    /**
+     * Signed multiply high. dst = (src1 * src2)[127:64].
+     *
+     * @param dst general purpose register. May not be null or the stackpointer.
+     * @param src1 general purpose register. May not be null or the stackpointer.
+     * @param src2 general purpose register. May not be null or the stackpointer.
+     */
+    protected void smulh(Register dst, Register src1, Register src2) {
+        assert !dst.equals(sp);
+        assert !src1.equals(sp);
+        assert !src2.equals(sp);
+        emitInt(0b10011011010 << 21 | dst.encoding | src1.encoding << 5 | src2.encoding << 16 | 0b011111 << 10);
+    }
+
+    /**
+     * Unsigned multiply high. dst = (src1 * src2)[127:64].
+     *
+     * @param dst general purpose register. May not be null or the stackpointer.
+     * @param src1 general purpose register. May not be null or the stackpointer.
+     * @param src2 general purpose register. May not be null or the stackpointer.
+     */
+    protected void umulh(Register dst, Register src1, Register src2) {
+        assert !dst.equals(sp);
+        assert !src1.equals(sp);
+        assert !src2.equals(sp);
+        emitInt(0b10011011110 << 21 | dst.encoding | src1.encoding << 5 | src2.encoding << 16 | 0b011111 << 10);
+    }
+
+    /**
+     * Unsigned multiply-add long. xDst = xSrc3 + (wSrc1 * wSrc2).
+     *
+     * @param dst general purpose register. May not be null or the stackpointer.
+     * @param src1 general purpose register. May not be null or the stackpointer.
+     * @param src2 general purpose register. May not be null or the stackpointer.
+     * @param src3 general purpose register. May not be null or the stackpointer.
+     */
+    protected void umaddl(Register dst, Register src1, Register src2, Register src3) {
+        assert !dst.equals(sp);
+        assert !src1.equals(sp);
+        assert !src2.equals(sp);
+        assert !src3.equals(sp);
+        emitInt(0b10011011101 << 21 | dst.encoding | src1.encoding << 5 | src2.encoding << 16 | src3.encoding << 10);
+    }
+
+    /**
+     * Signed multiply-add long. xDst = xSrc3 + (wSrc1 * wSrc2).
+     *
+     * @param dst general purpose register. May not be null or the stackpointer.
+     * @param src1 general purpose register. May not be null or the stackpointer.
+     * @param src2 general purpose register. May not be null or the stackpointer.
+     * @param src3 general purpose register. May not be null or the stackpointer.
+     */
+    protected void smaddl(Register dst, Register src1, Register src2, Register src3) {
+        assert !dst.equals(sp);
+        assert !src1.equals(sp);
+        assert !src2.equals(sp);
+        assert !src3.equals(sp);
+        emitInt(0b10011011001 << 21 | dst.encoding | src1.encoding << 5 | src2.encoding << 16 | src3.encoding << 10);
+    }
+
+    private void mulInstruction(Register dst, Register src1, Register src2, Register src3, InstructionType type, Instruction instr) {
+        assert !dst.equals(sp);
+        assert !src1.equals(sp);
+        assert !src2.equals(sp);
+        assert !src3.equals(sp);
+        int instrEncoding = instr.encoding | MulOp;
+        emitInt(type.encoding | instrEncoding | rd(dst) | rs1(src1) | rs2(src2) | rs3(src3));
+    }
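+
+    // Illustrative aside: a plain multiply is madd with the zero register as addend, i.e.
+    // mul dst, src1, src2 is madd(size, dst, src1, src2, zr); combined with umulh this yields
+    // a full 64x64 -> 128 bit unsigned product (low half via madd, high half via umulh).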
+
+    /**
+     * Signed divide. dst = src1 / src2.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or the stackpointer.
+     * @param src1 general purpose register. May not be null or the stackpointer.
+     * @param src2 general purpose register. May not be null or the stackpointer.
+     */
+    public void sdiv(int size, Register dst, Register src1, Register src2) {
+        dataProcessing2SourceOp(dst, src1, src2, generalFromSize(size), Instruction.SDIV);
+    }
+
+    /**
+     * Unsigned divide. dst = src1 / src2.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or the stackpointer.
+     * @param src1 general purpose register. May not be null or the stackpointer.
+     * @param src2 general purpose register. May not be null or the stackpointer.
+     */
+    public void udiv(int size, Register dst, Register src1, Register src2) {
+        dataProcessing2SourceOp(dst, src1, src2, generalFromSize(size), Instruction.UDIV);
+    }
+
+    private void dataProcessing2SourceOp(Register dst, Register src1, Register src2, InstructionType type, Instruction instr) {
+        assert !dst.equals(sp);
+        assert !src1.equals(sp);
+        assert !src2.equals(sp);
+        int instrEncoding = instr.encoding | DataProcessing2SourceOp;
+        emitInt(type.encoding | instrEncoding | rd(dst) | rs1(src1) | rs2(src2));
+    }
+
+    private void dataProcessing1SourceOp(Register dst, Register src, InstructionType type, Instruction instr) {
+        int instrEncoding = instr.encoding | DataProcessing1SourceOp;
+        emitInt(type.encoding | instrEncoding | rd(dst) | rs1(src));
+    }
+
+    /* Floating point operations */
+
+    /* Load-Store Single FP register (5.7.1.1) */
+    /**
+     * Floating point load.
+     *
+     * @param size number of bits read from memory into rt. Must be 32 or 64.
+     * @param rt floating point register. May not be null.
+     * @param address all addressing modes allowed. May not be null.
+     */
+    public void fldr(int size, Register rt, AArch64Address address) {
+        assert rt.getRegisterCategory().equals(SIMD);
+        assert size == 32 || size == 64;
+        int transferSize = NumUtil.log2Ceil(size / 8);
+        loadStoreInstruction(rt, address, InstructionType.FP32, Instruction.LDR, transferSize);
+    }
+
+    /**
+     * Floating point store.
+     *
+     * @param size number of bits written to memory from rt. Must be 32 or 64.
+     * @param rt floating point register. May not be null.
+     * @param address all addressing modes allowed. May not be null.
+     */
+    public void fstr(int size, Register rt, AArch64Address address) {
+        assert rt.getRegisterCategory().equals(SIMD);
+        assert size == 32 || size == 64;
+        int transferSize = NumUtil.log2Ceil(size / 8);
+        loadStoreInstruction(rt, address, InstructionType.FP64, Instruction.STR, transferSize);
+    }
+
+    /* Floating-point Move (register) (5.7.2) */
+
+    /**
+     * Floating point move.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst floating point register. May not be null.
+     * @param src floating point register. May not be null.
+     */
+    protected void fmov(int size, Register dst, Register src) {
+        fpDataProcessing1Source(dst, src, floatFromSize(size), Instruction.FMOV);
+    }
+
+    /**
+     * Move size bits from floating point register unchanged to general purpose register.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null, stack-pointer or zero-register
+     * @param src floating point register. May not be null.
+     */
+    protected void fmovFpu2Cpu(int size, Register dst, Register src) {
+        assert dst.getRegisterCategory().equals(CPU);
+        assert src.getRegisterCategory().equals(SIMD);
+        fmovCpuFpuInstruction(dst, src, size == 64, Instruction.FMOVFPU2CPU);
+    }
+
+    /**
+     * Move size bits from general purpose register unchanged to floating point register.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst floating point register. May not be null.
+     * @param src general purpose register. May not be null or stack-pointer.
+     */
+    protected void fmovCpu2Fpu(int size, Register dst, Register src) {
+        assert dst.getRegisterCategory().equals(SIMD);
+        assert src.getRegisterCategory().equals(CPU);
+        fmovCpuFpuInstruction(dst, src, size == 64, Instruction.FMOVCPU2FPU);
+    }
+
+    private void fmovCpuFpuInstruction(Register dst, Register src, boolean is64bit, Instruction instr) {
+        int instrEncoding = instr.encoding | FpConvertOp;
+        int sf = is64bit ? InstructionType.FP64.encoding | InstructionType.General64.encoding : InstructionType.FP32.encoding | InstructionType.General32.encoding;
+        emitInt(sf | instrEncoding | rd(dst) | rs1(src));
+    }
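+
+    // Illustrative aside: fmovFpu2Cpu/fmovCpu2Fpu are the hardware counterparts of
+    // Float.floatToRawIntBits/Float.intBitsToFloat (and their double equivalents): the bit
+    // pattern is transferred unchanged between the register files.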
+
+    /* Floating-point Move (immediate) (5.7.3) */
+
+    /**
+     * Move immediate into register.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst floating point register. May not be null.
+     * @param imm immediate that is loaded into dst. If size is 32 only float immediates can be
+     *            loaded, i.e. (float) imm == imm must be true. In all cases
+     *            {@code isFloatImmediate} or {@code isDoubleImmediate}, respectively, must be true
+     *            depending on size.
+     */
+    protected void fmov(int size, Register dst, double imm) {
+        fmovImmInstruction(dst, imm, floatFromSize(size));
+    }
+
+    private void fmovImmInstruction(Register dst, double imm, InstructionType type) {
+        assert dst.getRegisterCategory().equals(SIMD);
+        int immEncoding;
+        if (type == InstructionType.FP64) {
+            immEncoding = getDoubleImmediate(imm);
+        } else {
+            assert imm == (float) imm : "float mov must use an immediate that can be represented using a float.";
+            immEncoding = getFloatImmediate((float) imm);
+        }
+        int instrEncoding = Instruction.FMOV.encoding | FpImmOp;
+        emitInt(type.encoding | instrEncoding | immEncoding | rd(dst));
+    }
+
+    private static int getDoubleImmediate(double imm) {
+        assert isDoubleImmediate(imm);
+        // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+        // 0000.0000.0000.0000.0000.0000.0000.0000
+        long repr = Double.doubleToRawLongBits(imm);
+        int a = (int) (repr >>> 63) << 7;
+        int b = (int) ((repr >>> 61) & 0x1) << 6;
+        int cToH = (int) (repr >>> 48) & 0x3f;
+        return (a | b | cToH) << FpImmOffset;
+    }
+
+    protected static boolean isDoubleImmediate(double imm) {
+        // Valid values will have the form:
+        // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+        // 0000.0000.0000.0000.0000.0000.0000.0000
+        long bits = Double.doubleToRawLongBits(imm);
+        // lower 48 bits are cleared
+        if ((bits & NumUtil.getNbitNumberLong(48)) != 0) {
+            return false;
+        }
+        // bits[61..54] are all set or all cleared.
+        long pattern = (bits >> 54) & NumUtil.getNbitNumberLong(8);
+        if (pattern != 0 && pattern != NumUtil.getNbitNumberLong(8)) {
+            return false;
+        }
+        // bits[62] and bits[61] are opposites.
+        return ((bits ^ (bits << 1)) & (1L << 62)) != 0;
+    }
+
+    private static int getFloatImmediate(float imm) {
+        assert isFloatImmediate(imm);
+        // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
+        int repr = Float.floatToRawIntBits(imm);
+        int a = (repr >>> 31) << 7;
+        int b = ((repr >>> 29) & 0x1) << 6;
+        int cToH = (repr >>> 19) & NumUtil.getNbitNumberInt(6);
+        return (a | b | cToH) << FpImmOffset;
+    }
+
+    protected static boolean isFloatImmediate(float imm) {
+        // Valid values will have the form:
+        // aBbb.bbbc.defg.h000.0000.0000.0000.0000
+        int bits = Float.floatToRawIntBits(imm);
+        // lower 19 bits are cleared.
+        if ((bits & NumUtil.getNbitNumberInt(19)) != 0) {
+            return false;
+        }
+        // bits[29..25] are all set or all cleared
+        int pattern = (bits >> 25) & NumUtil.getNbitNumberInt(5);
+        if (pattern != 0 && pattern != NumUtil.getNbitNumberInt(5)) {
+            return false;
+        }
+        // bits[29] and bits[30] have to be opposite
+        return ((bits ^ (bits << 1)) & (1 << 30)) != 0;
+    }
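+
+    // Illustrative aside: for both predicates above the encodable set is exactly
+    // +/-(m / 16) * 2^e with 16 <= m <= 31 and -3 <= e <= 4, e.g. 0.125, 0.25, 0.5, 1.0, 2.5
+    // and 31.0. Notably 0.0 is not encodable and needs a different idiom, e.g. moving zr
+    // across with fmovCpu2Fpu.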
+
+    /* Convert Floating-point Precision (5.7.4.1) */
+    /* Converts float to double and vice-versa */
+
+    /**
+     * Convert float to double and vice-versa.
+     *
+     * @param srcSize size of source register in bits.
+     * @param dst floating point register. May not be null.
+     * @param src floating point register. May not be null.
+     */
+    public void fcvt(int srcSize, Register dst, Register src) {
+        if (srcSize == 32) {
+            fpDataProcessing1Source(dst, src, floatFromSize(srcSize), Instruction.FCVTDS);
+        } else {
+            fpDataProcessing1Source(dst, src, floatFromSize(srcSize), Instruction.FCVTSD);
+        }
+    }
+
+    /* Convert to Integer (5.7.4.2) */
+
+    /**
+     * Convert floating point to integer. Rounds towards zero.
+     *
+     * @param targetSize size of integer register. 32 or 64.
+     * @param srcSize size of floating point register. 32 or 64.
+     * @param dst general purpose register. May not be null, the zero-register or the stackpointer.
+     * @param src floating point register. May not be null.
+     */
+    public void fcvtzs(int targetSize, int srcSize, Register dst, Register src) {
+        assert !dst.equals(zr) && !dst.equals(sp);
+        assert src.getRegisterCategory().equals(SIMD);
+        fcvtCpuFpuInstruction(dst, src, generalFromSize(targetSize), floatFromSize(srcSize), Instruction.FCVTZS);
+    }
+
+    /* Convert from Integer (5.7.4.2) */
+    /**
+     * Converts integer to floating point. Uses the rounding mode defined by FPCR.
+     *
+     * @param targetSize size of floating point register. 32 or 64.
+     * @param srcSize size of integer register. 32 or 64.
+     * @param dst floating point register. May not be null.
+     * @param src general purpose register. May not be null or the stackpointer.
+     */
+    public void scvtf(int targetSize, int srcSize, Register dst, Register src) {
+        assert dst.getRegisterCategory().equals(SIMD);
+        assert !src.equals(sp);
+        fcvtCpuFpuInstruction(dst, src, floatFromSize(targetSize), generalFromSize(srcSize), Instruction.SCVTF);
+    }
+
+    private void fcvtCpuFpuInstruction(Register dst, Register src, InstructionType type1, InstructionType type2, Instruction instr) {
+        int instrEncoding = instr.encoding | FpConvertOp;
+        emitInt(type1.encoding | type2.encoding | instrEncoding | rd(dst) | rs1(src));
+    }
+
+    /* Floating-point Round to Integral (5.7.5) */
+
+    /**
+     * Rounds floating-point to integral. Rounds towards zero.
+     *
+     * @param size register size.
+     * @param dst floating point register. May not be null.
+     * @param src floating point register. May not be null.
+     */
+    protected void frintz(int size, Register dst, Register src) {
+        fpDataProcessing1Source(dst, src, floatFromSize(size), Instruction.FRINTZ);
+    }
+
+    /* Floating-point Arithmetic (1 source) (5.7.6) */
+
+    /**
+     * dst = |src|.
+     *
+     * @param size register size.
+     * @param dst floating point register. May not be null.
+     * @param src floating point register. May not be null.
+     */
+    public void fabs(int size, Register dst, Register src) {
+        fpDataProcessing1Source(dst, src, floatFromSize(size), Instruction.FABS);
+    }
+
+    /**
+     * dst = -src.
+     *
+     * @param size register size.
+     * @param dst floating point register. May not be null.
+     * @param src floating point register. May not be null.
+     */
+    public void fneg(int size, Register dst, Register src) {
+        fpDataProcessing1Source(dst, src, floatFromSize(size), Instruction.FNEG);
+    }
+
+    /**
+     * dst = Sqrt(src).
+     *
+     * @param size register size.
+     * @param dst floating point register. May not be null.
+     * @param src floating point register. May not be null.
+     */
+    public void fsqrt(int size, Register dst, Register src) {
+        fpDataProcessing1Source(dst, src, floatFromSize(size), Instruction.FSQRT);
+    }
+
+    private void fpDataProcessing1Source(Register dst, Register src, InstructionType type, Instruction instr) {
+        assert dst.getRegisterCategory().equals(SIMD);
+        assert src.getRegisterCategory().equals(SIMD);
+        int instrEncoding = instr.encoding | Fp1SourceOp;
+        emitInt(type.encoding | instrEncoding | rd(dst) | rs1(src));
+    }
+
+    /* Floating-point Arithmetic (2 source) (5.7.7) */
+
+    /**
+     * dst = src1 + src2.
+     *
+     * @param size register size.
+     * @param dst floating point register. May not be null.
+     * @param src1 floating point register. May not be null.
+     * @param src2 floating point register. May not be null.
+     */
+    public void fadd(int size, Register dst, Register src1, Register src2) {
+        fpDataProcessing2Source(dst, src1, src2, floatFromSize(size), Instruction.FADD);
+    }
+
+    /**
+     * dst = src1 - src2.
+     *
+     * @param size register size.
+     * @param dst floating point register. May not be null.
+     * @param src1 floating point register. May not be null.
+     * @param src2 floating point register. May not be null.
+     */
+    public void fsub(int size, Register dst, Register src1, Register src2) {
+        fpDataProcessing2Source(dst, src1, src2, floatFromSize(size), Instruction.FSUB);
+    }
+
+    /**
+     * dst = src1 * src2.
+     *
+     * @param size register size.
+     * @param dst floating point register. May not be null.
+     * @param src1 floating point register. May not be null.
+     * @param src2 floating point register. May not be null.
+     */
+    public void fmul(int size, Register dst, Register src1, Register src2) {
+        fpDataProcessing2Source(dst, src1, src2, floatFromSize(size), Instruction.FMUL);
+    }
+
+    /**
+     * dst = src1 / src2.
+     *
+     * @param size register size.
+     * @param dst floating point register. May not be null.
+     * @param src1 floating point register. May not be null.
+     * @param src2 floating point register. May not be null.
+     */
+    public void fdiv(int size, Register dst, Register src1, Register src2) {
+        fpDataProcessing2Source(dst, src1, src2, floatFromSize(size), Instruction.FDIV);
+    }
+
+    private void fpDataProcessing2Source(Register dst, Register src1, Register src2, InstructionType type, Instruction instr) {
+        assert dst.getRegisterCategory().equals(SIMD);
+        assert src1.getRegisterCategory().equals(SIMD);
+        assert src2.getRegisterCategory().equals(SIMD);
+        int instrEncoding = instr.encoding | Fp2SourceOp;
+        emitInt(type.encoding | instrEncoding | rd(dst) | rs1(src1) | rs2(src2));
+    }
+
+    /* Floating-point Multiply-Add (5.7.9) */
+
+    /**
+     * dst = src1 * src2 + src3.
+     *
+     * @param size register size.
+     * @param dst floating point register. May not be null.
+     * @param src1 floating point register. May not be null.
+     * @param src2 floating point register. May not be null.
+     * @param src3 floating point register. May not be null.
+     */
+    protected void fmadd(int size, Register dst, Register src1, Register src2, Register src3) {
+        fpDataProcessing3Source(dst, src1, src2, src3, floatFromSize(size), Instruction.FMADD);
+    }
+
+    /**
+     * dst = src3 - src1 * src2.
+     *
+     * @param size register size.
+     * @param dst floating point register. May not be null.
+     * @param src1 floating point register. May not be null.
+     * @param src2 floating point register. May not be null.
+     * @param src3 floating point register. May not be null.
+     */
+    protected void fmsub(int size, Register dst, Register src1, Register src2, Register src3) {
+        fpDataProcessing3Source(dst, src1, src2, src3, floatFromSize(size), Instruction.FMSUB);
+    }
+
+    private void fpDataProcessing3Source(Register dst, Register src1, Register src2, Register src3, InstructionType type, Instruction instr) {
+        assert dst.getRegisterCategory().equals(SIMD);
+        assert src1.getRegisterCategory().equals(SIMD);
+        assert src2.getRegisterCategory().equals(SIMD);
+        assert src3.getRegisterCategory().equals(SIMD);
+        int instrEncoding = instr.encoding | Fp3SourceOp;
+        emitInt(type.encoding | instrEncoding | rd(dst) | rs1(src1) | rs2(src2) | rs3(src3));
+    }
+
+    /* Floating-point Comparison (5.7.10) */
+
+    /**
+     * Compares src1 to src2.
+     *
+     * @param size register size.
+     * @param src1 floating point register. May not be null.
+     * @param src2 floating point register. May not be null.
+     */
+    public void fcmp(int size, Register src1, Register src2) {
+        fcmpInstruction(src1, src2, floatFromSize(size));
+    }
+
+    private void fcmpInstruction(Register src1, Register src2, InstructionType type) {
+        assert src1.getRegisterCategory().equals(SIMD);
+        assert src2.getRegisterCategory().equals(SIMD);
+        int instrEncoding = Instruction.FCMP.encoding | FpCmpOp;
+        emitInt(type.encoding | instrEncoding | rs1(src1) | rs2(src2));
+    }
+
+    /**
+     * Conditional compare. NZCV = fcmp(src1, src2) if condition else uimm4.
+     *
+     * @param size register size.
+     * @param src1 floating point register. May not be null.
+     * @param src2 floating point register. May not be null.
+     * @param uimm4 condition flags that are used if condition is false.
+     * @param condition every condition allowed. May not be null.
+     */
+    public void fccmp(int size, Register src1, Register src2, int uimm4, ConditionFlag condition) {
+        fConditionalCompareInstruction(src1, src2, uimm4, condition, floatFromSize(size));
+    }
+
+    private void fConditionalCompareInstruction(Register src1, Register src2, int uimm4, ConditionFlag condition, InstructionType type) {
+        assert NumUtil.isUnsignedNbit(4, uimm4);
+        assert src1.getRegisterCategory().equals(SIMD);
+        assert src2.getRegisterCategory().equals(SIMD);
+        emitInt(type.encoding | Instruction.FCCMP.encoding | uimm4 | condition.encoding << ConditionalConditionOffset | rs1(src1) | rs2(src2));
+    }
+
+    /**
+     * Compares src to 0.0.
+     *
+     * @param size register size.
+     * @param src floating point register. May not be null.
+     */
+    public void fcmpZero(int size, Register src) {
+        fcmpZeroInstruction(src, floatFromSize(size));
+    }
+
+    private void fcmpZeroInstruction(Register src, InstructionType type) {
+        assert src.getRegisterCategory().equals(SIMD);
+        int instrEncoding = Instruction.FCMPZERO.encoding | FpCmpOp;
+        emitInt(type.encoding | instrEncoding | rs1(src));
+    }
+
+    /* Floating-point Conditional Select (5.7.11) */
+
+    /**
+     * Conditional select. dst = src1 if condition else src2.
+     *
+     * @param size register size.
+     * @param dst floating point register. May not be null.
+     * @param src1 floating point register. May not be null.
+     * @param src2 floating point register. May not be null.
+     * @param condition every condition allowed. May not be null.
+     */
+    protected void fcsel(int size, Register dst, Register src1, Register src2, ConditionFlag condition) {
+        fConditionalSelect(dst, src1, src2, condition, floatFromSize(size));
+    }
+
+    private void fConditionalSelect(Register dst, Register src1, Register src2, ConditionFlag condition, InstructionType type) {
+        assert dst.getRegisterCategory().equals(SIMD);
+        assert src1.getRegisterCategory().equals(SIMD);
+        assert src2.getRegisterCategory().equals(SIMD);
+        emitInt(type.encoding | Instruction.FCSEL.encoding | rd(dst) | rs1(src1) | rs2(src2) | condition.encoding << ConditionalConditionOffset);
+    }
+
+    /* Debug exceptions (5.9.1.2) */
+
+    /**
+     * Halting mode software breakpoint: enters halting mode debug state if enabled, otherwise it
+     * is treated as an UNALLOCATED instruction.
+     *
+     * @param uimm16 Arbitrary 16-bit unsigned payload.
+     */
+    protected void hlt(int uimm16) {
+        exceptionInstruction(uimm16, Instruction.HLT);
+    }
+
+    /**
+     * Monitor mode software breakpoint: exception routed to a debug monitor executing in a higher
+     * exception level.
+     *
+     * @param uimm16 Arbitrary 16-bit unsigned payload.
+     */
+    protected void brk(int uimm16) {
+        exceptionInstruction(uimm16, Instruction.BRK);
+    }
+
+    /* Architectural hints (5.9.4) */
+    public enum SystemHint {
+        NOP(0x0),
+        YIELD(0x1),
+        WFE(0x2),
+        WFI(0x3),
+        SEV(0x4),
+        SEVL(0x5);
+
+        private final int encoding;
+
+        private SystemHint(int encoding) {
+            this.encoding = encoding;
+        }
+    }
+
+    /**
+     * Architectural hints.
+     *
+     * @param hint Can be any of the defined hints. May not be null.
+     */
+    protected void hint(SystemHint hint) {
+        emitInt(Instruction.HINT.encoding | hint.encoding << SystemImmediateOffset);
+    }
+
+    private void exceptionInstruction(int uimm16, Instruction instr) {
+        assert NumUtil.isUnsignedNbit(16, uimm16);
+        int instrEncoding = instr.encoding | ExceptionOp;
+        emitInt(instrEncoding | uimm16 << SystemImmediateOffset);
+    }
+
+    /**
+     * Clear Exclusive: clears the local record of the executing processor that an address has had
+     * a request for exclusive access.
+     */
+    protected void clrex() {
+        emitInt(Instruction.CLREX.encoding);
+    }
+
+    /**
+     * Possible barrier definitions for AArch64. LOAD_LOAD and LOAD_STORE map to the same underlying
+     * barrier.
+     *
+     * We only need synchronization across the inner shareable domain (see B2-90 in the Reference
+     * documentation).
+     */
+    public enum BarrierKind {
+        LOAD_LOAD(0x9, "ISHLD"),
+        LOAD_STORE(0x9, "ISHLD"),
+        STORE_STORE(0xA, "ISHST"),
+        ANY_ANY(0xB, "ISH");
+
+        public final int encoding;
+        public final String optionName;
+
+        private BarrierKind(int encoding, String optionName) {
+            this.encoding = encoding;
+            this.optionName = optionName;
+        }
+    }
+
+    /**
+     * Data Memory Barrier.
+     *
+     * @param barrierKind barrier that is issued. May not be null.
+     */
+    public void dmb(BarrierKind barrierKind) {
+        barrierInstruction(barrierKind, Instruction.DMB);
+    }
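+
+    // Illustrative aside: dmb(BarrierKind.ANY_ANY) emits a full "dmb ish" fence, whereas
+    // dmb(BarrierKind.STORE_STORE) emits the cheaper "dmb ishst" that only orders stores
+    // against stores.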
+
+    private void barrierInstruction(BarrierKind barrierKind, Instruction instr) {
+        int instrEncoding = instr.encoding | BarrierOp;
+        emitInt(instrEncoding | barrierKind.encoding << BarrierKindOffset);
+    }
+
+    // Artificial instructions for the simulator. These instructions are illegal in the normal
+    // AArch64 ISA, but have special meaning for the simulator.
+
+    /**
+     * Branch and link register instruction with the target code being native, i.e. not AArch64.
+     *
+     * The simulator has to do extra work, so it needs to know the number of arguments (both gp
+     * and fp) as well as the type of the return value. See assembler_aarch64.hpp.
+     *
+     * @param reg general purpose register. May not be null, zero-register or stackpointer. Contains
+     *            address of target method.
+     * @param gpArgs number of general purpose arguments passed to the function. 4-bit unsigned.
+     * @param fpArgs number of floating point arguments passed to the function. 4-bit unsigned.
+     * @param returnType returnType of function. May not be null, or Kind.ILLEGAL.
+     */
+    public void blrNative(Register reg, int gpArgs, int fpArgs, JavaKind returnType) {
+        assert reg.getRegisterCategory().equals(CPU) && NumUtil.isUnsignedNbit(4, gpArgs) && NumUtil.isUnsignedNbit(4, fpArgs) && returnType != null;
+        emitInt(Instruction.BLR_NATIVE.encoding | reg.encoding | getReturnTypeEncoding(returnType) << 5 | fpArgs << 7 | gpArgs << 11);
+    }
+
+    private static int getReturnTypeEncoding(JavaKind returnType) {
+        // See assembler_aarch64.hpp for encoding details
+        switch (returnType) {
+            case Boolean:
+            case Byte:
+            case Short:
+            case Char:
+            case Int:
+            case Long:
+            case Object:
+                return 1;
+            case Float:
+                return 2;
+            case Double:
+                return 3;
+            case Void:
+            case Illegal:
+                // Void functions use a result of Kind.Illegal apparently
+                return 0;
+            default:
+                throw JVMCIError.shouldNotReachHere("Illegal kind");
+        }
+    }
+
+    /* Helper functions */
+    private static int rd(Register reg) {
+        return reg.encoding << RdOffset;
+    }
+
+    private static int rs1(Register reg) {
+        return reg.encoding << Rs1Offset;
+    }
+
+    private static int rs2(Register reg) {
+        return reg.encoding << Rs2Offset;
+    }
+
+    private static int rs3(Register reg) {
+        return reg.encoding << Rs3Offset;
+    }
+
+    private static int rt(Register reg) {
+        return reg.encoding << RtOffset;
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm.aarch64/src/com/oracle/graal/asm/aarch64/AArch64MacroAssembler.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,1366 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.graal.asm.aarch64;
+
+import static com.oracle.graal.asm.aarch64.AArch64Address.AddressingMode.BASE_REGISTER_ONLY;
+import static com.oracle.graal.asm.aarch64.AArch64Address.AddressingMode.EXTENDED_REGISTER_OFFSET;
+import static com.oracle.graal.asm.aarch64.AArch64Address.AddressingMode.IMMEDIATE_SCALED;
+import static com.oracle.graal.asm.aarch64.AArch64Address.AddressingMode.IMMEDIATE_UNSCALED;
+import static com.oracle.graal.asm.aarch64.AArch64Address.AddressingMode.REGISTER_OFFSET;
+import static com.oracle.graal.asm.aarch64.AArch64MacroAssembler.AddressGenerationPlan.WorkPlan.ADD_TO_BASE;
+import static com.oracle.graal.asm.aarch64.AArch64MacroAssembler.AddressGenerationPlan.WorkPlan.ADD_TO_INDEX;
+import static com.oracle.graal.asm.aarch64.AArch64MacroAssembler.AddressGenerationPlan.WorkPlan.NO_WORK;
+import static jdk.vm.ci.aarch64.AArch64.CPU;
+import static jdk.vm.ci.aarch64.AArch64.r8;
+import static jdk.vm.ci.aarch64.AArch64.r9;
+import static jdk.vm.ci.aarch64.AArch64.sp;
+import static jdk.vm.ci.aarch64.AArch64.zr;
+
+import com.oracle.graal.asm.AbstractAddress;
+import com.oracle.graal.asm.Label;
+import com.oracle.graal.asm.NumUtil;
+
+import jdk.vm.ci.aarch64.AArch64;
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.code.TargetDescription;
+import jdk.vm.ci.common.JVMCIError;
+
+public class AArch64MacroAssembler extends AArch64Assembler {
+
+    private final ScratchRegister[] scratchRegister = new ScratchRegister[]{new ScratchRegister(r8), new ScratchRegister(r9)};
+
+    // Points to the next free scratch register
+    private int nextFreeScratchRegister = 0;
+
+    public AArch64MacroAssembler(TargetDescription target) {
+        super(target);
+    }
+
+    public class ScratchRegister implements AutoCloseable {
+        private final Register register;
+
+        public ScratchRegister(Register register) {
+            this.register = register;
+        }
+
+        public Register getRegister() {
+            return register;
+        }
+
+        public void close() {
+            assert nextFreeScratchRegister > 0 : "Close called too often";
+            nextFreeScratchRegister--;
+        }
+    }
+
+    public ScratchRegister getScratchRegister() {
+        return scratchRegister[nextFreeScratchRegister++];
+    }
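+
+    // Usage sketch (illustrative, assuming a macro assembler "masm" in scope): ScratchRegister
+    // implements AutoCloseable so scratch registers are released in LIFO order via
+    // try-with-resources:
+    //   try (ScratchRegister sc = masm.getScratchRegister()) {
+    //       Register scratch = sc.getRegister();
+    //       // ... emit code using scratch ...
+    //   } // close() hands the register back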
+
+    /**
+     * Specifies what actions have to be taken to turn an arbitrary address of the form
+     * {@code base + displacement [+ index [<< scale]]} into a valid AArch64Address.
+     */
+    public static class AddressGenerationPlan {
+        public final WorkPlan workPlan;
+        public final AArch64Address.AddressingMode addressingMode;
+        public final boolean needsScratch;
+
+        public enum WorkPlan {
+            /**
+             * Can be used as-is without extra work.
+             */
+            NO_WORK,
+            /**
+             * Add scaled displacement to index register.
+             */
+            ADD_TO_INDEX,
+            /**
+             * Add unscaled displacement to base register.
+             */
+            ADD_TO_BASE,
+        }
+
+        /**
+         * @param workPlan Work necessary to generate a valid address.
+         * @param addressingMode Addressing mode of generated address.
+         * @param needsScratch True if generating the address needs a scratch register, false otherwise.
+         */
+        public AddressGenerationPlan(WorkPlan workPlan, AArch64Address.AddressingMode addressingMode, boolean needsScratch) {
+            this.workPlan = workPlan;
+            this.addressingMode = addressingMode;
+            this.needsScratch = needsScratch;
+        }
+    }
+
+    /**
+     * Generates an address plan for an address of the form
+     * {@code base + displacement [+ index [<< log2(transferSize)]]} with the index register and
+     * scaling being optional.
+     *
+     * @param displacement an arbitrary displacement.
+     * @param hasIndexRegister true if the address uses an index register, false otherwise.
+     * @param transferSize the memory transfer size in bytes. The log2 of this specifies how much
+     *            the index register is scaled. If 0 no scaling is assumed. Can be 0, 1, 2, 4 or 8.
+     * @return AddressGenerationPlan that specifies the actions necessary to generate a valid
+     *         AArch64Address for the given parameters.
+     */
+    public static AddressGenerationPlan generateAddressPlan(long displacement, boolean hasIndexRegister, int transferSize) {
+        assert transferSize == 0 || transferSize == 1 || transferSize == 2 || transferSize == 4 || transferSize == 8;
+        boolean indexScaled = transferSize != 0;
+        int log2Scale = NumUtil.log2Ceil(transferSize);
+        long scaledDisplacement = displacement >> log2Scale;
+        boolean displacementScalable = indexScaled && (displacement & (transferSize - 1)) == 0;
+        if (displacement == 0) {
+            // register offset without any work beforehand.
+            return new AddressGenerationPlan(NO_WORK, REGISTER_OFFSET, false);
+        } else {
+            if (hasIndexRegister) {
+                if (displacementScalable) {
+                    boolean needsScratch = !isArithmeticImmediate(scaledDisplacement);
+                    return new AddressGenerationPlan(ADD_TO_INDEX, REGISTER_OFFSET, needsScratch);
+                } else {
+                    boolean needsScratch = !isArithmeticImmediate(displacement);
+                    return new AddressGenerationPlan(ADD_TO_BASE, REGISTER_OFFSET, needsScratch);
+                }
+            } else {
+                if (NumUtil.isSignedNbit(9, displacement)) {
+                    return new AddressGenerationPlan(NO_WORK, IMMEDIATE_UNSCALED, false);
+                } else if (displacementScalable && NumUtil.isUnsignedNbit(12, scaledDisplacement)) {
+                    return new AddressGenerationPlan(NO_WORK, IMMEDIATE_SCALED, false);
+                } else {
+                    boolean needsScratch = !isArithmeticImmediate(displacement);
+                    return new AddressGenerationPlan(ADD_TO_BASE, REGISTER_OFFSET, needsScratch);
+                }
+            }
+        }
+    }
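+
+    // Worked example (illustrative): for displacement = 0x1008, hasIndexRegister = false and
+    // transferSize = 8, the scaled displacement 0x201 fits an unsigned 12-bit immediate, so
+    // the plan is (NO_WORK, IMMEDIATE_SCALED, needsScratch = false); an unaligned displacement
+    // such as 0x100001 instead yields ADD_TO_BASE with REGISTER_OFFSET, with needsScratch
+    // determined by isArithmeticImmediate(displacement).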
+
+    /**
+     * Returns an AArch64Address pointing to
+     * {@code base + displacement + index << log2(transferSize)}.
+     *
+     * @param base general purpose register. May not be null or the zero register.
+     * @param displacement arbitrary displacement added to base.
+     * @param index general purpose register. May not be null or the stack pointer.
+     * @param signExtendIndex if true, the index register is treated as a 32-bit word register
+     *            that is sign-extended before being added.
+     * @param transferSize the memory transfer size in bytes. The log2 of this specifies how much
+     *            the index register is scaled. If 0 no scaling is assumed. Can be 0, 1, 2, 4 or 8.
+     * @param additionalReg additional register used either as a scratch register or as part of the
+     *            final address, depending on whether allowOverwrite is true or not. May not be null
+     *            or stackpointer.
+     * @param allowOverwrite if true, the base or index register may be overwritten in order to
+     *            generate the address.
+     * @return AArch64Address pointing to memory at
+     *         {@code base + displacement + index << log2(transferSize)}.
+     */
+    public AArch64Address makeAddress(Register base, long displacement, Register index, boolean signExtendIndex, int transferSize, Register additionalReg, boolean allowOverwrite) {
+        AddressGenerationPlan plan = generateAddressPlan(displacement, !index.equals(zr), transferSize);
+        assert allowOverwrite || !zr.equals(additionalReg) || plan.workPlan == NO_WORK;
+        assert !plan.needsScratch || !zr.equals(additionalReg);
+        int log2Scale = NumUtil.log2Ceil(transferSize);
+        long scaledDisplacement = displacement >> log2Scale;
+        Register newIndex = index;
+        Register newBase = base;
+        int immediate;
+        switch (plan.workPlan) {
+            case NO_WORK:
+                if (plan.addressingMode == IMMEDIATE_SCALED) {
+                    immediate = (int) scaledDisplacement;
+                } else {
+                    immediate = (int) displacement;
+                }
+                break;
+            case ADD_TO_INDEX:
+                newIndex = allowOverwrite ? index : additionalReg;
+                if (plan.needsScratch) {
+                    mov(additionalReg, scaledDisplacement);
+                    add(signExtendIndex ? 32 : 64, newIndex, index, additionalReg);
+                } else {
+                    add(signExtendIndex ? 32 : 64, newIndex, index, (int) scaledDisplacement);
+                }
+                immediate = 0;
+                break;
+            case ADD_TO_BASE:
+                newBase = allowOverwrite ? base : additionalReg;
+                if (plan.needsScratch) {
+                    mov(additionalReg, displacement);
+                    add(64, newBase, base, additionalReg);
+                } else {
+                    add(64, newBase, base, (int) displacement);
+                }
+                immediate = 0;
+                break;
+            default:
+                throw JVMCIError.shouldNotReachHere();
+        }
+        AArch64Address.AddressingMode addressingMode = plan.addressingMode;
+        ExtendType extendType = null;
+        if (addressingMode == REGISTER_OFFSET) {
+            if (newIndex.equals(zr)) {
+                addressingMode = BASE_REGISTER_ONLY;
+            } else if (signExtendIndex) {
+                addressingMode = EXTENDED_REGISTER_OFFSET;
+                extendType = ExtendType.SXTW;
+            }
+        }
+        return AArch64Address.createAddress(addressingMode, newBase, newIndex, immediate, transferSize != 0, extendType);
+    }
+
+    /**
+     * Returns an AArch64Address pointing to {@code base + displacement}. Specifies the memory
+     * transfer size to allow some optimizations when building the address.
+     *
+     * @param base general purpose register. May not be null or the zero register.
+     * @param displacement arbitrary displacement added to base.
+     * @param transferSize the memory transfer size in bytes.
+     * @param additionalReg additional register used either as a scratch register or as part of the
+     *            final address, depending on whether allowOverwrite is true or not. May not be
+     *            null, zero register or stackpointer.
+     * @param allowOverwrite if true, the value of the base or index register may be overwritten to
+     *            generate the address.
+     * @return AArch64Address pointing to memory at {@code base + displacement}.
+     */
+    public AArch64Address makeAddress(Register base, long displacement, Register additionalReg, int transferSize, boolean allowOverwrite) {
+        assert additionalReg.getRegisterCategory().equals(CPU);
+        return makeAddress(base, displacement, zr, /* sign-extend */false, transferSize, additionalReg, allowOverwrite);
+    }
+
+    /**
+     * Returns an AArch64Address pointing to {@code base + displacement}. Fails if address cannot be
+     * represented without overwriting base register or using a scratch register.
+     *
+     * @param base general purpose register. May not be null or the zero register.
+     * @param displacement arbitrary displacement added to base.
+     * @param transferSize the memory transfer size in bytes. The log2 of this specifies how much
+     *            the index register is scaled. If 0 no scaling is assumed. Can be 0, 1, 2, 4 or 8.
+     * @return AArch64Address pointing to memory at {@code base + displacement}.
+     */
+    public AArch64Address makeAddress(Register base, long displacement, int transferSize) {
+        return makeAddress(base, displacement, zr, /* signExtend */false, transferSize, zr, /* allowOverwrite */false);
+    }
+
+    /**
+     * Loads memory address into register.
+     *
+     * @param dst general purpose register. May not be null, zero-register or stackpointer.
+     * @param address address whose value is loaded into dst. May not be null,
+     *            {@link com.oracle.graal.asm.aarch64.AArch64Address.AddressingMode#IMMEDIATE_POST_INDEXED
+     *            POST_INDEXED} or
+     *            {@link com.oracle.graal.asm.aarch64.AArch64Address.AddressingMode#IMMEDIATE_PRE_INDEXED
+     *            IMMEDIATE_PRE_INDEXED}
+     * @param transferSize the memory transfer size in bytes. The log2 of this specifies how much
+     *            the index register is scaled. Can be 1, 2, 4 or 8.
+     */
+    public void loadAddress(Register dst, AArch64Address address, int transferSize) {
+        assert transferSize == 1 || transferSize == 2 || transferSize == 4 || transferSize == 8;
+        assert dst.getRegisterCategory().equals(CPU);
+        int shiftAmt = NumUtil.log2Ceil(transferSize);
+        switch (address.getAddressingMode()) {
+            case IMMEDIATE_SCALED:
+                int scaledImmediate = address.getImmediateRaw() << shiftAmt;
+                int lowerBits = scaledImmediate & NumUtil.getNbitNumberInt(12);
+                int higherBits = scaledImmediate & ~NumUtil.getNbitNumberInt(12);
+                boolean firstAdd = true;
+                if (lowerBits != 0) {
+                    add(64, dst, address.getBase(), lowerBits);
+                    firstAdd = false;
+                }
+                if (higherBits != 0) {
+                    Register src = firstAdd ? address.getBase() : dst;
+                    add(64, dst, src, higherBits);
+                }
+                break;
+            case IMMEDIATE_UNSCALED:
+                int immediate = address.getImmediateRaw();
+                add(64, dst, address.getBase(), immediate);
+                break;
+            case REGISTER_OFFSET:
+                add(64, dst, address.getBase(), address.getOffset(), ShiftType.LSL, address.isScaled() ? shiftAmt : 0);
+                break;
+            case EXTENDED_REGISTER_OFFSET:
+                add(64, dst, address.getBase(), address.getOffset(), address.getExtendType(), address.isScaled() ? shiftAmt : 0);
+                break;
+            case PC_LITERAL:
+                super.adr(dst, address.getImmediateRaw());
+                break;
+            case BASE_REGISTER_ONLY:
+                movx(dst, address.getBase());
+                break;
+            default:
+                throw JVMCIError.shouldNotReachHere();
+        }
+    }
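+
+    // A worked example of the IMMEDIATE_SCALED case above (values chosen for illustration): for
+    // an 8-byte transfer (shiftAmt == 3) with raw immediate 0xFFF, the scaled immediate is
+    // 0x7FF8. It is split into lowerBits == 0xFF8 and higherBits == 0x7000, yielding
+    // add dst, base, #0xff8 followed by add dst, dst, #0x7000.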
+
+    public void movx(Register dst, Register src) {
+        mov(64, dst, src);
+    }
+
+    public void mov(int size, Register dst, Register src) {
+        if (dst.equals(src)) {
+            return;
+        }
+        if (dst.equals(sp) || src.equals(sp)) {
+            add(size, dst, src, 0);
+        } else {
+            or(size, dst, src, zr);
+        }
+    }
+
+    /**
+     * Generates a move 64-bit immediate code sequence. The immediate may later be updated by
+     * HotSpot.
+     *
+     * @param dst general purpose register. May not be null, stackpointer or zero-register.
+     * @param imm the 64-bit immediate to load.
+     */
+    public void forceMov(Register dst, long imm, boolean optimize) {
+        // We have to move all non-zero parts of the immediate in 16-bit chunks.
+        boolean firstMove = true;
+        for (int offset = 0; offset < 64; offset += 16) {
+            int chunk = (int) (imm >> offset) & NumUtil.getNbitNumberInt(16);
+            if (optimize && chunk == 0) {
+                continue;
+            }
+            if (firstMove) {
+                movz(64, dst, chunk, offset);
+                firstMove = false;
+            } else {
+                movk(64, dst, chunk, offset);
+            }
+        }
+        assert !firstMove;
+    }
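+
+    // A worked example (immediate value assumed for illustration): forceMov(dst,
+    // 0x1234_0000_5678L, true) emits movz dst, #0x5678, lsl #0 and movk dst, #0x1234, lsl #32;
+    // the zero chunks at offsets 16 and 48 are skipped. With optimize == false all four 16-bit
+    // chunks are emitted, so HotSpot can later patch the full fixed-length sequence.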
+
+    public void forceMov(Register dst, long imm) {
+        forceMov(dst, imm, /* optimize */false);
+    }
+
+    /**
+     * Generates a move 64-bit immediate code sequence. The immediate may later be updated by
+     * HotSpot.
+     *
+     * @param dst general purpose register. May not be null, stackpointer or zero-register.
+     */
+    public void forceMov(Register dst, int imm) {
+        forceMov(dst, imm & 0xFFFF_FFFFL);
+    }
+
+    /**
+     * Loads immediate into register.
+     *
+     * @param dst general purpose register. May not be null, zero-register or stackpointer.
+     * @param imm immediate loaded into register.
+     */
+    public void mov(Register dst, long imm) {
+        assert dst.getRegisterCategory().equals(CPU);
+        if (imm == 0L) {
+            movx(dst, zr);
+        } else if (LogicalImmediateTable.isRepresentable(true, imm) != LogicalImmediateTable.Representable.NO) {
+            or(64, dst, zr, imm);
+        } else if (imm >> 32 == -1L && (int) imm < 0 && LogicalImmediateTable.isRepresentable((int) imm) != LogicalImmediateTable.Representable.NO) {
+            // If the upper 32 bits are all ones, the sign bit of the lower 32 bits is set *and*
+            // the lower 32 bits are representable as a logical immediate, then we can materialize
+            // the lower 32 bits and sign-extend them. This covers immediates like ~1L with 2
+            // instructions.
+            mov(dst, (int) imm);
+            sxt(64, 32, dst, dst);
+        } else {
+            forceMov(dst, imm, /* optimize move */true);
+        }
+    }
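+
+    // A worked example (illustrative): mov(dst, ~1L) takes the sign-extension path above, since
+    // the upper 32 bits are all ones and 0xFFFF_FFFE is a valid logical immediate. It emits an
+    // orr materializing the lower 32 bits followed by an sbfm sign-extending them, i.e. 2
+    // instructions instead of a 4-instruction movz/movk sequence.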
+
+    /**
+     * Loads immediate into register.
+     *
+     * @param dst general purpose register. May not be null, zero-register or stackpointer.
+     * @param imm immediate loaded into register.
+     */
+    public void mov(Register dst, int imm) {
+        mov(dst, imm & 0xFFFF_FFFFL);
+    }
+
+    /**
+     * @return Number of instructions necessary to load immediate into register.
+     */
+    public static int nrInstructionsToMoveImmediate(long imm) {
+        if (imm == 0L || LogicalImmediateTable.isRepresentable(true, imm) != LogicalImmediateTable.Representable.NO) {
+            return 1;
+        }
+        if (imm >> 32 == -1L && (int) imm < 0 && LogicalImmediateTable.isRepresentable((int) imm) != LogicalImmediateTable.Representable.NO) {
+            // If the upper 32 bits are all ones, the sign bit of the lower 32 bits is set *and*
+            // the lower 32 bits are representable as a logical immediate, then we can materialize
+            // the lower 32 bits and sign-extend them. This covers immediates like ~1L with 2
+            // instructions.
+            return 2;
+        }
+        int nrInstructions = 0;
+        for (int offset = 0; offset < 64; offset += 16) {
+            int part = (int) (imm >> offset) & NumUtil.getNbitNumberInt(16);
+            if (part != 0) {
+                nrInstructions++;
+            }
+        }
+        return nrInstructions;
+    }
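+
+    // Illustrative values: nrInstructionsToMoveImmediate(0x00FF_0000_0000_0000L) == 1 (a single
+    // contiguous run of ones is a valid logical immediate), while
+    // nrInstructionsToMoveImmediate(0x1234_0000_5678L) == 2 (two non-zero 16-bit chunks, i.e.
+    // one movz plus one movk).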
+
+    /**
+     * Loads a srcSize value from address into rt, sign-extending it if necessary.
+     *
+     * @param targetSize size of target register in bits. Must be 32 or 64.
+     * @param srcSize size of memory read in bits. Must be 8, 16 or 32 and smaller than or equal
+     *            to targetSize.
+     * @param rt general purpose register. May not be null or stackpointer.
+     * @param address all addressing modes allowed. May not be null.
+     */
+    @Override
+    public void ldrs(int targetSize, int srcSize, Register rt, AArch64Address address) {
+        assert targetSize == 32 || targetSize == 64;
+        assert srcSize <= targetSize;
+        if (targetSize == srcSize) {
+            super.ldr(srcSize, rt, address);
+        } else {
+            super.ldrs(targetSize, srcSize, rt, address);
+        }
+    }
+
+    /**
+     * Conditional move. result = trueValue if condition else falseValue.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param result general purpose register. May not be null or the stackpointer.
+     * @param trueValue general purpose register. May not be null or the stackpointer.
+     * @param falseValue general purpose register. May not be null or the stackpointer.
+     * @param cond any condition flag. May not be null.
+     */
+    public void cmov(int size, Register result, Register trueValue, Register falseValue, ConditionFlag cond) {
+        super.csel(size, result, trueValue, falseValue, cond);
+    }
+
+    /**
+     * Conditional set. dst = 1 if condition else 0.
+     *
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param condition any condition. May not be null.
+     */
+    public void cset(Register dst, ConditionFlag condition) {
+        super.csinc(32, dst, zr, zr, condition.negate());
+    }
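+
+    // How the csinc above implements cset (illustrative): with both sources being zr and the
+    // condition negated, dst = zr == 0 when the negated condition holds (i.e. the original
+    // condition is false) and dst = zr + 1 == 1 otherwise.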
+
+    /**
+     * dst = src1 + src2.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     */
+    public void add(int size, Register dst, Register src1, Register src2) {
+        super.add(size, dst, src1, src2, ShiftType.LSL, 0);
+    }
+
+    /**
+     * dst = src1 + src2 and sets condition flags.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     */
+    public void adds(int size, Register dst, Register src1, Register src2) {
+        super.adds(size, dst, src1, src2, getNopExtendType(size), 0);
+    }
+
+    /**
+     * dst = src1 - src2 and sets condition flags.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     */
+    public void subs(int size, Register dst, Register src1, Register src2) {
+        super.subs(size, dst, src1, src2, getNopExtendType(size), 0);
+    }
+
+    /**
+     * Returns the ExtendType for the given size that corresponds to a no-op.
+     *
+     * I.e. when doing add X0, X1, X2, the actual instruction has the form add X0, X1, X2 UXTX.
+     *
+     * @param size register size. Has to be 32 or 64.
+     */
+    private static ExtendType getNopExtendType(int size) {
+        if (size == 64) {
+            return ExtendType.UXTX;
+        } else if (size == 32) {
+            return ExtendType.UXTW;
+        } else {
+            throw JVMCIError.shouldNotReachHere("No-op ");
+        }
+    }
+
+    /**
+     * dst = src1 - src2.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     */
+    public void sub(int size, Register dst, Register src1, Register src2) {
+        super.sub(size, dst, src1, src2, ShiftType.LSL, 0);
+    }
+
+    /**
+     * dst = src1 + shiftType(src2, shiftAmt & (size - 1)).
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     * @param shiftType any type but ROR.
+     * @param shiftAmt arbitrary shift amount.
+     */
+    @Override
+    public void add(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
+        int shift = clampShiftAmt(size, shiftAmt);
+        super.add(size, dst, src1, src2, shiftType, shift);
+    }
+
+    /**
+     * dst = src1 - shiftType(src2, shiftAmt & (size - 1)).
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     * @param shiftType any type but ROR.
+     * @param shiftAmt arbitrary shift amount.
+     */
+    @Override
+    public void sub(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
+        int shift = clampShiftAmt(size, shiftAmt);
+        super.sub(size, dst, src1, src2, shiftType, shift);
+    }
+
+    /**
+     * dst = -src1.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src general purpose register. May not be null or stackpointer.
+     */
+    public void neg(int size, Register dst, Register src) {
+        sub(size, dst, zr, src);
+    }
+
+    /**
+     * dst = src + immediate.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src general purpose register. May not be null or stackpointer.
+     * @param immediate arithmetic immediate.
+     */
+    @Override
+    public void add(int size, Register dst, Register src, int immediate) {
+        if (immediate < 0) {
+            sub(size, dst, src, -immediate);
+        } else if (!(dst.equals(src) && immediate == 0)) {
+            super.add(size, dst, src, immediate);
+        }
+    }
+
+    /**
+     * dst = src + aimm and sets condition flags.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src general purpose register. May not be null or zero-register.
+     * @param immediate arithmetic immediate.
+     */
+    @Override
+    public void adds(int size, Register dst, Register src, int immediate) {
+        if (immediate < 0) {
+            subs(size, dst, src, -immediate);
+        } else if (!(dst.equals(src) && immediate == 0)) {
+            super.adds(size, dst, src, immediate);
+        }
+    }
+
+    /**
+     * dst = src - immediate.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src general purpose register. May not be null or stackpointer.
+     * @param immediate arithmetic immediate.
+     */
+    @Override
+    public void sub(int size, Register dst, Register src, int immediate) {
+        if (immediate < 0) {
+            add(size, dst, src, -immediate);
+        } else if (!dst.equals(src) || immediate != 0) {
+            super.sub(size, dst, src, immediate);
+        }
+    }
+
+    /**
+     * dst = src - aimm and sets condition flags.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src general purpose register. May not be null or zero-register.
+     * @param immediate arithmetic immediate.
+     */
+    @Override
+    public void subs(int size, Register dst, Register src, int immediate) {
+        if (immediate < 0) {
+            adds(size, dst, src, -immediate);
+        } else if (!dst.equals(src) || immediate != 0) {
+            super.subs(size, dst, src, immediate);
+        }
+    }
+
+    /**
+     * dst = src1 * src2.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or the stackpointer.
+     * @param src1 general purpose register. May not be null or the stackpointer.
+     * @param src2 general purpose register. May not be null or the stackpointer.
+     */
+    public void mul(int size, Register dst, Register src1, Register src2) {
+        super.madd(size, dst, src1, src2, zr);
+    }
+
+    /**
+     * Unsigned multiply high. dst = (src1 * src2) >> size.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or the stackpointer.
+     * @param src1 general purpose register. May not be null or the stackpointer.
+     * @param src2 general purpose register. May not be null or the stackpointer.
+     */
+    public void umulh(int size, Register dst, Register src1, Register src2) {
+        assert size == 32 || size == 64;
+        if (size == 64) {
+            super.umulh(dst, src1, src2);
+        } else {
+            // xDst = wSrc1 * wSrc2
+            super.umaddl(dst, src1, src2, zr);
+            // xDst = xDst >> 32
+            lshr(64, dst, dst, 32);
+        }
+    }
+
+    /**
+     * Signed multiply high. dst = (src1 * src2) >> size.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or the stackpointer.
+     * @param src1 general purpose register. May not be null or the stackpointer.
+     * @param src2 general purpose register. May not be null or the stackpointer.
+     */
+    public void smulh(int size, Register dst, Register src1, Register src2) {
+        assert size == 32 || size == 64;
+        if (size == 64) {
+            super.smulh(dst, src1, src2);
+        } else {
+            // xDst = wSrc1 * wSrc2
+            super.smaddl(dst, src1, src2, zr);
+            // xDst = xDst >> 32
+            lshr(64, dst, dst, 32);
+        }
+    }
+
+    /**
+     * dst = src1 % src2. Signed.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or the stackpointer.
+     * @param n numerator. General purpose register. May not be null or the stackpointer.
+     * @param d denominator/divisor. General purpose register. May not be null or the stackpointer.
+     */
+    public void rem(int size, Register dst, Register n, Register d) {
+        // There is no irem or similar instruction. Instead we use the relation:
+        // n % d = n - Floor(n / d) * d if n * d >= 0
+        // n % d = n - Ceil(n / d) * d otherwise
+        // Which is equivalent to n - TruncatingDivision(n, d) * d
+        super.sdiv(size, dst, n, d);
+        super.msub(size, dst, dst, d, n);
+    }
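+
+    // A worked example (illustrative values): for n == 7, d == 3 the sdiv produces 2 and the
+    // msub computes 7 - 2 * 3 == 1; for n == -7, d == 3 the truncating division produces -2 and
+    // the result is -7 - (-2) * 3 == -1, matching the semantics of Java's % operator.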
+
+    /**
+     * dst = src1 % src2. Unsigned.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or the stackpointer.
+     * @param n numerator. General purpose register. May not be null or the stackpointer.
+     * @param d denominator/divisor. General purpose register. May not be null or the stackpointer.
+     */
+    public void urem(int size, Register dst, Register n, Register d) {
+        // There is no irem or similar instruction. Instead we use the relation:
+        // n % d = n - Floor(n / d) * d
+        // Which is equivalent to n - TruncatingDivision(n, d) * d
+        super.udiv(size, dst, n, d);
+        super.msub(size, dst, dst, d, n);
+    }
+
+    /**
+     * @return True if immediate can be used directly for arithmetic instructions (add/sub), false
+     *         otherwise.
+     */
+    public static boolean isArithmeticImmediate(long imm) {
+        // If we have a negative immediate we just use the opposite operator, i.e.
+        // x - (-5) == x + 5.
+        return NumUtil.isInt(Math.abs(imm)) && isAimm((int) Math.abs(imm));
+    }
+
+    /**
+     * @return True if immediate can be used directly with comparison instructions, false otherwise.
+     */
+    public static boolean isComparisonImmediate(long imm) {
+        return isArithmeticImmediate(imm);
+    }
+
+    /**
+     * @return True if immediate can be moved directly into a register, false otherwise.
+     */
+    public static boolean isMovableImmediate(long imm) {
+        // Moves allow a 16-bit immediate value that can be shifted by multiples of 16.
+        // Positions of first, respectively last set bit.
+        int start = Long.numberOfTrailingZeros(imm);
+        int end = 64 - Long.numberOfLeadingZeros(imm);
+        int length = end - start;
+        if (length > 16) {
+            return false;
+        }
+        // We can shift the necessary part of the immediate (i.e. everything between the first
+        // and last set bit) by as much as 16 - length to arrive at a valid shift amount.
+        int tolerance = 16 - length;
+        int prevMultiple = NumUtil.roundDown(start, 16);
+        int nextMultiple = NumUtil.roundUp(start, 16);
+        return start - prevMultiple <= tolerance || nextMultiple - start <= tolerance;
+    }
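+
+    // Illustrative values: 0xFF0000L is movable (the run of ones at bits 16..23 fits a single
+    // 16-bit aligned window, i.e. movz dst, #0xFF, lsl #16), whereas 0x0FFF_F000L is not,
+    // because its 16 set bits at positions 12..27 straddle a 16-bit boundary.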
+
+    /**
+     * dst = src << (shiftAmt & (size - 1)).
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null, stackpointer or zero-register.
+     * @param src general purpose register. May not be null, stackpointer or zero-register.
+     * @param shiftAmt amount by which src is shifted.
+     */
+    public void shl(int size, Register dst, Register src, long shiftAmt) {
+        int shift = clampShiftAmt(size, shiftAmt);
+        super.ubfm(size, dst, src, (size - shift) & (size - 1), size - 1 - shift);
+    }
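+
+    // Example of the encoding above (illustrative): shl(64, dst, src, 4) emits
+    // ubfm dst, src, #60, #59, which is the canonical UBFM form of lsl dst, src, #4.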
+
+    /**
+     * dst = src1 << (src2 & (size - 1)).
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src general purpose register. May not be null or stackpointer.
+     * @param shift general purpose register. May not be null or stackpointer.
+     */
+    public void shl(int size, Register dst, Register src, Register shift) {
+        super.lsl(size, dst, src, shift);
+    }
+
+    /**
+     * dst = src >>> (shiftAmt & (size - 1)).
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null, stackpointer or zero-register.
+     * @param src general purpose register. May not be null, stackpointer or zero-register.
+     * @param shiftAmt amount by which src is shifted.
+     */
+    public void lshr(int size, Register dst, Register src, long shiftAmt) {
+        int shift = clampShiftAmt(size, shiftAmt);
+        super.ubfm(size, dst, src, shift, size - 1);
+    }
+
+    /**
+     * dst = src1 >>> (src2 & (size - 1)).
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src general purpose register. May not be null or stackpointer.
+     * @param shift general purpose register. May not be null or stackpointer.
+     */
+    public void lshr(int size, Register dst, Register src, Register shift) {
+        super.lsr(size, dst, src, shift);
+    }
+
+    /**
+     * dst = src >> (shiftAmt & log2(size)).
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null, stackpointer or zero-register.
+     * @param src general purpose register. May not be null, stackpointer or zero-register.
+     * @param shiftAmt amount by which src is shifted.
+     */
+    public void ashr(int size, Register dst, Register src, long shiftAmt) {
+        int shift = clampShiftAmt(size, shiftAmt);
+        super.sbfm(size, dst, src, shift, size - 1);
+    }
+
+    /**
+     * dst = src1 >> (src2 & log2(size)).
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src general purpose register. May not be null or stackpointer.
+     * @param shift general purpose register. May not be null or stackpointer.
+     */
+    public void ashr(int size, Register dst, Register src, Register shift) {
+        super.asr(size, dst, src, shift);
+    }
+
+    /**
+     * Clamps shiftAmt into the range 0 <= shiftAmt < size, as mandated by the JLS.
+     *
+     * @param size size of operation.
+     * @param shiftAmt arbitrary shift amount.
+     * @return value between 0 and size - 1 inclusive that is equivalent to shiftAmt according to
+     *         JLS.
+     */
+    private static int clampShiftAmt(int size, long shiftAmt) {
+        return (int) (shiftAmt & (size - 1));
+    }
+
+    /**
+     * dst = src1 & src2.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     */
+    public void and(int size, Register dst, Register src1, Register src2) {
+        super.and(size, dst, src1, src2, ShiftType.LSL, 0);
+    }
+
+    /**
+     * dst = src1 ^ src2.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     */
+    public void eor(int size, Register dst, Register src1, Register src2) {
+        super.eor(size, dst, src1, src2, ShiftType.LSL, 0);
+    }
+
+    /**
+     * dst = src1 | src2.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src1 general purpose register. May not be null or stackpointer.
+     * @param src2 general purpose register. May not be null or stackpointer.
+     */
+    public void or(int size, Register dst, Register src1, Register src2) {
+        super.orr(size, dst, src1, src2, ShiftType.LSL, 0);
+    }
+
+    /**
+     * dst = src | bimm.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or zero-register.
+     * @param src general purpose register. May not be null or stack-pointer.
+     * @param bimm logical immediate. See {@link AArch64Assembler.LogicalImmediateTable} for exact
+     *            definition.
+     */
+    public void or(int size, Register dst, Register src, long bimm) {
+        super.orr(size, dst, src, bimm);
+    }
+
+    /**
+     * dst = ~src.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stackpointer.
+     * @param src general purpose register. May not be null or stackpointer.
+     */
+    public void not(int size, Register dst, Register src) {
+        super.orn(size, dst, zr, src, ShiftType.LSL, 0);
+    }
+
+    /**
+     * Sign-extend value from src into dst.
+     *
+     * @param destSize destination register size. Has to be 32 or 64.
+     * @param srcSize source register size. Must be 8, 16 or 32 and smaller than destSize.
+     * @param dst general purpose register. May not be null, stackpointer or zero-register.
+     * @param src general purpose register. May not be null, stackpointer or zero-register.
+     */
+    public void sxt(int destSize, int srcSize, Register dst, Register src) {
+        assert (destSize == 32 || destSize == 64) && srcSize < destSize;
+        assert srcSize == 8 || srcSize == 16 || srcSize == 32;
+        int[] srcSizeValues = {7, 15, 31};
+        super.sbfm(destSize, dst, src, 0, srcSizeValues[NumUtil.log2Ceil(srcSize / 8)]);
+    }
+
+    /**
+     * dst = src if condition else -src.
+     *
+     * @param size register size. Must be 32 or 64.
+     * @param dst general purpose register. May not be null or the stackpointer.
+     * @param src general purpose register. May not be null or the stackpointer.
+     * @param condition any condition except AV or NV. May not be null.
+     */
+    public void csneg(int size, Register dst, Register src, ConditionFlag condition) {
+        super.csneg(size, dst, src, src, condition.negate());
+    }
+
+    /**
+     * @return True if the immediate can be used directly for logical 64-bit instructions.
+     */
+    public static boolean isLogicalImmediate(long imm) {
+        return LogicalImmediateTable.isRepresentable(true, imm) != LogicalImmediateTable.Representable.NO;
+    }
+
+    /**
+     * @return True if the immediate can be used directly for logical 32-bit instructions.
+     */
+    public static boolean isLogicalImmediate(int imm) {
+        return LogicalImmediateTable.isRepresentable(imm) == LogicalImmediateTable.Representable.YES;
+    }
+
+    /* Float instructions */
+
+    /**
+     * Moves integer to float, float to integer, or float to float. Does not support integer to
+     * integer moves.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst either a floating-point or a general-purpose register. If a general-purpose
+     *            register, it may not be the stackpointer or the zero register. May not be null.
+     * @param src either a floating-point or a general-purpose register. If a general-purpose
+     *            register, it may not be the stackpointer. May not be null.
+     */
+    @Override
+    public void fmov(int size, Register dst, Register src) {
+        assert !(dst.getRegisterCategory().equals(CPU) && src.getRegisterCategory().equals(CPU)) : "src and dst cannot both be integer registers.";
+        if (dst.equals(src)) {
+            return;
+        }
+        if (dst.getRegisterCategory().equals(CPU)) {
+            super.fmovFpu2Cpu(size, dst, src);
+        } else if (src.getRegisterCategory().equals(CPU)) {
+            super.fmovCpu2Fpu(size, dst, src);
+        } else {
+            super.fmov(size, dst, src);
+        }
+    }
+
+    /**
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst floating point register. May not be null.
+     * @param imm immediate that is loaded into dst. If size is 32, only float immediates can be
+     *            loaded, i.e. {@code (float) imm == imm} must hold. In all cases
+     *            {@code isFloatImmediate} (for size 32) or {@code isDoubleImmediate} (for size
+     *            64) must be true.
+     */
+    @Override
+    public void fmov(int size, Register dst, double imm) {
+        if (imm == 0.0) {
+            assert Double.doubleToRawLongBits(imm) == 0L : "-0.0 is not a valid immediate.";
+            super.fmovCpu2Fpu(size, dst, zr);
+        } else {
+            super.fmov(size, dst, imm);
+        }
+    }
+
+    /**
+     *
+     * @return true if immediate can be loaded directly into floating-point register, false
+     *         otherwise.
+     */
+    public static boolean isDoubleImmediate(double imm) {
+        return Double.doubleToRawLongBits(imm) == 0L || AArch64Assembler.isDoubleImmediate(imm);
+    }
+
+    /**
+     *
+     * @return true if immediate can be loaded directly into floating-point register, false
+     *         otherwise.
+     */
+    public static boolean isFloatImmediate(float imm) {
+        return Float.floatToRawIntBits(imm) == 0 || AArch64Assembler.isFloatImmediate(imm);
+    }
+
+    /**
+     * Conditional move. dst = src1 if condition else src2.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param result floating point register. May not be null.
+     * @param trueValue floating point register. May not be null.
+     * @param falseValue floating point register. May not be null.
+     * @param condition every condition allowed. May not be null.
+     */
+    public void fcmov(int size, Register result, Register trueValue, Register falseValue, ConditionFlag condition) {
+        super.fcsel(size, result, trueValue, falseValue, condition);
+    }
+
+    /**
+     * dst = src1 % src2.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst floating-point register. May not be null.
+     * @param n numerator. Floating-point register. May not be null.
+     * @param d denominator. Floating-point register. May not be null.
+     */
+    public void frem(int size, Register dst, Register n, Register d) {
+        // There is no frem instruction, instead we compute the remainder using the relation:
+        // rem = n - Truncating(n / d) * d
+        super.fdiv(size, dst, n, d);
+        super.frintz(size, dst, dst);
+        super.fmsub(size, dst, dst, d, n);
+    }
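+
+    // A worked example (illustrative values): frem(64, dst, 7.5, 2.0) computes fdiv -> 3.75,
+    // frintz -> 3.0, fmsub -> 7.5 - 3.0 * 2.0 == 1.5, matching Java's 7.5 % 2.0.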
+
+    /* Branches */
+
+    /**
+     * Compares x and y and sets condition flags.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param x general purpose register. May not be null or stackpointer.
+     * @param y general purpose register. May not be null or stackpointer.
+     */
+    public void cmp(int size, Register x, Register y) {
+        super.subs(size, zr, x, y, ShiftType.LSL, 0);
+    }
+
+    /**
+     * Compares x to y and sets condition flags.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param x general purpose register. May not be null or stackpointer.
+     * @param y comparison immediate, {@link #isComparisonImmediate(long)} has to be true for it.
+     */
+    public void cmp(int size, Register x, int y) {
+        if (y < 0) {
+            super.adds(size, zr, x, -y);
+        } else {
+            super.subs(size, zr, x, y);
+        }
+    }
+
+    /**
+     * Sets condition flags according to result of x & y.
+     *
+     * @param size register size. Has to be 32 or 64.
+     * @param dst general purpose register. May not be null or stack-pointer.
+     * @param x general purpose register. May not be null or stackpointer.
+     * @param y general purpose register. May not be null or stackpointer.
+     */
+    public void ands(int size, Register dst, Register x, Register y) {
+        super.ands(size, dst, x, y, ShiftType.LSL, 0);
+    }
+
+    /**
+     * When patching up Labels we have to know what kind of code to generate.
+     */
+    public enum PatchLabelKind {
+        BRANCH_CONDITIONALLY(0x0),
+        BRANCH_UNCONDITIONALLY(0x1),
+        BRANCH_NONZERO(0x2),
+        BRANCH_ZERO(0x3),
+        JUMP_ADDRESS(0x4);
+
+        /**
+         * Offset by which the additional information for conditional branches, branch-zero and
+         * branch-nonzero has to be shifted.
+         */
+        public static final int INFORMATION_OFFSET = 5;
+
+        public final int encoding;
+
+        private PatchLabelKind(int encoding) {
+            this.encoding = encoding;
+        }
+
+        /**
+         * @return PatchLabelKind with given encoding.
+         */
+        private static PatchLabelKind fromEncoding(int encoding) {
+            return values()[encoding & NumUtil.getNbitNumberInt(INFORMATION_OFFSET)];
+        }
+
+    }
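+
+    // A sketch of the placeholder word emitted for unbound labels (derived from the emitters
+    // below): bits [4:0] hold the PatchLabelKind encoding; for BRANCH_ZERO/BRANCH_NONZERO,
+    // bit [5] holds the size flag (1 for 64 bit) and bits [10:6] the register encoding; for
+    // BRANCH_CONDITIONALLY, bits [8:5] hold the ConditionFlag encoding.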
+
+    /**
+     * Compare register and branch if non-zero.
+     *
+     * @param size register size in bits. Must be either 32 or 64.
+     * @param cmp general purpose register. May not be null, zero-register or stackpointer.
+     * @param label Can only handle 21-bit word-aligned offsets for now. May be unbound. Non null.
+     */
+    public void cbnz(int size, Register cmp, Label label) {
+        // TODO Handle case where offset is too large for a single jump instruction
+        if (label.isBound()) {
+            int offset = label.position() - position();
+            super.cbnz(size, cmp, offset);
+        } else {
+            label.addPatchAt(position());
+            int regEncoding = cmp.encoding << (PatchLabelKind.INFORMATION_OFFSET + 1);
+            int sizeEncoding = (size == 64 ? 1 : 0) << PatchLabelKind.INFORMATION_OFFSET;
+            // Encode condition flag so that we know how to patch the instruction later
+            emitInt(PatchLabelKind.BRANCH_NONZERO.encoding | regEncoding | sizeEncoding);
+        }
+    }
+
+    /**
+     * Compare register and branch if zero.
+     *
+     * @param size register size in bits. Must be either 32 or 64.
+     * @param cmp general purpose register. May not be null, zero-register or stackpointer.
+     * @param label Can only handle 21-bit word-aligned offsets for now. May be unbound. Non null.
+     */
+    public void cbz(int size, Register cmp, Label label) {
+        // TODO Handle case where offset is too large for a single jump instruction
+        if (label.isBound()) {
+            int offset = label.position() - position();
+            super.cbz(size, cmp, offset);
+        } else {
+            label.addPatchAt(position());
+            int regEncoding = cmp.encoding << (PatchLabelKind.INFORMATION_OFFSET + 1);
+            int sizeEncoding = (size == 64 ? 1 : 0) << PatchLabelKind.INFORMATION_OFFSET;
+            // Encode condition flag so that we know how to patch the instruction later
+            emitInt(PatchLabelKind.BRANCH_ZERO.encoding | regEncoding | sizeEncoding);
+        }
+    }
+
+    /**
+     * Branches to label if condition is true.
+     *
+     * @param condition any condition value allowed. Non null.
+     * @param label Can only handle 21-bit word-aligned offsets for now. May be unbound. Non null.
+     */
+    public void branchConditionally(ConditionFlag condition, Label label) {
+        // TODO Handle case where offset is too large for a single jump instruction
+        if (label.isBound()) {
+            int offset = label.position() - position();
+            super.b(condition, offset);
+        } else {
+            label.addPatchAt(position());
+            // Encode condition flag so that we know how to patch the instruction later
+            emitInt(PatchLabelKind.BRANCH_CONDITIONALLY.encoding | condition.encoding << PatchLabelKind.INFORMATION_OFFSET);
+        }
+    }
+
+    /**
+     * Branches if condition is true. Address of jump is patched up by HotSpot c++ code.
+     *
+     * @param condition any condition value allowed. Non null.
+     */
+    public void branchConditionally(ConditionFlag condition) {
+        // Correct offset is fixed up by HotSpot later.
+        super.b(condition, 0);
+    }
+
+    /**
+     * Jumps to label.
+     *
+     * @param label Can only handle signed 28-bit offsets. May be unbound. Non null.
+     */
+    @Override
+    public void jmp(Label label) {
+        // TODO Handle case where offset is too large for a single jump instruction
+        if (label.isBound()) {
+            int offset = label.position() - position();
+            super.b(offset);
+        } else {
+            label.addPatchAt(position());
+            emitInt(PatchLabelKind.BRANCH_UNCONDITIONALLY.encoding);
+        }
+    }
+
+    /**
+     * Jump to address in dest.
+     *
+     * @param dest General purpose register. May not be null, zero-register or stackpointer.
+     */
+    public void jmp(Register dest) {
+        super.br(dest);
+    }
+
+    /**
+     * Immediate jump instruction fixed up by HotSpot c++ code.
+     */
+    public void jmp() {
+        // Offset has to be fixed up by c++ code.
+        super.b(0);
+    }
+
+    /**
+     *
+     * @return true if immediate offset can be used in a single branch instruction.
+     */
+    public static boolean isBranchImmediateOffset(long imm) {
+        return NumUtil.isSignedNbit(28, imm);
+    }
+
+    /* system instructions */
+
+    /**
+     * Exception codes used when calling hlt instruction.
+     */
+    public enum AArch64ExceptionCode {
+        NO_SWITCH_TARGET(0x0),
+        BREAKPOINT(0x1);
+
+        public final int encoding;
+
+        private AArch64ExceptionCode(int encoding) {
+            this.encoding = encoding;
+        }
+    }
+
+    /**
+     * Halting-mode software breakpoint: enters halting-mode debug state if enabled, otherwise the
+     * instruction is treated as UNALLOCATED.
+     *
+     * @param exceptionCode exception code specifying why halt was called. Non null.
+     */
+    public void hlt(AArch64ExceptionCode exceptionCode) {
+        super.hlt(exceptionCode.encoding);
+    }
+
+    /**
+     * Monitor mode software breakpoint: exception routed to a debug monitor executing in a higher
+     * exception level.
+     *
+     * @param exceptionCode exception code specifying why break was called. Non null.
+     */
+    public void brk(AArch64ExceptionCode exceptionCode) {
+        super.brk(exceptionCode.encoding);
+    }
+
+    public void pause() {
+        throw JVMCIError.unimplemented();
+    }
+
+    /**
+     * Executes no-op instruction. No registers or flags are updated, except for PC.
+     */
+    public void nop() {
+        super.hint(SystemHint.NOP);
+    }
+
+    /**
+     * Same as {@link #nop()}.
+     */
+    @Override
+    public void ensureUniquePC() {
+        nop();
+    }
+
+    /**
+     * Aligns PC.
+     *
+     * @param modulus Has to be a positive multiple of 4.
+     */
+    @Override
+    public void align(int modulus) {
+        assert modulus > 0 && (modulus & 0x3) == 0 : "Modulus has to be a positive multiple of 4.";
+        if (position() % modulus == 0) {
+            return;
+        }
+        int offset = modulus - position() % modulus;
+        for (int i = 0; i < offset; i += 4) {
+            nop();
+        }
+    }
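+
+    // Example (illustrative): with position() == 4 and modulus == 16 the offset is 12, so three
+    // 4-byte nops are emitted to advance the PC to the next 16-byte boundary.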
+
+    /**
+     * Patches jump targets when label gets bound.
+     */
+    @Override
+    protected void patchJumpTarget(int branch, int jumpTarget) {
+        int instruction = getInt(branch);
+        int branchOffset = jumpTarget - branch;
+        PatchLabelKind type = PatchLabelKind.fromEncoding(instruction);
+        switch (type) {
+            case BRANCH_CONDITIONALLY:
+                ConditionFlag cf = ConditionFlag.fromEncoding(instruction >>> PatchLabelKind.INFORMATION_OFFSET);
+                super.b(cf, branchOffset, /* pos */branch);
+                break;
+            case BRANCH_UNCONDITIONALLY:
+                super.b(branchOffset, /* pos */branch);
+                break;
+            case JUMP_ADDRESS:
+                emitInt(jumpTarget, /* pos */branch);
+                break;
+            case BRANCH_NONZERO:
+            case BRANCH_ZERO:
+                int information = instruction >>> PatchLabelKind.INFORMATION_OFFSET;
+                int sizeEncoding = information & 1;
+                int regEncoding = information >>> 1;
+                Register reg = AArch64.cpuRegisters[regEncoding];
+                // 1 => 64; 0 => 32
+                int size = sizeEncoding * 32 + 32;
+                switch (type) {
+                    case BRANCH_NONZERO:
+                        super.cbnz(size, reg, branchOffset, /* pos */branch);
+                        break;
+                    case BRANCH_ZERO:
+                        super.cbz(size, reg, branchOffset, /* pos */branch);
+                        break;
+                }
+                break;
+            default:
+                throw JVMCIError.shouldNotReachHere();
+        }
+    }
+
+    /**
+     * Generates an address of the form {@code base + displacement}.
+     *
+     * Does not change the base register to fulfil this requirement. Fails if the displacement
+     * cannot be represented directly as an address.
+     *
+     * @param base general purpose register. May not be null or the zero register.
+     * @param displacement arbitrary displacement added to base.
+     * @return AArch64Address referencing memory at {@code base + displacement}.
+     */
+    @Override
+    public AArch64Address makeAddress(Register base, int displacement) {
+        return makeAddress(base, displacement, zr, /* signExtend */false, /* transferSize */0, zr, /* allowOverwrite */false);
+    }
+
+    @Override
+    public AbstractAddress getPlaceholder() {
+        return AArch64Address.PLACEHOLDER;
+    }
+}
--- a/graal/com.oracle.graal.asm/src/com/oracle/graal/asm/NumUtil.java	Wed Dec 23 14:36:50 2015 +0100
+++ b/graal/com.oracle.graal.asm/src/com/oracle/graal/asm/NumUtil.java	Thu Dec 24 11:43:35 2015 -1000
@@ -113,4 +113,74 @@
     public static long roundUp(long number, long mod) {
         return ((number + mod - 1L) / mod) * mod;
     }
+
+    public static int roundDown(int number, int mod) {
+        return number / mod * mod;
+    }
+
+    public static long roundDown(long number, long mod) {
+        return number / mod * mod;
+    }
+
+    public static int log2Ceil(int val) {
+        int x = 1;
+        int log2 = 0;
+        while (x < val) {
+            log2++;
+            x *= 2;
+        }
+        return log2;
+    }
+
+    public static boolean isUnsignedNbit(int n, int value) {
+        assert n > 0 && n < 32;
+        return 32 - Integer.numberOfLeadingZeros(value) <= n;
+    }
+
+    public static boolean isUnsignedNbit(int n, long value) {
+        assert n > 0 && n < 64;
+        return 64 - Long.numberOfLeadingZeros(value) <= n;
+    }
+
+    public static boolean isSignedNbit(int n, int value) {
+        assert n > 0 && n < 32;
+        int min = -(1 << (n - 1));
+        int max = (1 << (n - 1)) - 1;
+        return value >= min && value <= max;
+    }
+
+    public static boolean isSignedNbit(int n, long value) {
+        assert n > 0 && n < 64;
+        long min = -(1L << (n - 1));
+        long max = (1L << (n - 1)) - 1;
+        return value >= min && value <= max;
+    }
+
+    /**
+     *
+     * @param n Number of bits that should be set to 1. Must be between 0 and 32 (inclusive).
+     * @return A number with the lowest n bits set to 1.
+     */
+    public static int getNbitNumberInt(int n) {
+        assert n >= 0 && n <= 32 : "0 <= n <= 32; instead: " + n;
+        if (n < 32) {
+            return (1 << n) - 1;
+        } else {
+            return 0xFFFFFFFF;
+        }
+    }
+
+    /**
+     *
+     * @param n Number of bits that should be set to 1. Must be between 0 and 64 (inclusive).
+     * @return A number with the lowest n bits set to 1.
+     */
+    public static long getNbitNumberLong(int n) {
+        assert n >= 0 && n <= 64;
+        if (n < 64) {
+            return (1L << n) - 1;
+        } else {
+            return 0xFFFFFFFFFFFFFFFFL;
+        }
+    }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.compiler.aarch64/src/com/oracle/graal/compiler/aarch64/AArch64AddressLowering.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.graal.compiler.aarch64;
+
+import com.oracle.graal.nodes.ValueNode;
+import com.oracle.graal.nodes.memory.address.AddressNode;
+import com.oracle.graal.phases.common.AddressLoweringPhase.AddressLowering;
+
+import jdk.vm.ci.code.CodeCacheProvider;
+import jdk.vm.ci.common.JVMCIError;
+import jdk.vm.ci.meta.JavaConstant;
+
+public class AArch64AddressLowering extends AddressLowering {
+
+    private final CodeCacheProvider codeCache;
+
+    public AArch64AddressLowering(CodeCacheProvider codeCache) {
+        this.codeCache = codeCache;
+    }
+
+    @Override
+    public AddressNode lower(ValueNode address) {
+        return lower(address, null);
+    }
+
+    @Override
+    public AddressNode lower(ValueNode base, ValueNode offset) {
+        asImmediate(base);
+        throw JVMCIError.unimplemented();
+    }
+
+    private JavaConstant asImmediate(ValueNode value) {
+        JavaConstant c = value.asJavaConstant();
+        if (c != null && c.getJavaKind().isNumericInteger() && !codeCache.needsDataPatch(c)) {
+            return c;
+        } else {
+            return null;
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.compiler.aarch64/src/com/oracle/graal/compiler/aarch64/AArch64ArithmeticLIRGenerator.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,454 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.graal.compiler.aarch64;
+
+import static com.oracle.graal.lir.LIRValueUtil.asJavaConstant;
+import static com.oracle.graal.lir.LIRValueUtil.isJavaConstant;
+import static jdk.vm.ci.aarch64.AArch64.sp;
+import static jdk.vm.ci.aarch64.AArch64Kind.DWORD;
+import static jdk.vm.ci.aarch64.AArch64Kind.QWORD;
+
+import com.oracle.graal.asm.NumUtil;
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.compiler.common.calc.FloatConvert;
+import com.oracle.graal.lir.ConstantValue;
+import com.oracle.graal.lir.LIRFrameState;
+import com.oracle.graal.lir.Variable;
+import com.oracle.graal.lir.aarch64.AArch64AddressValue;
+import com.oracle.graal.lir.aarch64.AArch64ArithmeticLIRGeneratorTool;
+import com.oracle.graal.lir.aarch64.AArch64ArithmeticOp;
+import com.oracle.graal.lir.aarch64.AArch64BitManipulationOp;
+import com.oracle.graal.lir.aarch64.AArch64Move.LoadOp;
+import com.oracle.graal.lir.aarch64.AArch64Move.StoreConstantOp;
+import com.oracle.graal.lir.aarch64.AArch64Move.StoreOp;
+import com.oracle.graal.lir.aarch64.AArch64ReinterpretOp;
+import com.oracle.graal.lir.aarch64.AArch64SignExtendOp;
+import com.oracle.graal.lir.gen.ArithmeticLIRGenerator;
+
+import jdk.vm.ci.aarch64.AArch64Kind;
+import jdk.vm.ci.code.RegisterValue;
+import jdk.vm.ci.common.JVMCIError;
+import jdk.vm.ci.meta.AllocatableValue;
+import jdk.vm.ci.meta.JavaConstant;
+import jdk.vm.ci.meta.LIRKind;
+import jdk.vm.ci.meta.PlatformKind;
+import jdk.vm.ci.meta.Value;
+
+public class AArch64ArithmeticLIRGenerator extends ArithmeticLIRGenerator implements AArch64ArithmeticLIRGeneratorTool {
+
+    @Override
+    public AArch64LIRGenerator getLIRGen() {
+        return (AArch64LIRGenerator) super.getLIRGen();
+    }
+
+    @Override
+    protected boolean isNumericInteger(PlatformKind kind) {
+        return ((AArch64Kind) kind).isInteger();
+    }
+
+    @Override
+    protected Variable emitAdd(LIRKind resultKind, Value a, Value b, boolean setFlags) {
+        if (isNumericInteger(a.getPlatformKind())) {
+            AArch64ArithmeticOp op = setFlags ? AArch64ArithmeticOp.ADDS : AArch64ArithmeticOp.ADD;
+            return emitBinary(resultKind, op, true, a, b);
+        } else {
+            assert !setFlags : "Cannot set flags on floating point arithmetic";
+            return emitBinary(resultKind, AArch64ArithmeticOp.FADD, true, a, b);
+        }
+    }
+
+    @Override
+    protected Variable emitSub(LIRKind resultKind, Value a, Value b, boolean setFlags) {
+        if (isNumericInteger(a.getPlatformKind())) {
+            AArch64ArithmeticOp op = setFlags ? AArch64ArithmeticOp.SUBS : AArch64ArithmeticOp.SUB;
+            return emitBinary(resultKind, op, false, a, b);
+        } else {
+            assert !setFlags : "Cannot set flags on floating point arithmetic";
+            return emitBinary(resultKind, AArch64ArithmeticOp.FSUB, false, a, b);
+        }
+    }
+
+    @Override
+    public Value emitMul(Value a, Value b, boolean setFlags) {
+        // TODO (das) setFlags handling - should be handled higher up. Ask for ideas at mailing list
+        assert !setFlags : "Set flags on multiplication is not supported";
+        return emitBinary(LIRKind.combine(a, b), getOpCode(a, AArch64ArithmeticOp.MUL, AArch64ArithmeticOp.FMUL), true, a, b);
+    }
+
+    @Override
+    public Value emitMulHigh(Value a, Value b) {
+        assert isNumericInteger(a.getPlatformKind());
+        return emitBinary(LIRKind.combine(a, b), AArch64ArithmeticOp.SMULH, true, a, b);
+    }
+
+    @Override
+    public Value emitUMulHigh(Value a, Value b) {
+        assert isNumericInteger(a.getPlatformKind());
+        return emitBinary(LIRKind.combine(a, b), AArch64ArithmeticOp.UMULH, true, a, b);
+    }
+
+    @Override
+    public Value emitDiv(Value a, Value b, LIRFrameState state) {
+        return emitBinary(LIRKind.combine(a, b), getOpCode(a, AArch64ArithmeticOp.DIV, AArch64ArithmeticOp.FDIV), false, getLIRGen().asAllocatable(a), getLIRGen().asAllocatable(b));
+    }
+
+    @Override
+    public Value emitRem(Value a, Value b, LIRFrameState state) {
+        return emitBinary(LIRKind.combine(a, b), getOpCode(a, AArch64ArithmeticOp.REM, AArch64ArithmeticOp.FREM), false, getLIRGen().asAllocatable(a), getLIRGen().asAllocatable(b));
+    }
+
+    @Override
+    public Value emitUDiv(Value a, Value b, LIRFrameState state) {
+        assert isNumericInteger(a.getPlatformKind());
+        return emitBinary(LIRKind.combine(a, b), AArch64ArithmeticOp.UDIV, false, getLIRGen().asAllocatable(a), getLIRGen().asAllocatable(b));
+    }
+
+    @Override
+    public Value emitURem(Value a, Value b, LIRFrameState state) {
+        assert isNumericInteger(a.getPlatformKind());
+        return emitBinary(LIRKind.combine(a, b), AArch64ArithmeticOp.UREM, false, getLIRGen().asAllocatable(a), getLIRGen().asAllocatable(b));
+    }
+
+    @Override
+    public Value emitAnd(Value a, Value b) {
+        assert isNumericInteger(a.getPlatformKind());
+        return emitBinary(LIRKind.combine(a, b), AArch64ArithmeticOp.AND, true, a, b);
+    }
+
+    @Override
+    public Value emitOr(Value a, Value b) {
+        assert isNumericInteger(a.getPlatformKind());
+        return emitBinary(LIRKind.combine(a, b), AArch64ArithmeticOp.OR, true, a, b);
+    }
+
+    @Override
+    public Value emitXor(Value a, Value b) {
+        assert isNumericInteger(a.getPlatformKind());
+        return emitBinary(LIRKind.combine(a, b), AArch64ArithmeticOp.XOR, true, a, b);
+    }
+
+    @Override
+    public Value emitShl(Value a, Value b) {
+        assert isNumericInteger(a.getPlatformKind());
+        return emitBinary(LIRKind.combine(a, b), AArch64ArithmeticOp.SHL, false, a, b);
+    }
+
+    @Override
+    public Value emitShr(Value a, Value b) {
+        assert isNumericInteger(a.getPlatformKind());
+        return emitBinary(LIRKind.combine(a, b), AArch64ArithmeticOp.ASHR, false, a, b);
+    }
+
+    @Override
+    public Value emitUShr(Value a, Value b) {
+        assert isNumericInteger(a.getPlatformKind());
+        return emitBinary(LIRKind.combine(a, b), AArch64ArithmeticOp.LSHR, false, a, b);
+    }
+
+    @Override
+    public Value emitFloatConvert(FloatConvert op, Value inputVal) {
+        PlatformKind resultPlatformKind = getFloatConvertResultKind(op);
+        LIRKind resultLirKind = LIRKind.combine(inputVal).changeType(resultPlatformKind);
+        Variable result = getLIRGen().newVariable(resultLirKind);
+        getLIRGen().append(new AArch64FloatConvertOp(op, result, getLIRGen().asAllocatable(inputVal)));
+        return result;
+    }
+
+    private static PlatformKind getFloatConvertResultKind(FloatConvert op) {
+        switch (op) {
+            case F2I:
+            case D2I:
+                return AArch64Kind.DWORD;
+            case F2L:
+            case D2L:
+                return AArch64Kind.QWORD;
+            case I2F:
+            case L2F:
+            case D2F:
+                return AArch64Kind.SINGLE;
+            case I2D:
+            case L2D:
+            case F2D:
+                return AArch64Kind.DOUBLE;
+            default:
+                throw JVMCIError.shouldNotReachHere();
+        }
+    }
+
+    @Override
+    public Value emitReinterpret(LIRKind to, Value inputVal) {
+        LIRKind from = inputVal.getLIRKind();
+        if (to.equals(from)) {
+            return inputVal;
+        }
+        Variable result = getLIRGen().newVariable(to);
+        getLIRGen().append(new AArch64ReinterpretOp(result, getLIRGen().asAllocatable(inputVal)));
+        return result;
+    }
+
+    @Override
+    public Value emitNarrow(Value inputVal, int bits) {
+        assert inputVal.getPlatformKind() == AArch64Kind.QWORD && bits == 32 : "Can only convert from long to int";
+        LIRKind resultKind = getResultLirKind(bits, inputVal);
+        long mask = NumUtil.getNbitNumberLong(bits);
+        Value maskValue = new ConstantValue(resultKind, JavaConstant.forLong(mask));
+        return emitBinary(resultKind, AArch64ArithmeticOp.AND, true, inputVal, maskValue);
+    }
+
+    @Override
+    public Value emitZeroExtend(Value inputVal, int fromBits, int toBits) {
+        assert fromBits <= toBits && (toBits == 32 || toBits == 64);
+        assert isNumericInteger(inputVal.getPlatformKind());
+        if (fromBits == toBits) {
+            return inputVal;
+        }
+        LIRKind resultKind = getResultLirKind(toBits, inputVal);
+        long mask = NumUtil.getNbitNumberLong(fromBits);
+        Value maskValue = new ConstantValue(resultKind, JavaConstant.forLong(mask));
+        return emitBinary(resultKind, AArch64ArithmeticOp.AND, true, inputVal, maskValue);
+    }
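For reference, the narrow and zero-extend paths above both reduce to a plain AND with an all-ones mask of the source width. A minimal standalone sketch of that masking, assuming NumUtil.getNbitNumberLong returns a long with the low n bits set:

    // Standalone sketch of the masking used by emitNarrow/emitZeroExtend.
    public class ZeroExtendSketch {
        // Assumed behaviour of NumUtil.getNbitNumberLong: a long with the low n bits set.
        static long nBitMask(int n) {
            return n == 64 ? -1L : (1L << n) - 1;
        }

        static long zeroExtend(long value, int fromBits) {
            return value & nBitMask(fromBits); // clears every bit at position >= fromBits
        }

        public static void main(String[] args) {
            // Narrowing a long to 32 bits keeps only the low word.
            System.out.println(Long.toHexString(zeroExtend(0xCAFEBABE_DEADBEEFL, 32))); // deadbeef
            // Zero-extending an 8-bit value discards the sign bits.
            System.out.println(zeroExtend(-1L, 8)); // 255
        }
    }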
+
+    @Override
+    public Value emitSignExtend(Value inputVal, int fromBits, int toBits) {
+        assert fromBits <= toBits && (toBits == 32 || toBits == 64);
+        if (fromBits == toBits) {
+            return inputVal;
+        }
+        LIRKind resultKind = getResultLirKind(toBits, inputVal);
+        Variable result = getLIRGen().newVariable(resultKind);
+        getLIRGen().append(new AArch64SignExtendOp(result, getLIRGen().asAllocatable(inputVal)));
+        return result;
+    }
+
+    private static LIRKind getResultLirKind(int resultBitSize, Value... inputValues) {
+        if (resultBitSize == 64) {
+            return LIRKind.combine(inputValues).changeType(QWORD);
+        } else {
+            assert resultBitSize == 32;
+            return LIRKind.combine(inputValues).changeType(DWORD);
+        }
+    }
+
+    protected Variable emitBinary(LIRKind resultKind, AArch64ArithmeticOp op, boolean commutative, Value a, Value b) {
+        Variable result = getLIRGen().newVariable(resultKind);
+        if (isValidBinaryConstant(op, b)) {
+            emitBinaryConst(result, op, getLIRGen().asAllocatable(a), asJavaConstant(b));
+        } else if (commutative && isValidBinaryConstant(op, a)) {
+            emitBinaryConst(result, op, getLIRGen().asAllocatable(b), asJavaConstant(a));
+        } else {
+            emitBinaryVar(result, op, getLIRGen().asAllocatable(a), getLIRGen().asAllocatable(b));
+        }
+        return result;
+    }
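The dispatch in emitBinary tries the immediate form first and, for commutative operations, retries with the operands swapped before falling back to the register-register form. A hypothetical standalone rendering of that three-way choice (names are illustrative, not Graal API):

    // Sketch of emitBinary's operand-selection logic.
    public class BinaryDispatchSketch {
        enum Form { IMM_RIGHT, IMM_LEFT_SWAPPED, REG_REG }

        static Form select(boolean rightIsEncodableConst, boolean leftIsEncodableConst, boolean commutative) {
            if (rightIsEncodableConst) {
                return Form.IMM_RIGHT;        // op result, a, #b
            } else if (commutative && leftIsEncodableConst) {
                return Form.IMM_LEFT_SWAPPED; // op result, b, #a (valid since a op b == b op a)
            } else {
                return Form.REG_REG;          // both operands in registers
            }
        }

        public static void main(String[] args) {
            System.out.println(select(false, true, true));  // IMM_LEFT_SWAPPED, e.g. 5 + x
            System.out.println(select(false, true, false)); // REG_REG, e.g. 5 - x cannot swap
        }
    }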
+
+    private void emitBinaryVar(Variable result, AArch64ArithmeticOp op, AllocatableValue a, AllocatableValue b) {
+        AllocatableValue x = moveSp(a);
+        AllocatableValue y = moveSp(b);
+        switch (op) {
+            case FREM:
+            case REM:
+            case UREM:
+                getLIRGen().append(new AArch64ArithmeticOp.BinaryCompositeOp(op, result, x, y));
+                break;
+            default:
+                getLIRGen().append(new AArch64ArithmeticOp.BinaryOp(op, result, x, y));
+                break;
+        }
+    }
+
+    private void emitBinaryConst(Variable result, AArch64ArithmeticOp op, AllocatableValue a, JavaConstant b) {
+        AllocatableValue x = moveSp(a);
+        getLIRGen().append(new AArch64ArithmeticOp.BinaryConstOp(op, result, x, b));
+    }
+
+    private static boolean isValidBinaryConstant(AArch64ArithmeticOp op, Value val) {
+        if (!isJavaConstant(val)) {
+            return false;
+        }
+        JavaConstant constValue = asJavaConstant(val);
+        switch (op.category) {
+            case LOGICAL:
+                return isLogicalConstant(constValue);
+            case ARITHMETIC:
+                return isArithmeticConstant(constValue);
+            case SHIFT:
+                assert constValue.asLong() >= 0 && constValue.asLong() < val.getPlatformKind().getSizeInBytes() * Byte.SIZE;
+                return true;
+            case NONE:
+                return false;
+            default:
+                throw JVMCIError.shouldNotReachHere();
+        }
+    }
+
+    private static boolean isLogicalConstant(JavaConstant constValue) {
+        switch (constValue.getJavaKind()) {
+            case Int:
+                return AArch64MacroAssembler.isLogicalImmediate(constValue.asInt());
+            case Long:
+                return AArch64MacroAssembler.isLogicalImmediate(constValue.asLong());
+            default:
+                return false;
+        }
+    }
+
+    protected static boolean isArithmeticConstant(JavaConstant constValue) {
+        switch (constValue.getJavaKind()) {
+            case Int:
+            case Long:
+                return AArch64MacroAssembler.isArithmeticImmediate(constValue.asLong());
+            case Object:
+                return constValue.isNull();
+            default:
+                return false;
+        }
+    }
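For context: AArch64 add/sub instructions take a 12-bit unsigned immediate, optionally shifted left by 12. A sketch of what isArithmeticImmediate plausibly accepts; the actual macro-assembler predicate may differ in detail (e.g. whether negated immediates are also handled):

    // Sketch of an AArch64 add/sub immediate check: imm12, or imm12 << 12.
    public class ArithmeticImmediateSketch {
        static boolean isUnsigned12Bit(long v) {
            return v >= 0 && v < (1L << 12);
        }

        static boolean isArithmeticImmediate(long imm) {
            return isUnsigned12Bit(imm) || (isUnsigned12Bit(imm >>> 12) && (imm & 0xFFF) == 0);
        }

        public static void main(String[] args) {
            System.out.println(isArithmeticImmediate(4095)); // true: fits in imm12
            System.out.println(isArithmeticImmediate(4096)); // true: 1 << 12
            System.out.println(isArithmeticImmediate(4097)); // false: needs both halves
        }
    }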
+
+    @Override
+    public Value emitNegate(Value inputVal) {
+        return emitUnary(getOpCode(inputVal, AArch64ArithmeticOp.NEG, AArch64ArithmeticOp.FNEG), inputVal);
+    }
+
+    @Override
+    public Value emitNot(Value input) {
+        assert isNumericInteger(input.getPlatformKind());
+        return emitUnary(AArch64ArithmeticOp.NOT, input);
+    }
+
+    @Override
+    public Value emitMathAbs(Value input) {
+        return emitUnary(getOpCode(input, AArch64ArithmeticOp.ABS, AArch64ArithmeticOp.FABS), input);
+    }
+
+    @Override
+    public Value emitMathSqrt(Value input) {
+        assert input.getPlatformKind() == AArch64Kind.DOUBLE;
+        return emitUnary(AArch64ArithmeticOp.SQRT, input);
+    }
+
+    @Override
+    public Value emitBitScanForward(Value inputVal) {
+        return emitBitManipulation(AArch64BitManipulationOp.BitManipulationOpCode.BSF, inputVal);
+    }
+
+    @Override
+    public Value emitBitCount(Value operand) {
+        throw JVMCIError.unimplemented("AArch64 ISA does not offer way to implement this more efficiently " + "than a simple Java algorithm.");
+    }
+
+    @Override
+    public Value emitBitScanReverse(Value inputVal) {
+        // TODO (das) old implementation said to use emitCountLeadingZeros instead - need extra node
+        // for that though
+        return emitBitManipulation(AArch64BitManipulationOp.BitManipulationOpCode.BSR, inputVal);
+    }
+
+    private Variable emitBitManipulation(AArch64BitManipulationOp.BitManipulationOpCode op, Value inputVal) {
+        assert isNumericInteger(inputVal.getPlatformKind());
+        AllocatableValue input = getLIRGen().asAllocatable(inputVal);
+        Variable result = getLIRGen().newVariable(LIRKind.combine(input));
+        getLIRGen().append(new AArch64BitManipulationOp(op, result, input));
+        return result;
+    }
+
+    private Variable emitUnary(AArch64ArithmeticOp op, Value inputVal) {
+        AllocatableValue input = getLIRGen().asAllocatable(inputVal);
+        Variable result = getLIRGen().newVariable(LIRKind.combine(input));
+        getLIRGen().append(new AArch64ArithmeticOp.UnaryOp(op, result, input));
+        return result;
+    }
+
+    /**
+     * If val denotes the stack pointer, moves it to another location. This is necessary since most
+     * ops cannot handle the stack pointer as input or output.
+     */
+    private AllocatableValue moveSp(AllocatableValue val) {
+        if (val instanceof RegisterValue && ((RegisterValue) val).getRegister().equals(sp)) {
+            assert val.getPlatformKind() == AArch64Kind.QWORD : "Stack pointer must be long";
+            return getLIRGen().emitMove(val);
+        }
+        return val;
+    }
+
+    /**
+     * Returns the opcode depending on the platform kind of val.
+     */
+    private AArch64ArithmeticOp getOpCode(Value val, AArch64ArithmeticOp intOp, AArch64ArithmeticOp floatOp) {
+        return isNumericInteger(val.getPlatformKind()) ? intOp : floatOp;
+    }
+
+    @Override
+    public Variable emitLoad(LIRKind kind, Value address, LIRFrameState state) {
+        AArch64AddressValue loadAddress = getLIRGen().asAddressValue(address);
+        Variable result = getLIRGen().newVariable(getLIRGen().toRegisterKind(kind));
+        getLIRGen().append(new LoadOp((AArch64Kind) kind.getPlatformKind(), result, loadAddress, state));
+        return result;
+    }
+
+    @Override
+    public void emitStore(LIRKind lirKind, Value address, Value inputVal, LIRFrameState state) {
+        AArch64AddressValue storeAddress = getLIRGen().asAddressValue(address);
+        AArch64Kind kind = (AArch64Kind) lirKind.getPlatformKind();
+
+        if (isJavaConstant(inputVal) && kind.isInteger()) {
+            JavaConstant c = asJavaConstant(inputVal);
+            if (c.isDefaultForKind()) {
+                // We can load 0 directly into integer registers
+                getLIRGen().append(new StoreConstantOp(kind, storeAddress, c, state));
+                return;
+            }
+        }
+        AllocatableValue input = getLIRGen().asAllocatable(inputVal);
+        getLIRGen().append(new StoreOp(kind, storeAddress, input, state));
+    }
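The isDefaultForKind check above matters because AArch64 can store straight from the zero register (wzr/xzr), so an all-zero integer constant needs no scratch register. A sketch of that decision, with the emitted instruction shapes shown as illustrative strings only:

    // Sketch of the store-path decision: an all-zero integer constant stores
    // straight from wzr/xzr; anything else is materialized into a register first.
    public class StoreConstantSketch {
        static String storeForm(boolean integerKind, boolean isConstant, boolean isDefaultForKind) {
            if (isConstant && integerKind && isDefaultForKind) {
                return "str wzr/xzr, [addr]";         // no extra register needed
            }
            return "mov reg, value; str reg, [addr]"; // materialize, then store
        }

        public static void main(String[] args) {
            System.out.println(storeForm(true, true, true));  // zero-register store
            System.out.println(storeForm(true, true, false)); // needs a scratch move
        }
    }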
+
+    public Value emitMathLog(Value input, boolean base10) {
+        throw JVMCIError.unimplemented();
+    }
+
+    public Value emitMathCos(Value input) {
+        throw JVMCIError.unimplemented();
+    }
+
+    public Value emitMathSin(Value input) {
+        throw JVMCIError.unimplemented();
+    }
+
+    public Value emitMathTan(Value input) {
+        throw JVMCIError.unimplemented();
+    }
+
+    public Value emitCountLeadingZeros(Value value) {
+        throw JVMCIError.unimplemented();
+    }
+
+    public Value emitCountTrailingZeros(Value value) {
+        throw JVMCIError.unimplemented();
+    }
+
+    public void emitCompareOp(AArch64Kind cmpKind, Variable left, Value right) {
+        throw JVMCIError.unimplemented();
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.compiler.aarch64/src/com/oracle/graal/compiler/aarch64/AArch64FloatConvertOp.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.graal.compiler.aarch64;
+
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.compiler.common.calc.FloatConvert;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.aarch64.AArch64LIRInstruction;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.common.JVMCIError;
+import jdk.vm.ci.meta.AllocatableValue;
+
+public final class AArch64FloatConvertOp extends AArch64LIRInstruction {
+    private static final LIRInstructionClass<AArch64FloatConvertOp> TYPE = LIRInstructionClass.create(AArch64FloatConvertOp.class);
+
+    private final FloatConvert op;
+    @Def protected AllocatableValue resultValue;
+    @Use protected AllocatableValue inputValue;
+
+    protected AArch64FloatConvertOp(FloatConvert op, AllocatableValue resultValue, AllocatableValue inputValue) {
+        super(TYPE);
+        this.op = op;
+        this.resultValue = resultValue;
+        this.inputValue = inputValue;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+        int fromSize = inputValue.getPlatformKind().getSizeInBytes() * Byte.SIZE;
+        int toSize = resultValue.getPlatformKind().getSizeInBytes() * Byte.SIZE;
+
+        Register result = asRegister(resultValue);
+        Register input = asRegister(inputValue);
+        switch (op) {
+            case F2I:
+            case D2I:
+            case F2L:
+            case D2L:
+                masm.fcvtzs(toSize, fromSize, result, input);
+                break;
+            case I2F:
+            case I2D:
+            case L2F:
+            case L2D:
+                masm.scvtf(toSize, fromSize, result, input);
+                break;
+            case D2F:
+            case F2D:
+                masm.fcvt(fromSize, result, input);
+                break;
+            default:
+                throw JVMCIError.shouldNotReachHere();
+        }
+    }
+
+}
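fcvtzs rounds toward zero and saturates, which is exactly the Java semantics of casting float/double to int/long (with NaN mapping to 0). The following standalone snippet shows the behaviour the F2I/D2I cases must reproduce:

    // Java float->int cast semantics that fcvtzs must match:
    // truncation toward zero, saturation at the int range, NaN -> 0.
    public class FloatToIntSemantics {
        public static void main(String[] args) {
            System.out.println((int) 1.9f);                    // 1 (truncates toward zero)
            System.out.println((int) -1.9f);                   // -1
            System.out.println((int) 1e20f);                   // 2147483647 (saturates)
            System.out.println((int) Float.NEGATIVE_INFINITY); // -2147483648
            System.out.println((int) Float.NaN);               // 0
        }
    }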
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.compiler.aarch64/src/com/oracle/graal/compiler/aarch64/AArch64LIRGenerator.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,538 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.compiler.aarch64;
+
+import static com.oracle.graal.lir.LIRValueUtil.asJavaConstant;
+import static com.oracle.graal.lir.LIRValueUtil.isJavaConstant;
+import static com.oracle.graal.lir.LIRValueUtil.isStackSlotValue;
+import static jdk.vm.ci.code.ValueUtil.asAllocatableValue;
+import static jdk.vm.ci.code.ValueUtil.isStackSlot;
+
+import com.oracle.graal.asm.NumUtil;
+import com.oracle.graal.asm.aarch64.AArch64Address;
+import com.oracle.graal.asm.aarch64.AArch64Address.AddressingMode;
+import com.oracle.graal.asm.aarch64.AArch64Assembler;
+import com.oracle.graal.asm.aarch64.AArch64Assembler.ConditionFlag;
+import com.oracle.graal.compiler.common.calc.Condition;
+import com.oracle.graal.compiler.common.spi.ForeignCallLinkage;
+import com.oracle.graal.compiler.common.spi.LIRKindTool;
+import com.oracle.graal.lir.ConstantValue;
+import com.oracle.graal.lir.LIRFrameState;
+import com.oracle.graal.lir.LIRValueUtil;
+import com.oracle.graal.lir.LabelRef;
+import com.oracle.graal.lir.StandardOp;
+import com.oracle.graal.lir.SwitchStrategy;
+import com.oracle.graal.lir.Variable;
+import com.oracle.graal.lir.aarch64.AArch64AddressValue;
+import com.oracle.graal.lir.aarch64.AArch64ArithmeticOp;
+import com.oracle.graal.lir.aarch64.AArch64Call;
+import com.oracle.graal.lir.aarch64.AArch64Compare;
+import com.oracle.graal.lir.aarch64.AArch64ControlFlow;
+import com.oracle.graal.lir.aarch64.AArch64ControlFlow.BranchOp;
+import com.oracle.graal.lir.aarch64.AArch64ControlFlow.CondMoveOp;
+import com.oracle.graal.lir.aarch64.AArch64LIRInstruction;
+import com.oracle.graal.lir.aarch64.AArch64Move;
+import com.oracle.graal.lir.aarch64.AArch64Move.CompareAndSwap;
+import com.oracle.graal.lir.aarch64.AArch64Move.MembarOp;
+import com.oracle.graal.lir.aarch64.AArch64PauseOp;
+import com.oracle.graal.lir.gen.LIRGenerationResult;
+import com.oracle.graal.lir.gen.LIRGenerator;
+import com.oracle.graal.phases.util.Providers;
+
+import jdk.vm.ci.aarch64.AArch64Kind;
+import jdk.vm.ci.code.CallingConvention;
+import jdk.vm.ci.code.RegisterValue;
+import jdk.vm.ci.common.JVMCIError;
+import jdk.vm.ci.meta.AllocatableValue;
+import jdk.vm.ci.meta.Constant;
+import jdk.vm.ci.meta.JavaConstant;
+import jdk.vm.ci.meta.JavaKind;
+import jdk.vm.ci.meta.LIRKind;
+import jdk.vm.ci.meta.PlatformKind;
+import jdk.vm.ci.meta.Value;
+
+public abstract class AArch64LIRGenerator extends LIRGenerator {
+
+    @SuppressWarnings("unused") private final ConstantTableBaseProvider constantTableBaseProvider;
+
+    public static final class ConstantTableBaseProvider {
+        // private Variable constantTableBase;
+        @SuppressWarnings("unused") private boolean useConstantTableBase = false;
+
+        public Variable getConstantTableBase() {
+            useConstantTableBase = true;
+            // return constantTableBase;
+            return null;
+        }
+    }
+
+    public AArch64LIRGenerator(LIRKindTool lirKindTool, AArch64ArithmeticLIRGenerator arithmeticLIRGen, MoveFactory moveFactory, Providers providers, CallingConvention cc,
+                    LIRGenerationResult lirGenRes, ConstantTableBaseProvider constantTableBaseProvider) {
+        super(lirKindTool, arithmeticLIRGen, moveFactory, providers, cc, lirGenRes);
+        this.constantTableBaseProvider = constantTableBaseProvider;
+    }
+
+    /**
+     * Checks whether the supplied constant can be used without loading it into a register for store
+     * operations, i.e., on the right hand side of a memory access.
+     *
+     * @param c The constant to check.
+     * @return True if the constant can be used directly, false if the constant needs to be in a
+     *         register.
+     */
+    protected static final boolean canStoreConstant(JavaConstant c) {
+        // Our own code never calls this since we can't make a definite statement about whether or
+        // not we can inline a constant without knowing what kind of operation we execute. Let's be
+        // optimistic here and fix up mistakes later.
+        return true;
+    }
+
+    /**
+     * General AArch64 instructions can only operate on 32- and 64-bit registers; sub-word values
+     * appear only in loads and stores, so smaller kinds are widened to 32 bit here.
+     */
+    @Override
+    public LIRKind toRegisterKind(LIRKind kind) {
+        switch ((AArch64Kind) kind.getPlatformKind()) {
+            case BYTE:
+            case WORD:
+                return kind.changeType(AArch64Kind.DWORD);
+            default:
+                return kind;
+        }
+    }
+
+    @Override
+    public void emitNullCheck(Value address, LIRFrameState state) {
+        append(new AArch64Move.NullCheckOp(asAddressValue(address), state));
+    }
+
+    @Override
+    public Variable emitAddress(AllocatableValue stackslot) {
+        Variable result = newVariable(LIRKind.value(target().arch.getWordKind()));
+        append(new AArch64Move.StackLoadAddressOp(result, stackslot));
+        return result;
+    }
+
+    public AArch64AddressValue asAddressValue(Value address) {
+        if (address instanceof AArch64AddressValue) {
+            return (AArch64AddressValue) address;
+        } else {
+            return new AArch64AddressValue(address.getLIRKind(), asAllocatable(address), Value.ILLEGAL, 0, false, AddressingMode.BASE_REGISTER_ONLY);
+        }
+    }
+
+    @Override
+    public void emitMove(AllocatableValue dst, Value src) {
+        append(createMove(dst, src));
+    }
+
+    @Override
+    public void emitMoveConstant(AllocatableValue dst, Constant src) {
+        append(createMoveConstant(dst, (JavaConstant) src));
+    }
+
+    /**
+     * Moves src to dst.
+     *
+     * If src is an AArch64AddressValue, the address it computes is loaded into dst, not the value
+     * it points to. All valid combinations of src and dst values are supported, except StackSlot
+     * to StackSlot.
+     *
+     * @param dst Value stored on stack or in register. Non null.
+     * @param src Arbitrary input value. Non null.
+     * @return AArch64LIRInstruction representing the move. Non null.
+     */
+    protected AArch64LIRInstruction createMove(AllocatableValue dst, Value src) {
+        if (src instanceof AArch64AddressValue) {
+            return new AArch64Move.LoadAddressOp(dst, (AArch64AddressValue) src);
+        } else if (isStackSlot(dst)) {
+            return new AArch64Move.MoveToStackOp(dst, asAllocatable(src));
+        } else {
+            return new AArch64Move.MoveToRegOp(dst, asAllocatable(src));
+        }
+    }
+
+    protected AArch64LIRInstruction createMoveConstant(AllocatableValue dst, JavaConstant src) {
+        if (isStackSlotValue(dst)) {
+            // constant -> stack is not possible so we need a scratch register in between.
+            Variable tmp = newVariable(dst.getLIRKind());
+            append(new AArch64Move.MoveFromConstOp(tmp, src));
+            return new AArch64Move.MoveToStackOp(dst, tmp);
+        } else {
+            return new AArch64Move.MoveFromConstOp(dst, src);
+        }
+    }
+
+    @Override
+    public Variable emitCompareAndSwap(Value address, Value expectedValue, Value newValue, Value trueValue, Value falseValue) {
+        AArch64AddressValue addressValue = convertToBaseRegisterOnlyAddress(asAddressValue(address));
+        Variable result = newVariable(trueValue.getLIRKind());
+        Variable scratch = newVariable(LIRKind.value(AArch64Kind.WORD));
+        append(new CompareAndSwap(result, loadNonCompareConst(expectedValue), loadReg(newValue), addressValue, scratch));
+        return result;
+    }
+
+    /**
+     * Converts an arbitrary address to a BASE_REGISTER_ONLY form. This is useful since several
+     * instructions (e.g. load-acquire/store-release) are limited to this addressing mode.
+     *
+     * @return An address using the
+     *         {@link com.oracle.graal.asm.aarch64.AArch64Address.AddressingMode#BASE_REGISTER_ONLY}
+     *         addressing mode, pointing to the same location as address.
+     */
+    private AArch64AddressValue convertToBaseRegisterOnlyAddress(AArch64AddressValue address) {
+        AllocatableValue base = address.getBase();
+        AllocatableValue index = address.getOffset();
+        int immediate = address.getImmediate();
+        int shiftAmt;
+        if (address.isScaled()) {
+            // Scaled immediates are in units of the access size, i.e. shift by log2(size in bytes).
+            shiftAmt = NumUtil.log2Ceil(address.getPlatformKind().getSizeInBytes());
+        } else {
+            shiftAmt = 0;
+        }
+        switch (address.getAddressingMode()) {
+            case IMMEDIATE_SCALED:
+            case IMMEDIATE_UNSCALED:
+                JavaConstant constVal = JavaConstant.forInt(immediate << shiftAmt);
+                ConstantValue constValue = new ConstantValue(LIRKind.value(AArch64Kind.WORD), constVal);
+                base = asAllocatable(getArithmetic().emitAdd(base, constValue, false));
+                break;
+            case REGISTER_OFFSET:
+                append(new AArch64ArithmeticOp.ExtendedAddShiftOp(base, base, index, AArch64Assembler.ExtendType.UXTX, shiftAmt));
+                break;
+            case EXTENDED_REGISTER_OFFSET:
+                append(new AArch64ArithmeticOp.ExtendedAddShiftOp(base, base, index, AArch64Assembler.ExtendType.SXTW, shiftAmt));
+                break;
+            case BASE_REGISTER_ONLY:
+                // nothing to do.
+                break;
+            default:
+                throw JVMCIError.shouldNotReachHere();
+        }
+        return new AArch64AddressValue(address.getLIRKind(), base, Value.ILLEGAL, 0, false, AArch64Address.AddressingMode.BASE_REGISTER_ONLY);
+    }
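To see what the conversion computes, the effective byte address for each handled mode can be written out directly. This sketch mirrors the arithmetic above; the two register-offset cases fold the shifted, extended index into the base:

    // Effective byte address computed by each addressing mode folded above.
    public class AddressFlattenSketch {
        static long immediateScaled(long base, int imm, int shiftAmt) {
            return base + ((long) imm << shiftAmt);   // imm is stored in units of the access size
        }

        static long registerOffset(long base, long index, int shiftAmt) {
            return base + (index << shiftAmt);        // UXTX: 64-bit index used as-is
        }

        static long extendedRegisterOffset(long base, int index, int shiftAmt) {
            return base + ((long) index << shiftAmt); // SXTW: 32-bit index sign-extended
        }

        public static void main(String[] args) {
            // An IMMEDIATE_SCALED offset of 3 on an 8-byte access lands 24 bytes past base.
            System.out.println(Long.toHexString(immediateScaled(0x1000, 3, 3))); // 1018
        }
    }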
+
+    @Override
+    public void emitData(AllocatableValue dst, byte[] data) {
+        append(new AArch64Move.LoadDataOp(dst, data));
+    }
+
+    @Override
+    public void emitMembar(int barriers) {
+        int necessaryBarriers = target().arch.requiredBarriers(barriers);
+        if (target().isMP && necessaryBarriers != 0) {
+            append(new MembarOp(necessaryBarriers));
+        }
+    }
+
+    @Override
+    public void emitJump(LabelRef label) {
+        assert label != null;
+        append(new StandardOp.JumpOp(label));
+    }
+
+    @Override
+    public void emitOverflowCheckBranch(LabelRef overflow, LabelRef noOverflow, LIRKind cmpKind, double overflowProbability) {
+        append(new AArch64ControlFlow.BranchOp(AArch64Assembler.ConditionFlag.VS, overflow, noOverflow, overflowProbability));
+    }
+
+    /**
+     * Branches to label if (left & right) == 0. If negated is true branchse on non-zero instead.
+     *
+     * @param left Integer kind. Non null.
+     * @param right Integer kind. Non null.
+     * @param trueDestination destination if left & right == 0. Non null.
+     * @param falseDestination destination if left & right != 0. Non null
+     * @param trueSuccessorProbability hoistoric probability that comparison is true
+     */
+    @Override
+    public void emitIntegerTestBranch(Value left, Value right, LabelRef trueDestination, LabelRef falseDestination, double trueSuccessorProbability) {
+        assert ((AArch64Kind) left.getPlatformKind()).isInteger() && left.getPlatformKind() == right.getPlatformKind();
+        ((AArch64ArithmeticLIRGenerator) getArithmetic()).emitBinary(LIRKind.combine(left, right), AArch64ArithmeticOp.ANDS, true, left, right);
+        append(new AArch64ControlFlow.BranchOp(AArch64Assembler.ConditionFlag.EQ, trueDestination, falseDestination, trueSuccessorProbability));
+    }
+
+    /**
+     * Conditionally moves trueValue into a new variable if the comparison given by cond (and, for
+     * floating-point comparisons, unorderedIsTrue) holds, else falseValue.
+     *
+     * @param left Arbitrary value. Has to have same type as right. Non null.
+     * @param right Arbitrary value. Has to have same type as left. Non null.
+     * @param cond condition that decides whether to move trueValue or falseValue into result. Non
+     *            null.
+     * @param unorderedIsTrue defines whether floating-point comparisons consider unordered true or
+     *            not. Ignored for integer comparisons.
+     * @param trueValue arbitrary value same type as falseValue. Non null.
+     * @param falseValue arbitrary value same type as trueValue. Non null.
+     * @return value containing trueValue if the condition holds, else falseValue. Non null.
+     */
+    @Override
+    public Variable emitConditionalMove(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, Value trueValue, Value falseValue) {
+        assert cmpKind == left.getPlatformKind() && cmpKind == right.getPlatformKind();
+        boolean mirrored = emitCompare(left, right, cond, unorderedIsTrue);
+        Condition finalCondition = mirrored ? cond.mirror() : cond;
+        boolean finalUnorderedIsTrue = mirrored ? !unorderedIsTrue : unorderedIsTrue;
+        ConditionFlag cmpCondition = toConditionFlag(((AArch64Kind) cmpKind).isInteger(), finalCondition, finalUnorderedIsTrue);
+        Variable result = newVariable(trueValue.getLIRKind());
+        append(new CondMoveOp(result, cmpCondition, loadReg(trueValue), loadReg(falseValue)));
+        return result;
+    }
+
+    @Override
+    public void emitCompareBranch(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, LabelRef trueDestination, LabelRef falseDestination,
+                    double trueDestinationProbability) {
+        assert cmpKind == left.getPlatformKind() && cmpKind == right.getPlatformKind();
+        boolean mirrored = emitCompare(left, right, cond, unorderedIsTrue);
+        Condition finalCondition = mirrored ? cond.mirror() : cond;
+        boolean finalUnorderedIsTrue = mirrored ? !unorderedIsTrue : unorderedIsTrue;
+        ConditionFlag cmpCondition = toConditionFlag(((AArch64Kind) cmpKind).isInteger(), finalCondition, finalUnorderedIsTrue);
+        append(new BranchOp(cmpCondition, trueDestination, falseDestination, trueDestinationProbability));
+    }
+
+    private static AArch64Assembler.ConditionFlag toConditionFlag(boolean isInt, Condition cond, boolean unorderedIsTrue) {
+        return isInt ? toIntConditionFlag(cond) : toFloatConditionFlag(cond, unorderedIsTrue);
+    }
+
+    /**
+     * Takes a Condition and an unorderedIsTrue flag and returns the corresponding AArch64-specific
+     * ConditionFlag. Note: This is only correct if the emitCompare code for floats has correctly
+     * handled the case of 'EQ && unorderedIsTrue', respectively 'NE && !unorderedIsTrue'!
+     */
+    private static AArch64Assembler.ConditionFlag toFloatConditionFlag(Condition cond, boolean unorderedIsTrue) {
+        switch (cond) {
+            case LT:
+                return unorderedIsTrue ? AArch64Assembler.ConditionFlag.LT : AArch64Assembler.ConditionFlag.LO;
+            case LE:
+                return unorderedIsTrue ? AArch64Assembler.ConditionFlag.LE : AArch64Assembler.ConditionFlag.LS;
+            case GE:
+                return unorderedIsTrue ? AArch64Assembler.ConditionFlag.PL : AArch64Assembler.ConditionFlag.GE;
+            case GT:
+                return unorderedIsTrue ? AArch64Assembler.ConditionFlag.HI : AArch64Assembler.ConditionFlag.GT;
+            case EQ:
+                return AArch64Assembler.ConditionFlag.EQ;
+            case NE:
+                return AArch64Assembler.ConditionFlag.NE;
+            default:
+                throw JVMCIError.shouldNotReachHere();
+        }
+    }
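The unordered cases can be checked against the NZCV flags fcmp produces for an unordered comparison, which per the ARM ARM are N=0, Z=0, C=1, V=1. A small standalone simulation confirming that every flag chosen for !unorderedIsTrue evaluates false on that state, while every unorderedIsTrue choice evaluates true:

    // fcmp with at least one NaN sets NZCV = 0011 (N=0, Z=0, C=1, V=1).
    // Evaluate each ConditionFlag used above on exactly that flag state.
    public class UnorderedFlagCheck {
        static final boolean N = false, Z = false, C = true, V = true;

        public static void main(String[] args) {
            boolean lt = N != V;         // true  -> picked when unorderedIsTrue for LT
            boolean lo = !C;             // false -> picked when !unorderedIsTrue for LT
            boolean le = Z || (N != V);  // true  -> unorderedIsTrue for LE
            boolean ls = !C || Z;        // false -> !unorderedIsTrue for LE
            boolean pl = !N;             // true  -> unorderedIsTrue for GE
            boolean ge = N == V;         // false -> !unorderedIsTrue for GE
            boolean hi = C && !Z;        // true  -> unorderedIsTrue for GT
            boolean gt = !Z && (N == V); // false -> !unorderedIsTrue for GT
            System.out.println(lt && le && pl && hi); // true
            System.out.println(lo || ls || ge || gt); // false
        }
    }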
+
+    /**
+     * Takes a Condition and returns the corresponding AArch64-specific ConditionFlag.
+     */
+    private static AArch64Assembler.ConditionFlag toIntConditionFlag(Condition cond) {
+        switch (cond) {
+            case EQ:
+                return AArch64Assembler.ConditionFlag.EQ;
+            case NE:
+                return AArch64Assembler.ConditionFlag.NE;
+            case LT:
+                return AArch64Assembler.ConditionFlag.LT;
+            case LE:
+                return AArch64Assembler.ConditionFlag.LE;
+            case GT:
+                return AArch64Assembler.ConditionFlag.GT;
+            case GE:
+                return AArch64Assembler.ConditionFlag.GE;
+            case AE:
+                return AArch64Assembler.ConditionFlag.HS;
+            case BE:
+                return AArch64Assembler.ConditionFlag.LS;
+            case AT:
+                return AArch64Assembler.ConditionFlag.HI;
+            case BT:
+                return AArch64Assembler.ConditionFlag.LO;
+            default:
+                throw JVMCIError.shouldNotReachHere();
+        }
+    }
+
+    /**
+     * Emits a gpCompare instruction, possibly reordering the parameters.
+     *
+     * @param a the left operand of the comparison. Has to have same type as b. Non null.
+     * @param b the right operand of the comparison. Has to have same type as a. Non null.
+     * @return true if mirrored (i.e. "b cmp a" instead of "a cmp b" was done).
+     */
+    private boolean emitCompare(Value a, Value b, Condition condition, boolean unorderedIsTrue) {
+        boolean mirrored;
+        AllocatableValue left;
+        Value right;
+        if (((AArch64Kind) a.getPlatformKind()).isInteger()) {
+            if (LIRValueUtil.isVariable(b) || b instanceof RegisterValue) {
+                left = loadReg(b);
+                right = loadNonConst(a);
+                mirrored = true;
+            } else {
+                left = loadReg(a);
+                right = loadNonConst(b);
+                mirrored = false;
+            }
+            append(new AArch64Compare.CompareOp(left, asAllocatable(right)));
+        } else {
+            if (AArch64Compare.FloatCompareOp.isFloatCmpConstant(a, condition, unorderedIsTrue)) {
+                left = loadReg(b);
+                right = a;
+                mirrored = true;
+            } else if (AArch64Compare.FloatCompareOp.isFloatCmpConstant(b, condition, unorderedIsTrue)) {
+                left = loadReg(a);
+                right = b;
+                mirrored = false;
+            } else {
+                left = loadReg(a);
+                right = loadReg(b);
+                mirrored = false;
+            }
+            append(new AArch64Compare.FloatCompareOp(left, asAllocatable(right), condition, unorderedIsTrue));
+        }
+        return mirrored;
+    }
+
+    /**
+     * Checks whether value can be used directly with a gpCompare instruction. This is <b>not</b>
+     * the same as {@link AArch64ArithmeticLIRGenerator#isArithmeticConstant(JavaConstant)}, because
+     * 0.0 is a valid compare constant for floats, while there are no arithmetic constants for
+     * floats.
+     *
+     * @param value any type. Non null.
+     * @return true if value can be used directly in comparison instruction, false otherwise.
+     */
+    public boolean isCompareConstant(Value value) {
+        if (!isJavaConstant(value)) {
+            return false;
+        }
+        JavaConstant constant = asJavaConstant(value);
+        if (((AArch64Kind) value.getPlatformKind()).isInteger()) {
+            return AArch64ArithmeticLIRGenerator.isArithmeticConstant(constant);
+        } else {
+            return constant.isDefaultForKind();
+        }
+    }
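The float branch accepts only the kind's default value because fcmp has a dedicated compare-with-zero form (fcmp Dn, #0.0) and no general float immediates. A sketch of the predicate's intent using raw bits; whether JVMCI's isDefaultForKind treats -0.0 as default is not guaranteed here, so this sketch is deliberately strict:

    // Sketch of the compare-constant predicate for floats: only +0.0 qualifies.
    public class FloatCompareConstSketch {
        static boolean isFloatCompareConstant(double v) {
            return Double.doubleToRawLongBits(v) == 0L; // exactly +0.0
        }

        public static void main(String[] args) {
            System.out.println(isFloatCompareConstant(0.0));  // true
            System.out.println(isFloatCompareConstant(-0.0)); // false: sign bit set
            System.out.println(isFloatCompareConstant(1.0));  // false
        }
    }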
+
+    /**
+     * Moves trueValue into result if (left & right) == 0, else falseValue.
+     *
+     * @param left Integer kind. Non null.
+     * @param right Integer kind. Non null.
+     * @param trueValue Integer kind. Non null.
+     * @param falseValue Integer kind. Non null.
+     * @return virtual register containing trueValue if (left & right) == 0, else falseValue.
+     */
+    @Override
+    public Variable emitIntegerTestMove(Value left, Value right, Value trueValue, Value falseValue) {
+        assert ((AArch64Kind) left.getPlatformKind()).isInteger() && ((AArch64Kind) right.getPlatformKind()).isInteger();
+        assert ((AArch64Kind) trueValue.getPlatformKind()).isInteger() && ((AArch64Kind) falseValue.getPlatformKind()).isInteger();
+        ((AArch64ArithmeticLIRGenerator) getArithmetic()).emitBinary(trueValue.getLIRKind(), AArch64ArithmeticOp.ANDS, true, left, right);
+        Variable result = newVariable(trueValue.getLIRKind());
+        append(new AArch64ControlFlow.CondMoveOp(result, AArch64Assembler.ConditionFlag.EQ, asAllocatableValue(trueValue), asAllocatableValue(falseValue)));
+        return result;
+    }
+
+    @Override
+    protected void emitForeignCallOp(ForeignCallLinkage linkage, Value result, Value[] arguments, Value[] temps, LIRFrameState info) {
+        if (AArch64Call.isNearCall(linkage)) {
+            append(new AArch64Call.DirectNearForeignCallOp(linkage, result, arguments, temps, info));
+        } else {
+            append(new AArch64Call.DirectFarForeignCallOp(linkage, result, arguments, temps, info));
+        }
+    }
+
+    @Override
+    public void emitStrategySwitch(SwitchStrategy strategy, Variable key, LabelRef[] keyTargets, LabelRef defaultTarget) {
+        append(new AArch64ControlFlow.StrategySwitchOp(strategy, keyTargets, defaultTarget, key, newVariable(key.getLIRKind()), AArch64LIRGenerator::toIntConditionFlag));
+    }
+
+    @Override
+    protected void emitTableSwitch(int lowKey, LabelRef defaultTarget, LabelRef[] targets, Value key) {
+        // Make a copy of the key since the table switch destroys its input.
+        Variable tmp = emitMove(key);
+        Variable scratch = newVariable(LIRKind.value(AArch64Kind.WORD));
+        append(new AArch64ControlFlow.TableSwitchOp(lowKey, defaultTarget, targets, tmp, scratch));
+    }
+
+    @Override
+    public Variable emitByteSwap(Value operand) {
+        // TODO (das) Do not generate until we support vector instructions
+        throw JVMCIError.unimplemented("Do not generate until we support vector instructions");
+    }
+
+    @Override
+    public Variable emitArrayEquals(JavaKind kind, Value array1, Value array2, Value length) {
+        // TODO (das) Do not generate until we support vector instructions
+        throw JVMCIError.unimplemented("Do not generate until we support vector instructions");
+    }
+
+    @Override
+    protected JavaConstant zapValueForKind(PlatformKind kind) {
+        long dead = 0xDEADDEADDEADDEADL;
+        switch ((AArch64Kind) kind) {
+            case BYTE:
+                return JavaConstant.forByte((byte) dead);
+            case WORD:
+                return JavaConstant.forShort((short) dead);
+            case DWORD:
+                return JavaConstant.forInt((int) dead);
+            case QWORD:
+                return JavaConstant.forLong(dead);
+            case SINGLE:
+                return JavaConstant.forFloat(Float.intBitsToFloat((int) dead));
+            case DOUBLE:
+                return JavaConstant.forDouble(Double.longBitsToDouble(dead));
+            default:
+                throw JVMCIError.shouldNotReachHere();
+        }
+    }
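The zap constant is the 0xDEADDEADDEADDEAD pattern truncated to each kind's width, with the float kinds reinterpreting rather than converting the bits. A quick check of those truncations:

    // Truncations of the 0xDEADDEADDEADDEAD zap pattern per platform kind.
    public class ZapValueSketch {
        public static void main(String[] args) {
            long dead = 0xDEADDEADDEADDEADL;
            System.out.println(Integer.toHexString((byte) dead & 0xFF));    // ad
            System.out.println(Integer.toHexString((short) dead & 0xFFFF)); // dead
            System.out.println(Integer.toHexString((int) dead));            // deaddead
            // SINGLE/DOUBLE reinterpret the bit pattern rather than convert the value.
            System.out.println(Float.intBitsToFloat((int) dead));           // a large negative float
            System.out.println(Double.longBitsToDouble(dead));              // a large negative double
        }
    }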
+
+    /**
+     * Loads a value into a virtual register. Contrary to {@link #load(Value)} this handles
+     * RegisterValues (i.e. values corresponding to fixed physical registers) correctly, by not
+     * creating an unnecessary move into a virtual register.
+     *
+     * This avoids generating
+     *
+     * <pre>
+     * mov x0, x19   // x19 is the fixed thread register
+     * ldr x0, [x0]
+     * </pre>
+     *
+     * instead of the direct
+     *
+     * <pre>
+     * ldr x0, [x19]
+     * </pre>
+     */
+    protected AllocatableValue loadReg(Value val) {
+        if (!(val instanceof Variable || val instanceof RegisterValue)) {
+            return emitMove(val);
+        }
+        return (AllocatableValue) val;
+    }
+
+    /**
+     * If value is a constant that cannot be used directly with a gpCompare instruction, loads it
+     * into a register and returns the register; otherwise returns the constant value unchanged.
+     */
+    protected Value loadNonCompareConst(Value value) {
+        if (!isCompareConstant(value)) {
+            return loadReg(value);
+        }
+        return value;
+    }
+
+    @Override
+    public void emitPause() {
+        append(new AArch64PauseOp());
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.compiler.aarch64/src/com/oracle/graal/compiler/aarch64/AArch64LIRKindTool.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.compiler.aarch64;
+
+import com.oracle.graal.compiler.common.spi.LIRKindTool;
+
+import jdk.vm.ci.aarch64.AArch64Kind;
+import jdk.vm.ci.common.JVMCIError;
+import jdk.vm.ci.meta.LIRKind;
+
+public class AArch64LIRKindTool implements LIRKindTool {
+
+    public LIRKind getIntegerKind(int bits) {
+        if (bits <= 8) {
+            return LIRKind.value(AArch64Kind.BYTE);
+        } else if (bits <= 16) {
+            return LIRKind.value(AArch64Kind.WORD);
+        } else if (bits <= 32) {
+            return LIRKind.value(AArch64Kind.DWORD);
+        } else {
+            assert bits <= 64;
+            return LIRKind.value(AArch64Kind.QWORD);
+        }
+    }
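The bucketing above rounds any bit width up to the next machine kind. The same mapping as a trivially runnable function:

    // The same bits -> kind bucketing as getIntegerKind, as a plain function.
    public class IntegerKindSketch {
        static String kindFor(int bits) {
            if (bits <= 8) {
                return "BYTE";
            } else if (bits <= 16) {
                return "WORD";
            } else if (bits <= 32) {
                return "DWORD";
            }
            assert bits <= 64;
            return "QWORD";
        }

        public static void main(String[] args) {
            System.out.println(kindFor(1));  // BYTE (e.g. a boolean stamp)
            System.out.println(kindFor(17)); // DWORD
            System.out.println(kindFor(33)); // QWORD
        }
    }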
+
+    public LIRKind getFloatingKind(int bits) {
+        switch (bits) {
+            case 32:
+                return LIRKind.value(AArch64Kind.SINGLE);
+            case 64:
+                return LIRKind.value(AArch64Kind.DOUBLE);
+            default:
+                throw JVMCIError.shouldNotReachHere();
+        }
+    }
+
+    public LIRKind getObjectKind() {
+        return LIRKind.reference(AArch64Kind.QWORD);
+    }
+
+    public LIRKind getWordKind() {
+        return LIRKind.value(AArch64Kind.QWORD);
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.compiler.aarch64/src/com/oracle/graal/compiler/aarch64/AArch64MoveFactory.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.graal.compiler.aarch64;
+
+import static com.oracle.graal.lir.LIRValueUtil.asConstant;
+import static com.oracle.graal.lir.LIRValueUtil.isConstantValue;
+import static com.oracle.graal.lir.LIRValueUtil.isStackSlotValue;
+
+import com.oracle.graal.compiler.aarch64.AArch64LIRGenerator.ConstantTableBaseProvider;
+import com.oracle.graal.lir.LIRInstruction;
+import com.oracle.graal.lir.aarch64.AArch64AddressValue;
+import com.oracle.graal.lir.aarch64.AArch64Move.LoadAddressOp;
+import com.oracle.graal.lir.gen.LIRGeneratorTool.MoveFactory;
+
+import jdk.vm.ci.code.CodeCacheProvider;
+import jdk.vm.ci.common.JVMCIError;
+import jdk.vm.ci.meta.AllocatableValue;
+import jdk.vm.ci.meta.Constant;
+import jdk.vm.ci.meta.JavaConstant;
+import jdk.vm.ci.meta.Value;
+
+public class AArch64MoveFactory implements MoveFactory {
+
+    private final CodeCacheProvider codeCache;
+    protected final ConstantTableBaseProvider constantTableBaseProvider;
+
+    public AArch64MoveFactory(CodeCacheProvider codeCache, ConstantTableBaseProvider constantTableBaseProvider) {
+        this.codeCache = codeCache;
+        this.constantTableBaseProvider = constantTableBaseProvider;
+    }
+
+    @Override
+    public LIRInstruction createMove(AllocatableValue dst, Value src) {
+        boolean srcIsSlot = isStackSlotValue(src);
+        boolean dstIsSlot = isStackSlotValue(dst);
+        if (isConstantValue(src)) {
+            return createLoad(dst, asConstant(src));
+        } else if (src instanceof AArch64AddressValue) {
+            return new LoadAddressOp(dst, (AArch64AddressValue) src);
+        } else {
+            assert src instanceof AllocatableValue;
+            if (srcIsSlot && dstIsSlot) {
+                throw JVMCIError.shouldNotReachHere(src.getClass() + " " + dst.getClass());
+            } else {
+                // return new Move(dst, (AllocatableValue) src);
+                throw JVMCIError.unimplemented();
+            }
+        }
+    }
+
+    @Override
+    public LIRInstruction createStackMove(AllocatableValue result, AllocatableValue input) {
+        // return new AArch64Move.Move(result, input);
+        throw JVMCIError.unimplemented();
+    }
+
+    @Override
+    public LIRInstruction createLoad(AllocatableValue dst, Constant src) {
+        if (src instanceof JavaConstant) {
+            JavaConstant javaConstant = (JavaConstant) src;
+            if (canInlineConstant(javaConstant)) {
+                // return new AArch64Move.LoadInlineConstant(javaConstant, dst);
+                throw JVMCIError.unimplemented();
+            } else {
+                // return new AArch64Move.LoadConstantFromTable(javaConstant,
+                // constantTableBaseProvider.getConstantTableBase(), dst);
+                throw JVMCIError.unimplemented();
+            }
+        } else {
+            throw JVMCIError.shouldNotReachHere(src.getClass().toString());
+        }
+    }
+
+    @Override
+    public boolean canInlineConstant(JavaConstant c) {
+        switch (c.getJavaKind()) {
+            case Boolean:
+            case Byte:
+            case Char:
+            case Short:
+            case Int:
+                // return SPARCAssembler.isSimm13(c.asInt()) && !codeCache.needsDataPatch(c);
+                boolean x = !codeCache.needsDataPatch(c);
+                throw JVMCIError.unimplemented("needsDataPatch=" + x);
+            case Long:
+                // return SPARCAssembler.isSimm13(c.asLong()) && !codeCache.needsDataPatch(c);
+                boolean y = !codeCache.needsDataPatch(c);
+                throw JVMCIError.unimplemented("needsDataPatch=" + y);
+            case Object:
+                return c.isNull();
+            default:
+                return false;
+        }
+    }
+
+    @Override
+    public boolean allowConstantToStackMove(Constant value) {
+        return false;
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.compiler.aarch64/src/com/oracle/graal/compiler/aarch64/AArch64NodeLIRBuilder.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.graal.compiler.aarch64;
+
+import com.oracle.graal.compiler.gen.NodeLIRBuilder;
+import com.oracle.graal.lir.aarch64.AArch64BreakpointOp;
+import com.oracle.graal.lir.gen.LIRGeneratorTool;
+import com.oracle.graal.nodes.BreakpointNode;
+import com.oracle.graal.nodes.StructuredGraph;
+import com.oracle.graal.nodes.ValueNode;
+
+import jdk.vm.ci.code.CallingConvention;
+import jdk.vm.ci.meta.JavaType;
+import jdk.vm.ci.meta.Value;
+
+/**
+ * This class implements the AArch64 specific portion of the LIR generator.
+ */
+public abstract class AArch64NodeLIRBuilder extends NodeLIRBuilder {
+
+    public AArch64NodeLIRBuilder(StructuredGraph graph, LIRGeneratorTool lirGen, AArch64NodeMatchRules nodeMatchRules) {
+        super(graph, lirGen, nodeMatchRules);
+    }
+
+    @Override
+    protected boolean peephole(ValueNode valueNode) {
+        // No peephole optimizations for now
+        return false;
+    }
+
+    @Override
+    public void visitBreakpointNode(BreakpointNode node) {
+        JavaType[] sig = new JavaType[node.arguments().size()];
+        for (int i = 0; i < sig.length; i++) {
+            sig[i] = node.arguments().get(i).stamp().javaType(gen.getMetaAccess());
+        }
+
+        Value[] parameters = visitInvokeArguments(gen.getResult().getFrameMapBuilder().getRegisterConfig().getCallingConvention(CallingConvention.Type.JavaCall, null, sig, gen.target(), false),
+                        node.arguments());
+        append(new AArch64BreakpointOp(parameters));
+    }
+
+    @Override
+    public AArch64LIRGenerator getLIRGeneratorTool() {
+        return (AArch64LIRGenerator) super.getLIRGeneratorTool();
+    }
+
+    @Override
+    protected void emitPrologue(StructuredGraph graph) {
+        // XXX Maybe we need something like this.
+        // getLIRGeneratorTool().emitLoadConstantTableBase();
+        super.emitPrologue(graph);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.compiler.aarch64/src/com/oracle/graal/compiler/aarch64/AArch64NodeMatchRules.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.graal.compiler.aarch64;
+
+import com.oracle.graal.compiler.gen.NodeMatchRules;
+import com.oracle.graal.lir.LIRFrameState;
+import com.oracle.graal.lir.gen.LIRGeneratorTool;
+import com.oracle.graal.nodes.DeoptimizingNode;
+import com.oracle.graal.nodes.memory.Access;
+
+import jdk.vm.ci.aarch64.AArch64Kind;
+
+public class AArch64NodeMatchRules extends NodeMatchRules {
+
+    public AArch64NodeMatchRules(LIRGeneratorTool gen) {
+        super(gen);
+    }
+
+    protected LIRFrameState getState(Access access) {
+        if (access instanceof DeoptimizingNode) {
+            return state((DeoptimizingNode) access);
+        }
+        return null;
+    }
+
+    protected AArch64Kind getMemoryKind(Access access) {
+        return (AArch64Kind) gen.getLIRKind(access.asNode().stamp()).getPlatformKind();
+    }
+
+    @Override
+    public AArch64LIRGenerator getLIRGeneratorTool() {
+        return (AArch64LIRGenerator) gen;
+    }
+
+    protected AArch64ArithmeticLIRGenerator getArithmeticLIRGenerator() {
+        return (AArch64ArithmeticLIRGenerator) getLIRGeneratorTool().getArithmetic();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.compiler.aarch64/src/com/oracle/graal/compiler/aarch64/AArch64SuitesProvider.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.compiler.aarch64;
+
+import com.oracle.graal.java.DefaultSuitesProvider;
+import com.oracle.graal.nodes.graphbuilderconf.GraphBuilderConfiguration.Plugins;
+import com.oracle.graal.phases.tiers.CompilerConfiguration;
+
+public class AArch64SuitesProvider extends DefaultSuitesProvider {
+
+    public AArch64SuitesProvider(CompilerConfiguration compilerConfiguration, Plugins plugins) {
+        super(compilerConfiguration, plugins);
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotBackend.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.aarch64;
+
+import static com.oracle.graal.compiler.common.GraalOptions.ZapStackOnMethodEntry;
+import static java.lang.reflect.Modifier.isStatic;
+import static jdk.vm.ci.aarch64.AArch64.lr;
+import static jdk.vm.ci.aarch64.AArch64.r10;
+import static jdk.vm.ci.aarch64.AArch64.sp;
+import static jdk.vm.ci.aarch64.AArch64.zr;
+import static jdk.vm.ci.code.CallingConvention.Type.JavaCallee;
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+import static jdk.vm.ci.hotspot.HotSpotVMConfig.config;
+import static jdk.vm.ci.hotspot.aarch64.AArch64HotSpotRegisterConfig.fp;
+
+import java.lang.reflect.Field;
+import java.util.Set;
+
+import com.oracle.graal.asm.Assembler;
+import com.oracle.graal.asm.Label;
+import com.oracle.graal.asm.aarch64.AArch64Address;
+import com.oracle.graal.asm.aarch64.AArch64Assembler;
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler.ScratchRegister;
+import com.oracle.graal.compiler.aarch64.AArch64NodeMatchRules;
+import com.oracle.graal.compiler.common.alloc.RegisterAllocationConfig;
+import com.oracle.graal.compiler.common.spi.ForeignCallLinkage;
+import com.oracle.graal.hotspot.HotSpotGraalRuntimeProvider;
+import com.oracle.graal.hotspot.HotSpotHostBackend;
+import com.oracle.graal.hotspot.meta.HotSpotForeignCallsProvider;
+import com.oracle.graal.hotspot.meta.HotSpotProviders;
+import com.oracle.graal.hotspot.stubs.Stub;
+import com.oracle.graal.lir.LIR;
+import com.oracle.graal.lir.aarch64.AArch64Call;
+import com.oracle.graal.lir.aarch64.AArch64FrameMap;
+import com.oracle.graal.lir.aarch64.AArch64FrameMapBuilder;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+import com.oracle.graal.lir.asm.CompilationResultBuilderFactory;
+import com.oracle.graal.lir.asm.FrameContext;
+import com.oracle.graal.lir.framemap.FrameMap;
+import com.oracle.graal.lir.framemap.FrameMapBuilder;
+import com.oracle.graal.lir.gen.LIRGenerationResult;
+import com.oracle.graal.lir.gen.LIRGeneratorTool;
+import com.oracle.graal.nodes.StructuredGraph;
+import com.oracle.graal.nodes.spi.NodeLIRBuilderTool;
+
+import jdk.vm.ci.code.CallingConvention;
+import jdk.vm.ci.code.CompilationResult;
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.code.RegisterConfig;
+import jdk.vm.ci.code.StackSlot;
+import jdk.vm.ci.hotspot.HotSpotVMConfig;
+import jdk.vm.ci.hotspot.aarch64.AArch64HotSpotRegisterConfig;
+import jdk.vm.ci.meta.JavaType;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+import sun.misc.Unsafe;
+
+/**
+ * HotSpot AArch64 specific backend.
+ */
+public class AArch64HotSpotBackend extends HotSpotHostBackend {
+
+    public AArch64HotSpotBackend(HotSpotVMConfig config, HotSpotGraalRuntimeProvider runtime, HotSpotProviders providers) {
+        super(config, runtime, providers);
+    }
+
+    @Override
+    public FrameMapBuilder newFrameMapBuilder(RegisterConfig registerConfig) {
+        RegisterConfig registerConfigNonNull = registerConfig == null ? getCodeCache().getRegisterConfig() : registerConfig;
+        return new AArch64FrameMapBuilder(newFrameMap(registerConfigNonNull), getCodeCache(), registerConfigNonNull);
+    }
+
+    @Override
+    public FrameMap newFrameMap(RegisterConfig registerConfig) {
+        return new AArch64FrameMap(getCodeCache(), registerConfig, this);
+    }
+
+    @Override
+    public LIRGeneratorTool newLIRGenerator(CallingConvention cc, LIRGenerationResult lirGenRes) {
+        return new AArch64HotSpotLIRGenerator(getProviders(), config(), cc, lirGenRes);
+    }
+
+    @Override
+    public LIRGenerationResult newLIRGenerationResult(String compilationUnitName, LIR lir, FrameMapBuilder frameMapBuilder, ResolvedJavaMethod method, Object stub) {
+        return new AArch64HotSpotLIRGenerationResult(compilationUnitName, lir, frameMapBuilder, stub);
+    }
+
+    @Override
+    public NodeLIRBuilderTool newNodeLIRBuilder(StructuredGraph graph, LIRGeneratorTool lirGen) {
+        return new AArch64HotSpotNodeLIRBuilder(graph, lirGen, new AArch64NodeMatchRules(lirGen));
+    }
+
+    /**
+     * Emits code to do stack overflow checking.
+     *
+     * @param afterFrameInit specifies if the stack pointer has already been adjusted to allocate
+     *            the current frame
+     */
+    protected static void emitStackOverflowCheck(CompilationResultBuilder crb, int pagesToBang, boolean afterFrameInit) {
+        if (pagesToBang > 0) {
+            AArch64MacroAssembler masm = (AArch64MacroAssembler) crb.asm;
+            int frameSize = crb.frameMap.totalFrameSize();
+            if (frameSize > 0) {
+                int lastFramePage = frameSize / UNSAFE.pageSize();
+                // emit multiple stack bangs for methods with frames larger than a page
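+                // For example (a sketch): with a 4KiB page size, pagesToBang == 2 and a one-page
+                // frame, this stores the zero register at sp - 8192 and sp - 12288; with
+                // afterFrameInit the offsets are reduced by frameSize, since sp has already been
+                // moved down.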
+                for (int i = 0; i <= lastFramePage; i++) {
+                    int disp = (i + pagesToBang) * UNSAFE.pageSize();
+                    if (afterFrameInit) {
+                        disp -= frameSize;
+                    }
+                    crb.blockComment("[stack overflow check]");
+                    try (ScratchRegister sc = masm.getScratchRegister()) {
+                        Register scratch = sc.getRegister();
+                        AArch64Address address = masm.makeAddress(sp, -disp, scratch, 8, /* allowOverwrite */false);
+                        masm.str(64, zr, address);
+                    }
+                }
+            }
+        }
+    }
+
+    private class HotSpotFrameContext implements FrameContext {
+        final boolean isStub;
+
+        HotSpotFrameContext(boolean isStub) {
+            this.isStub = isStub;
+        }
+
+        @Override
+        public void enter(CompilationResultBuilder crb) {
+            FrameMap frameMap = crb.frameMap;
+            final int frameSize = frameMap.frameSize();
+            final int totalFrameSize = frameMap.totalFrameSize();
+            assert frameSize + 2 * crb.target.arch.getWordSize() == totalFrameSize : "total framesize should be framesize + 2 words";
+            AArch64MacroAssembler masm = (AArch64MacroAssembler) crb.asm;
+            if (!isStub && pagesToBang > 0) {
+                emitStackOverflowCheck(crb, pagesToBang, false);
+            }
+            crb.blockComment("[method prologue]");
+
+            try (ScratchRegister sc = masm.getScratchRegister()) {
+                Register scratch = sc.getRegister();
+                // save link register and framepointer
+                masm.mov(64, scratch, sp);
+                AArch64Address address = AArch64Address.createPreIndexedImmediateAddress(scratch, -crb.target.arch.getWordSize());
+                masm.str(64, lr, address);
+                masm.str(64, fp, address);
+                // Update framepointer
+                masm.mov(64, fp, scratch);
+
+                if (ZapStackOnMethodEntry.getValue()) {
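+                    // Fill the frame with a recognizable bit pattern so that reads of
+                    // uninitialized stack slots stand out.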
+                    int intSize = 4;
+                    address = AArch64Address.createPreIndexedImmediateAddress(scratch, -intSize);
+                    try (ScratchRegister sc2 = masm.getScratchRegister()) {
+                        Register value = sc2.getRegister();
+                        masm.mov(value, 0xC1C1C1C1);
+                        for (int i = 0; i < frameSize; i += intSize) {
+                            masm.str(32, value, address);
+                        }
+                    }
+                    masm.mov(64, sp, scratch);
+                } else {
+                    // Note: frameSize, not totalFrameSize, is the immediate the sub below uses.
+                    if (AArch64MacroAssembler.isArithmeticImmediate(frameSize)) {
+                        masm.sub(64, sp, scratch, frameSize);
+                    } else {
+                        try (ScratchRegister sc2 = masm.getScratchRegister()) {
+                            Register scratch2 = sc2.getRegister();
+                            masm.mov(scratch2, frameSize);
+                            masm.sub(64, sp, scratch, scratch2);
+                        }
+                    }
+                }
+            }
+            crb.blockComment("[code body]");
+        }
+
+        @Override
+        public void leave(CompilationResultBuilder crb) {
+            AArch64MacroAssembler masm = (AArch64MacroAssembler) crb.asm;
+            crb.blockComment("[method epilogue]");
+            // Deallocate only the variable part of the frame here; the fp/lr pair saved below it
+            // is popped by the post-indexed loads that follow, mirroring the prologue.
+            final int frameSize = crb.frameMap.totalFrameSize() - 2 * crb.target.arch.getWordSize();
+            if (AArch64MacroAssembler.isArithmeticImmediate(frameSize)) {
+                masm.add(64, sp, sp, frameSize);
+            } else {
+                try (ScratchRegister sc = masm.getScratchRegister()) {
+                    Register scratch = sc.getRegister();
+                    masm.mov(scratch, frameSize);
+                    masm.add(64, sp, sp, scratch);
+                }
+            }
+            try (ScratchRegister sc = masm.getScratchRegister()) {
+                Register scratch = sc.getRegister();
+                // restore link register and framepointer
+                masm.mov(64, scratch, sp);
+                AArch64Address address = AArch64Address.createPostIndexedImmediateAddress(scratch, crb.target.arch.getWordSize());
+                masm.ldr(64, fp, address);
+                masm.ldr(64, lr, address);
+                masm.mov(64, sp, scratch);
+            }
+        }
+
+        @Override
+        public boolean hasFrame() {
+            return true;
+        }
+    }
+
+    @Override
+    protected Assembler createAssembler(FrameMap frameMap) {
+        return new AArch64MacroAssembler(getTarget());
+    }
+
+    @Override
+    public CompilationResultBuilder newCompilationResultBuilder(LIRGenerationResult lirGenRes, FrameMap frameMap, CompilationResult compilationResult, CompilationResultBuilderFactory factory) {
+        AArch64HotSpotLIRGenerationResult gen = (AArch64HotSpotLIRGenerationResult) lirGenRes;
+        LIR lir = gen.getLIR();
+        assert gen.getDeoptimizationRescueSlot() == null || frameMap.frameNeedsAllocating() : "method that can deoptimize must have a frame";
+
+        Stub stub = gen.getStub();
+        Assembler masm = createAssembler(frameMap);
+        HotSpotFrameContext frameContext = new HotSpotFrameContext(stub != null);
+
+        CompilationResultBuilder crb = new CompilationResultBuilder(getCodeCache(), getForeignCalls(), frameMap, masm, frameContext, compilationResult);
+        crb.setTotalFrameSize(frameMap.totalFrameSize());
+        StackSlot deoptimizationRescueSlot = gen.getDeoptimizationRescueSlot();
+        if (deoptimizationRescueSlot != null && stub == null) {
+            crb.compilationResult.setCustomStackAreaOffset(frameMap.offsetForStackSlot(deoptimizationRescueSlot));
+        }
+
+        if (stub != null) {
+            Set<Register> definedRegisters = gatherDefinedRegisters(lir);
+            updateStub(stub, definedRegisters, gen.getCalleeSaveInfo(), frameMap);
+        }
+        return crb;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, LIR lir, ResolvedJavaMethod installedCodeOwner) {
+        AArch64MacroAssembler masm = (AArch64MacroAssembler) crb.asm;
+        FrameMap frameMap = crb.frameMap;
+        RegisterConfig regConfig = frameMap.getRegisterConfig();
+        HotSpotVMConfig config = config();
+        Label verifiedStub = new Label();
+
+        emitCodePrefix(crb, installedCodeOwner, masm, regConfig, config, verifiedStub);
+        emitCodeBody(crb, lir);
+        emitCodeSuffix(crb, masm, config, frameMap);
+    }
+
+    private void emitCodePrefix(CompilationResultBuilder crb, ResolvedJavaMethod installedCodeOwner, AArch64MacroAssembler masm, RegisterConfig regConfig, HotSpotVMConfig config, Label verifiedStub) {
+        HotSpotProviders providers = getProviders();
+        if (installedCodeOwner != null && !isStatic(installedCodeOwner.getModifiers())) {
+            crb.recordMark(config.MARKID_UNVERIFIED_ENTRY);
+            CallingConvention cc = regConfig.getCallingConvention(JavaCallee, null, new JavaType[]{providers.getMetaAccess().lookupJavaType(Object.class)}, getTarget(), false);
+            // See the definition of IC_Klass in c1_LIRAssembler_aarch64.cpp: it is equal to
+            // scratch(1), so be careful not to clobber it here.
+            Register inlineCacheKlass = AArch64HotSpotRegisterConfig.inlineCacheRegister;
+            Register receiver = asRegister(cc.getArgument(0));
+            int transferSize = config.useCompressedClassPointers ? 4 : 8;
+            AArch64Address klassAddress = masm.makeAddress(receiver, config.hubOffset, transferSize);
+
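+            // Sketch of the emitted receiver type check (register names are illustrative):
+            //   ldr  w10, [receiver, #hubOffset]   // 64-bit ldr, and no decode, without
+            //   <decode compressed klass pointer>  //   compressed class pointers
+            //   cmp  x<ic>, x10
+            //   b.eq verifiedStub
+            //   b    <inline cache miss handler>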
+            // TODO check that r10 (and r11) are actually available as scratch registers here.
+            Register klass = r10;
+            if (config.useCompressedClassPointers) {
+                masm.ldr(32, klass, klassAddress);
+                AArch64HotSpotMove.decodeKlassPointer(masm, klass, klass, providers.getRegisters().getHeapBaseRegister(), config.getKlassEncoding());
+            } else {
+                masm.ldr(64, klass, klassAddress);
+            }
+            masm.cmp(64, inlineCacheKlass, klass);
+            // Conditional jumps have a much shorter range than unconditional ones, which can be
+            // a problem because the miss handler could be out of range.
+            masm.branchConditionally(AArch64Assembler.ConditionFlag.EQ, verifiedStub);
+            AArch64Call.directJmp(crb, masm, getForeignCalls().lookupForeignCall(IC_MISS_HANDLER));
+        }
+        masm.align(config.codeEntryAlignment);
+        crb.recordMark(config.MARKID_OSR_ENTRY);
+        masm.bind(verifiedStub);
+        crb.recordMark(config.MARKID_VERIFIED_ENTRY);
+    }
+
+    private static void emitCodeBody(CompilationResultBuilder crb, LIR lir) {
+        crb.emit(lir);
+    }
+
+    private void emitCodeSuffix(CompilationResultBuilder crb, AArch64MacroAssembler masm, HotSpotVMConfig config, FrameMap frameMap) {
+        HotSpotProviders providers = getProviders();
+        HotSpotFrameContext frameContext = (HotSpotFrameContext) crb.frameContext;
+        if (!frameContext.isStub) {
+            try (ScratchRegister sc = masm.getScratchRegister()) {
+                Register scratch = sc.getRegister();
+                HotSpotForeignCallsProvider foreignCalls = providers.getForeignCalls();
+                crb.recordMark(config.MARKID_EXCEPTION_HANDLER_ENTRY);
+                ForeignCallLinkage linkage = foreignCalls.lookupForeignCall(EXCEPTION_HANDLER);
+                Register helper = AArch64Call.isNearCall(linkage) ? null : scratch;
+                AArch64Call.directCall(crb, masm, linkage, helper, null);
+
+                crb.recordMark(config.MARKID_DEOPT_HANDLER_ENTRY);
+                linkage = foreignCalls.lookupForeignCall(DEOPTIMIZATION_HANDLER);
+                helper = AArch64Call.isNearCall(linkage) ? null : scratch;
+                AArch64Call.directCall(crb, masm, linkage, helper, null);
+            }
+        } else {
+            // No need to emit the stubs for entries back into the method since it has no calls
+            // that can cause such "return" entries.
+            assert !frameMap.accessesCallerFrame();
+        }
+    }
+
+    @Override
+    public RegisterAllocationConfig newRegisterAllocationConfig(RegisterConfig registerConfig) {
+        RegisterConfig registerConfigNonNull = registerConfig == null ? getCodeCache().getRegisterConfig() : registerConfig;
+        return new AArch64HotSpotRegisterAllocationConfig(registerConfigNonNull);
+    }
+
+    private static final Unsafe UNSAFE = initUnsafe();
+
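+    /**
+     * {@code Unsafe.getUnsafe()} is only permitted for trusted code (e.g. on the boot class
+     * path), so fall back to reading the {@code theUnsafe} field reflectively.
+     */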
+    private static Unsafe initUnsafe() {
+        try {
+            return Unsafe.getUnsafe();
+        } catch (SecurityException se) {
+            try {
+                Field theUnsafe = Unsafe.class.getDeclaredField("theUnsafe");
+                theUnsafe.setAccessible(true);
+                return (Unsafe) theUnsafe.get(Unsafe.class);
+            } catch (Exception e) {
+                throw new RuntimeException("exception while trying to get Unsafe", e);
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotBackendFactory.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.aarch64;
+
+import static jdk.vm.ci.aarch64.AArch64.sp;
+import static jdk.vm.ci.inittimer.InitTimer.timer;
+
+import com.oracle.graal.api.replacements.SnippetReflectionProvider;
+import com.oracle.graal.compiler.aarch64.AArch64AddressLowering;
+import com.oracle.graal.compiler.aarch64.AArch64SuitesProvider;
+import com.oracle.graal.hotspot.DefaultHotSpotGraalCompilerFactory;
+import com.oracle.graal.hotspot.HotSpotBackend;
+import com.oracle.graal.hotspot.HotSpotBackendFactory;
+import com.oracle.graal.hotspot.HotSpotGraalRuntimeProvider;
+import com.oracle.graal.hotspot.HotSpotReplacementsImpl;
+import com.oracle.graal.hotspot.meta.HotSpotForeignCallsProvider;
+import com.oracle.graal.hotspot.meta.HotSpotGraalConstantReflectionProvider;
+import com.oracle.graal.hotspot.meta.HotSpotGraphBuilderPlugins;
+import com.oracle.graal.hotspot.meta.HotSpotHostForeignCallsProvider;
+import com.oracle.graal.hotspot.meta.HotSpotLoweringProvider;
+import com.oracle.graal.hotspot.meta.HotSpotProviders;
+import com.oracle.graal.hotspot.meta.HotSpotRegisters;
+import com.oracle.graal.hotspot.meta.HotSpotRegistersProvider;
+import com.oracle.graal.hotspot.meta.HotSpotSnippetReflectionProvider;
+import com.oracle.graal.hotspot.meta.HotSpotStampProvider;
+import com.oracle.graal.hotspot.meta.HotSpotSuitesProvider;
+import com.oracle.graal.hotspot.word.HotSpotWordTypes;
+import com.oracle.graal.nodes.graphbuilderconf.GraphBuilderConfiguration.Plugins;
+import com.oracle.graal.phases.tiers.CompilerConfiguration;
+import com.oracle.graal.phases.util.Providers;
+import com.oracle.graal.replacements.aarch64.AArch64GraphBuilderPlugins;
+import com.oracle.graal.word.WordTypes;
+
+import jdk.vm.ci.aarch64.AArch64;
+import jdk.vm.ci.code.CodeCacheProvider;
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.code.RegisterConfig;
+import jdk.vm.ci.code.TargetDescription;
+import jdk.vm.ci.hotspot.HotSpotCodeCacheProvider;
+import jdk.vm.ci.hotspot.HotSpotConstantReflectionProvider;
+import jdk.vm.ci.hotspot.HotSpotJVMCIRuntimeProvider;
+import jdk.vm.ci.hotspot.HotSpotMetaAccessProvider;
+import jdk.vm.ci.hotspot.HotSpotVMConfig;
+import jdk.vm.ci.hotspot.aarch64.AArch64HotSpotRegisterConfig;
+import jdk.vm.ci.inittimer.InitTimer;
+import jdk.vm.ci.meta.Value;
+import jdk.vm.ci.runtime.JVMCIBackend;
+import jdk.vm.ci.service.ServiceProvider;
+
+@ServiceProvider(HotSpotBackendFactory.class)
+public class AArch64HotSpotBackendFactory implements HotSpotBackendFactory {
+
+    @Override
+    public void register() {
+        DefaultHotSpotGraalCompilerFactory.registerBackend(AArch64.class, this);
+    }
+
+    @Override
+    @SuppressWarnings("try")
+    public HotSpotBackend createBackend(HotSpotGraalRuntimeProvider graalRuntime, CompilerConfiguration compilerConfiguration, HotSpotJVMCIRuntimeProvider jvmciRuntime, HotSpotBackend host) {
+        assert host == null;
+
+        JVMCIBackend jvmci = jvmciRuntime.getHostJVMCIBackend();
+        HotSpotVMConfig config = jvmciRuntime.getConfig();
+        HotSpotProviders providers;
+        HotSpotRegistersProvider registers;
+        HotSpotCodeCacheProvider codeCache = (HotSpotCodeCacheProvider) jvmci.getCodeCache();
+        TargetDescription target = codeCache.getTarget();
+        HotSpotConstantReflectionProvider constantReflection = new HotSpotGraalConstantReflectionProvider(jvmciRuntime);
+        HotSpotHostForeignCallsProvider foreignCalls;
+        Value[] nativeABICallerSaveRegisters;
+        HotSpotMetaAccessProvider metaAccess = (HotSpotMetaAccessProvider) jvmci.getMetaAccess();
+        HotSpotLoweringProvider lowerer;
+        HotSpotSnippetReflectionProvider snippetReflection;
+        HotSpotReplacementsImpl replacements;
+        HotSpotSuitesProvider suites;
+        HotSpotWordTypes wordTypes;
+        Plugins plugins;
+        try (InitTimer t = timer("create providers")) {
+            try (InitTimer rt = timer("create HotSpotRegisters provider")) {
+                registers = createRegisters();
+            }
+            try (InitTimer rt = timer("create NativeABICallerSaveRegisters")) {
+                nativeABICallerSaveRegisters = createNativeABICallerSaveRegisters(config, codeCache.getRegisterConfig());
+            }
+            try (InitTimer rt = timer("create ForeignCalls provider")) {
+                foreignCalls = createForeignCalls(jvmciRuntime, graalRuntime, metaAccess, codeCache, nativeABICallerSaveRegisters);
+            }
+            try (InitTimer rt = timer("create Lowerer provider")) {
+                lowerer = createLowerer(graalRuntime, metaAccess, foreignCalls, registers, constantReflection, target);
+            }
+            HotSpotStampProvider stampProvider = new HotSpotStampProvider();
+            Providers p = new Providers(metaAccess, codeCache, constantReflection, foreignCalls, lowerer, null /* replacements are created below */, stampProvider);
+
+            try (InitTimer rt = timer("create WordTypes")) {
+                wordTypes = new HotSpotWordTypes(metaAccess, target.wordJavaKind);
+            }
+            try (InitTimer rt = timer("create SnippetReflection provider")) {
+                snippetReflection = createSnippetReflection(graalRuntime, constantReflection, wordTypes);
+            }
+            try (InitTimer rt = timer("create Replacements provider")) {
+                replacements = createReplacements(config, p, snippetReflection);
+            }
+            try (InitTimer rt = timer("create GraphBuilderPhase plugins")) {
+                plugins = createGraphBuilderPlugins(config, constantReflection, foreignCalls, metaAccess, snippetReflection, replacements, wordTypes, stampProvider);
+                replacements.setGraphBuilderPlugins(plugins);
+            }
+            try (InitTimer rt = timer("create Suites provider")) {
+                suites = createSuites(config, graalRuntime, compilerConfiguration, plugins, codeCache);
+            }
+            providers = new HotSpotProviders(metaAccess, codeCache, constantReflection, foreignCalls, lowerer, replacements, suites, registers, snippetReflection, wordTypes, plugins);
+        }
+        try (InitTimer rt = timer("instantiate backend")) {
+            return createBackend(config, graalRuntime, providers);
+        }
+    }
+
+    protected Plugins createGraphBuilderPlugins(HotSpotVMConfig config, HotSpotConstantReflectionProvider constantReflection, HotSpotHostForeignCallsProvider foreignCalls,
+                    HotSpotMetaAccessProvider metaAccess, HotSpotSnippetReflectionProvider snippetReflection, HotSpotReplacementsImpl replacements, HotSpotWordTypes wordTypes,
+                    HotSpotStampProvider stampProvider) {
+        Plugins plugins = HotSpotGraphBuilderPlugins.create(config, wordTypes, metaAccess, constantReflection, snippetReflection, foreignCalls, stampProvider, replacements);
+        AArch64GraphBuilderPlugins.register(plugins, foreignCalls);
+        return plugins;
+    }
+
+    protected AArch64HotSpotBackend createBackend(HotSpotVMConfig config, HotSpotGraalRuntimeProvider runtime, HotSpotProviders providers) {
+        return new AArch64HotSpotBackend(config, runtime, providers);
+    }
+
+    protected HotSpotRegistersProvider createRegisters() {
+        return new HotSpotRegisters(AArch64HotSpotRegisterConfig.threadRegister, AArch64HotSpotRegisterConfig.heapBaseRegister, sp);
+    }
+
+    protected HotSpotReplacementsImpl createReplacements(HotSpotVMConfig config, Providers p, SnippetReflectionProvider snippetReflection) {
+        return new HotSpotReplacementsImpl(p, snippetReflection, config, p.getCodeCache().getTarget());
+    }
+
+    protected HotSpotHostForeignCallsProvider createForeignCalls(HotSpotJVMCIRuntimeProvider jvmciRuntime, HotSpotGraalRuntimeProvider runtime, HotSpotMetaAccessProvider metaAccess,
+                    HotSpotCodeCacheProvider codeCache, Value[] nativeABICallerSaveRegisters) {
+        return new AArch64HotSpotForeignCallsProvider(jvmciRuntime, runtime, metaAccess, codeCache, nativeABICallerSaveRegisters);
+    }
+
+    protected HotSpotSuitesProvider createSuites(HotSpotVMConfig config, HotSpotGraalRuntimeProvider runtime, CompilerConfiguration compilerConfiguration, Plugins plugins, CodeCacheProvider codeCache) {
+        return new HotSpotSuitesProvider(new AArch64SuitesProvider(compilerConfiguration, plugins), config, runtime, new AArch64AddressLowering(codeCache));
+    }
+
+    protected HotSpotSnippetReflectionProvider createSnippetReflection(HotSpotGraalRuntimeProvider runtime, HotSpotConstantReflectionProvider constantReflection, WordTypes wordTypes) {
+        return new HotSpotSnippetReflectionProvider(runtime, constantReflection, wordTypes);
+    }
+
+    protected HotSpotLoweringProvider createLowerer(HotSpotGraalRuntimeProvider runtime, HotSpotMetaAccessProvider metaAccess, HotSpotForeignCallsProvider foreignCalls,
+                    HotSpotRegistersProvider registers, HotSpotConstantReflectionProvider constantReflection, TargetDescription target) {
+        return new AArch64HotSpotLoweringProvider(runtime, metaAccess, foreignCalls, registers, constantReflection, target);
+    }
+
+    protected static Value[] createNativeABICallerSaveRegisters(@SuppressWarnings("unused") HotSpotVMConfig config, RegisterConfig regConfig) {
+        AArch64HotSpotRegisterConfig conf = (AArch64HotSpotRegisterConfig) regConfig;
+        Register[] callerSavedRegisters = conf.getCallerSaveRegisters();
+        Value[] nativeABICallerSaveRegisters = new Value[callerSavedRegisters.length];
+        for (int i = 0; i < callerSavedRegisters.length; i++) {
+            nativeABICallerSaveRegisters[i] = callerSavedRegisters[i].asValue();
+        }
+        return nativeABICallerSaveRegisters;
+    }
+
+    @Override
+    public String toString() {
+        return "AArch64";
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotCRuntimeCallEpilogueOp.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.aarch64;
+
+import static jdk.vm.ci.aarch64.AArch64.zr;
+
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.Opcode;
+import com.oracle.graal.lir.aarch64.AArch64LIRInstruction;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+import jdk.vm.ci.code.Register;
+
+@Opcode("CRUNTIME_CALL_EPILOGUE")
+public class AArch64HotSpotCRuntimeCallEpilogueOp extends AArch64LIRInstruction {
+    public static final LIRInstructionClass<AArch64HotSpotCRuntimeCallEpilogueOp> TYPE = LIRInstructionClass.create(AArch64HotSpotCRuntimeCallEpilogueOp.class);
+
+    private final int threadLastJavaSpOffset;
+    private final int threadLastJavaFpOffset;
+    private final Register thread;
+
+    public AArch64HotSpotCRuntimeCallEpilogueOp(int threadLastJavaSpOffset, int threadLastJavaFpOffset, Register thread) {
+        super(TYPE);
+        this.threadLastJavaSpOffset = threadLastJavaSpOffset;
+        this.threadLastJavaFpOffset = threadLastJavaFpOffset;
+        this.thread = thread;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+        // reset last Java frame:
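+        // Writing the zero register zeroes thread->last_Java_sp and thread->last_Java_fp, which
+        // signals to the runtime's stack walker that there is no longer a last Java frame.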
+        masm.str(64, zr, masm.makeAddress(thread, threadLastJavaSpOffset, 8));
+        masm.str(64, zr, masm.makeAddress(thread, threadLastJavaFpOffset, 8));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotCRuntimeCallPrologueOp.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.aarch64;
+
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.REG;
+import static jdk.vm.ci.aarch64.AArch64.sp;
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.Opcode;
+import com.oracle.graal.lir.aarch64.AArch64LIRInstruction;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.meta.AllocatableValue;
+
+@Opcode
+public class AArch64HotSpotCRuntimeCallPrologueOp extends AArch64LIRInstruction {
+    public static final LIRInstructionClass<AArch64HotSpotCRuntimeCallPrologueOp> TYPE = LIRInstructionClass.create(AArch64HotSpotCRuntimeCallPrologueOp.class);
+
+    private final int threadLastJavaSpOffset;
+    private final Register thread;
+    @Temp({REG}) protected AllocatableValue spScratch;
+
+    public AArch64HotSpotCRuntimeCallPrologueOp(int threadLastJavaSpOffset, Register thread, AllocatableValue spScratch) {
+        super(TYPE);
+        this.threadLastJavaSpOffset = threadLastJavaSpOffset;
+        this.thread = thread;
+        this.spScratch = spScratch;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+        // Save the last Java frame.
+        // We cannot store the stack pointer directly, so we use a temporary register.
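+        // (In AArch64 load/store instructions, register number 31 in the transfer register field
+        // encodes the zero register rather than sp, so sp must be copied to a general purpose
+        // register first.)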
+        Register scratchRegister = asRegister(spScratch);
+        masm.movx(scratchRegister, sp);
+        masm.str(64, scratchRegister, masm.makeAddress(thread, threadLastJavaSpOffset, 8));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotDeoptimizeCallerOp.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.aarch64;
+
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.Opcode;
+import com.oracle.graal.lir.aarch64.AArch64Call;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+import jdk.vm.ci.hotspot.HotSpotVMConfig;
+
+import static com.oracle.graal.hotspot.HotSpotHostBackend.UNCOMMON_TRAP_HANDLER;
+
+/**
+ * Removes the current frame and tail calls the uncommon trap routine.
+ */
+@Opcode("DEOPT_CALLER")
+public class AArch64HotSpotDeoptimizeCallerOp extends AArch64HotSpotEpilogueOp {
+    public static final LIRInstructionClass<AArch64HotSpotDeoptimizeCallerOp> TYPE = LIRInstructionClass.create(AArch64HotSpotDeoptimizeCallerOp.class);
+
+    public AArch64HotSpotDeoptimizeCallerOp(HotSpotVMConfig config) {
+        super(TYPE, config);
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+        leaveFrame(crb, masm, /* emitSafepoint */false);
+        AArch64Call.directJmp(crb, masm, crb.foreignCalls.lookupForeignCall(UNCOMMON_TRAP_HANDLER));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotDeoptimizeOp.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.aarch64;
+
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.lir.LIRFrameState;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.Opcode;
+import com.oracle.graal.lir.StandardOp;
+import com.oracle.graal.lir.aarch64.AArch64BlockEndOp;
+import com.oracle.graal.lir.aarch64.AArch64Call;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+import static com.oracle.graal.hotspot.HotSpotHostBackend.UNCOMMON_TRAP_HANDLER;
+
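+/**
+ * Deoptimizes by calling the uncommon trap handler, passing along the frame state recorded for
+ * the deoptimization point.
+ */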
+@Opcode("DEOPT")
+public class AArch64HotSpotDeoptimizeOp extends AArch64BlockEndOp implements StandardOp.BlockEndOp {
+    public static final LIRInstructionClass<AArch64HotSpotDeoptimizeOp> TYPE = LIRInstructionClass.create(AArch64HotSpotDeoptimizeOp.class);
+    @State private LIRFrameState info;
+
+    public AArch64HotSpotDeoptimizeOp(LIRFrameState info) {
+        super(TYPE);
+        this.info = info;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+        AArch64Call.directCall(crb, masm, crb.foreignCalls.lookupForeignCall(UNCOMMON_TRAP_HANDLER), null, info);
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotDirectStaticCallOp.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.aarch64;
+
+import static jdk.vm.ci.hotspot.aarch64.AArch64HotSpotRegisterConfig.inlineCacheRegister;
+
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.lir.LIRFrameState;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.Opcode;
+import com.oracle.graal.lir.aarch64.AArch64Call.DirectCallOp;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+import com.oracle.graal.nodes.CallTargetNode.InvokeKind;
+
+import jdk.vm.ci.hotspot.HotSpotVMConfig;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+import jdk.vm.ci.meta.Value;
+
+/**
+ * A direct call that complies with the conventions for such calls in HotSpot. In particular, for
+ * calls using an inline cache, a MOVE instruction is emitted just prior to the aligned direct call.
+ */
+@Opcode("CALL_DIRECT")
+final class AArch64HotSpotDirectStaticCallOp extends DirectCallOp {
+
+    public static final LIRInstructionClass<AArch64HotSpotDirectStaticCallOp> TYPE = LIRInstructionClass.create(AArch64HotSpotDirectStaticCallOp.class);
+
+    private final InvokeKind invokeKind;
+    private final HotSpotVMConfig config;
+
+    public AArch64HotSpotDirectStaticCallOp(ResolvedJavaMethod target, Value result, Value[] parameters, Value[] temps, LIRFrameState state, InvokeKind invokeKind, HotSpotVMConfig config) {
+        super(TYPE, target, result, parameters, temps, state);
+        assert invokeKind.isDirect();
+        this.invokeKind = invokeKind;
+        this.config = config;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+        // The mark for an invocation that uses an inline cache must be placed at the instruction
+        // that loads the Klass from the inline cache. For the first invocation the cache is set
+        // to a bit pattern that is guaranteed never to be a valid object, which causes the called
+        // function to invoke a handler that installs the correct inline cache value here.
+        crb.recordMark(invokeKind == InvokeKind.Static ? config.MARKID_INVOKESTATIC : config.MARKID_INVOKESPECIAL);
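+        // forceMov presumably emits a fixed-length move sequence so that the runtime can later
+        // patch the inline cache value in place.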
+        masm.forceMov(inlineCacheRegister, config.nonOopBits);
+        super.emitCode(crb, masm);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotDirectVirtualCallOp.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.aarch64;
+
+import static jdk.vm.ci.hotspot.aarch64.AArch64HotSpotRegisterConfig.inlineCacheRegister;
+
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.lir.LIRFrameState;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.Opcode;
+import com.oracle.graal.lir.aarch64.AArch64Call.DirectCallOp;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+import com.oracle.graal.nodes.CallTargetNode.InvokeKind;
+
+import jdk.vm.ci.hotspot.HotSpotVMConfig;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+import jdk.vm.ci.meta.Value;
+
+/**
+ * A direct call that complies with the conventions for such calls in HotSpot. In particular, for
+ * calls using an inline cache, a MOVE instruction is emitted just prior to the aligned direct call.
+ */
+@Opcode("CALL_DIRECT")
+final class AArch64HotSpotDirectVirtualCallOp extends DirectCallOp {
+
+    public static final LIRInstructionClass<AArch64HotSpotDirectVirtualCallOp> TYPE = LIRInstructionClass.create(AArch64HotSpotDirectVirtualCallOp.class);
+
+    private final InvokeKind invokeKind;
+    private final HotSpotVMConfig config;
+
+    public AArch64HotSpotDirectVirtualCallOp(ResolvedJavaMethod target, Value result, Value[] parameters, Value[] temps, LIRFrameState state, InvokeKind invokeKind, HotSpotVMConfig config) {
+        super(TYPE, target, result, parameters, temps, state);
+        assert invokeKind.isIndirect();
+        this.invokeKind = invokeKind;
+        this.config = config;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+        // The mark for an invocation that uses an inline cache must be placed at the instruction
+        // that loads the Klass from the inline cache. For the first invocation the cache is set
+        // to a bit pattern that is guaranteed never to be a valid object, which causes the called
+        // function to invoke a handler that installs the correct inline cache value here.
+        crb.recordMark(invokeKind == InvokeKind.Virtual ? config.MARKID_INVOKEVIRTUAL : config.MARKID_INVOKEINTERFACE);
+        masm.forceMov(inlineCacheRegister, config.nonOopBits);
+        super.emitCode(crb, masm);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotEpilogueOp.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.aarch64;
+
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler.ScratchRegister;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.StandardOp;
+import com.oracle.graal.lir.aarch64.AArch64BlockEndOp;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.hotspot.HotSpotVMConfig;
+
+abstract class AArch64HotSpotEpilogueOp extends AArch64BlockEndOp implements StandardOp.BlockEndOp {
+
+    private final HotSpotVMConfig config;
+
+    protected AArch64HotSpotEpilogueOp(LIRInstructionClass<? extends StandardOp.AbstractBlockEndOp> c, HotSpotVMConfig config) {
+        super(c);
+        this.config = config;
+    }
+
+    protected void leaveFrame(CompilationResultBuilder crb, AArch64MacroAssembler masm, boolean emitSafepoint) {
+        assert crb.frameContext != null : "we never elide frames on AArch64";
+        crb.frameContext.leave(crb);
+        if (emitSafepoint) {
+            try (ScratchRegister sc = masm.getScratchRegister()) {
+                Register scratch = sc.getRegister();
+                AArch64HotSpotSafepointOp.emitCode(crb, masm, config, true, scratch, null);
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotForeignCallsProvider.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.aarch64;
+
+import static com.oracle.graal.hotspot.HotSpotBackend.Options.PreferGraalStubs;
+import static com.oracle.graal.hotspot.HotSpotForeignCallLinkage.JUMP_ADDRESS;
+import static com.oracle.graal.hotspot.HotSpotForeignCallLinkage.RegisterEffect.PRESERVES_REGISTERS;
+import static com.oracle.graal.hotspot.HotSpotForeignCallLinkage.Transition.LEAF;
+import static com.oracle.graal.hotspot.replacements.CRC32Substitutions.UPDATE_BYTES_CRC32;
+import static jdk.vm.ci.aarch64.AArch64.r0;
+import static jdk.vm.ci.aarch64.AArch64.r3;
+import static jdk.vm.ci.code.CallingConvention.Type.NativeCall;
+import static jdk.vm.ci.meta.LocationIdentity.any;
+import static jdk.vm.ci.meta.Value.ILLEGAL;
+
+import com.oracle.graal.hotspot.HotSpotBackend;
+import com.oracle.graal.hotspot.HotSpotForeignCallLinkageImpl;
+import com.oracle.graal.hotspot.HotSpotGraalRuntimeProvider;
+import com.oracle.graal.hotspot.meta.HotSpotHostForeignCallsProvider;
+import com.oracle.graal.hotspot.meta.HotSpotProviders;
+
+import jdk.vm.ci.code.CallingConvention;
+import jdk.vm.ci.code.CodeCacheProvider;
+import jdk.vm.ci.code.RegisterValue;
+import jdk.vm.ci.code.TargetDescription;
+import jdk.vm.ci.common.JVMCIError;
+import jdk.vm.ci.hotspot.HotSpotJVMCIRuntimeProvider;
+import jdk.vm.ci.hotspot.HotSpotVMConfig;
+import jdk.vm.ci.meta.LIRKind;
+import jdk.vm.ci.meta.MetaAccessProvider;
+import jdk.vm.ci.meta.PlatformKind;
+import jdk.vm.ci.meta.Value;
+
+public class AArch64HotSpotForeignCallsProvider extends HotSpotHostForeignCallsProvider {
+
+    private final Value[] nativeABICallerSaveRegisters;
+
+    public AArch64HotSpotForeignCallsProvider(HotSpotJVMCIRuntimeProvider jvmciRuntime, HotSpotGraalRuntimeProvider runtime, MetaAccessProvider metaAccess, CodeCacheProvider codeCache,
+                    Value[] nativeABICallerSaveRegisters) {
+        super(jvmciRuntime, runtime, metaAccess, codeCache);
+        this.nativeABICallerSaveRegisters = nativeABICallerSaveRegisters;
+    }
+
+    @Override
+    public void initialize(HotSpotProviders providers) {
+        HotSpotVMConfig config = jvmciRuntime.getConfig();
+        TargetDescription target = providers.getCodeCache().getTarget();
+        PlatformKind word = target.arch.getWordKind();
+
+        // The calling convention for the exception handler stub is (only?) defined in
+        // TemplateInterpreterGenerator::generate_throw_exception()
+        RegisterValue exception = r0.asValue(LIRKind.reference(word));
+        RegisterValue exceptionPc = r3.asValue(LIRKind.value(word));
+        CallingConvention exceptionCc = new CallingConvention(0, ILLEGAL, exception, exceptionPc);
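+        // The exception is modelled as an object reference so that it is tracked by the GC maps,
+        // whereas the exception pc is a plain machine word.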
+        register(new HotSpotForeignCallLinkageImpl(HotSpotBackend.EXCEPTION_HANDLER, 0L, PRESERVES_REGISTERS, LEAF, null, exceptionCc, NOT_REEXECUTABLE, any()));
+        register(new HotSpotForeignCallLinkageImpl(HotSpotBackend.EXCEPTION_HANDLER_IN_CALLER, JUMP_ADDRESS, PRESERVES_REGISTERS, LEAF, exceptionCc, null, NOT_REEXECUTABLE, any()));
+
+        if (PreferGraalStubs.getValue()) {
+            throw JVMCIError.unimplemented("PreferGraalStubs");
+        }
+
+        // These stubs do callee saving
+        if (config.useCRC32Intrinsics) {
+            registerForeignCall(UPDATE_BYTES_CRC32, config.updateBytesCRC32Stub, NativeCall, PRESERVES_REGISTERS, LEAF, NOT_REEXECUTABLE, any());
+        }
+
+        super.initialize(providers);
+    }
+
+    @Override
+    public Value[] getNativeABICallerSaveRegisters() {
+        return nativeABICallerSaveRegisters;
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotJumpToExceptionHandlerInCallerOp.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.aarch64;
+
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.REG;
+import static jdk.vm.ci.aarch64.AArch64.sp;
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+import static jdk.vm.ci.hotspot.aarch64.AArch64HotSpotRegisterConfig.fp;
+
+import com.oracle.graal.asm.Label;
+import com.oracle.graal.asm.aarch64.AArch64Address;
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler.ScratchRegister;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.Opcode;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.hotspot.HotSpotVMConfig;
+import jdk.vm.ci.meta.AllocatableValue;
+
+/**
+ * Sets up the arguments for an exception handler in the caller's frame, removes the current frame
+ * and jumps to the handler.
+ */
+@Opcode("JUMP_TO_EXCEPTION_HANDLER_IN_CALLER")
+public class AArch64HotSpotJumpToExceptionHandlerInCallerOp extends AArch64HotSpotEpilogueOp {
+
+    public static final LIRInstructionClass<AArch64HotSpotJumpToExceptionHandlerInCallerOp> TYPE = LIRInstructionClass.create(AArch64HotSpotJumpToExceptionHandlerInCallerOp.class);
+
+    @Use(REG) private AllocatableValue handlerInCallerPc;
+    @Use(REG) private AllocatableValue exception;
+    @Use(REG) private AllocatableValue exceptionPc;
+    private final Register thread;
+    private final int isMethodHandleReturnOffset;
+
+    public AArch64HotSpotJumpToExceptionHandlerInCallerOp(AllocatableValue handlerInCallerPc, AllocatableValue exception, AllocatableValue exceptionPc, int isMethodHandleReturnOffset,
+                    Register thread, HotSpotVMConfig config) {
+        super(TYPE, config);
+        this.handlerInCallerPc = handlerInCallerPc;
+        this.exception = exception;
+        this.exceptionPc = exceptionPc;
+        this.isMethodHandleReturnOffset = isMethodHandleReturnOffset;
+        this.thread = thread;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+        leaveFrame(crb, masm, /* emitSafepoint */false);
+
+        // Restore sp from fp if the exception PC is a method handle call site.
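+        // (Method handle call sites may leave sp modified, in which case it is recovered from fp;
+        // see the equivalent code in the other HotSpot backends.)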
+        try (ScratchRegister sc = masm.getScratchRegister()) {
+            Register scratch = sc.getRegister();
+            AArch64Address address = masm.makeAddress(thread, isMethodHandleReturnOffset, scratch, 4, /* allowOverwrite */false);
+            masm.ldr(32, scratch, address);
+            Label noRestore = new Label();
+            masm.cbz(32, scratch, noRestore);
+            masm.mov(64, sp, fp);
+            masm.bind(noRestore);
+        }
+        masm.jmp(asRegister(handlerInCallerPc));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotLIRGenerationResult.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.graal.hotspot.aarch64;
+
+import java.util.Map;
+
+import com.oracle.graal.compiler.common.CollectionsFactory;
+import com.oracle.graal.hotspot.stubs.Stub;
+import com.oracle.graal.lir.LIR;
+import com.oracle.graal.lir.LIRFrameState;
+import com.oracle.graal.lir.StandardOp;
+import com.oracle.graal.lir.framemap.FrameMapBuilder;
+import com.oracle.graal.lir.gen.LIRGenerationResultBase;
+
+import jdk.vm.ci.code.StackSlot;
+
+public class AArch64HotSpotLIRGenerationResult extends LIRGenerationResultBase {
+    /**
+     * The slot reserved for storing the original return address when a frame is marked for
+     * deoptimization. The return address slot in the callee is overwritten with the address of a
+     * deoptimization stub.
+     */
+    private StackSlot deoptimizationRescueSlot;
+    private final Object stub;
+
+    /**
+     * Map from debug infos that need to be updated with callee save information to the operations
+     * that provide the information.
+     */
+    private final Map<LIRFrameState, StandardOp.SaveRegistersOp> calleeSaveInfo = CollectionsFactory.newMap();
+
+    public AArch64HotSpotLIRGenerationResult(String compilationUnitName, LIR lir, FrameMapBuilder frameMapBuilder, Object stub) {
+        super(compilationUnitName, lir, frameMapBuilder);
+        this.stub = stub;
+    }
+
+    StackSlot getDeoptimizationRescueSlot() {
+        return deoptimizationRescueSlot;
+    }
+
+    public final void setDeoptimizationRescueSlot(StackSlot stackSlot) {
+        this.deoptimizationRescueSlot = stackSlot;
+    }
+
+    Stub getStub() {
+        return (Stub) stub;
+    }
+
+    Map<LIRFrameState, StandardOp.SaveRegistersOp> getCalleeSaveInfo() {
+        return calleeSaveInfo;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotLIRGenerator.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.graal.hotspot.aarch64;
+
+import com.oracle.graal.asm.aarch64.AArch64Address;
+import com.oracle.graal.compiler.aarch64.AArch64ArithmeticLIRGenerator;
+import com.oracle.graal.compiler.aarch64.AArch64LIRGenerator;
+import com.oracle.graal.compiler.common.spi.ForeignCallLinkage;
+import com.oracle.graal.compiler.common.spi.LIRKindTool;
+import com.oracle.graal.hotspot.HotSpotBackend;
+import com.oracle.graal.hotspot.HotSpotLIRGenerator;
+import com.oracle.graal.hotspot.HotSpotLockStack;
+import com.oracle.graal.hotspot.meta.HotSpotProviders;
+import com.oracle.graal.hotspot.stubs.Stub;
+import com.oracle.graal.lir.LIRFrameState;
+import com.oracle.graal.lir.StandardOp.SaveRegistersOp;
+import com.oracle.graal.lir.Variable;
+import com.oracle.graal.lir.VirtualStackSlot;
+import com.oracle.graal.lir.aarch64.AArch64AddressValue;
+import com.oracle.graal.lir.aarch64.AArch64Move;
+import com.oracle.graal.lir.gen.LIRGenerationResult;
+
+import jdk.vm.ci.aarch64.AArch64;
+import jdk.vm.ci.aarch64.AArch64Kind;
+import jdk.vm.ci.code.CallingConvention;
+import jdk.vm.ci.code.RegisterValue;
+import jdk.vm.ci.common.JVMCIError;
+import jdk.vm.ci.hotspot.HotSpotVMConfig;
+import jdk.vm.ci.meta.AllocatableValue;
+import jdk.vm.ci.meta.DeoptimizationAction;
+import jdk.vm.ci.meta.DeoptimizationReason;
+import jdk.vm.ci.meta.JavaConstant;
+import jdk.vm.ci.meta.JavaKind;
+import jdk.vm.ci.meta.LIRKind;
+import jdk.vm.ci.meta.Value;
+
+/**
+ * LIR generator specialized for AArch64 HotSpot.
+ */
+public class AArch64HotSpotLIRGenerator extends AArch64LIRGenerator implements HotSpotLIRGenerator {
+
+    final HotSpotVMConfig config;
+    private HotSpotLockStack lockStack;
+
+    protected AArch64HotSpotLIRGenerator(HotSpotProviders providers, HotSpotVMConfig config, CallingConvention cc, LIRGenerationResult lirGenRes) {
+        this(providers, config, cc, lirGenRes, new ConstantTableBaseProvider());
+    }
+
+    private AArch64HotSpotLIRGenerator(HotSpotProviders providers, HotSpotVMConfig config, CallingConvention cc, LIRGenerationResult lirGenRes, ConstantTableBaseProvider constantTableBaseProvider) {
+        this(new AArch64HotSpotLIRKindTool(), new AArch64ArithmeticLIRGenerator(), new AArch64HotSpotMoveFactory(providers.getCodeCache(), constantTableBaseProvider), providers, config, cc,
+                        lirGenRes, constantTableBaseProvider);
+    }
+
+    protected AArch64HotSpotLIRGenerator(LIRKindTool lirKindTool, AArch64ArithmeticLIRGenerator arithmeticLIRGen, MoveFactory moveFactory, HotSpotProviders providers, HotSpotVMConfig config,
+                    CallingConvention cc, LIRGenerationResult lirGenRes, ConstantTableBaseProvider constantTableBaseProvider) {
+        super(lirKindTool, arithmeticLIRGen, moveFactory, providers, cc, lirGenRes, constantTableBaseProvider);
+        this.config = config;
+    }
+
+    @Override
+    public boolean needOnlyOopMaps() {
+        // Stubs only need oop maps
+        return ((AArch64HotSpotLIRGenerationResult) getResult()).getStub() != null;
+    }
+
+    @Override
+    public HotSpotProviders getProviders() {
+        return (HotSpotProviders) super.getProviders();
+    }
+
+    @Override
+    public void emitTailcall(Value[] args, Value address) {
+        throw JVMCIError.unimplemented();
+    }
+
+    @Override
+    public SaveRegistersOp emitSaveAllRegisters() {
+        throw JVMCIError.unimplemented();
+    }
+
+    @Override
+    public VirtualStackSlot getLockSlot(int lockDepth) {
+        return getLockStack().makeLockSlot(lockDepth);
+    }
+
+    private HotSpotLockStack getLockStack() {
+        assert lockStack != null;
+        return lockStack;
+    }
+
+    protected void setLockStack(HotSpotLockStack lockStack) {
+        assert this.lockStack == null;
+        this.lockStack = lockStack;
+    }
+
+    @SuppressWarnings("unused")
+    @Override
+    public Value emitCompress(Value pointer, HotSpotVMConfig.CompressEncoding encoding, boolean nonNull) {
+        LIRKind inputKind = pointer.getLIRKind();
+        assert inputKind.getPlatformKind() == AArch64Kind.QWORD;
+        Variable result = newVariable(LIRKind.reference(AArch64Kind.DWORD));
+        AllocatableValue base = getCompressionBase(encoding, inputKind);
+        // TODO (das) continue here.
+        throw JVMCIError.unimplemented("finish implementation");
+    }
+
+    private AllocatableValue getCompressionBase(HotSpotVMConfig.CompressEncoding encoding, LIRKind inputKind) {
+        if (inputKind.isReference(0)) {
+            // oop
+            return getProviders().getRegisters().getHeapBaseRegister().asValue();
+        } else {
+            // metaspace pointer
+            if (encoding.base == 0) {
+                return AArch64.zr.asValue(LIRKind.value(AArch64Kind.QWORD));
+            } else {
+                return emitLoadConstant(LIRKind.value(AArch64Kind.QWORD), JavaConstant.forLong(encoding.base));
+            }
+        }
+    }
+
+    @Override
+    public Value emitUncompress(Value pointer, HotSpotVMConfig.CompressEncoding encoding, boolean nonNull) {
+        // Like emitCompress above, this is not yet implemented; fail fast rather
+        // than returning null and failing obscurely later.
+        throw JVMCIError.unimplemented();
+    }
+
+    @Override
+    public void emitPrefetchAllocate(Value address) {
+        // TODO (das) Optimization for later.
+    }
+
+    @Override
+    public void emitDeoptimizeCaller(DeoptimizationAction action, DeoptimizationReason reason) {
+        Value actionAndReason = emitJavaConstant(getMetaAccess().encodeDeoptActionAndReason(action, reason, 0));
+        Value nullValue = emitConstant(LIRKind.reference(AArch64Kind.QWORD), JavaConstant.NULL_POINTER);
+        moveDeoptValuesToThread(actionAndReason, nullValue);
+        append(new AArch64HotSpotDeoptimizeCallerOp(config));
+    }
+
+    @Override
+    public void emitDeoptimize(Value actionAndReason, Value failedSpeculation, LIRFrameState state) {
+        moveDeoptValuesToThread(actionAndReason, failedSpeculation);
+        append(new AArch64HotSpotDeoptimizeOp(state));
+    }
+
+    private void moveDeoptValuesToThread(Value actionAndReason, Value speculation) {
+        moveValueToThread(actionAndReason, config.pendingDeoptimizationOffset);
+        moveValueToThread(speculation, config.pendingFailedSpeculationOffset);
+    }
+
+    private void moveValueToThread(Value value, int offset) {
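+        // Stores the value into the thread-local field at the given offset, where
+        // the deoptimization handler expects to find it.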
+        LIRKind wordKind = LIRKind.value(target().arch.getWordKind());
+        RegisterValue thread = getProviders().getRegisters().getThreadRegister().asValue(wordKind);
+        AArch64AddressValue pendingDeoptAddress = new AArch64AddressValue(value.getLIRKind(), thread, Value.ILLEGAL, offset, false, AArch64Address.AddressingMode.IMMEDIATE_UNSCALED);
+        append(new AArch64Move.StoreOp((AArch64Kind) value.getPlatformKind(), pendingDeoptAddress, loadReg(value), null));
+    }
+
+    @Override
+    public void emitUnwind(Value exception) {
+        ForeignCallLinkage linkage = getForeignCalls().lookupForeignCall(HotSpotBackend.UNWIND_EXCEPTION_TO_CALLER);
+        CallingConvention outgoingCc = linkage.getOutgoingCallingConvention();
+        assert outgoingCc.getArgumentCount() == 2;
+        RegisterValue exceptionParameter = (RegisterValue) outgoingCc.getArgument(0);
+        emitMove(exceptionParameter, exception);
+        append(new AArch64HotSpotUnwindOp(config, exceptionParameter));
+    }
+
+    @Override
+    public void emitReturn(JavaKind kind, Value input) {
+        AllocatableValue operand = Value.ILLEGAL;
+        if (input != null) {
+            operand = resultOperandFor(kind, input.getLIRKind());
+            emitMove(operand, input);
+        }
+        append(new AArch64HotSpotReturnOp(operand, getStub() != null, config));
+    }
+
+    /**
+     * Gets the {@link Stub} this generator is generating code for or {@code null} if a stub is not
+     * being generated.
+     */
+    public Stub getStub() {
+        return ((AArch64HotSpotLIRGenerationResult) getResult()).getStub();
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotLIRKindTool.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.aarch64;
+
+import com.oracle.graal.compiler.aarch64.AArch64LIRKindTool;
+import com.oracle.graal.hotspot.nodes.type.HotSpotLIRKindTool;
+
+import jdk.vm.ci.aarch64.AArch64Kind;
+import jdk.vm.ci.meta.LIRKind;
+
+public class AArch64HotSpotLIRKindTool extends AArch64LIRKindTool implements HotSpotLIRKindTool {
+
+    @Override
+    public LIRKind getNarrowOopKind() {
+        return LIRKind.reference(AArch64Kind.DWORD);
+    }
+
+    @Override
+    public LIRKind getNarrowPointerKind() {
+        return LIRKind.value(AArch64Kind.DWORD);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotLoweringProvider.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.graal.hotspot.aarch64;
+
+import com.oracle.graal.compiler.common.spi.ForeignCallsProvider;
+import com.oracle.graal.graph.Node;
+import com.oracle.graal.hotspot.HotSpotGraalRuntimeProvider;
+import com.oracle.graal.hotspot.meta.DefaultHotSpotLoweringProvider;
+import com.oracle.graal.hotspot.meta.HotSpotProviders;
+import com.oracle.graal.hotspot.meta.HotSpotRegistersProvider;
+import com.oracle.graal.nodes.calc.ConvertNode;
+import com.oracle.graal.nodes.calc.FixedBinaryNode;
+import com.oracle.graal.nodes.calc.RemNode;
+import com.oracle.graal.nodes.spi.LoweringTool;
+import com.oracle.graal.replacements.aarch64.AArch64FloatArithmeticSnippets;
+import com.oracle.graal.replacements.aarch64.AArch64IntegerArithmeticSnippets;
+
+import jdk.vm.ci.code.TargetDescription;
+import jdk.vm.ci.hotspot.HotSpotConstantReflectionProvider;
+import jdk.vm.ci.hotspot.HotSpotVMConfig;
+import jdk.vm.ci.meta.MetaAccessProvider;
+
+public class AArch64HotSpotLoweringProvider extends DefaultHotSpotLoweringProvider {
+
+    private AArch64IntegerArithmeticSnippets integerArithmeticSnippets;
+    private AArch64FloatArithmeticSnippets floatArithmeticSnippets;
+
+    public AArch64HotSpotLoweringProvider(HotSpotGraalRuntimeProvider runtime, MetaAccessProvider metaAccess, ForeignCallsProvider foreignCalls, HotSpotRegistersProvider registers,
+                    HotSpotConstantReflectionProvider constantReflection, TargetDescription target) {
+        super(runtime, metaAccess, foreignCalls, registers, constantReflection, target);
+    }
+
+    @Override
+    public void initialize(HotSpotProviders providers, HotSpotVMConfig config) {
+        integerArithmeticSnippets = new AArch64IntegerArithmeticSnippets(providers, providers.getSnippetReflection(), providers.getCodeCache().getTarget());
+        floatArithmeticSnippets = new AArch64FloatArithmeticSnippets(providers, providers.getSnippetReflection(), providers.getCodeCache().getTarget());
+        super.initialize(providers, config);
+    }
+
+    @Override
+    public void lower(Node n, LoweringTool tool) {
+        if (n instanceof FixedBinaryNode) {
+            integerArithmeticSnippets.lower((FixedBinaryNode) n, tool);
+        } else if (n instanceof RemNode) {
+            floatArithmeticSnippets.lower((RemNode) n, tool);
+        } else if (n instanceof ConvertNode) {
+            // Unlike AMD64, AArch64 has no custom lowerings for ConvertNodes. The
+            // superclass does not expect to see a ConvertNode and would throw an
+            // error, so simply do nothing here.
+        } else {
+            super.lower(n, tool);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotMove.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.aarch64;
+
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.HINT;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.ILLEGAL;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.REG;
+import static jdk.vm.ci.aarch64.AArch64.zr;
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+
+import com.oracle.graal.asm.aarch64.AArch64Assembler;
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.aarch64.AArch64LIRInstruction;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.hotspot.HotSpotVMConfig.CompressEncoding;
+import jdk.vm.ci.meta.AllocatableValue;
+
+public class AArch64HotSpotMove {
+
+    /**
+     * Compresses an 8-byte pointer into a 4-byte int.
+     */
+    public static class CompressPointer extends AArch64LIRInstruction {
+        public static final LIRInstructionClass<CompressPointer> TYPE = LIRInstructionClass.create(CompressPointer.class);
+
+        private final CompressEncoding encoding;
+        private final boolean nonNull;
+
+        @Def({REG, HINT}) protected AllocatableValue result;
+        @Use({REG}) protected AllocatableValue input;
+        @Alive({REG, ILLEGAL}) protected AllocatableValue baseRegister;
+
+        public CompressPointer(AllocatableValue result, AllocatableValue input, AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull) {
+            super(TYPE);
+            this.result = result;
+            this.input = input;
+            this.baseRegister = baseRegister;
+            this.encoding = encoding;
+            this.nonNull = nonNull;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            Register resultRegister = asRegister(result);
+            Register ptr = asRegister(input);
+            Register base = asRegister(baseRegister);
+            // result = (ptr - base) >> shift
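+            // e.g. with base = 0x700000000 and shift = 3, an oop at base + 0x1000
+            // compresses to 0x1000 >> 3 = 0x200; a null oop must remain 0.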
+            if (encoding.base == 0) {
+                if (encoding.shift == 0) {
+                    masm.movx(resultRegister, ptr);
+                } else {
+                    assert encoding.alignment == encoding.shift : "Encode algorithm is wrong";
+                    masm.lshr(64, resultRegister, ptr, encoding.shift);
+                }
+            } else if (nonNull) {
+                masm.sub(64, resultRegister, ptr, base);
+                if (encoding.shift != 0) {
+                    assert encoding.alignment == encoding.shift : "Encode algorithm is wrong";
+                    masm.shl(64, resultRegister, resultRegister, encoding.shift);
+                }
+            } else {
+                // if ptr is null it still has to be null after compression
+                masm.cmp(64, ptr, 0);
+                masm.cmov(64, resultRegister, ptr, base, AArch64Assembler.ConditionFlag.NE);
+                masm.sub(64, resultRegister, resultRegister, base);
+                if (encoding.shift != 0) {
+                    assert encoding.alignment == encoding.shift : "Encode algorithm is wrong";
+                    masm.lshr(64, resultRegister, resultRegister, encoding.shift);
+                }
+            }
+        }
+    }
+
+    /**
+     * Decompresses a 4-byte compressed pointer into an actual 8-byte pointer.
+     */
+    public static class UncompressPointer extends AArch64LIRInstruction {
+        public static final LIRInstructionClass<UncompressPointer> TYPE = LIRInstructionClass.create(UncompressPointer.class);
+
+        private final CompressEncoding encoding;
+        private final boolean nonNull;
+
+        @Def({REG}) protected AllocatableValue result;
+        @Use({REG}) protected AllocatableValue input;
+        @Alive({REG, ILLEGAL}) protected AllocatableValue baseRegister;
+
+        public UncompressPointer(AllocatableValue result, AllocatableValue input, AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull) {
+            super(TYPE);
+            this.result = result;
+            this.input = input;
+            this.baseRegister = baseRegister;
+            this.encoding = encoding;
+            this.nonNull = nonNull;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            Register ptr = asRegister(input);
+            Register resultRegister = asRegister(result);
+            Register base = asRegister(baseRegister);
+            // result = base + (ptr << shift)
+            if (nonNull) {
+                assert encoding.shift == encoding.alignment;
+                masm.add(64, resultRegister, base, ptr, AArch64Assembler.ShiftType.LSL, encoding.shift);
+            } else {
+                // If ptr is null it has to be null after decompression as well:
+                // compute base + (ptr << shift), then select zero when ptr was zero.
+                masm.add(64, resultRegister, base, ptr, AArch64Assembler.ShiftType.LSL, encoding.shift);
+                masm.cmp(32, ptr, 0);
+                masm.cmov(64, resultRegister, resultRegister, zr, AArch64Assembler.ConditionFlag.NE);
+            }
+        }
+    }
+
+    public static void decodeKlassPointer(AArch64MacroAssembler masm, Register result, Register ptr, Register klassBase, CompressEncoding encoding) {
+        // result = klassBase + ptr << shift
+        if (encoding.shift != 0 || encoding.base != 0) {
+            // (shift != 0 -> shift == alignment) && (shift == 0 -> base == 0)
+            assert (encoding.shift == 0 || encoding.shift == encoding.alignment) && (encoding.shift != 0 || encoding.base == 0) : "Decode algorithm is wrong.";
+            masm.add(64, result, klassBase, ptr, AArch64Assembler.ExtendType.UXTX, encoding.shift);
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotMoveFactory.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.aarch64;
+
+import static jdk.vm.ci.hotspot.HotSpotCompressedNullConstant.COMPRESSED_NULL;
+import static jdk.vm.ci.meta.JavaConstant.INT_0;
+import static jdk.vm.ci.meta.JavaConstant.LONG_0;
+
+import com.oracle.graal.compiler.aarch64.AArch64LIRGenerator.ConstantTableBaseProvider;
+import com.oracle.graal.compiler.aarch64.AArch64MoveFactory;
+import com.oracle.graal.lir.LIRInstruction;
+
+import jdk.vm.ci.code.CodeCacheProvider;
+import jdk.vm.ci.common.JVMCIError;
+import jdk.vm.ci.hotspot.HotSpotConstant;
+import jdk.vm.ci.hotspot.HotSpotObjectConstant;
+import jdk.vm.ci.meta.AllocatableValue;
+import jdk.vm.ci.meta.Constant;
+import jdk.vm.ci.meta.JavaConstant;
+
+public class AArch64HotSpotMoveFactory extends AArch64MoveFactory {
+
+    public AArch64HotSpotMoveFactory(CodeCacheProvider codeCache, ConstantTableBaseProvider constantTableBaseProvider) {
+        super(codeCache, constantTableBaseProvider);
+    }
+
+    @Override
+    public boolean canInlineConstant(JavaConstant c) {
+        if (COMPRESSED_NULL.equals(c)) {
+            return true;
+        } else if (c instanceof HotSpotObjectConstant) {
+            return false;
+        } else {
+            return super.canInlineConstant(c);
+        }
+    }
+
+    @Override
+    public LIRInstruction createLoad(AllocatableValue dst, Constant src) {
+        Constant usedSource;
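+        // Null constants carry no payload: rewrite a compressed null to the int
+        // zero constant and an uncompressed null oop to the long zero constant.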
+        if (COMPRESSED_NULL.equals(src)) {
+            usedSource = INT_0;
+        } else if (src instanceof HotSpotObjectConstant && ((HotSpotObjectConstant) src).isNull()) {
+            usedSource = LONG_0;
+        } else {
+            usedSource = src;
+        }
+        if (usedSource instanceof HotSpotConstant) {
+            HotSpotConstant constant = (HotSpotConstant) usedSource;
+            if (constant.isCompressed()) {
+                // return new SPARCHotSpotMove.LoadHotSpotObjectConstantInline(constant, dst);
+                throw JVMCIError.unimplemented();
+            } else {
+                // return new SPARCHotSpotMove.LoadHotSpotObjectConstantFromTable(constant, dst,
+                // constantTableBaseProvider.getConstantTableBase());
+                throw JVMCIError.unimplemented();
+            }
+        } else {
+            return super.createLoad(dst, usedSource);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotNodeLIRBuilder.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.aarch64;
+
+import static com.oracle.graal.hotspot.HotSpotBackend.EXCEPTION_HANDLER_IN_CALLER;
+import static jdk.vm.ci.aarch64.AArch64.lr;
+import static jdk.vm.ci.code.ValueUtil.isStackSlot;
+import static jdk.vm.ci.hotspot.HotSpotVMConfig.config;
+import static jdk.vm.ci.hotspot.aarch64.AArch64HotSpotRegisterConfig.fp;
+import static jdk.vm.ci.hotspot.aarch64.AArch64HotSpotRegisterConfig.inlineCacheRegister;
+import static jdk.vm.ci.hotspot.aarch64.AArch64HotSpotRegisterConfig.metaspaceMethodRegister;
+
+import com.oracle.graal.compiler.aarch64.AArch64NodeLIRBuilder;
+import com.oracle.graal.compiler.aarch64.AArch64NodeMatchRules;
+import com.oracle.graal.compiler.common.spi.ForeignCallLinkage;
+import com.oracle.graal.compiler.gen.DebugInfoBuilder;
+import com.oracle.graal.debug.Debug;
+import com.oracle.graal.hotspot.HotSpotDebugInfoBuilder;
+import com.oracle.graal.hotspot.HotSpotLockStack;
+import com.oracle.graal.hotspot.HotSpotNodeLIRBuilder;
+import com.oracle.graal.hotspot.nodes.DirectCompareAndSwapNode;
+import com.oracle.graal.hotspot.nodes.HotSpotDirectCallTargetNode;
+import com.oracle.graal.hotspot.nodes.HotSpotIndirectCallTargetNode;
+import com.oracle.graal.lir.LIRFrameState;
+import com.oracle.graal.lir.Variable;
+import com.oracle.graal.lir.aarch64.AArch64Move.CompareAndSwap;
+import com.oracle.graal.lir.gen.LIRGeneratorTool;
+import com.oracle.graal.nodes.CallTargetNode.InvokeKind;
+import com.oracle.graal.nodes.DirectCallTargetNode;
+import com.oracle.graal.nodes.FullInfopointNode;
+import com.oracle.graal.nodes.IndirectCallTargetNode;
+import com.oracle.graal.nodes.ParameterNode;
+import com.oracle.graal.nodes.SafepointNode;
+import com.oracle.graal.nodes.StructuredGraph;
+import com.oracle.graal.nodes.ValueNode;
+import com.oracle.graal.nodes.spi.NodeValueMap;
+
+import jdk.vm.ci.aarch64.AArch64Kind;
+import jdk.vm.ci.code.BytecodeFrame;
+import jdk.vm.ci.code.CallingConvention;
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.code.RegisterValue;
+import jdk.vm.ci.code.StackSlot;
+import jdk.vm.ci.code.ValueUtil;
+import jdk.vm.ci.common.JVMCIError;
+import jdk.vm.ci.hotspot.HotSpotResolvedJavaMethod;
+import jdk.vm.ci.meta.AllocatableValue;
+import jdk.vm.ci.meta.LIRKind;
+import jdk.vm.ci.meta.Value;
+
+/**
+ * Node LIR builder specialized for AArch64 HotSpot.
+ */
+public class AArch64HotSpotNodeLIRBuilder extends AArch64NodeLIRBuilder implements HotSpotNodeLIRBuilder {
+
+    public AArch64HotSpotNodeLIRBuilder(StructuredGraph graph, LIRGeneratorTool gen, AArch64NodeMatchRules nodeMatchRules) {
+        super(graph, gen, nodeMatchRules);
+        assert gen instanceof AArch64HotSpotLIRGenerator;
+        assert getDebugInfoBuilder() instanceof HotSpotDebugInfoBuilder;
+        ((AArch64HotSpotLIRGenerator) gen).setLockStack(((HotSpotDebugInfoBuilder) getDebugInfoBuilder()).lockStack());
+    }
+
+    @Override
+    protected DebugInfoBuilder createDebugInfoBuilder(StructuredGraph graph, NodeValueMap nodeValueMap) {
+        HotSpotLockStack lockStack = new HotSpotLockStack(gen.getResult().getFrameMapBuilder(), LIRKind.value(AArch64Kind.QWORD));
+        return new HotSpotDebugInfoBuilder(nodeValueMap, lockStack);
+    }
+
+    private AArch64HotSpotLIRGenerator getGen() {
+        return (AArch64HotSpotLIRGenerator) gen;
+    }
+
+    @Override
+    protected void emitPrologue(StructuredGraph graph) {
+        CallingConvention incomingArguments = gen.getCallingConvention();
+        Value[] params = new Value[incomingArguments.getArgumentCount() + 2];
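+        // Reserve two extra slots: fp and lr are appended below so the register
+        // allocator sees them as live at method entry and preserves them.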
+        for (int i = 0; i < incomingArguments.getArgumentCount(); i++) {
+            params[i] = incomingArguments.getArgument(i);
+            if (isStackSlot(params[i])) {
+                StackSlot slot = ValueUtil.asStackSlot(params[i]);
+                if (slot.isInCallerFrame() && !gen.getResult().getLIR().hasArgInCallerFrame()) {
+                    gen.getResult().getLIR().setHasArgInCallerFrame();
+                }
+            }
+        }
+        params[params.length - 2] = fp.asValue(LIRKind.value(AArch64Kind.QWORD));
+        params[params.length - 1] = lr.asValue(LIRKind.value(AArch64Kind.QWORD));
+
+        gen.emitIncomingValues(params);
+
+        for (ParameterNode param : graph.getNodes(ParameterNode.TYPE)) {
+            Value paramValue = params[param.index()];
+            assert paramValue.getLIRKind().equals(getLIRGeneratorTool().getLIRKind(param.stamp())) : paramValue.getLIRKind() + " != " + param.stamp();
+            setResult(param, gen.emitMove(paramValue));
+        }
+    }
+
+    @Override
+    public void visitSafepointNode(SafepointNode i) {
+        LIRFrameState info = state(i);
+        Variable scratch = gen.newVariable(LIRKind.value(getGen().target().arch.getWordKind()));
+        append(new AArch64HotSpotSafepointOp(info, getGen().config, scratch));
+    }
+
+    @Override
+    protected void emitDirectCall(DirectCallTargetNode callTarget, Value result, Value[] parameters, Value[] temps, LIRFrameState callState) {
+        InvokeKind invokeKind = ((HotSpotDirectCallTargetNode) callTarget).invokeKind();
+        if (invokeKind.isIndirect()) {
+            append(new AArch64HotSpotDirectVirtualCallOp(callTarget.targetMethod(), result, parameters, temps, callState, invokeKind, config()));
+        } else {
+            assert invokeKind.isDirect();
+            HotSpotResolvedJavaMethod resolvedMethod = (HotSpotResolvedJavaMethod) callTarget.targetMethod();
+            assert resolvedMethod.isConcrete() : "Cannot make direct call to abstract method.";
+            append(new AArch64HotSpotDirectStaticCallOp(callTarget.targetMethod(), result, parameters, temps, callState, invokeKind, config()));
+        }
+    }
+
+    @Override
+    protected void emitIndirectCall(IndirectCallTargetNode callTarget, Value result, Value[] parameters, Value[] temps, LIRFrameState callState) {
+        Value metaspaceMethodSrc = operand(((HotSpotIndirectCallTargetNode) callTarget).metaspaceMethod());
+        Value targetAddressSrc = operand(callTarget.computedAddress());
+        AllocatableValue metaspaceMethodDst = metaspaceMethodRegister.asValue(metaspaceMethodSrc.getLIRKind());
+        AllocatableValue targetAddressDst = inlineCacheRegister.asValue(targetAddressSrc.getLIRKind());
+        gen.emitMove(metaspaceMethodDst, metaspaceMethodSrc);
+        gen.emitMove(targetAddressDst, targetAddressSrc);
+        append(new AArch64IndirectCallOp(callTarget.targetMethod(), result, parameters, temps, metaspaceMethodDst, targetAddressDst, callState, config()));
+    }
+
+    @Override
+    public void emitPatchReturnAddress(ValueNode address) {
+        throw JVMCIError.unimplemented();
+    }
+
+    @Override
+    public void emitJumpToExceptionHandlerInCaller(ValueNode handlerInCallerPc, ValueNode exception, ValueNode exceptionPc) {
+        Variable handler = gen.load(operand(handlerInCallerPc));
+        ForeignCallLinkage linkage = gen.getForeignCalls().lookupForeignCall(EXCEPTION_HANDLER_IN_CALLER);
+        CallingConvention outgoingCc = linkage.getOutgoingCallingConvention();
+        assert outgoingCc.getArgumentCount() == 2;
+        RegisterValue exceptionFixed = (RegisterValue) outgoingCc.getArgument(0);
+        RegisterValue exceptionPcFixed = (RegisterValue) outgoingCc.getArgument(1);
+        gen.emitMove(exceptionFixed, operand(exception));
+        gen.emitMove(exceptionPcFixed, operand(exceptionPc));
+        Register thread = getGen().getProviders().getRegisters().getThreadRegister();
+        AArch64HotSpotJumpToExceptionHandlerInCallerOp op = new AArch64HotSpotJumpToExceptionHandlerInCallerOp(handler, exceptionFixed, exceptionPcFixed,
+                        getGen().config.threadIsMethodHandleReturnOffset, thread, config());
+        append(op);
+    }
+
+    @Override
+    public void visitFullInfopointNode(FullInfopointNode i) {
+        if (i.getState() != null && i.getState().bci == BytecodeFrame.AFTER_BCI) {
+            Debug.log("Ignoring InfopointNode for AFTER_BCI");
+        } else {
+            super.visitFullInfopointNode(i);
+        }
+    }
+
+    @Override
+    public void visitDirectCompareAndSwap(DirectCompareAndSwapNode x) {
+        AllocatableValue cmpValue = gen.asAllocatable(operand(x.expectedValue()));
+        AllocatableValue newValue = gen.asAllocatable(operand(x.newValue()));
+        LIRKind kind = cmpValue.getLIRKind();
+        assert kind.equals(newValue.getLIRKind());
+
+        Variable result = gen.newVariable(newValue.getLIRKind());
+        Variable scratch = gen.newVariable(LIRKind.value(AArch64Kind.DWORD));
+        append(new CompareAndSwap(result, cmpValue, newValue, getGen().asAddressValue(operand(x.getAddress())), scratch));
+        setResult(x, result);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotRegisterAllocationConfig.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.aarch64;
+
+import static jdk.vm.ci.aarch64.AArch64.r0;
+import static jdk.vm.ci.aarch64.AArch64.r1;
+import static jdk.vm.ci.aarch64.AArch64.r10;
+import static jdk.vm.ci.aarch64.AArch64.r11;
+import static jdk.vm.ci.aarch64.AArch64.r12;
+import static jdk.vm.ci.aarch64.AArch64.r13;
+import static jdk.vm.ci.aarch64.AArch64.r14;
+import static jdk.vm.ci.aarch64.AArch64.r15;
+import static jdk.vm.ci.aarch64.AArch64.r16;
+import static jdk.vm.ci.aarch64.AArch64.r17;
+import static jdk.vm.ci.aarch64.AArch64.r18;
+import static jdk.vm.ci.aarch64.AArch64.r19;
+import static jdk.vm.ci.aarch64.AArch64.r2;
+import static jdk.vm.ci.aarch64.AArch64.r20;
+import static jdk.vm.ci.aarch64.AArch64.r21;
+import static jdk.vm.ci.aarch64.AArch64.r22;
+import static jdk.vm.ci.aarch64.AArch64.r23;
+import static jdk.vm.ci.aarch64.AArch64.r24;
+import static jdk.vm.ci.aarch64.AArch64.r25;
+import static jdk.vm.ci.aarch64.AArch64.r26;
+import static jdk.vm.ci.aarch64.AArch64.r27;
+import static jdk.vm.ci.aarch64.AArch64.r28;
+import static jdk.vm.ci.aarch64.AArch64.r3;
+import static jdk.vm.ci.aarch64.AArch64.r4;
+import static jdk.vm.ci.aarch64.AArch64.r5;
+import static jdk.vm.ci.aarch64.AArch64.r6;
+import static jdk.vm.ci.aarch64.AArch64.r7;
+import static jdk.vm.ci.aarch64.AArch64.r8;
+import static jdk.vm.ci.aarch64.AArch64.r9;
+import static jdk.vm.ci.aarch64.AArch64.v0;
+import static jdk.vm.ci.aarch64.AArch64.v1;
+import static jdk.vm.ci.aarch64.AArch64.v10;
+import static jdk.vm.ci.aarch64.AArch64.v11;
+import static jdk.vm.ci.aarch64.AArch64.v12;
+import static jdk.vm.ci.aarch64.AArch64.v13;
+import static jdk.vm.ci.aarch64.AArch64.v14;
+import static jdk.vm.ci.aarch64.AArch64.v15;
+import static jdk.vm.ci.aarch64.AArch64.v16;
+import static jdk.vm.ci.aarch64.AArch64.v17;
+import static jdk.vm.ci.aarch64.AArch64.v18;
+import static jdk.vm.ci.aarch64.AArch64.v19;
+import static jdk.vm.ci.aarch64.AArch64.v2;
+import static jdk.vm.ci.aarch64.AArch64.v20;
+import static jdk.vm.ci.aarch64.AArch64.v21;
+import static jdk.vm.ci.aarch64.AArch64.v22;
+import static jdk.vm.ci.aarch64.AArch64.v23;
+import static jdk.vm.ci.aarch64.AArch64.v24;
+import static jdk.vm.ci.aarch64.AArch64.v25;
+import static jdk.vm.ci.aarch64.AArch64.v26;
+import static jdk.vm.ci.aarch64.AArch64.v27;
+import static jdk.vm.ci.aarch64.AArch64.v28;
+import static jdk.vm.ci.aarch64.AArch64.v29;
+import static jdk.vm.ci.aarch64.AArch64.v3;
+import static jdk.vm.ci.aarch64.AArch64.v30;
+import static jdk.vm.ci.aarch64.AArch64.v31;
+import static jdk.vm.ci.aarch64.AArch64.v4;
+import static jdk.vm.ci.aarch64.AArch64.v5;
+import static jdk.vm.ci.aarch64.AArch64.v6;
+import static jdk.vm.ci.aarch64.AArch64.v7;
+import static jdk.vm.ci.aarch64.AArch64.v8;
+import static jdk.vm.ci.aarch64.AArch64.v9;
+
+import java.util.ArrayList;
+import java.util.BitSet;
+
+import com.oracle.graal.compiler.common.alloc.RegisterAllocationConfig;
+
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.code.RegisterConfig;
+
+public class AArch64HotSpotRegisterAllocationConfig extends RegisterAllocationConfig {
+
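+    /**
+     * Preferred order in which to hand out allocatable registers. r29 (the frame pointer), r30
+     * (the link register) and r31 (sp/zr) are reserved by the platform and therefore excluded.
+     */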
+    // @formatter:off
+    static final Register[] registerAllocationOrder = {
+        r0,  r1,  r2,  r3,  r4,  r5,  r6,  r7,
+        r8,  r9,  r10, r11, r12, r13, r14, r15,
+        r16, r17, r18, r19, r20, r21, r22, r23,
+        r24, r25, r26, r27, r28, /* r29, r30, r31 */
+
+        v0,  v1,  v2,  v3,  v4,  v5,  v6,  v7,
+        v8,  v9,  v10, v11, v12, v13, v14, v15,
+        v16, v17, v18, v19, v20, v21, v22, v23,
+        v24, v25, v26, v27, v28, v29, v30, v31
+    };
+    // @formatter:on
+
+    public AArch64HotSpotRegisterAllocationConfig(RegisterConfig registerConfig) {
+        super(registerConfig);
+    }
+
+    @Override
+    protected Register[] initAllocatable(Register[] registers) {
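+        // Filter the preferred allocation order down to the set of registers that
+        // the register configuration actually reports as allocatable.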
+        BitSet regMap = new BitSet(registerConfig.getAllocatableRegisters().length);
+        for (Register reg : registers) {
+            regMap.set(reg.number);
+        }
+
+        ArrayList<Register> allocatableRegisters = new ArrayList<>(registers.length);
+        for (Register reg : registerAllocationOrder) {
+            if (regMap.get(reg.number)) {
+                allocatableRegisters.add(reg);
+            }
+        }
+
+        return super.initAllocatable(allocatableRegisters.toArray(new Register[allocatableRegisters.size()]));
+    }
+
+    @Override
+    protected AllocatableRegisters createAllocatableRegisters(Register[] registers) {
+        int min = Integer.MAX_VALUE;
+        int max = Integer.MIN_VALUE;
+        for (Register reg : registers) {
+            int number = reg.number;
+            if (number < min) {
+                min = number;
+            }
+            if (number > max) {
+                max = number;
+            }
+        }
+        assert min < max;
+        return new AllocatableRegisters(registers, min, max);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotReturnOp.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.aarch64;
+
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.ILLEGAL;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.REG;
+import static jdk.vm.ci.aarch64.AArch64.lr;
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.Opcode;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+import jdk.vm.ci.hotspot.HotSpotVMConfig;
+import jdk.vm.ci.meta.Value;
+
+/**
+ * Returns from a function.
+ */
+@Opcode("RETURN")
+public final class AArch64HotSpotReturnOp extends AArch64HotSpotEpilogueOp {
+
+    public static final LIRInstructionClass<AArch64HotSpotReturnOp> TYPE = LIRInstructionClass.create(AArch64HotSpotReturnOp.class);
+
+    @Use({REG, ILLEGAL}) private Value result;
+    private final boolean isStub;
+
+    public AArch64HotSpotReturnOp(Value result, boolean isStub, HotSpotVMConfig config) {
+        super(TYPE, config);
+        assert validReturnValue(result);
+        this.result = result;
+        this.isStub = isStub;
+    }
+
+    private static boolean validReturnValue(Value result) {
+        if (result.equals(Value.ILLEGAL)) {
+            return true;
+        }
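+        // Encoding 0 denotes r0 (or v0 for SIMD/FP values), the AArch64 ABI
+        // return register.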
+        return asRegister(result).encoding == 0;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+        final boolean emitSafepoint = !isStub;
+        leaveFrame(crb, masm, emitSafepoint);
+        masm.ret(lr);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotSafepointOp.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.aarch64;
+
+import static jdk.vm.ci.aarch64.AArch64.zr;
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+
+import com.oracle.graal.asm.NumUtil;
+import com.oracle.graal.asm.aarch64.AArch64Address;
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.lir.LIRFrameState;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.Opcode;
+import com.oracle.graal.lir.aarch64.AArch64LIRInstruction;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+import jdk.vm.ci.code.InfopointReason;
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.hotspot.HotSpotVMConfig;
+import jdk.vm.ci.meta.AllocatableValue;
+
+/**
+ * Emits a safepoint poll.
+ */
+@Opcode("SAFEPOINT")
+public class AArch64HotSpotSafepointOp extends AArch64LIRInstruction {
+    public static final LIRInstructionClass<AArch64HotSpotSafepointOp> TYPE = LIRInstructionClass.create(AArch64HotSpotSafepointOp.class);
+
+    @State protected LIRFrameState state;
+    @Temp protected AllocatableValue scratchValue;
+
+    private final HotSpotVMConfig config;
+
+    public AArch64HotSpotSafepointOp(LIRFrameState state, HotSpotVMConfig config, AllocatableValue scratch) {
+        super(TYPE);
+        this.state = state;
+        this.config = config;
+        this.scratchValue = scratch;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+        Register scratch = asRegister(scratchValue);
+        emitCode(crb, masm, config, false, scratch, state);
+    }
+
+    /**
+     * Conservatively checks whether the safepoint polling page can be reached from anywhere in
+     * the code cache with a single pc-relative ldr, whose literal offset must fit into a 21-bit
+     * signed integer (+-1 MiB).
+     *
+     * @return true if the polling page offset is not guaranteed to fit into a 21-bit signed
+     *         integer from everywhere in the code cache (the page is far), false otherwise.
+     */
+    private static boolean isPollingPageFar(HotSpotVMConfig config) {
+        final long pollingPageAddress = config.safepointPollingAddress;
+        return !NumUtil.isSignedNbit(21, pollingPageAddress - config.codeCacheLowBound) || !NumUtil.isSignedNbit(21, pollingPageAddress - config.codeCacheHighBound);
+    }
+
+    public static void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm, HotSpotVMConfig config, boolean onReturn, Register scratch, LIRFrameState state) {
+        int pos = masm.position();
+        if (isPollingPageFar(config)) {
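+            // Far poll: the page may be out of pc-relative ldr range, so
+            // materialize its absolute address into the scratch register first.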
+            crb.recordMark(onReturn ? config.MARKID_POLL_RETURN_FAR : config.MARKID_POLL_FAR);
+            masm.forceMov(scratch, config.safepointPollingAddress);
+            if (state != null) {
+                crb.recordInfopoint(pos, state, InfopointReason.SAFEPOINT);
+            }
+            masm.ldr(32, zr, AArch64Address.createBaseRegisterOnlyAddress(scratch));
+        } else {
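+            // Near poll: a single pc-relative ldr; the recorded mark lets HotSpot
+            // retarget the literal offset at the polling page on installation.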
+            crb.recordMark(onReturn ? config.MARKID_POLL_RETURN_NEAR : config.MARKID_POLL_NEAR);
+            if (state != null) {
+                crb.recordInfopoint(pos, state, InfopointReason.SAFEPOINT);
+            }
+            masm.ldr(32, zr, AArch64Address.createPcLiteralAddress(0));
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64HotSpotUnwindOp.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.aarch64;
+
+import static com.oracle.graal.hotspot.HotSpotBackend.UNWIND_EXCEPTION_TO_CALLER;
+import static jdk.vm.ci.aarch64.AArch64.lr;
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.compiler.common.spi.ForeignCallLinkage;
+import com.oracle.graal.hotspot.stubs.UnwindExceptionToCallerStub;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.Opcode;
+import com.oracle.graal.lir.aarch64.AArch64Call;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+import jdk.vm.ci.code.CallingConvention;
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.code.RegisterValue;
+import jdk.vm.ci.hotspot.HotSpotVMConfig;
+
+/**
+ * Removes the current frame and jumps to the {@link UnwindExceptionToCallerStub}.
+ */
+@Opcode("UNWIND")
+public class AArch64HotSpotUnwindOp extends AArch64HotSpotEpilogueOp {
+    public static final LIRInstructionClass<AArch64HotSpotUnwindOp> TYPE = LIRInstructionClass.create(AArch64HotSpotUnwindOp.class);
+
+    @Use protected RegisterValue exception;
+
+    public AArch64HotSpotUnwindOp(HotSpotVMConfig config, RegisterValue exception) {
+        super(TYPE, config);
+        this.exception = exception;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+        leaveFrame(crb, masm, /* emitSafepoint */false);
+
+        ForeignCallLinkage linkage = crb.foreignCalls.lookupForeignCall(UNWIND_EXCEPTION_TO_CALLER);
+        CallingConvention cc = linkage.getOutgoingCallingConvention();
+        assert cc.getArgumentCount() == 2;
+        assert exception.equals(cc.getArgument(0));
+
+        // Get return address (is in lr after frame leave)
+        Register returnAddress = asRegister(cc.getArgument(1));
+        masm.movx(returnAddress, lr);
+
+        AArch64Call.directJmp(crb, masm, linkage);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.aarch64/src/com/oracle/graal/hotspot/aarch64/AArch64IndirectCallOp.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.aarch64;
+
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.REG;
+import static jdk.vm.ci.aarch64.AArch64.r12;
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.lir.LIRFrameState;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.Opcode;
+import com.oracle.graal.lir.aarch64.AArch64Call;
+import com.oracle.graal.lir.aarch64.AArch64Call.IndirectCallOp;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.hotspot.HotSpotVMConfig;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+import jdk.vm.ci.meta.Value;
+
+/**
+ * A register indirect call that complies with the extra conventions for such calls in HotSpot. In
+ * particular, the metaspace Method of the callee must be in r12 for the case where a vtable entry's
+ * _from_compiled_entry is the address of a C2I adapter. Such adapters expect the target method to
+ * be in r12.
+ */
+@Opcode("CALL_INDIRECT")
+final class AArch64IndirectCallOp extends IndirectCallOp {
+
+    public static final LIRInstructionClass<AArch64IndirectCallOp> TYPE = LIRInstructionClass.create(AArch64IndirectCallOp.class);
+
+    /**
+     * Vtable stubs expect the metaspace Method in r12.
+     */
+    public static final Register METHOD = r12;
+
+    @Use({REG}) private Value metaspaceMethod;
+
+    private final HotSpotVMConfig config;
+
+    public AArch64IndirectCallOp(ResolvedJavaMethod callTarget, Value result, Value[] parameters, Value[] temps, Value metaspaceMethod, Value targetAddress, LIRFrameState state, HotSpotVMConfig config) {
+        super(TYPE, callTarget, result, parameters, temps, targetAddress, state);
+        this.metaspaceMethod = metaspaceMethod;
+        this.config = config;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+        crb.recordMark(config.MARKID_INLINE_INVOKE);
+        Register callReg = asRegister(targetAddress);
+        assert !callReg.equals(METHOD);
+        AArch64Call.indirectCall(crb, masm, callReg, callTarget, state);
+    }
+
+    @Override
+    public void verify() {
+        super.verify();
+        assert asRegister(metaspaceMethod).equals(METHOD);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64AddressValue.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.aarch64;
+
+import java.util.EnumSet;
+
+import com.oracle.graal.asm.aarch64.AArch64Address;
+import com.oracle.graal.asm.aarch64.AArch64Assembler;
+import com.oracle.graal.lir.CompositeValue;
+import com.oracle.graal.lir.InstructionValueConsumer;
+import com.oracle.graal.lir.InstructionValueProcedure;
+import com.oracle.graal.lir.LIRInstruction;
+import com.oracle.graal.lir.LIRInstruction.OperandFlag;
+
+import jdk.vm.ci.aarch64.AArch64;
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.code.RegisterValue;
+import jdk.vm.ci.meta.AllocatableValue;
+import jdk.vm.ci.meta.LIRKind;
+import jdk.vm.ci.meta.Value;
+
+public final class AArch64AddressValue extends CompositeValue {
+    private static final EnumSet<OperandFlag> flags = EnumSet.of(OperandFlag.REG, OperandFlag.ILLEGAL);
+
+    @Component({OperandFlag.REG, OperandFlag.ILLEGAL}) protected AllocatableValue base;
+    @Component({OperandFlag.REG, OperandFlag.ILLEGAL}) protected AllocatableValue offset;
+    private final int immediate;
+    /**
+     * Whether the register offset should be scaled or not.
+     */
+    private final boolean scaled;
+    private final AArch64Address.AddressingMode addressingMode;
+
+    public AArch64AddressValue(LIRKind kind, AllocatableValue base, AllocatableValue offset, int immediate, boolean scaled, AArch64Address.AddressingMode addressingMode) {
+        super(kind);
+        this.base = base;
+        this.offset = offset;
+        this.immediate = immediate;
+        this.scaled = scaled;
+        this.addressingMode = addressingMode;
+    }
+
+    private static Register toRegister(AllocatableValue value) {
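+        // A missing base or offset component is represented by Value.ILLEGAL and is mapped to
+        // the zero register here as a placeholder for the absent component.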
+        if (value.equals(Value.ILLEGAL)) {
+            return AArch64.zr;
+        } else {
+            return ((RegisterValue) value).getRegister();
+        }
+    }
+
+    public AllocatableValue getBase() {
+        return base;
+    }
+
+    public AllocatableValue getOffset() {
+        return offset;
+    }
+
+    public int getImmediate() {
+        return immediate;
+    }
+
+    public boolean isScaled() {
+        return scaled;
+    }
+
+    public AArch64Address.AddressingMode getAddressingMode() {
+        return addressingMode;
+    }
+
+    public AArch64Address toAddress() {
+        Register baseReg = toRegister(base);
+        Register offsetReg = toRegister(offset);
+        AArch64Assembler.ExtendType extendType = addressingMode == AArch64Address.AddressingMode.EXTENDED_REGISTER_OFFSET ? AArch64Assembler.ExtendType.SXTW : null;
+        return AArch64Address.createAddress(addressingMode, baseReg, offsetReg, immediate, scaled, extendType);
+    }
+
+    @Override
+    public CompositeValue forEachComponent(LIRInstruction inst, LIRInstruction.OperandMode mode, InstructionValueProcedure proc) {
+        AllocatableValue newBase = (AllocatableValue) proc.doValue(inst, base, mode, flags);
+        AllocatableValue newOffset = (AllocatableValue) proc.doValue(inst, offset, mode, flags);
+        if (!base.identityEquals(newBase) || !offset.identityEquals(newOffset)) {
+            return new AArch64AddressValue(getLIRKind(), newBase, newOffset, immediate, scaled, addressingMode);
+        }
+        return this;
+    }
+
+    @Override
+    protected void visitEachComponent(LIRInstruction inst, LIRInstruction.OperandMode mode, InstructionValueConsumer proc) {
+        proc.visitValue(inst, base, mode, flags);
+        proc.visitValue(inst, offset, mode, flags);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64ArithmeticLIRGeneratorTool.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.aarch64;
+
+import jdk.vm.ci.aarch64.AArch64Kind;
+import jdk.vm.ci.meta.Value;
+
+import com.oracle.graal.lir.Variable;
+import com.oracle.graal.lir.gen.ArithmeticLIRGeneratorTool;
+
+/**
+ * This interface can be used to generate AArch64 LIR for arithmetic operations.
+ */
+public interface AArch64ArithmeticLIRGeneratorTool extends ArithmeticLIRGeneratorTool {
+
+    Value emitMathLog(Value input, boolean base10);
+
+    Value emitMathCos(Value input);
+
+    Value emitMathSin(Value input);
+
+    Value emitMathTan(Value input);
+
+    Value emitCountLeadingZeros(Value value);
+
+    Value emitCountTrailingZeros(Value value);
+
+    void emitCompareOp(AArch64Kind cmpKind, Variable left, Value right);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64ArithmeticOp.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.aarch64;
+
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.REG;
+import static com.oracle.graal.lir.aarch64.AArch64ArithmeticOp.ARMv8ConstantCategory.ARITHMETIC;
+import static com.oracle.graal.lir.aarch64.AArch64ArithmeticOp.ARMv8ConstantCategory.LOGICAL;
+import static com.oracle.graal.lir.aarch64.AArch64ArithmeticOp.ARMv8ConstantCategory.NONE;
+import static com.oracle.graal.lir.aarch64.AArch64ArithmeticOp.ARMv8ConstantCategory.SHIFT;
+import static jdk.vm.ci.aarch64.AArch64.zr;
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+
+import com.oracle.graal.asm.aarch64.AArch64Assembler;
+import com.oracle.graal.asm.aarch64.AArch64Assembler.ConditionFlag;
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.Opcode;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.common.JVMCIError;
+import jdk.vm.ci.meta.AllocatableValue;
+import jdk.vm.ci.meta.JavaConstant;
+
+public enum AArch64ArithmeticOp {
+    // TODO At least add and sub *can* be used with SP, so this should be supported
+    NEG,
+    NOT,
+    ADD(ARITHMETIC),
+    ADDS(ARITHMETIC),
+    SUB(ARITHMETIC),
+    SUBS(ARITHMETIC),
+    MUL,
+    DIV,
+    SMULH,
+    UMULH,
+    REM,
+    UDIV,
+    UREM,
+    AND(LOGICAL),
+    ANDS(LOGICAL),
+    OR(LOGICAL),
+    XOR(LOGICAL),
+    SHL(SHIFT),
+    LSHR(SHIFT),
+    ASHR(SHIFT),
+    ABS,
+
+    FADD,
+    FSUB,
+    FMUL,
+    FDIV,
+    FREM,
+    FNEG,
+    FABS,
+    SQRT;
+
+    /**
+     * Specifies what constants can be used directly without having to be loaded into a register
+     * with the given instruction.
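+     * For example, the arithmetic instructions (add/sub) accept a 12-bit unsigned immediate,
+     * optionally shifted left by 12 bits, while the logical instructions accept bitmask
+     * immediates.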
+     */
+    public enum ARMv8ConstantCategory {
+        NONE,
+        LOGICAL,
+        ARITHMETIC,
+        SHIFT
+    }
+
+    public final ARMv8ConstantCategory category;
+
+    AArch64ArithmeticOp(ARMv8ConstantCategory category) {
+        this.category = category;
+    }
+
+    AArch64ArithmeticOp() {
+        this(NONE);
+    }
+
+    public static class UnaryOp extends AArch64LIRInstruction {
+        private static final LIRInstructionClass<UnaryOp> TYPE = LIRInstructionClass.create(UnaryOp.class);
+
+        @Opcode private final AArch64ArithmeticOp opcode;
+        @Def({REG}) protected AllocatableValue result;
+        @Use({REG}) protected AllocatableValue x;
+
+        public UnaryOp(AArch64ArithmeticOp opcode, AllocatableValue result, AllocatableValue x) {
+            super(TYPE);
+            this.opcode = opcode;
+            this.result = result;
+            this.x = x;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            Register dst = asRegister(result);
+            Register src = asRegister(x);
+            // TODO remove
+            int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
+            switch (opcode) {
+                case NEG:
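+                    // Integer negation is a subtraction from the zero register: dst = zr - src.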
+                    masm.sub(size, dst, zr, src);
+                    break;
+                case FNEG:
+                    masm.fneg(size, dst, src);
+                    break;
+                case NOT:
+                    masm.not(size, dst, src);
+                    break;
+                case ABS:
+                    masm.cmp(size, src, 0);
+                    masm.csneg(size, dst, src, ConditionFlag.LT);
+                    break;
+                case FABS:
+                    masm.fabs(size, dst, src);
+                    break;
+                case SQRT:
+                    masm.fsqrt(size, dst, src);
+                    break;
+                default:
+                    throw JVMCIError.shouldNotReachHere("op=" + opcode.name());
+            }
+        }
+    }
+
+    public static class BinaryConstOp extends AArch64LIRInstruction {
+        private static final LIRInstructionClass<BinaryConstOp> TYPE = LIRInstructionClass.create(BinaryConstOp.class);
+
+        @Opcode private final AArch64ArithmeticOp op;
+        @Def({REG}) protected AllocatableValue result;
+        @Use({REG}) protected AllocatableValue a;
+        private final JavaConstant b;
+
+        public BinaryConstOp(AArch64ArithmeticOp op, AllocatableValue result, AllocatableValue a, JavaConstant b) {
+            super(TYPE);
+            this.op = op;
+            this.result = result;
+            this.a = a;
+            this.b = b;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            assert op.category != NONE;
+            Register dst = asRegister(result);
+            Register src = asRegister(a);
+            // TODO remove
+            int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
+            switch (op) {
+                case ADD:
+                    // Don't use asInt() here, since we can't use asInt on a long variable, even
+                    // if the constant easily fits as an int.
+                    assert AArch64MacroAssembler.isArithmeticImmediate(b.asLong());
+                    masm.add(size, dst, src, (int) b.asLong());
+                    break;
+                case SUB:
+                    // Don't use asInt() here, since we can't use asInt on a long variable, even
+                    // if the constant easily fits as an int.
+                    assert AArch64MacroAssembler.isArithmeticImmediate(b.asLong());
+                    masm.sub(size, dst, src, (int) b.asLong());
+                    break;
+                case AND:
+                    masm.and(size, dst, src, b.asLong());
+                    break;
+                case ANDS:
+                    masm.ands(size, dst, src, b.asLong());
+                    break;
+                case OR:
+                    masm.or(size, dst, src, b.asLong());
+                    break;
+                case XOR:
+                    masm.eor(size, dst, src, b.asLong());
+                    break;
+                case SHL:
+                    masm.shl(size, dst, src, b.asLong());
+                    break;
+                case LSHR:
+                    masm.lshr(size, dst, src, b.asLong());
+                    break;
+                case ASHR:
+                    masm.ashr(size, dst, src, b.asLong());
+                    break;
+                default:
+                    throw JVMCIError.shouldNotReachHere("op=" + op.name());
+            }
+        }
+    }
+
+    public static class BinaryOp extends AArch64LIRInstruction {
+        private static final LIRInstructionClass<BinaryOp> TYPE = LIRInstructionClass.create(BinaryOp.class);
+
+        @Opcode private final AArch64ArithmeticOp op;
+        @Def({REG}) protected AllocatableValue result;
+        @Use({REG}) protected AllocatableValue a;
+        @Use({REG}) protected AllocatableValue b;
+
+        public BinaryOp(AArch64ArithmeticOp op, AllocatableValue result, AllocatableValue a, AllocatableValue b) {
+            super(TYPE);
+            this.op = op;
+            this.result = result;
+            this.a = a;
+            this.b = b;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            Register dst = asRegister(result);
+            Register src1 = asRegister(a);
+            Register src2 = asRegister(b);
+            // TODO remove
+            int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
+            switch (op) {
+                case ADD:
+                    masm.add(size, dst, src1, src2);
+                    break;
+                case ADDS:
+                    masm.adds(size, dst, src1, src2);
+                    break;
+                case SUB:
+                    masm.sub(size, dst, src1, src2);
+                    break;
+                case SUBS:
+                    masm.subs(size, dst, src1, src2);
+                    break;
+                case MUL:
+                    masm.mul(size, dst, src1, src2);
+                    break;
+                case UMULH:
+                    masm.umulh(size, dst, src1, src2);
+                    break;
+                case SMULH:
+                    masm.smulh(size, dst, src1, src2);
+                    break;
+                case DIV:
+                    masm.sdiv(size, dst, src1, src2);
+                    break;
+                case UDIV:
+                    masm.udiv(size, dst, src1, src2);
+                    break;
+                case AND:
+                    masm.and(size, dst, src1, src2);
+                    break;
+                case ANDS:
+                    masm.ands(size, dst, src1, src2);
+                    break;
+                case OR:
+                    masm.or(size, dst, src1, src2);
+                    break;
+                case XOR:
+                    masm.eor(size, dst, src1, src2);
+                    break;
+                case SHL:
+                    masm.shl(size, dst, src1, src2);
+                    break;
+                case LSHR:
+                    masm.lshr(size, dst, src1, src2);
+                    break;
+                case ASHR:
+                    masm.ashr(size, dst, src1, src2);
+                    break;
+                case FADD:
+                    masm.fadd(size, dst, src1, src2);
+                    break;
+                case FSUB:
+                    masm.fsub(size, dst, src1, src2);
+                    break;
+                case FMUL:
+                    masm.fmul(size, dst, src1, src2);
+                    break;
+                case FDIV:
+                    masm.fdiv(size, dst, src1, src2);
+                    break;
+                default:
+                    throw JVMCIError.shouldNotReachHere("op=" + op.name());
+            }
+        }
+    }
+
+    /**
+     * Class used for instructions that have to reuse one of their arguments. This only applies to
+     * the remainder instructions at the moment, since we have to compute n % d using rem = n -
+     * TruncatingDivision(n, d) * d
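+     * (e.g. -7 % 3: TruncatingDivision(-7, 3) == -2, so rem == -7 - (-2) * 3 == -1, matching
+     * Java remainder semantics).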
+     *
+     * TODO (das) Replace the remainder nodes in the LIR.
+     */
+    public static class BinaryCompositeOp extends AArch64LIRInstruction {
+        private static final LIRInstructionClass<BinaryCompositeOp> TYPE = LIRInstructionClass.create(BinaryCompositeOp.class);
+        @Opcode private final AArch64ArithmeticOp op;
+        @Def({REG}) protected AllocatableValue result;
+        @Alive({REG}) protected AllocatableValue a;
+        @Alive({REG}) protected AllocatableValue b;
+
+        public BinaryCompositeOp(AArch64ArithmeticOp op, AllocatableValue result, AllocatableValue a, AllocatableValue b) {
+            super(TYPE);
+            this.op = op;
+            this.result = result;
+            this.a = a;
+            this.b = b;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            Register dst = asRegister(result);
+            Register src1 = asRegister(a);
+            Register src2 = asRegister(b);
+            // TODO remove
+            int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
+            switch (op) {
+                case REM:
+                    masm.rem(size, dst, src1, src2);
+                    break;
+                case UREM:
+                    masm.urem(size, dst, src1, src2);
+                    break;
+                case FREM:
+                    masm.frem(size, dst, src1, src2);
+                    break;
+                default:
+                    throw JVMCIError.shouldNotReachHere();
+            }
+        }
+    }
+
+    public static class AddSubShiftOp extends AArch64LIRInstruction {
+        private static final LIRInstructionClass<AddSubShiftOp> TYPE = LIRInstructionClass.create(AddSubShiftOp.class);
+
+        @Opcode private final AArch64ArithmeticOp op;
+        @Def(REG) protected AllocatableValue result;
+        @Use(REG) protected AllocatableValue src1;
+        @Use(REG) protected AllocatableValue src2;
+        private final AArch64MacroAssembler.ShiftType shiftType;
+        private final int shiftAmt;
+
+        /**
+         * Computes {@code result = src1 <op> src2 <shiftType> <shiftAmt>}.
+         */
+        public AddSubShiftOp(AArch64ArithmeticOp op, AllocatableValue result, AllocatableValue src1, AllocatableValue src2, AArch64MacroAssembler.ShiftType shiftType, int shiftAmt) {
+            super(TYPE);
+            assert op == ADD || op == SUB;
+            this.op = op;
+            this.result = result;
+            this.src1 = src1;
+            this.src2 = src2;
+            this.shiftType = shiftType;
+            this.shiftAmt = shiftAmt;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            // TODO remove
+            int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
+            switch (op) {
+                case ADD:
+                    masm.add(size, asRegister(result), asRegister(src1), asRegister(src2), shiftType, shiftAmt);
+                    break;
+                case SUB:
+                    masm.sub(size, asRegister(result), asRegister(src1), asRegister(src2), shiftType, shiftAmt);
+                    break;
+                default:
+                    throw JVMCIError.shouldNotReachHere();
+            }
+        }
+    }
+
+    public static class ExtendedAddShiftOp extends AArch64LIRInstruction {
+        private static final LIRInstructionClass<ExtendedAddShiftOp> TYPE = LIRInstructionClass.create(ExtendedAddShiftOp.class);
+        @Def(REG) protected AllocatableValue result;
+        @Use(REG) protected AllocatableValue src1;
+        @Use(REG) protected AllocatableValue src2;
+        private final AArch64Assembler.ExtendType extendType;
+        private final int shiftAmt;
+
+        /**
+         * Computes {@code result = src1 + extendType(src2) << shiftAmt}.
+         *
+         * @param extendType defines how src2 is extended to the same size as src1.
+         * @param shiftAmt must be in range 0 to 4.
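+         *            For example, extendType == SXTW and shiftAmt == 2 compute
+         *            {@code result = src1 + sext32(src2) * 4}, the typical pattern for scaled
+         *            array indexing.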
+         */
+        public ExtendedAddShiftOp(AllocatableValue result, AllocatableValue src1, AllocatableValue src2, AArch64Assembler.ExtendType extendType, int shiftAmt) {
+            super(TYPE);
+            this.result = result;
+            this.src1 = src1;
+            this.src2 = src2;
+            this.extendType = extendType;
+            this.shiftAmt = shiftAmt;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            // TODO remove
+            int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
+            masm.add(size, asRegister(result), asRegister(src1), asRegister(src2), extendType, shiftAmt);
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64BitManipulationOp.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.aarch64;
+
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.REG;
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.Opcode;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.common.JVMCIError;
+import jdk.vm.ci.meta.AllocatableValue;
+
+/**
+ * Bit manipulation ops for ARMv8 ISA.
+ */
+public class AArch64BitManipulationOp extends AArch64LIRInstruction {
+    public enum BitManipulationOpCode {
+        BSF,
+        BSR,
+        BSWP,
+        CLZ,
+    }
+
+    private static final LIRInstructionClass<AArch64BitManipulationOp> TYPE = LIRInstructionClass.create(AArch64BitManipulationOp.class);
+
+    @Opcode private final BitManipulationOpCode opcode;
+    @Def protected AllocatableValue result;
+    @Use({REG}) protected AllocatableValue input;
+
+    public AArch64BitManipulationOp(BitManipulationOpCode opcode, AllocatableValue result, AllocatableValue input) {
+        super(TYPE);
+        this.opcode = opcode;
+        this.result = result;
+        this.input = input;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+        Register dst = asRegister(result);
+        Register src = asRegister(input);
+        // TODO remove
+        int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
+        switch (opcode) {
+            case CLZ:
+                masm.clz(size, dst, src);
+                break;
+            case BSR:
+                // BSR == <type width> - 1 - CLZ(input)
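+                // Computed without a scratch register as dst = (size - 1) + (-CLZ(input)),
+                // e.g. for the 32-bit input 0x100: CLZ == 23, so BSR == 31 - 23 == 8.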
+                masm.clz(size, dst, src);
+                masm.neg(size, dst, dst);
+                masm.add(size, dst, dst, size - 1);
+                break;
+            case BSF:
+                // BSF == CLZ(rev(input))
+                masm.rev(size, dst, src);
+                masm.clz(size, dst, dst);
+                break;
+            case BSWP:
+                masm.rev(size, dst, src);
+                break;
+            default:
+                throw JVMCIError.shouldNotReachHere();
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64BlockEndOp.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.graal.lir.aarch64;
+
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.StandardOp;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+public abstract class AArch64BlockEndOp extends StandardOp.AbstractBlockEndOp {
+
+    public static final LIRInstructionClass<AArch64BlockEndOp> TYPE = LIRInstructionClass.create(AArch64BlockEndOp.class);
+
+    protected AArch64BlockEndOp(LIRInstructionClass<? extends StandardOp.AbstractBlockEndOp> c) {
+        super(c);
+    }
+
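+    // This adapter performs the cast to the AArch64 macro assembler in one place, so that
+    // block-end subclasses only have to implement the typed emitCode variant below.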
+    @Override
+    public final void emitCode(CompilationResultBuilder crb) {
+        emitCode(crb, (AArch64MacroAssembler) crb.asm);
+    }
+
+    public abstract void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64BreakpointOp.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.aarch64;
+
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.REG;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.STACK;
+
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler.AArch64ExceptionCode;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.Opcode;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+import jdk.vm.ci.meta.Value;
+
+@Opcode("BREAKPOINT")
+public class AArch64BreakpointOp extends AArch64LIRInstruction {
+    public static final LIRInstructionClass<AArch64BreakpointOp> TYPE = LIRInstructionClass.create(AArch64BreakpointOp.class);
+
+    /**
+     * A set of values loaded into the Java ABI parameter locations (for inspection by a debugger).
+     */
+    @Use({REG, STACK}) private Value[] parameters;
+
+    public AArch64BreakpointOp(Value[] parameters) {
+        super(TYPE);
+        this.parameters = parameters;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+        masm.brk(AArch64ExceptionCode.BREAKPOINT);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64Call.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.aarch64;
+
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.ILLEGAL;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.REG;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.STACK;
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+import static jdk.vm.ci.code.ValueUtil.isRegister;
+
+import com.oracle.graal.asm.aarch64.AArch64Assembler;
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.compiler.common.spi.ForeignCallLinkage;
+import com.oracle.graal.lir.LIRFrameState;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.Opcode;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+import jdk.vm.ci.aarch64.AArch64;
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.meta.InvokeTarget;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+import jdk.vm.ci.meta.Value;
+
+public class AArch64Call {
+
+    public abstract static class CallOp extends AArch64LIRInstruction {
+        @Def({REG, ILLEGAL}) protected Value result;
+        @Use({REG, STACK}) protected Value[] parameters;
+        @Temp protected Value[] temps;
+        @State protected LIRFrameState state;
+
+        protected CallOp(LIRInstructionClass<? extends CallOp> c, Value result, Value[] parameters, Value[] temps, LIRFrameState state) {
+            super(c);
+            this.result = result;
+            this.parameters = parameters;
+            this.state = state;
+            this.temps = temps;
+            assert temps != null;
+        }
+
+        @Override
+        public boolean destroysCallerSavedRegisters() {
+            return true;
+        }
+    }
+
+    public abstract static class MethodCallOp extends CallOp {
+        protected final ResolvedJavaMethod callTarget;
+
+        protected MethodCallOp(LIRInstructionClass<? extends MethodCallOp> c, ResolvedJavaMethod callTarget, Value result, Value[] parameters, Value[] temps, LIRFrameState state) {
+            super(c, result, parameters, temps, state);
+            this.callTarget = callTarget;
+        }
+    }
+
+    @Opcode("CALL_INDIRECT")
+    public static class IndirectCallOp extends MethodCallOp {
+        public static final LIRInstructionClass<IndirectCallOp> TYPE = LIRInstructionClass.create(IndirectCallOp.class);
+
+        @Use({REG}) protected Value targetAddress;
+
+        public IndirectCallOp(ResolvedJavaMethod callTarget, Value result, Value[] parameters, Value[] temps, Value targetAddress, LIRFrameState state) {
+            this(TYPE, callTarget, result, parameters, temps, targetAddress, state);
+        }
+
+        protected IndirectCallOp(LIRInstructionClass<? extends IndirectCallOp> c, ResolvedJavaMethod callTarget, Value result, Value[] parameters, Value[] temps, Value targetAddress,
+                        LIRFrameState state) {
+            super(c, callTarget, result, parameters, temps, state);
+            this.targetAddress = targetAddress;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            Register target = asRegister(targetAddress);
+            indirectCall(crb, masm, target, callTarget, state);
+        }
+
+        @Override
+        public void verify() {
+            super.verify();
+            assert isRegister(targetAddress) : "The current register allocator cannot handle variables at call sites; the target address must be in a fixed register for now";
+        }
+    }
+
+    @Opcode("CALL_DIRECT")
+    public abstract static class DirectCallOp extends MethodCallOp {
+        public static final LIRInstructionClass<DirectCallOp> TYPE = LIRInstructionClass.create(DirectCallOp.class);
+
+        public DirectCallOp(ResolvedJavaMethod target, Value result, Value[] parameters, Value[] temps, LIRFrameState state) {
+            super(TYPE, target, result, parameters, temps, state);
+        }
+
+        protected DirectCallOp(LIRInstructionClass<? extends DirectCallOp> c, ResolvedJavaMethod callTarget, Value result, Value[] parameters, Value[] temps, LIRFrameState state) {
+            super(c, callTarget, result, parameters, temps, state);
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            directCall(crb, masm, callTarget, null, state);
+        }
+    }
+
+    public abstract static class ForeignCallOp extends CallOp {
+        protected final ForeignCallLinkage callTarget;
+
+        protected ForeignCallOp(LIRInstructionClass<? extends ForeignCallOp> c, ForeignCallLinkage callTarget, Value result, Value[] parameters, Value[] temps, LIRFrameState state) {
+            super(c, result, parameters, temps, state);
+            this.callTarget = callTarget;
+        }
+
+        @Override
+        public boolean destroysCallerSavedRegisters() {
+            return callTarget.destroysRegisters();
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            emitCall(crb, masm);
+        }
+
+        protected abstract void emitCall(CompilationResultBuilder crb, AArch64MacroAssembler masm);
+    }
+
+    @Opcode("NEAR_FOREIGN_CALL")
+    public static class DirectNearForeignCallOp extends ForeignCallOp {
+        public static final LIRInstructionClass<DirectNearForeignCallOp> TYPE = LIRInstructionClass.create(DirectNearForeignCallOp.class);
+
+        public DirectNearForeignCallOp(ForeignCallLinkage callTarget, Value result, Value[] parameters, Value[] temps, LIRFrameState state) {
+            super(TYPE, callTarget, result, parameters, temps, state);
+        }
+
+        @Override
+        protected void emitCall(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            directCall(crb, masm, callTarget, null, state);
+        }
+    }
+
+    @Opcode("FAR_FOREIGN_CALL")
+    public static class DirectFarForeignCallOp extends ForeignCallOp {
+        public static final LIRInstructionClass<DirectFarForeignCallOp> TYPE = LIRInstructionClass.create(DirectFarForeignCallOp.class);
+
+        public DirectFarForeignCallOp(ForeignCallLinkage callTarget, Value result, Value[] parameters, Value[] temps, LIRFrameState state) {
+            super(TYPE, callTarget, result, parameters, temps, state);
+        }
+
+        @Override
+        protected void emitCall(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            // We can use any scratch register we want, since we know that they have been saved
+            // before calling.
+            directCall(crb, masm, callTarget, AArch64.r8, state);
+        }
+    }
+
+    /**
+     * Tests whether linkage can be called directly under all circumstances without the need for a
+     * scratch register.
+     *
+     * Note that this is a pessimistic assumption: it may return false even though a near
+     * call/jump would be adequate.
+     *
+     * @param linkage Foreign call description
+     * @return true if the foreign call can be called directly and does not need a scratch
+     *         register into which to load the address.
+     */
+    public static boolean isNearCall(ForeignCallLinkage linkage) {
+        long maxOffset = linkage.getMaxCallTargetOffset();
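+        // An AArch64 branch immediate encodes a signed 26-bit word offset, i.e. a range of
+        // +/-128 MB.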
+        return maxOffset != -1 && AArch64MacroAssembler.isBranchImmediateOffset(maxOffset);
+    }
+
+    public static void directCall(CompilationResultBuilder crb, AArch64MacroAssembler masm, InvokeTarget callTarget, Register scratch, LIRFrameState info) {
+        int before = masm.position();
+        if (scratch != null) {
+            // The offset might not fit into a 28-bit immediate, so generate an indirect call
+            // with a 64-bit immediate address, which is fixed up by HotSpot.
+            masm.forceMov(scratch, 0L);
+            masm.blr(scratch);
+        } else {
+            // address is fixed up by HotSpot.
+            masm.bl(0);
+        }
+        int after = masm.position();
+        crb.recordDirectCall(before, after, callTarget, info);
+        crb.recordExceptionHandlers(after, info);
+        masm.ensureUniquePC();
+    }
+
+    public static void indirectCall(CompilationResultBuilder crb, AArch64MacroAssembler masm, Register dst, InvokeTarget callTarget, LIRFrameState info) {
+        int before = masm.position();
+        masm.blr(dst);
+        int after = masm.position();
+        crb.recordIndirectCall(before, after, callTarget, info);
+        crb.recordExceptionHandlers(after, info);
+        masm.ensureUniquePC();
+    }
+
+    public static void directJmp(CompilationResultBuilder crb, AArch64MacroAssembler masm, InvokeTarget target) {
+        int before = masm.position();
+        // Address is fixed up later by C++ code.
+        masm.jmp();
+        int after = masm.position();
+        crb.recordDirectCall(before, after, target, null);
+        masm.ensureUniquePC();
+    }
+
+    public static void indirectJmp(CompilationResultBuilder crb, AArch64MacroAssembler masm, Register dst, InvokeTarget target) {
+        int before = masm.position();
+        masm.jmp(dst);
+        int after = masm.position();
+        crb.recordIndirectCall(before, after, target, null);
+        masm.ensureUniquePC();
+    }
+
+    public static void directConditionalJmp(CompilationResultBuilder crb, AArch64MacroAssembler masm, InvokeTarget target, AArch64Assembler.ConditionFlag cond) {
+        int before = masm.position();
+        masm.branchConditionally(cond);
+        int after = masm.position();
+        crb.recordDirectCall(before, after, target, null);
+        masm.ensureUniquePC();
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64Compare.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.aarch64;
+
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.CONST;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.REG;
+import static com.oracle.graal.lir.LIRValueUtil.asJavaConstant;
+import static com.oracle.graal.lir.LIRValueUtil.isJavaConstant;
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+import static jdk.vm.ci.code.ValueUtil.isRegister;
+
+import com.oracle.graal.asm.NumUtil;
+import com.oracle.graal.asm.aarch64.AArch64Assembler;
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.compiler.common.calc.Condition;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+import jdk.vm.ci.aarch64.AArch64Kind;
+import jdk.vm.ci.meta.AllocatableValue;
+import jdk.vm.ci.meta.JavaConstant;
+import jdk.vm.ci.meta.Value;
+
+public class AArch64Compare {
+
+    public static class CompareOp extends AArch64LIRInstruction {
+        public static final LIRInstructionClass<CompareOp> TYPE = LIRInstructionClass.create(CompareOp.class);
+
+        @Use protected AllocatableValue x;
+        @Use({REG, CONST}) protected Value y;
+
+        public CompareOp(AllocatableValue x, Value y) {
+            super(TYPE);
+            assert ((AArch64Kind) x.getPlatformKind()).isInteger() && ((AArch64Kind) y.getPlatformKind()).isInteger();
+            assert x.getPlatformKind() == y.getPlatformKind();
+            this.x = x;
+            this.y = y;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            gpCompare(masm, x, y);
+        }
+    }
+
+    /**
+     * Compares integer values x and y.
+     *
+     * @param x integer value to compare. May not be null.
+     * @param y integer value to compare. May not be null.
+     */
+    public static void gpCompare(AArch64MacroAssembler masm, AllocatableValue x, Value y) {
+        int size = x.getPlatformKind().getSizeInBytes() * Byte.SIZE;
+        if (isRegister(y)) {
+            masm.cmp(size, asRegister(x), asRegister(y));
+        } else {
+            JavaConstant c = asJavaConstant(y);
+            assert NumUtil.isInt(c.asLong());
+            masm.cmp(size, asRegister(x), (int) c.asLong());
+        }
+    }
+
+    public static class FloatCompareOp extends AArch64LIRInstruction {
+        public static final LIRInstructionClass<FloatCompareOp> TYPE = LIRInstructionClass.create(FloatCompareOp.class);
+
+        @Use protected AllocatableValue x;
+        @Use({REG, CONST}) protected Value y;
+        private final Condition condition;
+        private final boolean unorderedIsTrue;
+
+        public FloatCompareOp(AllocatableValue x, Value y, Condition condition, boolean unorderedIsTrue) {
+            super(TYPE);
+            assert !isJavaConstant(y) || isFloatCmpConstant(y, condition, unorderedIsTrue);
+            this.x = x;
+            this.y = y;
+            this.condition = condition;
+            this.unorderedIsTrue = unorderedIsTrue;
+        }
+
+        /**
+         * Checks if val can be used as a constant for the gpCompare operation or not.
+         */
+        public static boolean isFloatCmpConstant(Value val, Condition condition, boolean unorderedIsTrue) {
+            // If the condition is "EQ || unordered" or "NE && unordered" we have to use 2 registers
+            // in any case.
+            if (!(condition == Condition.EQ && unorderedIsTrue || condition == Condition.NE && !unorderedIsTrue)) {
+                return false;
+            }
+            return isJavaConstant(val) && asJavaConstant(val).isDefaultForKind();
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            assert isRegister(x);
+            int size = x.getPlatformKind().getSizeInBytes() * Byte.SIZE;
+            if (isRegister(y)) {
+                masm.fcmp(size, asRegister(x), asRegister(y));
+                // There is no condition code for "EQ || unordered" nor one for "NE && unordered",
+                // so we have to fix them up ourselves. In both cases we combine the asked-for
+                // condition into the EQ (respectively NE) condition, i.e. for EQ && unorderedIsTrue
+                // the EQ flag will also be set if the two values compare unequal but are unordered.
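+                // fccmp re-evaluates the comparison only if its input condition holds (VC, i.e.
+                // the preceding fcmp was ordered); otherwise it sets the flags directly to the
+                // given nzcv value, here Z == 1 so that EQ reads as true.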
+                if (condition == Condition.EQ && unorderedIsTrue) {
+                    // if f1 ordered f2:
+                    // result = f1 == f2
+                    // else:
+                    // result = EQUAL
+                    int nzcv = 0b0100;   // EQUAL -> Z = 1
+                    masm.fccmp(size, asRegister(x), asRegister(y), nzcv, AArch64Assembler.ConditionFlag.VC);
+                } else if (condition == Condition.NE && !unorderedIsTrue) {
+                    // if f1 ordered f2:
+                    // result = f1 != f2
+                    // else:
+                    // result = !NE == EQUAL
+                    int nzcv = 0b0100;   // EQUAL -> Z = 1
+                    masm.fccmp(size, asRegister(x), asRegister(y), nzcv, AArch64Assembler.ConditionFlag.VC);
+                }
+            } else {
+                // cmp against +0.0
+                masm.fcmpZero(size, asRegister(x));
+            }
+        }
+
+        @Override
+        public void verify() {
+            assert x.getPlatformKind().equals(y.getPlatformKind()) : "a: " + x + " b: " + y;
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64ControlFlow.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.aarch64;
+
+import static jdk.vm.ci.code.ValueUtil.asAllocatableValue;
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+
+import java.util.function.Function;
+
+import com.oracle.graal.asm.Label;
+import com.oracle.graal.asm.NumUtil;
+import com.oracle.graal.asm.aarch64.AArch64Address;
+import com.oracle.graal.asm.aarch64.AArch64Assembler;
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler.PatchLabelKind;
+import com.oracle.graal.compiler.common.calc.Condition;
+import com.oracle.graal.lir.ConstantValue;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.LabelRef;
+import com.oracle.graal.lir.Opcode;
+import com.oracle.graal.lir.StandardOp;
+import com.oracle.graal.lir.SwitchStrategy;
+import com.oracle.graal.lir.Variable;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+import jdk.vm.ci.aarch64.AArch64Kind;
+import jdk.vm.ci.code.CompilationResult.JumpTable;
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.common.JVMCIError;
+import jdk.vm.ci.meta.Constant;
+import jdk.vm.ci.meta.JavaConstant;
+import jdk.vm.ci.meta.LIRKind;
+import jdk.vm.ci.meta.Value;
+
+public class AArch64ControlFlow {
+
+    /**
+     * Compares an integer register to zero and branches if the condition is true. The condition
+     * may only be equal or non-equal.
+     */
+    // TODO (das) where do we need this?
+    // public static class CompareAndBranchOp extends AArch64LIRInstruction implements
+    // StandardOp.BranchOp {
+    // private final ConditionFlag condition;
+    // private final LabelRef destination;
+    // @Use({REG}) private Value x;
+    //
+    // public CompareAndBranchOp(Condition condition, LabelRef destination, Value x) {
+    // assert condition == Condition.EQ || condition == Condition.NE;
+    // assert ARMv8.isGpKind(x.getKind());
+    // this.condition = condition == Condition.EQ ? ConditionFlag.EQ : ConditionFlag.NE;
+    // this.destination = destination;
+    // this.x = x;
+    // }
+    //
+    // @Override
+    // public void emitCode(CompilationResultBuilder crb, ARMv8MacroAssembler masm) {
+    // int size = ARMv8.bitsize(x.getKind());
+    // if (condition == ConditionFlag.EQ) {
+    // masm.cbz(size, asRegister(x), destination.label());
+    // } else {
+    // masm.cbnz(size, asRegister(x), destination.label());
+    // }
+    // }
+    // }
+
+    public static class BranchOp extends AArch64BlockEndOp implements StandardOp.BranchOp {
+        public static final LIRInstructionClass<BranchOp> TYPE = LIRInstructionClass.create(BranchOp.class);
+
+        private final AArch64Assembler.ConditionFlag condition;
+        private final LabelRef trueDestination;
+        private final LabelRef falseDestination;
+
+        private final double trueDestinationProbability;
+
+        public BranchOp(AArch64Assembler.ConditionFlag condition, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
+            super(TYPE);
+            this.condition = condition;
+            this.trueDestination = trueDestination;
+            this.falseDestination = falseDestination;
+            this.trueDestinationProbability = trueDestinationProbability;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            /*
+             * Explanation: Depending on what the successor edge is, we can use the fall-through to
+             * optimize the generated code. If neither is a successor edge, use the branch
+             * probability to try to take the conditional jump as often as possible to avoid
+             * executing two instructions instead of one.
+             */
+            if (crb.isSuccessorEdge(trueDestination)) {
+                masm.branchConditionally(condition.negate(), falseDestination.label());
+            } else if (crb.isSuccessorEdge(falseDestination)) {
+                masm.branchConditionally(condition, trueDestination.label());
+            } else if (trueDestinationProbability < 0.5) {
+                masm.branchConditionally(condition.negate(), falseDestination.label());
+                masm.jmp(trueDestination.label());
+            } else {
+                masm.branchConditionally(condition, trueDestination.label());
+                masm.jmp(falseDestination.label());
+            }
+        }
+    }
+
+    @Opcode("CMOVE")
+    public static class CondMoveOp extends AArch64LIRInstruction {
+        public static final LIRInstructionClass<CondMoveOp> TYPE = LIRInstructionClass.create(CondMoveOp.class);
+
+        @Def protected Value result;
+        @Use protected Value trueValue;
+        @Use protected Value falseValue;
+        private final AArch64Assembler.ConditionFlag condition;
+
+        public CondMoveOp(Variable result, AArch64Assembler.ConditionFlag condition, Value trueValue, Value falseValue) {
+            super(TYPE);
+            assert trueValue.getPlatformKind() == falseValue.getPlatformKind() && trueValue.getPlatformKind() == result.getPlatformKind();
+            this.result = result;
+            this.condition = condition;
+            this.trueValue = trueValue;
+            this.falseValue = falseValue;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            AArch64Kind kind = (AArch64Kind) trueValue.getPlatformKind();
+            int size = kind.getSizeInBytes() * Byte.SIZE;
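+            // The cmov/fcmov calls below presumably lower to the AArch64 csel/fcsel instructions,
+            // i.e. a branch-free select: result = condition ? trueValue : falseValue.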
+            if (kind.isInteger()) {
+                masm.cmov(size, asRegister(result), asRegister(trueValue), asRegister(falseValue), condition);
+            } else {
+                masm.fcmov(size, asRegister(result), asRegister(trueValue), asRegister(falseValue), condition);
+            }
+        }
+    }
+
+    public static class StrategySwitchOp extends AArch64BlockEndOp implements StandardOp.BlockEndOp {
+        public static final LIRInstructionClass<StrategySwitchOp> TYPE = LIRInstructionClass.create(StrategySwitchOp.class);
+
+        private final Constant[] keyConstants;
+        private final SwitchStrategy strategy;
+        private final Function<Condition, AArch64Assembler.ConditionFlag> converter;
+        private final LabelRef[] keyTargets;
+        private final LabelRef defaultTarget;
+        @Alive protected Value key;
+        // TODO (das) This could be optimized: we only need the scratch register in case of a
+        // data patch or of immediates that are too large.
+        @Temp protected Value scratch;
+
+        public StrategySwitchOp(SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Value key, Value scratch, Function<Condition, AArch64Assembler.ConditionFlag> converter) {
+            super(TYPE);
+            this.strategy = strategy;
+            this.converter = converter;
+            this.keyConstants = strategy.getKeyConstants();
+            this.keyTargets = keyTargets;
+            this.defaultTarget = defaultTarget;
+            this.key = key;
+            this.scratch = scratch;
+            assert keyConstants.length == keyTargets.length;
+            assert keyConstants.length == strategy.keyProbabilities.length;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            strategy.run(new SwitchClosure(crb, masm));
+        }
+
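+        /*
+         * The strategy decides the overall shape of the emitted switch (e.g. a linear chain of
+         * compares or a binary search over the keys) and calls back into the closure below for
+         * every comparison it wants emitted.
+         */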
+        private class SwitchClosure extends SwitchStrategy.BaseSwitchClosure {
+            private final AArch64MacroAssembler masm;
+            private final CompilationResultBuilder crb;
+
+            public SwitchClosure(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+                super(crb, masm, keyTargets, defaultTarget);
+                this.masm = masm;
+                this.crb = crb;
+            }
+
+            @Override
+            protected void conditionalJump(int index, Condition condition, Label target) {
+                emitComparison(keyConstants[index]);
+                masm.branchConditionally(converter.apply(condition), target);
+            }
+
+            private void emitComparison(Constant c) {
+                JavaConstant jc = (JavaConstant) c;
+                ConstantValue constVal = new ConstantValue(LIRKind.value(key.getPlatformKind()), c);
+                switch (jc.getJavaKind()) {
+                    case Int:
+                        long lc = jc.asLong();
+                        assert NumUtil.isInt(lc);
+                        if (crb.codeCache.needsDataPatch(jc)) {
+                            crb.recordInlineDataInCode(jc);
+                            masm.forceMov(asRegister(scratch), (int) lc);
+                            masm.cmp(32, asRegister(key), asRegister(scratch));
+                        } else {
+                            emitCompare(crb, masm, key, scratch, constVal);
+                        }
+                        break;
+                    case Long:
+                        emitCompare(crb, masm, key, scratch, constVal);
+                        break;
+                    case Object:
+                        emitCompare(crb, masm, key, scratch, constVal);
+                        break;
+                    default:
+                        throw new JVMCIError("switch only supported for int, long and object");
+                }
+            }
+        }
+    }
+
+    public static class TableSwitchOp extends AArch64BlockEndOp implements StandardOp.BlockEndOp {
+        public static final LIRInstructionClass<TableSwitchOp> TYPE = LIRInstructionClass.create(TableSwitchOp.class);
+
+        private final int lowKey;
+        private final LabelRef defaultTarget;
+        private final LabelRef[] targets;
+        @Alive protected Variable keyValue;
+        @Temp protected Variable scratchValue;
+
+        public TableSwitchOp(int lowKey, LabelRef defaultTarget, LabelRef[] targets, Variable key, Variable scratch) {
+            super(TYPE);
+            this.lowKey = lowKey;
+            this.defaultTarget = defaultTarget;
+            this.targets = targets;
+            this.keyValue = key;
+            this.scratchValue = scratch;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            Register key = asRegister(keyValue);
+            Register scratch = asRegister(scratchValue);
+            if (lowKey != 0) {
+                if (AArch64MacroAssembler.isArithmeticImmediate(lowKey)) {
+                    masm.sub(32, key, key, lowKey);
+                } else {
+                    ConstantValue constVal = new ConstantValue(LIRKind.value(AArch64Kind.WORD), JavaConstant.forInt(lowKey));
+                    AArch64Move.move(crb, masm, scratchValue, constVal);
+                    masm.sub(32, key, key, scratch);
+                }
+            }
+            if (defaultTarget != null) {
+                // If the key is not in the table range, jump to the default target.
+                ConstantValue constVal = new ConstantValue(LIRKind.value(AArch64Kind.WORD), JavaConstant.forInt(targets.length));
+                emitCompare(crb, masm, keyValue, scratchValue, constVal);
+                masm.branchConditionally(AArch64Assembler.ConditionFlag.HS, defaultTarget.label());
+            }
+
+            // Load the start address of the jump table into scratch. The table starts 3
+            // instructions (3 * 4 = 12 bytes) after the adr: the adr itself, the ldr and the jmp.
+            masm.adr(scratch, 4 * 3);
+            masm.ldr(32, scratch, AArch64Address.createRegisterOffsetAddress(scratch, key, /* scaled */true));
+            masm.jmp(scratch);
+            int jumpTablePos = masm.position();
+            // emit jump table entries
+            for (LabelRef target : targets) {
+                Label label = target.label();
+                if (label.isBound()) {
+                    masm.emitInt(target.label().position());
+                } else {
+                    label.addPatchAt(masm.position());
+                    masm.emitInt(PatchLabelKind.JUMP_ADDRESS.encoding);
+                }
+            }
+            JumpTable jt = new JumpTable(jumpTablePos, lowKey, lowKey + targets.length - 1, 4);
+            crb.compilationResult.addAnnotation(jt);
+        }
+    }
+
+    private static void emitCompare(CompilationResultBuilder crb, AArch64MacroAssembler masm, Value key, Value scratchValue, ConstantValue c) {
+        long imm = c.getJavaConstant().asLong();
+        int size = key.getPlatformKind().getSizeInBytes() * Byte.SIZE;
+        if (AArch64MacroAssembler.isComparisonImmediate(imm)) {
+            masm.cmp(size, asRegister(key), (int) imm);
+        } else {
+            AArch64Move.move(crb, masm, asAllocatableValue(scratchValue), c);
+            masm.cmp(size, asRegister(key), asRegister(scratchValue));
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64FrameMap.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.aarch64;
+
+import com.oracle.graal.asm.NumUtil;
+import com.oracle.graal.lir.framemap.FrameMap;
+
+import jdk.vm.ci.aarch64.AArch64Kind;
+import jdk.vm.ci.code.CodeCacheProvider;
+import jdk.vm.ci.code.RegisterConfig;
+import jdk.vm.ci.code.StackSlot;
+import jdk.vm.ci.meta.LIRKind;
+
+/**
+ * AArch64 specific frame map.
+ * <p/>
+ * This is the format of an AArch64 stack frame:
+ * <p/>
+ *
+ * <pre>
+ *   Base       Contents
+ *
+ *            :                                :  -----
+ *   caller   | incoming overflow argument n   |    ^
+ *   frame    :     ...                        :    | positive
+ *            | incoming overflow argument 0   |    | offsets
+ *   ---------+--------------------------------+-------------------------
+ *            | return address                 |    |            ^
+ *            | prev. frame pointer            |    |            |
+ *            +--------------------------------+    |            |
+ *            | spill slot 0                   |    | negative   |      ^
+ *    callee  :     ...                        :    v offsets    |      |
+ *    frame   | spill slot n                   |  -----        total  frame
+ *            +--------------------------------+               frame  size
+ *            | alignment padding              |               size     |
+ *            +--------------------------------+  -----          |      |
+ *            | outgoing overflow argument n   |    ^            |      |
+ *            :     ...                        :    | positive   |      |
+ *            | outgoing overflow argument 0   |    | offsets    v      v
+ *    %sp-->  +--------------------------------+---------------------------
+ *
+ * </pre>
+ *
+ * The spill slot area also includes stack allocated memory blocks (ALLOCA blocks). The size of such
+ * a block may be greater than the size of a normal spill slot or the word size.
+ * <p/>
+ * A runtime can reserve space at the beginning of the overflow argument area. The calling
+ * convention can specify that the first overflow stack argument is not at offset 0, but at a
+ * specified offset. Use {@link CodeCacheProvider#getMinimumOutgoingSize()} to make sure that
+ * call-free methods also have this space reserved. Then the VM can use the memory at offset 0
+ * relative to the stack pointer.
+ * <p/>
+ */
+public class AArch64FrameMap extends FrameMap {
+    // Note: Spill size includes callee save area
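+
+    /*
+     * Worked example (a sketch; assumes an 8-byte word size, 16-byte stack alignment and that the
+     * superclass grows spillSize by 8 for each QWORD spill slot): frameSetupSize() = 2 * 8 = 16,
+     * so spillSize starts at 16. After allocating two QWORD spill slots spillSize = 32; with no
+     * outgoing arguments currentFrameSize() = roundUp(32 + 0 - 16, 16) = 16 and totalFrameSize()
+     * = 16 + 16 = 32.
+     */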
+
+    /**
+     * Creates a new frame map for the specified method.
+     */
+    public AArch64FrameMap(CodeCacheProvider codeCache, RegisterConfig registerConfig, ReferenceMapBuilderFactory referenceMapFactory) {
+        super(codeCache, registerConfig, referenceMapFactory);
+        initialSpillSize = frameSetupSize();
+        spillSize = initialSpillSize;
+    }
+
+    @Override
+    public int totalFrameSize() {
+        // frameSize + return address + frame pointer
+        return frameSize() + frameSetupSize();
+    }
+
+    private int frameSetupSize() {
+        // Size of return address and frame pointer that are saved in function prologue
+        return getTarget().arch.getWordSize() * 2;
+    }
+
+    @Override
+    public int currentFrameSize() {
+        return alignFrameSize(spillSize + outgoingSize - frameSetupSize());
+    }
+
+    @Override
+    protected int alignFrameSize(int size) {
+        return NumUtil.roundUp(size, getTarget().stackAlignment);
+    }
+
+    @Override
+    protected StackSlot allocateNewSpillSlot(LIRKind kind, int additionalOffset) {
+        return StackSlot.get(kind, -spillSize + additionalOffset, true);
+    }
+
+    public StackSlot allocateDeoptimizationRescueSlot() {
+        // XXX This is very likely not correct.
+        assert spillSize == initialSpillSize : "Deoptimization rescue slot must be the first stack slot";
+        return allocateSpillSlot(LIRKind.value(AArch64Kind.QWORD));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64FrameMapBuilder.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.aarch64;
+
+import com.oracle.graal.lir.framemap.FrameMap;
+import com.oracle.graal.lir.framemap.FrameMapBuilderImpl;
+
+import jdk.vm.ci.code.CodeCacheProvider;
+import jdk.vm.ci.code.RegisterConfig;
+import jdk.vm.ci.code.StackSlot;
+
+public class AArch64FrameMapBuilder extends FrameMapBuilderImpl {
+
+    public AArch64FrameMapBuilder(FrameMap frameMap, CodeCacheProvider codeCache, RegisterConfig registerConfig) {
+        super(frameMap, codeCache, registerConfig);
+    }
+
+    public StackSlot allocateDeoptimizationRescueSlot() {
+        return ((AArch64FrameMap) getFrameMap()).allocateDeoptimizationRescueSlot();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64LIRInstruction.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.aarch64;
+
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.lir.LIRInstruction;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+public abstract class AArch64LIRInstruction extends LIRInstruction {
+    protected AArch64LIRInstruction(LIRInstructionClass<? extends AArch64LIRInstruction> c) {
+        super(c);
+    }
+
+    @Override
+    public final void emitCode(CompilationResultBuilder crb) {
+        emitCode(crb, (AArch64MacroAssembler) crb.asm);
+    }
+
+    public abstract void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64Move.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,559 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.aarch64;
+
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.COMPOSITE;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.REG;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.STACK;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.UNINITIALIZED;
+import static com.oracle.graal.lir.LIRValueUtil.asJavaConstant;
+import static com.oracle.graal.lir.LIRValueUtil.isJavaConstant;
+import static jdk.vm.ci.aarch64.AArch64.zr;
+import static jdk.vm.ci.code.ValueUtil.asAllocatableValue;
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+import static jdk.vm.ci.code.ValueUtil.asStackSlot;
+import static jdk.vm.ci.code.ValueUtil.isRegister;
+import static jdk.vm.ci.code.ValueUtil.isStackSlot;
+
+import com.oracle.graal.asm.Label;
+import com.oracle.graal.asm.aarch64.AArch64Address;
+import com.oracle.graal.asm.aarch64.AArch64Assembler;
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.lir.LIRFrameState;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.Opcode;
+import com.oracle.graal.lir.StandardOp;
+import com.oracle.graal.lir.StandardOp.NullCheck;
+import com.oracle.graal.lir.StandardOp.ValueMoveOp;
+import com.oracle.graal.lir.VirtualStackSlot;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+import jdk.vm.ci.aarch64.AArch64;
+import jdk.vm.ci.aarch64.AArch64Kind;
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.code.StackSlot;
+import jdk.vm.ci.common.JVMCIError;
+import jdk.vm.ci.meta.AllocatableValue;
+import jdk.vm.ci.meta.Constant;
+import jdk.vm.ci.meta.JavaConstant;
+import jdk.vm.ci.meta.PlatformKind;
+import jdk.vm.ci.meta.Value;
+
+public class AArch64Move {
+
+    @Opcode("MOVE")
+    public static class MoveToRegOp extends AArch64LIRInstruction implements ValueMoveOp {
+        public static final LIRInstructionClass<MoveToRegOp> TYPE = LIRInstructionClass.create(MoveToRegOp.class);
+
+        @Def protected AllocatableValue result;
+        @Use({REG, STACK}) protected AllocatableValue input;
+
+        public MoveToRegOp(AllocatableValue result, AllocatableValue input) {
+            super(TYPE);
+            this.result = result;
+            this.input = input;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            move(crb, masm, getResult(), getInput());
+        }
+
+        @Override
+        public AllocatableValue getInput() {
+            return input;
+        }
+
+        @Override
+        public AllocatableValue getResult() {
+            return result;
+        }
+    }
+
+    /**
+     * If the destination is a StackSlot we cannot have a StackSlot or a Constant as the source,
+     * hence we have to special-case this particular combination. Note: we also allow a register
+     * as the destination to give the register allocator more freedom.
+     */
+    @Opcode("MOVE")
+    public static class MoveToStackOp extends AArch64LIRInstruction implements StandardOp.ValueMoveOp {
+        public static final LIRInstructionClass<MoveToStackOp> TYPE = LIRInstructionClass.create(MoveToStackOp.class);
+
+        @Def({STACK, REG}) protected AllocatableValue result;
+        @Use protected AllocatableValue input;
+
+        public MoveToStackOp(AllocatableValue result, AllocatableValue input) {
+            super(TYPE);
+            this.result = result;
+            this.input = input;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            move(crb, masm, getResult(), getInput());
+        }
+
+        @Override
+        public AllocatableValue getInput() {
+            return input;
+        }
+
+        @Override
+        public AllocatableValue getResult() {
+            return result;
+        }
+    }
+
+    @Opcode("MOVE")
+    public static class MoveFromConstOp extends AArch64LIRInstruction implements StandardOp.LoadConstantOp {
+        public static final LIRInstructionClass<MoveFromConstOp> TYPE = LIRInstructionClass.create(MoveFromConstOp.class);
+
+        @Def protected AllocatableValue result;
+        private final JavaConstant input;
+
+        public MoveFromConstOp(AllocatableValue result, JavaConstant input) {
+            super(TYPE);
+            this.result = result;
+            this.input = input;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            const2reg(crb, masm, result, input);
+        }
+
+        @Override
+        public Constant getConstant() {
+            return input;
+        }
+
+        @Override
+        public AllocatableValue getResult() {
+            return result;
+        }
+    }
+
+    public static class LoadAddressOp extends AArch64LIRInstruction {
+        public static final LIRInstructionClass<LoadAddressOp> TYPE = LIRInstructionClass.create(LoadAddressOp.class);
+
+        @Def protected AllocatableValue result;
+        @Use(COMPOSITE) protected AArch64AddressValue address;
+
+        public LoadAddressOp(AllocatableValue result, AArch64AddressValue address) {
+            super(TYPE);
+            this.result = result;
+            this.address = address;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            Register dst = asRegister(result);
+            AArch64Address adr = address.toAddress();
+            masm.loadAddress(dst, adr, address.getPlatformKind().getSizeInBytes());
+        }
+    }
+
+    public static class LoadDataOp extends AArch64LIRInstruction {
+        public static final LIRInstructionClass<LoadDataOp> TYPE = LIRInstructionClass.create(LoadDataOp.class);
+
+        @Def protected AllocatableValue result;
+        private final byte[] data;
+
+        public LoadDataOp(AllocatableValue result, byte[] data) {
+            super(TYPE);
+            this.result = result;
+            this.data = data;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            Register dst = asRegister(result);
+            int alignment = 16;
+            masm.loadAddress(dst, (AArch64Address) crb.recordDataReferenceInCode(data, alignment), alignment);
+        }
+    }
+
+    public static class StackLoadAddressOp extends AArch64LIRInstruction {
+        public static final LIRInstructionClass<StackLoadAddressOp> TYPE = LIRInstructionClass.create(StackLoadAddressOp.class);
+
+        @Def protected AllocatableValue result;
+        @Use({STACK, UNINITIALIZED}) protected AllocatableValue slot;
+
+        public StackLoadAddressOp(AllocatableValue result, AllocatableValue slot) {
+            super(TYPE);
+            assert slot instanceof VirtualStackSlot || slot instanceof StackSlot;
+            this.result = result;
+            this.slot = slot;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            AArch64Address address = (AArch64Address) crb.asAddress(slot);
+            PlatformKind kind = AArch64Kind.QWORD;
+            masm.loadAddress(asRegister(result, kind), address, kind.getSizeInBytes());
+        }
+    }
+
+    public static class MembarOp extends AArch64LIRInstruction {
+        public static final LIRInstructionClass<MembarOp> TYPE = LIRInstructionClass.create(MembarOp.class);
+
+        @SuppressWarnings("unused") private final int barriers;
+
+        public MembarOp(int barriers) {
+            super(TYPE);
+            this.barriers = barriers;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            // As I understand it, load acquire/store release have the same semantics as on IA64
+            // and allow us to handle LoadStore, LoadLoad and StoreStore without an explicit
+            // barrier. But Graal has no support yet for figuring out whether a load/store is
+            // volatile, so for now we just emit a full memory barrier everywhere.
+            // if ((barrier & MemoryBarriers.STORE_LOAD) != 0) {
+            masm.dmb(AArch64MacroAssembler.BarrierKind.ANY_ANY);
+            // }
+        }
+    }
+
+    abstract static class MemOp extends AArch64LIRInstruction implements StandardOp.ImplicitNullCheck {
+
+        protected final AArch64Kind kind;
+        @Use({COMPOSITE}) protected AArch64AddressValue addressValue;
+        @State protected LIRFrameState state;
+
+        public MemOp(LIRInstructionClass<? extends MemOp> c, AArch64Kind kind, AArch64AddressValue address, LIRFrameState state) {
+            super(c);
+            this.kind = kind;
+            this.addressValue = address;
+            this.state = state;
+        }
+
+        protected abstract void emitMemAccess(CompilationResultBuilder crb, AArch64MacroAssembler masm);
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            if (state != null) {
+                crb.recordImplicitException(masm.position(), state);
+            }
+            emitMemAccess(crb, masm);
+        }
+
+        @Override
+        public boolean makeNullCheckFor(Value value, LIRFrameState nullCheckState, int implicitNullCheckLimit) {
+            int immediate = addressValue.getImmediate();
+            if (state == null && value.equals(addressValue.getBase()) && addressValue.getOffset().equals(Value.ILLEGAL) && immediate >= 0 && immediate < implicitNullCheckLimit) {
+                state = nullCheckState;
+                return true;
+            }
+            return false;
+        }
+    }
+
+    public static final class LoadOp extends MemOp {
+        public static final LIRInstructionClass<LoadOp> TYPE = LIRInstructionClass.create(LoadOp.class);
+
+        @Def protected AllocatableValue result;
+
+        public LoadOp(AArch64Kind kind, AllocatableValue result, AArch64AddressValue address, LIRFrameState state) {
+            super(TYPE, kind, address, state);
+            this.result = result;
+        }
+
+        @Override
+        protected void emitMemAccess(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            AArch64Address address = addressValue.toAddress();
+            Register dst = asRegister(result);
+
+            int destSize = result.getPlatformKind().getSizeInBytes() * Byte.SIZE;
+            int srcSize = kind.getSizeInBytes() * Byte.SIZE;
+            if (kind.isInteger()) {
+                // TODO How to load unsigned chars without the necessary information?
+                masm.ldrs(destSize, srcSize, dst, address);
+            } else {
+                assert srcSize == destSize;
+                masm.fldr(srcSize, dst, address);
+            }
+        }
+    }
+
+    public static class StoreOp extends MemOp {
+        public static final LIRInstructionClass<StoreOp> TYPE = LIRInstructionClass.create(StoreOp.class);
+        @Use protected AllocatableValue input;
+
+        public StoreOp(AArch64Kind kind, AArch64AddressValue address, AllocatableValue input, LIRFrameState state) {
+            super(TYPE, kind, address, state);
+            this.input = input;
+        }
+
+        @Override
+        protected void emitMemAccess(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            emitStore(crb, masm, kind, addressValue.toAddress(), asRegister(input));
+        }
+    }
+
+    public static final class StoreConstantOp extends MemOp {
+        public static final LIRInstructionClass<StoreConstantOp> TYPE = LIRInstructionClass.create(StoreConstantOp.class);
+
+        protected final JavaConstant input;
+
+        public StoreConstantOp(AArch64Kind kind, AArch64AddressValue address, JavaConstant input, LIRFrameState state) {
+            super(TYPE, kind, address, state);
+            this.input = input;
+            if (!input.isDefaultForKind()) {
+                throw JVMCIError.shouldNotReachHere("Can only store null constants to memory");
+            }
+        }
+
+        @Override
+        public void emitMemAccess(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            emitStore(crb, masm, kind, addressValue.toAddress(), zr);
+        }
+    }
+
+    public static final class NullCheckOp extends AArch64LIRInstruction implements NullCheck {
+        public static final LIRInstructionClass<NullCheckOp> TYPE = LIRInstructionClass.create(NullCheckOp.class);
+
+        @Use(COMPOSITE) protected AArch64AddressValue address;
+        @State protected LIRFrameState state;
+
+        public NullCheckOp(AArch64AddressValue address, LIRFrameState state) {
+            super(TYPE);
+            this.address = address;
+            this.state = state;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            crb.recordImplicitException(masm.position(), state);
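+            // The destination is the zero register, so the loaded value is discarded; the load
+            // exists purely to fault (raising the recorded implicit exception) on a null address.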
+            masm.ldr(64, zr, address.toAddress());
+        }
+
+        public Value getCheckedValue() {
+            return address.base;
+        }
+
+        public LIRFrameState getState() {
+            return state;
+        }
+    }
+
+    /**
+     * Compare and swap instruction. Does the following atomically: <code>
+     *  CAS(newVal, expected, address):
+     *    oldVal = *address
+     *    if oldVal == expected:
+     *        *address = newVal
+     *    return oldVal
+     * </code>
+     */
+    @Opcode("CAS")
+    public static class CompareAndSwap extends AArch64LIRInstruction {
+        public static final LIRInstructionClass<CompareAndSwap> TYPE = LIRInstructionClass.create(CompareAndSwap.class);
+
+        @Def protected AllocatableValue resultValue;
+        @Alive protected Value expectedValue;
+        @Alive protected AllocatableValue newValue;
+        @Alive(COMPOSITE) protected AArch64AddressValue addressValue;
+        @Temp protected AllocatableValue scratchValue;
+
+        public CompareAndSwap(AllocatableValue result, Value expectedValue, AllocatableValue newValue, AArch64AddressValue addressValue, AllocatableValue scratch) {
+            super(TYPE);
+            this.resultValue = result;
+            this.expectedValue = expectedValue;
+            this.newValue = newValue;
+            this.addressValue = addressValue;
+            this.scratchValue = scratch;
+        }
+
+        @Override
+        public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+            AArch64Kind kind = (AArch64Kind) expectedValue.getPlatformKind();
+            assert kind.isInteger();
+            int size = kind.getSizeInBytes() * Byte.SIZE;
+
+            AArch64Address address = addressValue.toAddress();
+            Register result = asRegister(resultValue);
+            Register newVal = asRegister(newValue);
+            Register scratch = asRegister(scratchValue);
+            // We could avoid using a scratch register here by reusing resultValue for the stlxr
+            // success flag and issuing a mov resultValue, expectedValue on success before
+            // returning.
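+            // Emitted loop, roughly:
+            //   retry: ldaxr result, [address]
+            //          cmp result, expected; b.ne fail
+            //          stlxr scratch, newVal, [address]
+            //          cbnz scratch, retry
+            //   fail: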
+            Label retry = new Label();
+            Label fail = new Label();
+            masm.bind(retry);
+            masm.ldaxr(size, result, address);
+            AArch64Compare.gpCompare(masm, resultValue, expectedValue);
+            masm.branchConditionally(AArch64Assembler.ConditionFlag.NE, fail);
+            masm.stlxr(size, scratch, newVal, address);
+            // if scratch == 0 then write successful, else retry.
+            masm.cbnz(32, scratch, retry);
+            masm.bind(fail);
+        }
+    }
+
+    public static void emitStore(@SuppressWarnings("unused") CompilationResultBuilder crb, AArch64MacroAssembler masm, AArch64Kind kind, AArch64Address dst, Register src) {
+        int destSize = kind.getSizeInBytes() * Byte.SIZE;
+        if (kind.isInteger()) {
+            masm.str(destSize, src, dst);
+        } else {
+            masm.fstr(destSize, src, dst);
+        }
+    }
+
+    public static void move(CompilationResultBuilder crb, AArch64MacroAssembler masm, AllocatableValue result, Value input) {
+        if (isRegister(input)) {
+            if (isRegister(result)) {
+                reg2reg(crb, masm, result, asAllocatableValue(input));
+            } else if (isStackSlot(result)) {
+                reg2stack(crb, masm, result, asAllocatableValue(input));
+            } else {
+                throw JVMCIError.shouldNotReachHere();
+            }
+        } else if (isStackSlot(input)) {
+            if (isRegister(result)) {
+                stack2reg(crb, masm, result, asAllocatableValue(input));
+            } else {
+                throw JVMCIError.shouldNotReachHere();
+            }
+        } else if (isJavaConstant(input)) {
+            if (isRegister(result)) {
+                const2reg(crb, masm, result, asJavaConstant(input));
+            } else {
+                throw JVMCIError.shouldNotReachHere();
+            }
+        } else {
+            throw JVMCIError.shouldNotReachHere();
+        }
+    }
+
+    private static void reg2reg(@SuppressWarnings("unused") CompilationResultBuilder crb, AArch64MacroAssembler masm, AllocatableValue result, AllocatableValue input) {
+        Register dst = asRegister(result);
+        Register src = asRegister(input);
+        AArch64Kind kind = (AArch64Kind) input.getPlatformKind();
+        int size = kind.getSizeInBytes() * Byte.SIZE;
+        if (kind.isInteger()) {
+            masm.mov(size, dst, src);
+        } else {
+            masm.fmov(size, dst, src);
+        }
+    }
+
+    private static void reg2stack(CompilationResultBuilder crb, AArch64MacroAssembler masm, AllocatableValue result, AllocatableValue input) {
+        AArch64Address dest = loadStackSlotAddress(crb, masm, asStackSlot(result), Value.ILLEGAL);
+        Register src = asRegister(input);
+        AArch64Kind kind = (AArch64Kind) input.getPlatformKind();
+        int size = kind.getSizeInBytes() * Byte.SIZE;
+        if (kind.isInteger()) {
+            masm.str(size, src, dest);
+        } else {
+            masm.fstr(size, src, dest);
+        }
+    }
+
+    private static void stack2reg(CompilationResultBuilder crb, AArch64MacroAssembler masm, AllocatableValue result, AllocatableValue input) {
+        AArch64Address src = loadStackSlotAddress(crb, masm, asStackSlot(input), result);
+        Register dest = asRegister(result);
+        AArch64Kind kind = (AArch64Kind) input.getPlatformKind();
+        int size = kind.getSizeInBytes() * Byte.SIZE;
+        if (kind.isInteger()) {
+            masm.ldr(size, dest, src);
+        } else {
+            masm.fldr(size, dest, src);
+        }
+    }
+
+    private static void const2reg(CompilationResultBuilder crb, AArch64MacroAssembler masm, AllocatableValue result, JavaConstant input) {
+        Register dst = asRegister(result);
+        switch (input.getJavaKind().getStackKind()) {
+            case Int:
+                if (crb.codeCache.needsDataPatch(input)) {
+                    crb.recordInlineDataInCode(input);
+                    masm.forceMov(dst, input.asInt());
+                } else {
+                    masm.mov(dst, input.asInt());
+                }
+                break;
+            case Long:
+                if (crb.codeCache.needsDataPatch(input)) {
+                    crb.recordInlineDataInCode(input);
+                    masm.forceMov(dst, input.asLong());
+                } else {
+                    masm.mov(dst, input.asLong());
+                }
+                break;
+            case Float:
+                if (AArch64MacroAssembler.isFloatImmediate(input.asFloat())) {
+                    masm.fmov(32, dst, input.asFloat());
+                } else {
+                    masm.fldr(32, dst, (AArch64Address) crb.asFloatConstRef(input));
+                }
+                break;
+            case Double:
+                if (AArch64MacroAssembler.isDoubleImmediate(input.asDouble())) {
+                    masm.fmov(64, dst, input.asDouble());
+                } else {
+                    masm.fldr(64, dst, (AArch64Address) crb.asDoubleConstRef(input));
+                }
+                break;
+            case Object:
+                if (input.isNull()) {
+                    masm.mov(dst, 0);
+                } else if (crb.target.inlineObjects) {
+                    crb.recordInlineDataInCode(input);
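+                    // Assumption: the 0xDEADDEADDEADDEADL bits are a placeholder that the inline
+                    // data patch recorded above replaces with the real object address.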
+                    masm.forceMov(dst, 0xDEADDEADDEADDEADL);
+                } else {
+                    masm.ldr(64, dst, (AArch64Address) crb.recordDataReferenceInCode(input, 8));
+                }
+                break;
+            default:
+                throw JVMCIError.shouldNotReachHere("kind=" + input.getJavaKind().getStackKind());
+        }
+    }
+
+    /**
+     * Returns AArch64Address of given StackSlot. We cannot use CompilationResultBuilder.asAddress
+     * since this calls AArch64MacroAssembler.makeAddress with displacements that may be larger than
+     * 9-bit signed, which cannot be handled by that method.
+     *
+     * Instead we create the address ourselves. We use scaled unsigned addressing since we know
+     * the transfer size: the 12-bit unsigned immediate is scaled by the transfer size, which
+     * gives us a 15-bit byte-displacement range for longs/doubles and a 14-bit range for
+     * everything else.
+     *
+     * @param scratch Scratch register that can be used to load the address. If Value.ILLEGAL this
+     *            method fails when the displacement of the StackSlot is too large to be encoded
+     *            directly.
+     * @return AArch64Address of given StackSlot. Uses scratch register if necessary to do so.
+     */
+    private static AArch64Address loadStackSlotAddress(CompilationResultBuilder crb, AArch64MacroAssembler masm, StackSlot slot, AllocatableValue scratch) {
+        assert Value.ILLEGAL.equals(scratch) || ((AArch64Kind) scratch.getPlatformKind()).isInteger();
+        int displacement = crb.frameMap.offsetForStackSlot(slot);
+        int transferSize = slot.getPlatformKind().getSizeInBytes();
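+        // Example (sketch): for a QWORD slot the 12-bit unsigned immediate is scaled by 8, so
+        // byte displacements up to 8 * 4095 = 32760 are encodable without a scratch register.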
+        Register scratchReg = Value.ILLEGAL.equals(scratch) ? AArch64.zr : asRegister(scratch);
+        return masm.makeAddress(AArch64.sp, displacement, scratchReg, transferSize, /* allowOverwrite */false);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64PauseOp.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.aarch64;
+
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.Opcode;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+/**
+ * Emits a pause.
+ */
+@Opcode("PAUSE")
+public final class AArch64PauseOp extends AArch64LIRInstruction {
+    public static final LIRInstructionClass<AArch64PauseOp> TYPE = LIRInstructionClass.create(AArch64PauseOp.class);
+
+    public AArch64PauseOp() {
+        super(TYPE);
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+        masm.pause();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64ReinterpretOp.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.graal.lir.aarch64;
+
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+import jdk.vm.ci.aarch64.AArch64Kind;
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.meta.AllocatableValue;
+
+/**
+ * Instruction that reinterprets some bit pattern as a different type. The following
+ * reinterpretations are possible: int <-> float and long <-> double.
+ */
+public class AArch64ReinterpretOp extends AArch64LIRInstruction {
+    private static final LIRInstructionClass<AArch64ReinterpretOp> TYPE = LIRInstructionClass.create(AArch64ReinterpretOp.class);
+
+    @Def protected AllocatableValue resultValue;
+    @Use protected AllocatableValue inputValue;
+
+    public AArch64ReinterpretOp(AllocatableValue resultValue, AllocatableValue inputValue) {
+        super(TYPE);
+        AArch64Kind from = (AArch64Kind) inputValue.getPlatformKind();
+        AArch64Kind to = (AArch64Kind) resultValue.getPlatformKind();
+        assert from.getSizeInBytes() == to.getSizeInBytes() && from.isInteger() ^ to.isInteger();
+        this.resultValue = resultValue;
+        this.inputValue = inputValue;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+        Register result = asRegister(resultValue);
+        Register input = asRegister(inputValue);
+        AArch64Kind to = (AArch64Kind) resultValue.getPlatformKind();
+        final int size = to.getSizeInBytes() * Byte.SIZE;
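+        // An fmov between a general-purpose and a floating-point register copies the raw bits
+        // unchanged, which is exactly the reinterpret semantics required here.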
+        masm.fmov(size, result, input);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.aarch64/src/com/oracle/graal/lir/aarch64/AArch64SignExtendOp.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.graal.lir.aarch64;
+
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+
+import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
+import com.oracle.graal.lir.LIRInstructionClass;
+import com.oracle.graal.lir.asm.CompilationResultBuilder;
+
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.meta.AllocatableValue;
+
+public class AArch64SignExtendOp extends AArch64LIRInstruction {
+    private static final LIRInstructionClass<AArch64SignExtendOp> TYPE = LIRInstructionClass.create(AArch64SignExtendOp.class);
+
+    @Def protected AllocatableValue resultValue;
+    @Use protected AllocatableValue inputValue;
+
+    public AArch64SignExtendOp(AllocatableValue resultValue, AllocatableValue inputValue) {
+        super(TYPE);
+        this.resultValue = resultValue;
+        this.inputValue = inputValue;
+    }
+
+    @Override
+    public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+        Register result = asRegister(resultValue);
+        Register input = asRegister(inputValue);
+        int to = resultValue.getPlatformKind().getSizeInBytes() * Byte.SIZE;
+        int from = inputValue.getPlatformKind().getSizeInBytes() * Byte.SIZE;
+        masm.sxt(to, from, result, input);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements.aarch64/src/com/oracle/graal/replacements/aarch64/AArch64CountLeadingZerosNode.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.replacements.aarch64;
+
+import com.oracle.graal.compiler.common.type.PrimitiveStamp;
+import com.oracle.graal.compiler.common.type.StampFactory;
+import com.oracle.graal.graph.NodeClass;
+import com.oracle.graal.graph.spi.CanonicalizerTool;
+import com.oracle.graal.lir.aarch64.AArch64ArithmeticLIRGeneratorTool;
+import com.oracle.graal.lir.gen.ArithmeticLIRGeneratorTool;
+import com.oracle.graal.nodeinfo.NodeInfo;
+import com.oracle.graal.nodes.ConstantNode;
+import com.oracle.graal.nodes.ValueNode;
+import com.oracle.graal.nodes.calc.UnaryNode;
+import com.oracle.graal.nodes.spi.ArithmeticLIRLowerable;
+import com.oracle.graal.nodes.spi.NodeLIRBuilderTool;
+
+import jdk.vm.ci.meta.JavaConstant;
+import jdk.vm.ci.meta.JavaKind;
+
+@NodeInfo
+public final class AArch64CountLeadingZerosNode extends UnaryNode implements ArithmeticLIRLowerable {
+
+    public static final NodeClass<AArch64CountLeadingZerosNode> TYPE = NodeClass.create(AArch64CountLeadingZerosNode.class);
+
+    public AArch64CountLeadingZerosNode(ValueNode value) {
+        super(TYPE, StampFactory.forInteger(JavaKind.Int, 0, ((PrimitiveStamp) value.stamp()).getBits()), value);
+    }
+
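+    // For example, tryFold of the int constant 16 folds to ConstantNode.forInt(27), since
+    // Integer.numberOfLeadingZeros(16) == 27.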
+    public static ValueNode tryFold(ValueNode value) {
+        if (value.isConstant()) {
+            JavaConstant c = value.asJavaConstant();
+            if (value.getStackKind() == JavaKind.Int) {
+                return ConstantNode.forInt(Integer.numberOfLeadingZeros(c.asInt()));
+            } else {
+                return ConstantNode.forInt(Long.numberOfLeadingZeros(c.asLong()));
+            }
+        }
+        return null;
+    }
+
+    @Override
+    public ValueNode canonical(CanonicalizerTool tool, ValueNode forValue) {
+        ValueNode folded = tryFold(forValue);
+        return folded != null ? folded : this;
+    }
+
+    @Override
+    public void generate(NodeLIRBuilderTool builder, ArithmeticLIRGeneratorTool gen) {
+        builder.setResult(this, ((AArch64ArithmeticLIRGeneratorTool) gen).emitCountLeadingZeros(builder.operand(getValue())));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements.aarch64/src/com/oracle/graal/replacements/aarch64/AArch64FloatArithmeticSnippets.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.graal.replacements.aarch64;
+
+import com.oracle.graal.api.replacements.SnippetReflectionProvider;
+import com.oracle.graal.compiler.common.type.ArithmeticOpTable;
+import com.oracle.graal.compiler.common.type.ArithmeticOpTable.BinaryOp.Rem;
+import com.oracle.graal.graph.Node;
+import com.oracle.graal.graph.Node.NodeIntrinsic;
+import com.oracle.graal.graph.NodeClass;
+import com.oracle.graal.graph.spi.CanonicalizerTool;
+import com.oracle.graal.lir.gen.ArithmeticLIRGeneratorTool;
+import com.oracle.graal.nodeinfo.NodeInfo;
+import com.oracle.graal.nodes.ValueNode;
+import com.oracle.graal.nodes.calc.BinaryArithmeticNode;
+import com.oracle.graal.nodes.calc.RemNode;
+import com.oracle.graal.nodes.spi.LoweringTool;
+import com.oracle.graal.nodes.spi.NodeLIRBuilderTool;
+import com.oracle.graal.phases.util.Providers;
+import com.oracle.graal.replacements.Snippet;
+import com.oracle.graal.replacements.SnippetTemplate;
+import com.oracle.graal.replacements.Snippets;
+
+import jdk.vm.ci.code.TargetDescription;
+import jdk.vm.ci.common.JVMCIError;
+import jdk.vm.ci.meta.JavaKind;
+
+/**
+ * AArch64 has no floating-point remainder instruction. We use
+ * <code>n % d == n - Truncate(n / d) * d</code> instead. This is not correct for some edge cases
+ * (e.g. <code>Infinity % d</code> must be NaN and <code>-0.0 % 5.0</code> must stay -0.0), so we
+ * fix those up using these snippets.
+ */
+public class AArch64FloatArithmeticSnippets extends SnippetTemplate.AbstractTemplates implements Snippets {
+
+    private final SnippetTemplate.SnippetInfo drem;
+    private final SnippetTemplate.SnippetInfo frem;
+
+    public AArch64FloatArithmeticSnippets(Providers providers, SnippetReflectionProvider snippetReflection, TargetDescription target) {
+        super(providers, snippetReflection, target);
+        drem = snippet(AArch64FloatArithmeticSnippets.class, "dremSnippet");
+        frem = snippet(AArch64FloatArithmeticSnippets.class, "fremSnippet");
+    }
+
+    public void lower(RemNode node, LoweringTool tool) {
+        // assert node.kind() == JavaKind.Float || node.kind() == JavaKind.Double;
+        // if (node instanceof SafeNode) {
+        // // We already introduced the necessary checks, nothing to do.
+        // return;
+        // }
+        // SnippetTemplate.SnippetInfo snippet = node.kind() == Kind.Double ? drem : frem;
+        // SnippetTemplate.Arguments args = new SnippetTemplate.Arguments(snippet,
+        // node.graph().getGuardsStage());
+        // args.add("x", node.x());
+        // args.add("y", node.y());
+        // args.add("isStrictFP", node.isStrictFP());
+        // template(args).instantiate(providers.getMetaAccess(), node,
+        // SnippetTemplate.DEFAULT_REPLACER,
+        // tool, args);
+        throw JVMCIError.unimplemented(node + ", " + tool);
+    }
+
+    @Snippet
+    public static double dremSnippet(double x, double y, @Snippet.ConstantParameter boolean isStrictFP) {
+        if (Double.isInfinite(x) || y == 0.0 || Double.isNaN(y)) {
+            return Double.NaN;
+        }
+        // -0.0 % 5.0 will result in 0.0 and not -0.0 if we don't check here.
+        if (Double.isInfinite(y) || x == 0.0) {
+            return x;
+        }
+        return safeRem(JavaKind.Double, x, y, isStrictFP);
+    }
+
+    @Snippet
+    public static float fremSnippet(float x, float y, @Snippet.ConstantParameter boolean isStrictFP) {
+        if (Float.isInfinite(x) || y == 0.0f || Float.isNaN(y)) {
+            return Float.NaN;
+        }
+        // -0.0 % 5.0 will result in 0.0 and not -0.0 if we don't check here.
+        if (Float.isInfinite(y) || x == 0.0f) {
+            return x;
+        }
+        return safeRem(JavaKind.Float, x, y, isStrictFP);
+    }
+
+    @NodeIntrinsic(SafeFloatRemNode.class)
+    private static native double safeRem(@Node.ConstantNodeParameter JavaKind kind, double x, double y, @Node.ConstantNodeParameter boolean isStrictFP);
+
+    @NodeIntrinsic(SafeFloatRemNode.class)
+    private static native float safeRem(@Node.ConstantNodeParameter JavaKind kind, float x, float y, @Node.ConstantNodeParameter boolean isStrictFP);
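+
+    // Illustrative only, not used by the snippets: a plain-Java sketch of the naive
+    // lowering described in the class comment, modelling Truncate with Math.floor/ceil
+    // so that the sign of -0.0 survives as it does in hardware truncation. It shows
+    // two of the cases the checks above fix up: naiveRem(-0.0, 5.0) yields 0.0 instead
+    // of -0.0, and naiveRem(3.0, Double.POSITIVE_INFINITY) yields NaN (from
+    // 0 * Infinity) instead of 3.0.
+    @SuppressWarnings("unused")
+    private static double naiveRem(double n, double d) {
+        double q = n / d;
+        double truncated = q < 0.0 ? Math.ceil(q) : Math.floor(q);
+        return n - truncated * d;
+    }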
+
+    // Marker interface to distinguish untreated nodes from ones where we have installed the
+    // additional checks.
+    private interface SafeNode {
+    }
+
+    @NodeInfo
+    // static class SafeFloatRemNode extends FloatRemNode implements SafeNode {
+    static class SafeFloatRemNode extends BinaryArithmeticNode<Rem> implements SafeNode {
+
+        public static final NodeClass<SafeFloatRemNode> TYPE = NodeClass.create(SafeFloatRemNode.class);
+
+        @SuppressWarnings("unused")
+        public SafeFloatRemNode(JavaKind kind, ValueNode x, ValueNode y, boolean isStrictFP) {
+            super(TYPE, ArithmeticOpTable::getRem, x, y);
+        }
+
+        public void generate(NodeLIRBuilderTool builder, ArithmeticLIRGeneratorTool gen) {
+            throw JVMCIError.unimplemented();
+        }
+
+        public void generate(NodeLIRBuilderTool generator) {
+            throw JVMCIError.unimplemented();
+        }
+
+        public Node canonical(CanonicalizerTool tool) {
+            throw JVMCIError.unimplemented();
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements.aarch64/src/com/oracle/graal/replacements/aarch64/AArch64GraphBuilderPlugins.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.replacements.aarch64;
+
+import com.oracle.graal.compiler.common.spi.ForeignCallsProvider;
+import com.oracle.graal.nodes.ValueNode;
+import com.oracle.graal.nodes.graphbuilderconf.GraphBuilderConfiguration.Plugins;
+import com.oracle.graal.nodes.graphbuilderconf.GraphBuilderContext;
+import com.oracle.graal.nodes.graphbuilderconf.InvocationPlugin;
+import com.oracle.graal.nodes.graphbuilderconf.InvocationPlugins;
+import com.oracle.graal.nodes.graphbuilderconf.InvocationPlugins.Registration;
+import com.oracle.graal.replacements.IntegerSubstitutions;
+import com.oracle.graal.replacements.LongSubstitutions;
+
+import jdk.vm.ci.meta.JavaKind;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+
+public class AArch64GraphBuilderPlugins {
+
+    public static void register(Plugins plugins, ForeignCallsProvider foreignCalls) {
+        InvocationPlugins invocationPlugins = plugins.getInvocationPlugins();
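+        // Registration is deferred, presumably so that the substitution classes are
+        // resolved only once the invocation plugins are actually queried.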
+        invocationPlugins.defer(new Runnable() {
+            public void run() {
+                registerIntegerLongPlugins(invocationPlugins, IntegerSubstitutions.class, JavaKind.Int);
+                registerIntegerLongPlugins(invocationPlugins, LongSubstitutions.class, JavaKind.Long);
+                registerMathPlugins(invocationPlugins, foreignCalls);
+            }
+        });
+    }
+
+    private static void registerIntegerLongPlugins(InvocationPlugins plugins, Class<?> substituteDeclaringClass, JavaKind kind) {
+        Class<?> declaringClass = kind.toBoxedJavaClass();
+        Class<?> type = kind.toJavaClass();
+        Registration r = new Registration(plugins, declaringClass);
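+        // Hypothetical example of this plugin's effect: a constant call such as
+        // Integer.numberOfLeadingZeros(16) is expected to be folded by tryFold to the
+        // constant 27; non-constant inputs become an AArch64CountLeadingZerosNode.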
+        r.register1("numberOfLeadingZeros", type, new InvocationPlugin() {
+            public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode value) {
+                ValueNode folded = AArch64CountLeadingZerosNode.tryFold(value);
+                if (folded != null) {
+                    b.addPush(JavaKind.Int, folded);
+                } else {
+                    b.addPush(JavaKind.Int, new AArch64CountLeadingZerosNode(value));
+                }
+                return true;
+            }
+        });
+        r.registerMethodSubstitution(substituteDeclaringClass, "numberOfTrailingZeros", type);
+    }
+
+    @SuppressWarnings("unused")
+    private static void registerMathPlugins(InvocationPlugins plugins, ForeignCallsProvider foreignCalls) {
+        // Registration r = new Registration(plugins, Math.class);
+        // r.register1("sin", Double.TYPE, new ForeignCallPlugin(foreignCalls, ARITHMETIC_SIN));
+        // r.register1("cos", Double.TYPE, new ForeignCallPlugin(foreignCalls, ARITHMETIC_COS));
+        // r.register1("tan", Double.TYPE, new ForeignCallPlugin(foreignCalls, ARITHMETIC_TAN));
+        // r.register1("exp", Double.TYPE, new ForeignCallPlugin(foreignCalls, ARITHMETIC_EXP));
+        // r.register1("log", Double.TYPE, new ForeignCallPlugin(foreignCalls, ARITHMETIC_LOG));
+        // r.register1("log10", Double.TYPE, new ForeignCallPlugin(foreignCalls, ARITHMETIC_LOG10));
+        // r.register2("pow", Double.TYPE, Double.TYPE, new ForeignCallPlugin(foreignCalls,
+        // ARITHMETIC_POW));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements.aarch64/src/com/oracle/graal/replacements/aarch64/AArch64IntegerArithmeticSnippets.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.graal.replacements.aarch64;
+
+import com.oracle.graal.api.replacements.SnippetReflectionProvider;
+import com.oracle.graal.graph.Node.ConstantNodeParameter;
+import com.oracle.graal.graph.Node.NodeIntrinsic;
+import com.oracle.graal.graph.NodeClass;
+import com.oracle.graal.nodeinfo.NodeInfo;
+import com.oracle.graal.nodes.ValueNode;
+import com.oracle.graal.nodes.calc.FixedBinaryNode;
+import com.oracle.graal.nodes.calc.IntegerDivNode;
+import com.oracle.graal.nodes.calc.IntegerRemNode;
+import com.oracle.graal.nodes.calc.UnsignedDivNode;
+import com.oracle.graal.nodes.calc.UnsignedRemNode;
+import com.oracle.graal.nodes.spi.LoweringTool;
+import com.oracle.graal.phases.util.Providers;
+import com.oracle.graal.replacements.Snippet;
+import com.oracle.graal.replacements.SnippetTemplate;
+import com.oracle.graal.replacements.SnippetTemplate.AbstractTemplates;
+import com.oracle.graal.replacements.Snippets;
+
+import jdk.vm.ci.code.TargetDescription;
+import jdk.vm.ci.common.JVMCIError;
+import jdk.vm.ci.meta.JavaKind;
+
+/**
+ * Division in the AArch64 ISA does not generate a trap when dividing by zero; instead, it sets the
+ * result to 0. These snippets throw an ArithmeticException if the denominator is 0 and otherwise
+ * forward to the LIRGenerator.
+ */
+public class AArch64IntegerArithmeticSnippets extends AbstractTemplates implements Snippets {
+
+    private final SnippetTemplate.SnippetInfo idiv;
+    private final SnippetTemplate.SnippetInfo ldiv;
+    private final SnippetTemplate.SnippetInfo irem;
+    private final SnippetTemplate.SnippetInfo lrem;
+
+    private final SnippetTemplate.SnippetInfo uidiv;
+    private final SnippetTemplate.SnippetInfo uldiv;
+    private final SnippetTemplate.SnippetInfo uirem;
+    private final SnippetTemplate.SnippetInfo ulrem;
+
+    public AArch64IntegerArithmeticSnippets(Providers providers, SnippetReflectionProvider snippetReflection, TargetDescription target) {
+        super(providers, snippetReflection, target);
+        idiv = snippet(AArch64IntegerArithmeticSnippets.class, "idivSnippet");
+        ldiv = snippet(AArch64IntegerArithmeticSnippets.class, "ldivSnippet");
+        irem = snippet(AArch64IntegerArithmeticSnippets.class, "iremSnippet");
+        lrem = snippet(AArch64IntegerArithmeticSnippets.class, "lremSnippet");
+
+        uidiv = snippet(AArch64IntegerArithmeticSnippets.class, "uidivSnippet");
+        uldiv = snippet(AArch64IntegerArithmeticSnippets.class, "uldivSnippet");
+        uirem = snippet(AArch64IntegerArithmeticSnippets.class, "uiremSnippet");
+        ulrem = snippet(AArch64IntegerArithmeticSnippets.class, "ulremSnippet");
+    }
+
+    public void lower(FixedBinaryNode node, LoweringTool tool) {
+        // assert node.kind() == JavaKind.Int || node.kind() == JavaKind.Long;
+        // SnippetTemplate.SnippetInfo snippet;
+        // if (node instanceof SafeNode) {
+        // // We already introduced the 0 division check, nothing to do.
+        // return;
+        // } else if (node instanceof IntegerDivNode) {
+        // snippet = node.kind() == JavaKind.Int ? idiv : ldiv;
+        // } else if (node instanceof IntegerRemNode) {
+        // snippet = node.kind() == JavaKind.Int ? irem : lrem;
+        // } else if (node instanceof UnsignedDivNode) {
+        // snippet = node.kind() == JavaKind.Int ? uidiv : uldiv;
+        // } else if (node instanceof UnsignedRemNode) {
+        // snippet = node.kind() == JavaKind.Int ? uirem : ulrem;
+        // } else {
+        // throw JVMCIError.shouldNotReachHere();
+        // }
+        // Arguments args = new Arguments(snippet, node.graph().getGuardsStage());
+        // args.add("x", node.x());
+        // args.add("y", node.y());
+        // template(args).instantiate(providers.getMetaAccess(), node,
+        // SnippetTemplate.DEFAULT_REPLACER,
+        // args);
+        throw JVMCIError.unimplemented(node + ", " + tool);
+    }
+
+    @Snippet
+    public static int idivSnippet(int x, int y) {
+        checkForZero(y);
+        return safeDiv(JavaKind.Int, x, y);
+    }
+
+    @Snippet
+    public static long ldivSnippet(long x, long y) {
+        checkForZero(y);
+        return safeDiv(JavaKind.Long, x, y);
+    }
+
+    @Snippet
+    public static int iremSnippet(int x, int y) {
+        checkForZero(y);
+        return safeRem(JavaKind.Int, x, y);
+    }
+
+    @Snippet
+    public static long lremSnippet(long x, long y) {
+        checkForZero(y);
+        return safeRem(JavaKind.Long, x, y);
+    }
+
+    @Snippet
+    public static int uidivSnippet(int x, int y) {
+        checkForZero(y);
+        return safeUDiv(JavaKind.Int, x, y);
+    }
+
+    @Snippet
+    public static long uldivSnippet(long x, long y) {
+        checkForZero(y);
+        return safeUDiv(JavaKind.Long, x, y);
+    }
+
+    @Snippet
+    public static int uiremSnippet(int x, int y) {
+        checkForZero(y);
+        return safeURem(JavaKind.Int, x, y);
+    }
+
+    @Snippet
+    public static long ulremSnippet(long x, long y) {
+        checkForZero(y);
+        return safeURem(JavaKind.Long, x, y);
+    }
+
+    private static void checkForZero(int y) {
+        if (y == 0) {
+            throw new ArithmeticException("/ by zero");
+        }
+    }
+
+    private static void checkForZero(long y) {
+        if (y == 0) {
+            throw new ArithmeticException("/ by zero");
+        }
+    }
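+
+    // Illustrative only, not used by the lowering: assuming SafeUnsignedDivNode matches
+    // the semantics of Integer.divideUnsigned, uidivSnippet behaves like this plain-Java
+    // model (Integer.divideUnsigned itself throws ArithmeticException on a zero divisor,
+    // so the explicit check is shown only for symmetry with the snippets above).
+    @SuppressWarnings("unused")
+    private static int uidivModel(int x, int y) {
+        checkForZero(y);
+        return Integer.divideUnsigned(x, y); // unsigned 32-bit division
+    }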
+
+    @NodeIntrinsic(SafeIntegerDivNode.class)
+    private static native int safeDiv(@ConstantNodeParameter JavaKind kind, int x, int y);
+
+    @NodeIntrinsic(SafeIntegerDivNode.class)
+    private static native long safeDiv(@ConstantNodeParameter JavaKind kind, long x, long y);
+
+    @NodeIntrinsic(SafeIntegerRemNode.class)
+    private static native int safeRem(@ConstantNodeParameter JavaKind kind, int x, int y);
+
+    @NodeIntrinsic(SafeIntegerRemNode.class)
+    private static native long safeRem(@ConstantNodeParameter JavaKind kind, long x, long y);
+
+    @NodeIntrinsic(SafeUnsignedDivNode.class)
+    private static native int safeUDiv(@ConstantNodeParameter JavaKind kind, int x, int y);
+
+    @NodeIntrinsic(SafeUnsignedDivNode.class)
+    private static native long safeUDiv(@ConstantNodeParameter JavaKind kind, long x, long y);
+
+    @NodeIntrinsic(SafeUnsignedRemNode.class)
+    private static native int safeURem(@ConstantNodeParameter JavaKind kind, int x, int y);
+
+    @NodeIntrinsic(SafeUnsignedRemNode.class)
+    private static native long safeURem(@ConstantNodeParameter JavaKind kind, long x, long y);
+
+    // Marker interface to distinguish untreated nodes from ones where we have installed the
+    // additional checks.
+    private interface SafeNode {
+    }
+
+    @NodeInfo
+    static class SafeIntegerDivNode extends IntegerDivNode implements SafeNode {
+        public static final NodeClass<SafeIntegerDivNode> TYPE = NodeClass.create(SafeIntegerDivNode.class);
+
+        @SuppressWarnings("unused")
+        public SafeIntegerDivNode(JavaKind kind, ValueNode x, ValueNode y) {
+            super(x, y);
+        }
+    }
+
+    @NodeInfo
+    static class SafeIntegerRemNode extends IntegerRemNode implements SafeNode {
+        public static final NodeClass<SafeIntegerRemNode> TYPE = NodeClass.create(SafeIntegerRemNode.class);
+
+        @SuppressWarnings("unused")
+        public SafeIntegerRemNode(JavaKind kind, ValueNode x, ValueNode y) {
+            super(x, y);
+        }
+    }
+
+    @NodeInfo
+    static class SafeUnsignedDivNode extends UnsignedDivNode implements SafeNode {
+        public static final NodeClass<SafeUnsignedDivNode> TYPE = NodeClass.create(SafeUnsignedDivNode.class);
+
+        @SuppressWarnings("unused")
+        public SafeUnsignedDivNode(JavaKind kind, ValueNode x, ValueNode y) {
+            super(x, y);
+        }
+    }
+
+    @NodeInfo
+    static class SafeUnsignedRemNode extends UnsignedRemNode implements SafeNode {
+        public static final NodeClass<SafeUnsignedRemNode> TYPE = NodeClass.create(SafeUnsignedRemNode.class);
+
+        @SuppressWarnings("unused")
+        public SafeUnsignedRemNode(JavaKind kind, ValueNode x, ValueNode y) {
+            super(x, y);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements.aarch64/src/com/oracle/graal/replacements/aarch64/AArch64IntegerSubstitutions.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.replacements.aarch64;
+
+import com.oracle.graal.api.replacements.ClassSubstitution;
+import com.oracle.graal.api.replacements.MethodSubstitution;
+import com.oracle.graal.replacements.nodes.BitScanForwardNode;
+
+/**
+ * The AArch64 ISA offers a count leading zeros instruction, which can be used to implement
+ * numberOfLeadingZeros more efficiently than using BitScanReverse.
+ */
+@ClassSubstitution(Integer.class)
+public class AArch64IntegerSubstitutions {
+
+    @MethodSubstitution
+    public static int numberOfTrailingZeros(int i) {
+        return BitScanForwardNode.scan(i);
+    }
+
+    @MethodSubstitution
+    public static int bitCount(int value) {
+        // Based on Warren, Hacker's Delight, slightly adapted to exploit the AArch64
+        // add + shift instructions.
+        // Assuming the peephole optimizer fuses each x - (y >>> z) pattern into a
+        // single instruction, this takes 10 instructions.
+        int x = value;
+        x = x - ((x & 0xaaaaaaaa) >>> 1);                // 2-bit partial counts
+        x = (x & 0x33333333) + ((x & 0xcccccccc) >>> 2); // 4-bit partial counts
+        x = (x + (x >>> 4)) & 0x0f0f0f0f;                // 8-bit partial counts
+        x = x + (x >>> 8);                               // 16-bit partial counts
+        x = x + (x >>> 16);                              // total count in the low byte
+        return x & 0x3f;                                 // mask off the accumulated garbage
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements.aarch64/src/com/oracle/graal/replacements/aarch64/AArch64LongSubstitutions.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.replacements.aarch64;
+
+import com.oracle.graal.api.replacements.ClassSubstitution;
+import com.oracle.graal.api.replacements.MethodSubstitution;
+import com.oracle.graal.replacements.nodes.BitScanForwardNode;
+
+/**
+ * The AArch64 ISA offers a count leading zeros instruction, which can be used to implement
+ * numberOfLeadingZeros more efficiently than using BitScanReverse.
+ */
+@ClassSubstitution(Long.class)
+public class AArch64LongSubstitutions {
+
+    @MethodSubstitution
+    public static int numberOfTrailingZeros(long i) {
+        return BitScanForwardNode.scan(i);
+    }
+
+    @MethodSubstitution
+    public static int bitCount(long value) {
+        // Based on Warren, Hacker's Delight, slightly adapted to exploit the AArch64
+        // add + shift instructions.
+        // Assuming the peephole optimizer fuses each x - (y >>> z) pattern into a
+        // single instruction, this takes 11 instructions.
+        long x = value;
+        x = x - ((x & 0xaaaaaaaaaaaaaaaaL) >>> 1);                          // 2-bit partial counts
+        x = (x & 0x3333333333333333L) + ((x & 0xccccccccccccccccL) >>> 2);  // 4-bit partial counts
+        x = (x + (x >>> 4)) & 0x0f0f0f0f0f0f0f0fL;                          // 8-bit partial counts
+        x = x + (x >>> 8);                                                  // 16-bit partial counts
+        x = x + (x >>> 16);                                                 // 32-bit partial counts
+        x = x + (x >>> 32);                                                 // total count in the low byte
+        return (int) x & 0x7f;                                              // mask off the accumulated garbage
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements.aarch64/src/com/oracle/graal/replacements/aarch64/AArch64MathSubstitutions.java	Thu Dec 24 11:43:35 2015 -1000
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.graal.replacements.aarch64;
+
+import com.oracle.graal.api.replacements.ClassSubstitution;
+import com.oracle.graal.api.replacements.MethodSubstitution;
+
+/**
+ * Substitutions for {@link java.lang.Math} methods. AArch64 does not offer special instructions to
+ * implement these functions, so they are implemented either in Java or via standard C library calls.
+ */
+@ClassSubstitution(java.lang.Math.class)
+public class AArch64MathSubstitutions {
+
+    @MethodSubstitution
+    public static double log(double x) {
+        return StrictMath.log(x);
+    }
+
+    @MethodSubstitution
+    public static double log10(double x) {
+        return StrictMath.log10(x);
+    }
+
+    @MethodSubstitution
+    public static double sin(double x) {
+        return StrictMath.sin(x);
+    }
+
+    @MethodSubstitution
+    public static double cos(double x) {
+        return StrictMath.cos(x);
+    }
+
+    @MethodSubstitution
+    public static double tan(double x) {
+        return StrictMath.tan(x);
+    }
+
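+    // Note: the alternative route of calling standard C library functions via foreign
+    // calls is sketched (currently commented out) in
+    // AArch64GraphBuilderPlugins.registerMathPlugins.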
+}
--- a/mx.graal/suite.py	Wed Dec 23 14:36:50 2015 +0100
+++ b/mx.graal/suite.py	Thu Dec 24 11:43:35 2015 -1000
@@ -39,7 +39,7 @@
             {
                "name" : "jvmci",
                "optional" : "true",
-               "version" : "a130b51efb072b754f9ecad316dcda01bd2f0e9f",
+               "version" : "577a4a8caa72d06edaef3d40a4d24022df2a8e7c",
                "urls" : [
                     {"url" : "http://lafo.ssw.uni-linz.ac.at/hg/graal-jvmci-8", "kind" : "hg"},
                     {"url" : "https://curio.ssw.jku.at/nexus/content/repositories/snapshots", "kind" : "binary"},
@@ -277,6 +277,23 @@
       "workingSets" : "Graal,HotSpot",
     },
 
+    "com.oracle.graal.hotspot.aarch64" : {
+      "subDir" : "graal",
+      "sourceDirs" : ["src"],
+      "dependencies" : [
+        "com.oracle.graal.compiler.aarch64",
+        "com.oracle.graal.hotspot",
+        "com.oracle.graal.replacements.aarch64",
+      ],
+      "checkstyle" : "com.oracle.graal.graph",
+      "annotationProcessors" : deps([
+        "jvmci:JVMCI_SERVICE_PROCESSOR",
+        "GRAAL_NODEINFO_PROCESSOR"
+      ]),
+      "javaCompliance" : "1.8",
+      "workingSets" : "Graal,HotSpot,AArch64",
+    },
+
     "com.oracle.graal.hotspot.amd64" : {
       "subDir" : "graal",
       "sourceDirs" : ["src"],
@@ -321,6 +338,19 @@
       "workingSets" : "Graal,HotSpot,Test",
     },
 
+    "com.oracle.graal.hotspot.aarch64.test" : {
+      "subDir" : "graal",
+      "sourceDirs" : ["src"],
+      "dependencies" : [
+        "com.oracle.graal.asm.aarch64",
+        "com.oracle.graal.hotspot.test",
+      ],
+      "annotationProcessors" : ["GRAAL_NODEINFO_PROCESSOR"],
+      "checkstyle" : "com.oracle.graal.graph",
+      "javaCompliance" : "1.8",
+      "workingSets" : "Graal,HotSpot,AArch64,Test",
+    },
+
     "com.oracle.graal.hotspot.amd64.test" : {
       "subDir" : "graal",
       "sourceDirs" : ["src"],
@@ -392,6 +422,17 @@
       "workingSets" : "Graal,Assembler",
     },
 
+    "com.oracle.graal.asm.aarch64" : {
+      "subDir" : "graal",
+      "sourceDirs" : ["src"],
+      "dependencies" : [
+        "com.oracle.graal.asm",
+      ],
+      "checkstyle" : "com.oracle.graal.graph",
+      "javaCompliance" : "1.8",
+      "workingSets" : "Graal,Assembler,AArch64",
+    },
+
     "com.oracle.graal.asm.amd64" : {
       "subDir" : "graal",
       "sourceDirs" : ["src"],
@@ -435,6 +476,18 @@
       "workingSets" : "Graal,Assembler,Test",
     },
 
+    "com.oracle.graal.asm.aarch64.test" : {
+      "subDir" : "graal",
+      "sourceDirs" : ["src"],
+      "dependencies" : [
+        "com.oracle.graal.asm.test",
+        "com.oracle.graal.asm.aarch64",
+      ],
+      "checkstyle" : "com.oracle.graal.graph",
+      "javaCompliance" : "1.8",
+      "workingSets" : "Graal,Assembler,AArch64,Test",
+    },
+
     "com.oracle.graal.asm.amd64.test" : {
       "subDir" : "graal",
       "sourceDirs" : ["src"],
@@ -485,6 +538,19 @@
       "workingSets" : "Graal,LIR",
     },
 
+    "com.oracle.graal.lir.aarch64" : {
+      "subDir" : "graal",
+      "sourceDirs" : ["src"],
+      "dependencies" : [
+        "com.oracle.graal.lir",
+        "com.oracle.graal.asm.aarch64",
+      ],
+      "annotationProcessors" : deps(["GRAAL_OPTIONS_PROCESSOR"]),
+      "checkstyle" : "com.oracle.graal.graph",
+      "javaCompliance" : "1.8",
+      "workingSets" : "Graal,LIR,AArch64",
+    },
+
     "com.oracle.graal.lir.amd64" : {
       "subDir" : "graal",
       "sourceDirs" : ["src"],
@@ -539,6 +605,22 @@
       "workingSets" : "Graal,Replacements",
     },
 
+    "com.oracle.graal.replacements.aarch64" : {
+      "subDir" : "graal",
+      "sourceDirs" : ["src"],
+      "dependencies" : [
+          "com.oracle.graal.replacements",
+          "com.oracle.graal.lir.aarch64",
+          ],
+      "checkstyle" : "com.oracle.graal.graph",
+      "javaCompliance" : "1.8",
+      "annotationProcessors" : [
+        "GRAAL_NODEINFO_PROCESSOR",
+        "GRAAL_REPLACEMENTS_VERIFIER",
+      ],
+      "workingSets" : "Graal,Replacements,AArch64",
+    },
+
     "com.oracle.graal.replacements.amd64" : {
       "subDir" : "graal",
       "sourceDirs" : ["src"],
@@ -748,6 +830,36 @@
       "workingSets" : "Graal,Codegen",
     },
 
+    "com.oracle.graal.compiler.aarch64" : {
+      "subDir" : "graal",
+      "sourceDirs" : ["src"],
+      "dependencies" : [
+        "com.oracle.graal.compiler",
+        "com.oracle.graal.lir.aarch64",
+        "com.oracle.graal.java",
+      ],
+      "checkstyle" : "com.oracle.graal.graph",
+      "annotationProcessors" : deps([
+        "GRAAL_NODEINFO_PROCESSOR",
+        "GRAAL_COMPILER_MATCH_PROCESSOR",
+      ]),
+      "javaCompliance" : "1.8",
+      "workingSets" : "Graal,AArch64",
+    },
+
+    "com.oracle.graal.compiler.aarch64.test" : {
+      "subDir" : "graal",
+      "sourceDirs" : ["src"],
+      "dependencies" : deps([
+        "com.oracle.graal.lir.jtt",
+        "com.oracle.graal.lir.aarch64",
+        "jvmci:JVMCI_HOTSPOT"
+      ]),
+      "checkstyle" : "com.oracle.graal.graph",
+      "javaCompliance" : "1.8",
+      "workingSets" : "Graal,AArch64,Test",
+    },
+
     "com.oracle.graal.compiler.amd64" : {
       "subDir" : "graal",
       "sourceDirs" : ["src"],
@@ -1064,6 +1176,8 @@
         "com.oracle.graal.runtime",
         "com.oracle.graal.code",
         "com.oracle.graal.printer",
+        "com.oracle.graal.compiler.aarch64",
+        "com.oracle.graal.replacements.aarch64",
         "com.oracle.graal.compiler.amd64",
         "com.oracle.graal.replacements.amd64",
         "com.oracle.graal.compiler.sparc",
@@ -1080,6 +1194,7 @@
     "GRAAL_HOTSPOT" : {
       "subDir" : "graal",
       "dependencies" : [
+        "com.oracle.graal.hotspot.aarch64",
         "com.oracle.graal.hotspot.amd64",
         "com.oracle.graal.hotspot.sparc",
         "com.oracle.graal.hotspot",
@@ -1097,9 +1212,12 @@
       "dependencies" : [
         "com.oracle.graal.api.test",
         "com.oracle.graal.api.directives.test",
+        "com.oracle.graal.asm.aarch64.test",
         "com.oracle.graal.asm.amd64.test",
+        "com.oracle.graal.compiler.aarch64.test",
         "com.oracle.graal.compiler.amd64.test",
         "com.oracle.graal.compiler.sparc.test",
+        "com.oracle.graal.hotspot.aarch64.test",
         "com.oracle.graal.hotspot.amd64.test",
         "com.oracle.graal.options.test",
         "com.oracle.graal.jtt",