/*
 * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
|
|
23 package com.oracle.graal.hotspot.aarch64;
|
|
24
|
|
25 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.HINT;
|
|
26 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.ILLEGAL;
|
|
27 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.REG;
|
23349
|
28 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.STACK;
|
23217
|
29 import static jdk.vm.ci.code.ValueUtil.asRegister;
|
|
30
|
|
31 import com.oracle.graal.asm.aarch64.AArch64Assembler;
|
|
32 import com.oracle.graal.asm.aarch64.AArch64MacroAssembler;
|
|
33 import com.oracle.graal.lir.LIRInstructionClass;
|
23349
|
34 import com.oracle.graal.lir.StandardOp.LoadConstantOp;
|
23217
|
35 import com.oracle.graal.lir.aarch64.AArch64LIRInstruction;
|
|
36 import com.oracle.graal.lir.asm.CompilationResultBuilder;
|
|
37
|
|
38 import jdk.vm.ci.code.Register;
|
23349
|
39 import jdk.vm.ci.common.JVMCIError;
|
|
40 import jdk.vm.ci.hotspot.HotSpotConstant;
|
23217
|
41 import jdk.vm.ci.hotspot.HotSpotVMConfig.CompressEncoding;
|
|
42 import jdk.vm.ci.meta.AllocatableValue;
|
23349
|
43 import jdk.vm.ci.meta.Constant;
|
23217
|
44
|
|
45 public class AArch64HotSpotMove {
|
|
46
|
23349
|
47 public static class LoadHotSpotObjectConstantInline extends AArch64LIRInstruction implements LoadConstantOp {
|
|
48 public static final LIRInstructionClass<LoadHotSpotObjectConstantInline> TYPE = LIRInstructionClass.create(LoadHotSpotObjectConstantInline.class);
|
|
49
|
|
50 private HotSpotConstant constant;
|
|
51 @Def({REG, STACK}) AllocatableValue result;
|
|
52
|
|
53 public LoadHotSpotObjectConstantInline(HotSpotConstant constant, AllocatableValue result) {
|
|
54 super(TYPE);
|
|
55 this.constant = constant;
|
|
56 this.result = result;
|
|
57 }
|
|
58
|
|
59 @Override
|
|
60 protected void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
|
|
61 crb.recordInlineDataInCode(constant);
|
|
62 if (constant.isCompressed()) {
|
|
63 // masm.forceMov(asRegister(result), 0);
|
|
64 throw JVMCIError.unimplemented();
|
|
65 } else {
|
|
66 masm.forceMov(asRegister(result), 0);
|
|
67 }
|
|
68 }
|
|
69
|
|
70 @Override
|
|
71 public AllocatableValue getResult() {
|
|
72 return result;
|
|
73 }
|
|
74
|
|
75 @Override
|
|
76 public Constant getConstant() {
|
|
77 return constant;
|
|
78 }
|
|
79 }
|
|
80
|
23217
|
81 /**
|
|
82 * Compresses a 8-byte pointer as a 4-byte int.
|
|
83 */
|
|
84 public static class CompressPointer extends AArch64LIRInstruction {
|
|
85 public static final LIRInstructionClass<CompressPointer> TYPE = LIRInstructionClass.create(CompressPointer.class);
|
|
86
|
|
87 private final CompressEncoding encoding;
|
|
88 private final boolean nonNull;
|
|
89
|
|
90 @Def({REG, HINT}) protected AllocatableValue result;
|
|
91 @Use({REG}) protected AllocatableValue input;
|
|
92 @Alive({REG, ILLEGAL}) protected AllocatableValue baseRegister;
|
|
93
|
|
94 public CompressPointer(AllocatableValue result, AllocatableValue input, AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull) {
|
|
95 super(TYPE);
|
|
96 this.result = result;
|
|
97 this.input = input;
|
|
98 this.baseRegister = baseRegister;
|
|
99 this.encoding = encoding;
|
|
100 this.nonNull = nonNull;
|
|
101 }
|
|
102
|
|
103 @Override
|
|
104 public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
|
|
105 Register resultRegister = asRegister(result);
|
|
106 Register ptr = asRegister(input);
|
|
107 Register base = asRegister(baseRegister);
|
|
108 // result = (ptr - base) >> shift
|
|
109 if (encoding.base == 0) {
|
|
110 if (encoding.shift == 0) {
|
|
111 masm.movx(resultRegister, ptr);
|
|
112 } else {
|
|
113 assert encoding.alignment == encoding.shift : "Encode algorithm is wrong";
|
|
114 masm.lshr(64, resultRegister, ptr, encoding.shift);
|
|
115 }
|
|
116 } else if (nonNull) {
|
|
117 masm.sub(64, resultRegister, ptr, base);
|
|
118 if (encoding.shift != 0) {
|
|
119 assert encoding.alignment == encoding.shift : "Encode algorithm is wrong";
|
|
120 masm.shl(64, resultRegister, resultRegister, encoding.shift);
|
|
121 }
|
|
122 } else {
|
|
123 // if ptr is null it still has to be null after compression
|
|
124 masm.cmp(64, ptr, 0);
|
|
125 masm.cmov(64, resultRegister, ptr, base, AArch64Assembler.ConditionFlag.NE);
|
|
126 masm.sub(64, resultRegister, resultRegister, base);
|
|
127 if (encoding.shift != 0) {
|
|
128 assert encoding.alignment == encoding.shift : "Encode algorithm is wrong";
|
|
129 masm.lshr(64, resultRegister, resultRegister, encoding.shift);
|
|
130 }
|
|
131 }
|
|
132 }
|
|
133 }
|
|
134
|
|
135 /**
|
|
136 * Decompresses a 4-byte offset into an actual pointer.
|
|
137 */
|
|
138 public static class UncompressPointer extends AArch64LIRInstruction {
|
|
139 public static final LIRInstructionClass<UncompressPointer> TYPE = LIRInstructionClass.create(UncompressPointer.class);
|
|
140
|
|
141 private final CompressEncoding encoding;
|
|
142 private final boolean nonNull;
|
|
143
|
|
144 @Def({REG}) protected AllocatableValue result;
|
|
145 @Use({REG}) protected AllocatableValue input;
|
|
146 @Alive({REG, ILLEGAL}) protected AllocatableValue baseRegister;
|
|
147
|
|
148 public UncompressPointer(AllocatableValue result, AllocatableValue input, AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull) {
|
|
149 super(TYPE);
|
|
150 this.result = result;
|
|
151 this.input = input;
|
|
152 this.baseRegister = baseRegister;
|
|
153 this.encoding = encoding;
|
|
154 this.nonNull = nonNull;
|
|
155 }
|
|
156
|
|
157 @Override
|
|
158 public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
|
|
159 Register ptr = asRegister(input);
|
|
160 Register resultRegister = asRegister(result);
|
|
161 Register base = asRegister(baseRegister);
|
|
162 // result = base + (ptr << shift)
|
|
163 if (nonNull) {
|
|
164 assert encoding.shift == encoding.alignment;
|
|
165 masm.add(64, resultRegister, base, ptr, AArch64Assembler.ShiftType.ASR, encoding.shift);
|
|
166 } else {
|
|
167 // if ptr is null it has to be null after decompression
|
|
168 // masm.cmp(64, );
|
23349
|
169 throw JVMCIError.unimplemented();
|
23217
|
170 }
|
|
171
|
|
172 }
|
|
173 }
|
|
174
|
|
175 //
|
|
176 // private static void decompressPointer(CompilationResultBuilder crb, ARMv8MacroAssembler masm,
|
|
177 // Register result,
|
|
178 // Register ptr, long base, int shift, int alignment) {
|
|
179 // assert base != 0 || shift == 0 || alignment == shift;
|
|
180 // // result = heapBase + ptr << alignment
|
|
181 // Register heapBase = ARMv8.heapBaseRegister;
|
|
182 // // if result == 0, we make sure that it will still be 0 at the end, so that it traps when
|
|
183 // // loading storing a value.
|
|
184 // masm.cmp(32, ptr, 0);
|
|
185 // masm.add(64, result, heapBase, ptr, ARMv8Assembler.ExtendType.UXTX, alignment);
|
|
186 // masm.cmov(64, result, result, ARMv8.zr, ARMv8Assembler.ConditionFlag.NE);
|
|
187 // }
|
|
188
|
|
189 public static void decodeKlassPointer(AArch64MacroAssembler masm, Register result, Register ptr, Register klassBase, CompressEncoding encoding) {
|
|
190 // result = klassBase + ptr << shift
|
|
191 if (encoding.shift != 0 || encoding.base != 0) {
|
23349
|
192 // (shift != 0 -> shift == alignment)
|
|
193 assert (encoding.shift == 0 || encoding.shift == encoding.alignment) : "Decode algorithm is wrong: " + encoding;
|
23217
|
194 masm.add(64, result, klassBase, ptr, AArch64Assembler.ExtendType.UXTX, encoding.shift);
|
|
195 }
|
|
196 }
|
|
197
|
|
198 }
|