comparison graal/com.oracle.jvmci.asm.amd64/src/com/oracle/jvmci/asm/amd64/AMD64MacroAssembler.java @ 21708:6df25b1418be

moved com.oracle.asm.** to jvmci-util.jar (JBS:GRAAL-53)
author Doug Simon <doug.simon@oracle.com>
date Wed, 03 Jun 2015 18:06:44 +0200
parents graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64MacroAssembler.java@5024c80224c7
/*
 * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package com.oracle.jvmci.asm.amd64;

import com.oracle.jvmci.amd64.*;
import com.oracle.jvmci.asm.*;
import com.oracle.jvmci.code.Register;
import com.oracle.jvmci.code.CalleeSaveLayout;
import com.oracle.jvmci.code.TargetDescription;
import com.oracle.jvmci.code.RegisterConfig;
import com.oracle.jvmci.meta.Kind;

import static com.oracle.jvmci.asm.amd64.AMD64AsmOptions.*;

/**
 * This class implements commonly used X86 code patterns.
 */
public class AMD64MacroAssembler extends AMD64Assembler {

    public AMD64MacroAssembler(TargetDescription target, RegisterConfig registerConfig) {
        super(target, registerConfig);
    }

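    /**
     * The increment/decrement helpers below share one pattern: a negative {@code value} is
     * negated and forwarded to the opposite operation, a zero {@code value} emits nothing, a
     * {@code value} of 1 uses the shorter INC/DEC encoding when {@code UseIncDec} is set, and
     * everything else falls back to ADD/SUB. {@link Integer#MIN_VALUE} is handled up front
     * because negating it overflows back to {@code Integer.MIN_VALUE}.
     */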
    public final void decrementq(Register reg, int value) {
        if (value == Integer.MIN_VALUE) {
            subq(reg, value);
            return;
        }
        if (value < 0) {
            incrementq(reg, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && UseIncDec) {
            decq(reg);
        } else {
            subq(reg, value);
        }
    }

    public final void decrementq(AMD64Address dst, int value) {
        if (value == Integer.MIN_VALUE) {
            subq(dst, value);
            return;
        }
        if (value < 0) {
            incrementq(dst, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && UseIncDec) {
            decq(dst);
        } else {
            subq(dst, value);
        }
    }

    public void incrementq(Register reg, int value) {
        if (value == Integer.MIN_VALUE) {
            addq(reg, value);
            return;
        }
        if (value < 0) {
            decrementq(reg, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && UseIncDec) {
            incq(reg);
        } else {
            addq(reg, value);
        }
    }

    public final void incrementq(AMD64Address dst, int value) {
        if (value == Integer.MIN_VALUE) {
            addq(dst, value);
            return;
        }
        if (value < 0) {
            decrementq(dst, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && UseIncDec) {
            incq(dst);
        } else {
            addq(dst, value);
        }
    }

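    // Pointer-sized aliases: this assembler targets AMD64, so pointer-width moves and compares
    // use the 64-bit (quadword) forms. The int variant stores a sign-extended 32-bit immediate
    // into a 64-bit slot.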
    public final void movptr(Register dst, AMD64Address src) {
        movq(dst, src);
    }

    public final void movptr(AMD64Address dst, Register src) {
        movq(dst, src);
    }

    public final void movptr(AMD64Address dst, int src) {
        movslq(dst, src);
    }

    public final void cmpptr(Register src1, Register src2) {
        cmpq(src1, src2);
    }

    public final void cmpptr(Register src1, AMD64Address src2) {
        cmpq(src1, src2);
    }

    public final void decrementl(Register reg, int value) {
        if (value == Integer.MIN_VALUE) {
            subl(reg, value);
            return;
        }
        if (value < 0) {
            incrementl(reg, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && UseIncDec) {
            decl(reg);
        } else {
            subl(reg, value);
        }
    }

    public final void decrementl(AMD64Address dst, int value) {
        if (value == Integer.MIN_VALUE) {
            subl(dst, value);
            return;
        }
        if (value < 0) {
            incrementl(dst, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && UseIncDec) {
            decl(dst);
        } else {
            subl(dst, value);
        }
    }

    public final void incrementl(Register reg, int value) {
        if (value == Integer.MIN_VALUE) {
            addl(reg, value);
            return;
        }
        if (value < 0) {
            decrementl(reg, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && UseIncDec) {
            incl(reg);
        } else {
            addl(reg, value);
        }
    }

    public final void incrementl(AMD64Address dst, int value) {
        if (value == Integer.MIN_VALUE) {
            addl(dst, value);
            return;
        }
        if (value < 0) {
            decrementl(dst, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && UseIncDec) {
            incl(dst);
        } else {
            addl(dst, value);
        }
    }

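    /**
     * Moves a single-precision value between two XMM registers. With
     * {@code UseXmmRegToRegMoveAll} the full-register MOVAPS is emitted, which avoids the
     * partial-register merge MOVSS incurs on some microarchitectures; otherwise MOVSS copies
     * just the low 32 bits.
     */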
    public void movflt(Register dst, Register src) {
        assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
        if (UseXmmRegToRegMoveAll) {
            movaps(dst, src);
        } else {
            movss(dst, src);
        }
    }

    public void movflt(Register dst, AMD64Address src) {
        assert dst.getRegisterCategory().equals(AMD64.XMM);
        movss(dst, src);
    }

    public void movflt(AMD64Address dst, Register src) {
        assert src.getRegisterCategory().equals(AMD64.XMM);
        movss(dst, src);
    }

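    /**
     * Moves a double-precision value between two XMM registers, preferring the full-register
     * MOVAPD under {@code UseXmmRegToRegMoveAll} for the same reason as
     * {@link #movflt(Register, Register)}. The load variant below likewise prefers MOVSD
     * (which zeroes the upper half of the destination) over MOVLPD (which merges into it)
     * when {@code UseXmmLoadAndClearUpper} is set.
     */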
    public void movdbl(Register dst, Register src) {
        assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
        if (UseXmmRegToRegMoveAll) {
            movapd(dst, src);
        } else {
            movsd(dst, src);
        }
    }

    public void movdbl(Register dst, AMD64Address src) {
        assert dst.getRegisterCategory().equals(AMD64.XMM);
        if (UseXmmLoadAndClearUpper) {
            movsd(dst, src);
        } else {
            movlpd(dst, src);
        }
    }

    public void movdbl(AMD64Address dst, Register src) {
        assert src.getRegisterCategory().equals(AMD64.XMM);
        movsd(dst, src);
    }

    /**
     * Non-atomic write of a 64-bit constant to memory. Do not use if the address might be a
     * volatile field!
     */
    public final void movlong(AMD64Address dst, long src) {
        if (NumUtil.isInt(src)) {
            AMD64MIOp.MOV.emit(this, OperandSize.QWORD, dst, (int) src);
        } else {
            AMD64Address high = new AMD64Address(dst.getBase(), dst.getIndex(), dst.getScale(), dst.getDisplacement() + 4);
            // Use a long mask literal: the int literal 0xFFFFFFFF would sign-extend to -1L.
            movl(dst, (int) (src & 0xFFFFFFFFL)); // low 32 bits
            movl(high, (int) (src >> 32)); // high 32 bits
        }
    }

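    // The transcendental helpers below route an XMM double through the x87 FPU: the value is
    // spilled to a stack slot (trigPrologue), the x87 instruction runs on ST(0), and the result
    // is stored back and reloaded into the destination XMM register (trigEpilogue). For flog,
    // FYL2X computes y * log2(x) with y preloaded as ln(2) or log10(2), yielding the natural or
    // base-10 logarithm.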
    public final void flog(Register dest, Register value, boolean base10) {
        if (base10) {
            fldlg2();
        } else {
            fldln2();
        }
        AMD64Address tmp = trigPrologue(value);
        fyl2x();
        trigEpilogue(dest, tmp);
    }

    public final void fsin(Register dest, Register value) {
        AMD64Address tmp = trigPrologue(value);
        fsin();
        trigEpilogue(dest, tmp);
    }

    public final void fcos(Register dest, Register value) {
        AMD64Address tmp = trigPrologue(value);
        fcos();
        trigEpilogue(dest, tmp);
    }

    public final void ftan(Register dest, Register value) {
        AMD64Address tmp = trigPrologue(value);
        fptan();
        fstp(0); // fptan pushes 1.0 in addition to the actual result, pop
        trigEpilogue(dest, tmp);
    }

    public final void fpop() {
        // Pop the x87 stack without storing: free ST(0), then advance the top-of-stack pointer.
        ffree(0);
        fincstp();
    }

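    /**
     * Spills {@code value} from its XMM register to a fresh stack slot and loads it onto the
     * x87 stack. Returns the address of the slot so the matching
     * {@link #trigEpilogue(Register, AMD64Address)} can reuse it.
     */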
    private AMD64Address trigPrologue(Register value) {
        assert value.getRegisterCategory().equals(AMD64.XMM);
        AMD64Address tmp = new AMD64Address(AMD64.rsp);
        subq(AMD64.rsp, target.getSizeInBytes(Kind.Double));
        movdbl(tmp, value);
        fldd(tmp);
        return tmp;
    }

    private void trigEpilogue(Register dest, AMD64Address tmp) {
        assert dest.getRegisterCategory().equals(AMD64.XMM);
        fstpd(tmp);
        movdbl(dest, tmp);
        addq(AMD64.rsp, target.getSizeInBytes(Kind.Double));
    }

    /**
     * Emit code to save a given set of callee save registers in the {@linkplain CalleeSaveLayout
     * CSA} within the frame.
     *
     * @param csl the description of the CSA
     * @param frameToCSA offset from the frame pointer to the CSA
     */
    public final void save(CalleeSaveLayout csl, int frameToCSA) {
        for (Register r : csl.registers) {
            int offset = csl.offsetOf(r);
            movq(new AMD64Address(frameRegister, frameToCSA + offset), r);
        }
    }

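    /**
     * Emit code to restore a given set of callee save registers from the
     * {@linkplain CalleeSaveLayout CSA} within the frame.
     *
     * @param csl the description of the CSA
     * @param frameToCSA offset from the frame pointer to the CSA
     */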
    public final void restore(CalleeSaveLayout csl, int frameToCSA) {
        for (Register r : csl.registers) {
            int offset = csl.offsetOf(r);
            movq(r, new AMD64Address(frameRegister, frameToCSA + offset));
        }
    }
}