graal/com.oracle.max.asm/src/com/oracle/max/asm/target/amd64/AMD64MacroAssembler.java @ 3733:e233f5660da4

Added Java files from Maxine project.
author Thomas Wuerthinger <thomas.wuerthinger@oracle.com>
date Sat, 17 Dec 2011 19:59:18 +0100
parents
children aaac4894175c
/*
 * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package com.oracle.max.asm.target.amd64;

import com.oracle.max.asm.*;
import com.sun.cri.ci.*;
import com.sun.cri.ri.*;

/**
 * This class implements commonly used AMD64 (x86-64) code patterns.
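 *
 * <p>A minimal usage sketch (assuming {@code target} and {@code registerConfig}
 * are supplied by the surrounding compiler setup):
 *
 * <pre>
 *     AMD64MacroAssembler masm = new AMD64MacroAssembler(target, registerConfig);
 *     masm.incrementq(AMD64.rax, 1); // emits incq when AsmOptions.UseIncDec is set
 * </pre>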
 */
public class AMD64MacroAssembler extends AMD64Assembler {

    public AMD64MacroAssembler(CiTarget target, RiRegisterConfig registerConfig) {
        super(target, registerConfig);
    }

    public void pushptr(CiAddress src) {
        pushq(src);
    }

    public void popptr(CiAddress src) {
        popq(src);
    }

    public void xorptr(CiRegister dst, CiRegister src) {
        xorq(dst, src);
    }

    public void xorptr(CiRegister dst, CiAddress src) {
        xorq(dst, src);
    }

    // 64-bit versions

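    // The increment/decrement helpers below (32- and 64-bit variants) pick a
    // compact encoding for the adjustment: inc/dec for 1 (when
    // AsmOptions.UseIncDec allows it), add/sub otherwise. Integer.MIN_VALUE is
    // handled up front because -Integer.MIN_VALUE overflows back to
    // Integer.MIN_VALUE and so cannot be forwarded to the opposite helper.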
    public void decrementq(CiRegister reg, int value) {
        if (value == Integer.MIN_VALUE) {
            subq(reg, value);
            return;
        }
        if (value < 0) {
            incrementq(reg, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && AsmOptions.UseIncDec) {
            decq(reg);
        } else {
            subq(reg, value);
        }
    }

    public void incrementq(CiRegister reg, int value) {
        if (value == Integer.MIN_VALUE) {
            addq(reg, value);
            return;
        }
        if (value < 0) {
            decrementq(reg, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && AsmOptions.UseIncDec) {
            incq(reg);
        } else {
            addq(reg, value);
        }
    }

    // These are mostly used for storing null: a 32-bit immediate that is
    // sign-extended to 64 bits in memory.
    public void movptr(CiAddress dst, int src) {
        movslq(dst, src);
    }

    public final void cmp32(CiRegister src1, int imm) {
        cmpl(src1, imm);
    }

    public final void cmp32(CiRegister src1, CiAddress src2) {
        cmpl(src1, src2);
    }

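    /**
     * Compares two doubles with {@code ucomisd} and materializes -1, 0 or 1
     * in {@code dst}, analogous to {@link Double#compare}. {@code ucomisd}
     * sets the parity flag exactly when the inputs are unordered (at least
     * one is NaN); {@code unorderedIsLess} decides whether that case maps
     * to -1 or 1.
     */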
    public void cmpsd2int(CiRegister opr1, CiRegister opr2, CiRegister dst, boolean unorderedIsLess) {
        assert opr1.isFpu() && opr2.isFpu();
        ucomisd(opr1, opr2);

        Label l = new Label();
        if (unorderedIsLess) {
            movl(dst, -1);
            jcc(AMD64Assembler.ConditionFlag.parity, l);
            jcc(AMD64Assembler.ConditionFlag.below, l);
            movl(dst, 0);
            jcc(AMD64Assembler.ConditionFlag.equal, l);
            incrementl(dst, 1);
        } else { // unordered is greater
            movl(dst, 1);
            jcc(AMD64Assembler.ConditionFlag.parity, l);
            jcc(AMD64Assembler.ConditionFlag.above, l);
            movl(dst, 0);
            jcc(AMD64Assembler.ConditionFlag.equal, l);
            decrementl(dst, 1);
        }
        bind(l);
    }

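    /**
     * Single-precision variant of {@link #cmpsd2int}: compares with
     * {@code ucomiss} and materializes -1, 0 or 1 in {@code dst}.
     */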
    public void cmpss2int(CiRegister opr1, CiRegister opr2, CiRegister dst, boolean unorderedIsLess) {
        assert opr1.isFpu();
        assert opr2.isFpu();
        ucomiss(opr1, opr2);

        Label l = new Label();
        if (unorderedIsLess) {
            movl(dst, -1);
            jcc(AMD64Assembler.ConditionFlag.parity, l);
            jcc(AMD64Assembler.ConditionFlag.below, l);
            movl(dst, 0);
            jcc(AMD64Assembler.ConditionFlag.equal, l);
            incrementl(dst, 1);
        } else { // unordered is greater
            movl(dst, 1);
            jcc(AMD64Assembler.ConditionFlag.parity, l);
            jcc(AMD64Assembler.ConditionFlag.above, l);
            movl(dst, 0);
            jcc(AMD64Assembler.ConditionFlag.equal, l);
            decrementl(dst, 1);
        }
        bind(l);
    }

    public void cmpptr(CiRegister src1, CiRegister src2) {
        cmpq(src1, src2);
    }

    public void cmpptr(CiRegister src1, CiAddress src2) {
        cmpq(src1, src2);
    }

    public void cmpptr(CiRegister src1, int src2) {
        cmpq(src1, src2);
    }

    public void cmpptr(CiAddress src1, int src2) {
        cmpq(src1, src2);
    }

    public void decrementl(CiRegister reg, int value) {
        if (value == Integer.MIN_VALUE) {
            subl(reg, value);
            return;
        }
        if (value < 0) {
            incrementl(reg, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && AsmOptions.UseIncDec) {
            decl(reg);
        } else {
            subl(reg, value);
        }
    }

    public void decrementl(CiAddress dst, int value) {
        if (value == Integer.MIN_VALUE) {
            subl(dst, value);
            return;
        }
        if (value < 0) {
            incrementl(dst, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && AsmOptions.UseIncDec) {
            decl(dst);
        } else {
            subl(dst, value);
        }
    }

    public void incrementl(CiRegister reg, int value) {
        if (value == Integer.MIN_VALUE) {
            addl(reg, value);
            return;
        }
        if (value < 0) {
            decrementl(reg, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && AsmOptions.UseIncDec) {
            incl(reg);
        } else {
            addl(reg, value);
        }
    }

    public void incrementl(CiAddress dst, int value) {
        if (value == Integer.MIN_VALUE) {
            addl(dst, value);
            return;
        }
        if (value < 0) {
            decrementl(dst, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && AsmOptions.UseIncDec) {
            incl(dst);
        } else {
            addl(dst, value);
        }
    }

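    // If the register has no 8-bit form, the byte is sign-extended by
    // shifting it into the top of the 32-bit register and arithmetically
    // shifting it back down.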
    public void signExtendByte(CiRegister reg) {
        if (reg.isByte()) {
            movsxb(reg, reg);
        } else {
            shll(reg, 24);
            sarl(reg, 24);
        }
    }

    public void signExtendShort(CiRegister reg) {
        movsxw(reg, reg);
    }

    // Support optimal SSE move instructions.
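    // A full-width movaps/movapd copy avoids the false dependency on the
    // destination's upper bits that a movss/movsd register-to-register move
    // would create.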
    public void movflt(CiRegister dst, CiRegister src) {
        assert dst.isFpu() && src.isFpu();
        if (AsmOptions.UseXmmRegToRegMoveAll) {
            movaps(dst, src);
        } else {
            movss(dst, src);
        }
    }

    public void movflt(CiRegister dst, CiAddress src) {
        assert dst.isFpu();
        movss(dst, src);
    }

    public void movflt(CiAddress dst, CiRegister src) {
        assert src.isFpu();
        movss(dst, src);
    }

    public void movdbl(CiRegister dst, CiRegister src) {
        assert dst.isFpu() && src.isFpu();
        if (AsmOptions.UseXmmRegToRegMoveAll) {
            movapd(dst, src);
        } else {
            movsd(dst, src);
        }
    }

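    // A movsd load clears the upper 64 bits of dst, breaking any dependency
    // on its previous contents; movlpd preserves them.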
    public void movdbl(CiRegister dst, CiAddress src) {
        assert dst.isFpu();
        if (AsmOptions.UseXmmLoadAndClearUpper) {
            movsd(dst, src);
        } else {
            movlpd(dst, src);
        }
    }

    public void movdbl(CiAddress dst, CiRegister src) {
        assert src.isFpu();
        movsd(dst, src);
    }

    /**
     * Non-atomic write of a 64-bit constant to memory. Do not use
     * if the address might be a volatile field!
     */
    public void movlong(CiAddress dst, long src) {
        CiAddress high = new CiAddress(dst.kind, dst.base, dst.index, dst.scale, dst.displacement + 4);
        movl(dst, (int) (src & 0xFFFFFFFFL)); // low 32 bits
        movl(high, (int) (src >> 32));        // high 32 bits
    }

    public void xchgptr(CiRegister src1, CiRegister src2) {
        xchgq(src1, src2);
    }

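    /**
     * Computes the natural (or base-10) logarithm of {@code value} on the x87
     * unit: {@code fldln2}/{@code fldlg2} pushes log(2), then {@code fyl2x}
     * evaluates log(2) * log2(x) = log(x). The SSE value is staged through a
     * stack slot because the x87 unit cannot read XMM registers directly.
     */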
    public void flog(CiRegister dest, CiRegister value, boolean base10) {
        assert value.spillSlotSize == dest.spillSlotSize;

        CiAddress tmp = new CiAddress(CiKind.Double, AMD64.RSP);
        if (base10) {
            fldlg2();
        } else {
            fldln2();
        }
        subq(AMD64.rsp, value.spillSlotSize);
        movsd(tmp, value);
        fld(tmp);
        fyl2x();
        fstp(tmp);
        movsd(dest, tmp);
        addq(AMD64.rsp, dest.spillSlotSize);
    }

    public void fsin(CiRegister dest, CiRegister value) {
        ftrig(dest, value, 's');
    }

    public void fcos(CiRegister dest, CiRegister value) {
        ftrig(dest, value, 'c');
    }

    public void ftan(CiRegister dest, CiRegister value) {
        ftrig(dest, value, 't');
    }

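    // Evaluates sin/cos/tan on the x87 stack: the SSE value is spilled to a
    // stack slot, loaded with fld, transformed, and stored back. fptan also
    // pushes a constant 1.0, which is popped before storing the result.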
    private void ftrig(CiRegister dest, CiRegister value, char op) {
        assert value.spillSlotSize == dest.spillSlotSize;

        CiAddress tmp = new CiAddress(CiKind.Double, AMD64.RSP);
        subq(AMD64.rsp, value.spillSlotSize);
        movsd(tmp, value);
        fld(tmp);
        if (op == 's') {
            fsin();
        } else if (op == 'c') {
            fcos();
        } else if (op == 't') {
            fptan();
            fstp(0); // fptan pushes 1.0 in addition to the actual result, pop it
        } else {
            throw new InternalError("should not reach here");
        }
        fstp(tmp);
        movsd(dest, tmp);
        addq(AMD64.rsp, dest.spillSlotSize);
    }

    /**
     * Emit code to save a given set of callee-save registers in the
     * {@linkplain CiCalleeSaveLayout CSA} within the frame.
     *
     * @param csl the description of the CSA
     * @param frameToCSA offset from the frame pointer to the CSA
     */
    public void save(CiCalleeSaveLayout csl, int frameToCSA) {
        CiRegisterValue frame = frameRegister.asValue();
        for (CiRegister r : csl.registers) {
            int offset = csl.offsetOf(r);
            movq(new CiAddress(target.wordKind, frame, frameToCSA + offset), r);
        }
    }

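    /**
     * Emit code to restore a given set of callee-save registers from the
     * {@linkplain CiCalleeSaveLayout CSA} within the frame.
     *
     * @param csl the description of the CSA
     * @param frameToCSA offset from the frame pointer to the CSA
     */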
    public void restore(CiCalleeSaveLayout csl, int frameToCSA) {
        CiRegisterValue frame = frameRegister.asValue();
        for (CiRegister r : csl.registers) {
            int offset = csl.offsetOf(r);
            movq(r, new CiAddress(target.wordKind, frame, frameToCSA + offset));
        }
    }
}