comparison graal/Compiler/src/com/sun/c1x/target/amd64/AMD64MacroAssembler.java @ 2507:9ec15d6914ca

Pull the compiler over from the Maxine repository.
author Thomas Wuerthinger <thomas@wuerthinger.net>
date Wed, 27 Apr 2011 11:43:22 +0200
parents
children
comparison: 2506:4a3bf8a5bf41 → 2507:9ec15d6914ca
/*
 * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package com.sun.c1x.target.amd64;

import com.sun.c1x.*;
import com.sun.c1x.asm.*;
import com.sun.c1x.globalstub.*;
import com.sun.c1x.lir.*;
import com.sun.c1x.util.*;
import com.sun.cri.ci.*;
import com.sun.cri.ri.*;
import com.sun.cri.xir.*;

/**
 * This class implements the AMD64-specific portion of the macro assembler.
 *
 * @author Thomas Wuerthinger
 * @author Ben L. Titzer
 */
public class AMD64MacroAssembler extends AMD64Assembler {

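    // Scratch register obtained from the register configuration; used by helpers below that need a temporary.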
    private final CiRegister rscratch1;

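    /**
     * A macro assembler bound to a {@link C1XCompiler} instance so that global stub lookups
     * can be delegated to the compiler.
     */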
    public static class WithCompiler extends AMD64MacroAssembler {

        private final C1XCompiler compiler;

        public WithCompiler(C1XCompiler compiler, RiRegisterConfig registerConfig) {
            super(compiler.target, registerConfig);
            this.compiler = compiler;
        }

        @Override
        public GlobalStub lookupGlobalStub(XirTemplate template) {
            return compiler.lookupGlobalStub(template);
        }
    }

    public AMD64MacroAssembler(CiTarget target, RiRegisterConfig registerConfig) {
        super(target, registerConfig);
        this.rscratch1 = registerConfig.getScratchRegister();
    }

    /**
     * Must be overridden if compiling code that makes calls to global stubs.
     */
    public GlobalStub lookupGlobalStub(XirTemplate template) {
        throw new IllegalArgumentException("This assembler does not support compiling calls to global stubs");
    }

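    /**
     * Stores the given arguments in the global stub's parameter slots, emits a direct call to the
     * stub and, if {@code result} is a real register, loads the stub's result back into it.
     * Returns the code buffer position associated with the call (as reported by directCall).
     */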
    public final int callGlobalStub(XirTemplate stub, LIRDebugInfo info, CiRegister result, CiValue... args) {
        assert args.length == stub.parameters.length;
        return callGlobalStubHelper(lookupGlobalStub(stub), stub.resultOperand.kind, info, result, args);
    }

    public final int callGlobalStub(GlobalStub stub, LIRDebugInfo info, CiRegister result, CiValue... args) {
        assert args.length == stub.argOffsets.length;
        return callGlobalStubHelper(stub, stub.resultKind, info, result, args);
    }

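    // Common path for both callGlobalStub variants: spill the arguments to the stub's stack slots,
    // emit the call, read back the result and (when assertion code is generated) zero the slots again.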
    private int callGlobalStubHelper(GlobalStub stub, CiKind resultKind, LIRDebugInfo info, CiRegister result, CiValue... args) {
        for (int i = 0; i < args.length; i++) {
            storeParameter(args[i], stub.argOffsets[i]);
        }

        int pos = directCall(stub.stubObject, info);

        if (result != CiRegister.None) {
            loadResult(result, stub.resultOffset, resultKind);
        }

        // Clear out parameters
        if (C1XOptions.GenAssertionCode) {
            for (int i = 0; i < args.length; i++) {
                movptr(new CiAddress(CiKind.Word, AMD64.RSP, stub.argOffsets[i]), 0);
            }
        }
        return pos;
    }

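    // Loads a stub result of the given kind from the stack slot at RSP + offset into register r.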
    private void loadResult(CiRegister r, int offset, CiKind kind) {
        if (kind == CiKind.Int || kind == CiKind.Boolean) {
            movl(r, new CiAddress(CiKind.Int, AMD64.RSP, offset));
        } else if (kind == CiKind.Float) {
            movss(r, new CiAddress(CiKind.Float, AMD64.RSP, offset));
        } else if (kind == CiKind.Double) {
            movsd(r, new CiAddress(CiKind.Double, AMD64.RSP, offset));
        } else {
            movq(r, new CiAddress(CiKind.Word, AMD64.RSP, offset));
        }
    }

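    // Stores a register or constant argument into the stack slot at RSP + offset, choosing the
    // move instruction according to the value's kind.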
    private void storeParameter(CiValue registerOrConstant, int offset) {
        CiKind k = registerOrConstant.kind;
        if (registerOrConstant.isConstant()) {
            CiConstant c = (CiConstant) registerOrConstant;
            if (c.kind == CiKind.Object) {
                movoop(new CiAddress(CiKind.Word, AMD64.RSP, offset), c);
            } else {
                movptr(new CiAddress(CiKind.Word, AMD64.RSP, offset), c.asInt());
            }
        } else if (registerOrConstant.isRegister()) {
            if (k.isFloat()) {
                movss(new CiAddress(CiKind.Float, AMD64.RSP, offset), registerOrConstant.asRegister());
            } else if (k.isDouble()) {
                movsd(new CiAddress(CiKind.Double, AMD64.RSP, offset), registerOrConstant.asRegister());
            } else {
                movq(new CiAddress(CiKind.Word, AMD64.RSP, offset), registerOrConstant.asRegister());
            }
        } else {
            Util.shouldNotReachHere();
        }
    }

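    /**
     * Moves an object constant into a register. Null becomes a register self-xor; otherwise the
     * constant is recorded as a data reference in the code and either inlined into the instruction
     * stream (the 0xDEADDEAD... value is a placeholder presumably patched via the recorded
     * reference) or loaded from the recorded data location.
     */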
    void movoop(CiRegister dst, CiConstant obj) {
        assert obj.kind == CiKind.Object;
        if (obj.isNull()) {
            xorq(dst, dst);
        } else {
            if (target.inlineObjects) {
                recordDataReferenceInCode(obj);
                movq(dst, 0xDEADDEADDEADDEADL);
            } else {
                movq(dst, recordDataReferenceInCode(obj));
            }
        }
    }

    void movoop(CiAddress dst, CiConstant obj) {
        movoop(rscratch1, obj);
        movq(dst, rscratch1);
    }

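    // Stores a 64-bit immediate to memory via the scratch register, since x86-64 has no
    // direct 64-bit-immediate-to-memory move.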
    void mov64(CiAddress dst, long src) {
        movq(rscratch1, src);
        movq(dst, rscratch1);
    }

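    // The *ptr helpers below operate on pointer-sized values; on AMD64 they simply delegate to
    // the corresponding 64-bit (q) instructions.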
    void pushptr(CiAddress src) {
        pushq(src);
    }

    void popptr(CiAddress src) {
        popq(src);
    }

    void xorptr(CiRegister dst, CiRegister src) {
        xorq(dst, src);
    }

    void xorptr(CiRegister dst, CiAddress src) {
        xorq(dst, src);
    }

    // 64-bit versions

    int correctedIdivq(CiRegister reg) {
        // Full implementation of Java ldiv and lrem; checks for the special
        // case described in the JVM spec (pp. 243 & 271). Returns the (pc)
        // offset of the idivq instruction - may be needed for implicit
        // exceptions.
        //
        //           normal case                          special case
        //
        // input : rax: dividend                          minLong
        //         reg: divisor (may not be rax/rdx)      -1
        //
        // output: rax: quotient  (= rax idiv reg)        minLong
        //         rdx: remainder (= rax irem reg)        0
        assert reg != AMD64.rax && reg != AMD64.rdx : "reg cannot be rax or rdx";
        final long minLong = 0x8000000000000000L;
        Label normalCase = new Label();
        Label specialCase = new Label();

        // check for special case
        cmpq(AMD64.rax, recordDataReferenceInCode(CiConstant.forLong(minLong)));
        jcc(AMD64Assembler.ConditionFlag.notEqual, normalCase);
        xorl(AMD64.rdx, AMD64.rdx); // prepare rdx for the possible special case (where remainder = 0)
        cmpq(reg, -1);
        jcc(AMD64Assembler.ConditionFlag.equal, specialCase);

        // handle normal case
        bind(normalCase);
        cdqq();
        int idivqOffset = codeBuffer.position();
        idivq(reg);

        // normal and special case exit
        bind(specialCase);

        return idivqOffset;
    }

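    /**
     * Subtracts (or adds) a constant from a 64-bit register, using the dec/inc forms when the
     * value is 1 and {@code C1XOptions.UseIncDec} is set. Negative values are forwarded to the
     * opposite operation; {@code Integer.MIN_VALUE} cannot be negated and is handled directly.
     */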
    void decrementq(CiRegister reg, int value) {
        if (value == Integer.MIN_VALUE) {
            subq(reg, value);
            return;
        }
        if (value < 0) {
            incrementq(reg, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && C1XOptions.UseIncDec) {
            decq(reg);
        } else {
            subq(reg, value);
        }
    }

    void incrementq(CiRegister reg, int value) {
        if (value == Integer.MIN_VALUE) {
            addq(reg, value);
            return;
        }
        if (value < 0) {
            decrementq(reg, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && C1XOptions.UseIncDec) {
            incq(reg);
        } else {
            addq(reg, value);
        }
    }

    // This is mostly used for initializing null
    void movptr(CiAddress dst, int src) {
        movslq(dst, src);
    }

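    // Debugging aid: when assertion code generation is enabled, emits a call to the runtime's
    // debug stub followed by a hlt. The message itself is not yet passed along (see the TODO).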
    void stop(String msg) {
        if (C1XOptions.GenAssertionCode) {
            // TODO: pass a pointer to the message
            directCall(CiRuntimeCall.Debug, null);
            hlt();
        }
    }

    public final void cmp32(CiRegister src1, int imm) {
        cmpl(src1, imm);
    }

    public final void cmp32(CiRegister src1, CiAddress src2) {
        cmpl(src1, src2);
    }

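    /**
     * Compares two double registers and materializes the result in {@code dst} as -1, 0 or 1,
     * following the semantics of the Java dcmpl/dcmpg bytecodes: an unordered result (NaN) yields
     * -1 when {@code unorderedIsLess} is true and 1 otherwise. The single-precision variant
     * follows below.
     */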
    void cmpsd2int(CiRegister opr1, CiRegister opr2, CiRegister dst, boolean unorderedIsLess) {
        assert opr1.isFpu() && opr2.isFpu();
        ucomisd(opr1, opr2);

        Label l = new Label();
        if (unorderedIsLess) {
            movl(dst, -1);
            jcc(AMD64Assembler.ConditionFlag.parity, l);
            jcc(AMD64Assembler.ConditionFlag.below, l);
            movl(dst, 0);
            jcc(AMD64Assembler.ConditionFlag.equal, l);
            incrementl(dst, 1);
        } else { // unordered is greater
            movl(dst, 1);
            jcc(AMD64Assembler.ConditionFlag.parity, l);
            jcc(AMD64Assembler.ConditionFlag.above, l);
            movl(dst, 0);
            jcc(AMD64Assembler.ConditionFlag.equal, l);
            decrementl(dst, 1);
        }
        bind(l);
    }

    void cmpss2int(CiRegister opr1, CiRegister opr2, CiRegister dst, boolean unorderedIsLess) {
        assert opr1.isFpu();
        assert opr2.isFpu();
        ucomiss(opr1, opr2);

        Label l = new Label();
        if (unorderedIsLess) {
            movl(dst, -1);
            jcc(AMD64Assembler.ConditionFlag.parity, l);
            jcc(AMD64Assembler.ConditionFlag.below, l);
            movl(dst, 0);
            jcc(AMD64Assembler.ConditionFlag.equal, l);
            incrementl(dst, 1);
        } else { // unordered is greater
            movl(dst, 1);
            jcc(AMD64Assembler.ConditionFlag.parity, l);
            jcc(AMD64Assembler.ConditionFlag.above, l);
            movl(dst, 0);
            jcc(AMD64Assembler.ConditionFlag.equal, l);
            decrementl(dst, 1);
        }
        bind(l);
    }

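    // Pointer-sized comparisons; on AMD64 these are 64-bit compares.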
    void cmpptr(CiRegister src1, CiRegister src2) {
        cmpq(src1, src2);
    }

    void cmpptr(CiRegister src1, CiAddress src2) {
        cmpq(src1, src2);
    }

    void cmpptr(CiRegister src1, int src2) {
        cmpq(src1, src2);
    }

    void cmpptr(CiAddress src1, int src2) {
        cmpq(src1, src2);
    }

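    // 32-bit counterparts of decrementq/incrementq, for registers and memory operands.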
    void decrementl(CiRegister reg, int value) {
        if (value == Integer.MIN_VALUE) {
            subl(reg, value);
            return;
        }
        if (value < 0) {
            incrementl(reg, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && C1XOptions.UseIncDec) {
            decl(reg);
        } else {
            subl(reg, value);
        }
    }

    void decrementl(CiAddress dst, int value) {
        if (value == Integer.MIN_VALUE) {
            subl(dst, value);
            return;
        }
        if (value < 0) {
            incrementl(dst, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && C1XOptions.UseIncDec) {
            decl(dst);
        } else {
            subl(dst, value);
        }
    }

    void incrementl(CiRegister reg, int value) {
        if (value == Integer.MIN_VALUE) {
            addl(reg, value);
            return;
        }
        if (value < 0) {
            decrementl(reg, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && C1XOptions.UseIncDec) {
            incl(reg);
        } else {
            addl(reg, value);
        }
    }

    void incrementl(CiAddress dst, int value) {
        if (value == Integer.MIN_VALUE) {
            addl(dst, value);
            return;
        }
        if (value < 0) {
            decrementl(dst, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && C1XOptions.UseIncDec) {
            incl(dst);
        } else {
            addl(dst, value);
        }
    }

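    // Sign-extends the low byte (or 16-bit word) of reg into the full 32-bit register. When the
    // register has no byte-addressable form, a shift-left/arithmetic-shift-right pair is used instead.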
    void signExtendByte(CiRegister reg) {
        if (reg.isByte()) {
            movsxb(reg, reg); // movsxb
        } else {
            shll(reg, 24);
            sarl(reg, 24);
        }
    }

    void signExtendShort(CiRegister reg) {
        movsxw(reg, reg); // movsxw
    }

    // Support optimal SSE move instructions.
    void movflt(CiRegister dst, CiRegister src) {
        assert dst.isFpu() && src.isFpu();
        if (C1XOptions.UseXmmRegToRegMoveAll) {
            movaps(dst, src);
        } else {
            movss(dst, src);
        }
    }

    void movflt(CiRegister dst, CiAddress src) {
        assert dst.isFpu();
        movss(dst, src);
    }

    void movflt(CiAddress dst, CiRegister src) {
        assert src.isFpu();
        movss(dst, src);
    }

    void movdbl(CiRegister dst, CiRegister src) {
        assert dst.isFpu() && src.isFpu();
        if (C1XOptions.UseXmmRegToRegMoveAll) {
            movapd(dst, src);
        } else {
            movsd(dst, src);
        }
    }

    void movdbl(CiRegister dst, CiAddress src) {
        assert dst.isFpu();
        if (C1XOptions.UseXmmLoadAndClearUpper) {
            movsd(dst, src);
        } else {
            movlpd(dst, src);
        }
    }

    void xchgptr(CiRegister src1, CiRegister src2) {
        xchgq(src1, src2);
    }

    public void shouldNotReachHere() {
        stop("should not reach here");
    }

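    /**
     * Emits an x86 ENTER instruction (opcode 0xC8) with the given 16-bit stack frame size and
     * 8-bit nesting level.
     */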
    public void enter(short imm16, byte imm8) {
        emitByte(0xC8);
        // emit the 16-bit operand (low byte first), then the 8-bit operand
        emitByte(imm16 & 0xff);
        imm16 >>= 8;
        emitByte(imm16 & 0xff);
        emitByte(imm8);
    }

    /**
     * Emit code to save a given set of callee save registers to the
     * {@linkplain CiCalleeSaveArea CSA} within the frame.
     * @param csa the description of the CSA
     * @param frameToCSA offset from the frame pointer to the CSA
     */
    public void save(CiCalleeSaveArea csa, int frameToCSA) {
        CiRegisterValue frame = frameRegister.asValue();
        for (CiRegister r : csa.registers) {
            int offset = csa.offsetOf(r);
            movq(new CiAddress(CiKind.Word, frame, frameToCSA + offset), r);
        }
    }

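    /**
     * Emit code to restore a given set of callee save registers from the
     * {@linkplain CiCalleeSaveArea CSA} within the frame.
     * @param csa the description of the CSA
     * @param frameToCSA offset from the frame pointer to the CSA
     */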
    public void restore(CiCalleeSaveArea csa, int frameToCSA) {
        CiRegisterValue frame = frameRegister.asValue();
        for (CiRegister r : csa.registers) {
            int offset = csa.offsetOf(r);
            movq(r, new CiAddress(CiKind.Word, frame, frameToCSA + offset));
        }
    }

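    // Emits an INT 3 breakpoint instruction (opcode 0xCC).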
    public void int3() {
        emitByte(0xCC);
    }
}