/*
 * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package com.oracle.graal.asm.amd64;

import jdk.internal.jvmci.amd64.*;
import jdk.internal.jvmci.code.*;
import jdk.internal.jvmci.meta.*;

import com.oracle.graal.asm.*;

import static com.oracle.graal.asm.amd64.AMD64AsmOptions.*;

/**
 * This class implements commonly used X86 code patterns.
 */
public class AMD64MacroAssembler extends AMD64Assembler {

    public AMD64MacroAssembler(TargetDescription target, RegisterConfig registerConfig) {
        super(target, registerConfig);
    }

    public final void decrementq(Register reg, int value) {
        if (value == Integer.MIN_VALUE) {
            subq(reg, value);
            return;
        }
        if (value < 0) {
            incrementq(reg, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && UseIncDec) {
            decq(reg);
        } else {
            subq(reg, value);
        }
    }

    public final void decrementq(AMD64Address dst, int value) {
        if (value == Integer.MIN_VALUE) {
            subq(dst, value);
            return;
        }
        if (value < 0) {
            incrementq(dst, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && UseIncDec) {
            decq(dst);
        } else {
            subq(dst, value);
        }
    }

    public void incrementq(Register reg, int value) {
        if (value == Integer.MIN_VALUE) {
            addq(reg, value);
            return;
        }
        if (value < 0) {
            decrementq(reg, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && UseIncDec) {
            incq(reg);
        } else {
            addq(reg, value);
        }
    }

    public final void incrementq(AMD64Address dst, int value) {
        if (value == Integer.MIN_VALUE) {
            addq(dst, value);
            return;
        }
        if (value < 0) {
            decrementq(dst, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && UseIncDec) {
            incq(dst);
        } else {
            addq(dst, value);
        }
    }
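    // Taken together, the increment/decrement helpers in this class (the 64-bit variants above
    // and the 32-bit variants further below) all follow the same pattern: a value of 1 emits
    // inc/dec when the UseIncDec option is enabled, any other positive value emits add/sub with
    // an immediate, a zero value emits nothing, a negative value is forwarded to the opposite
    // helper, and Integer.MIN_VALUE (whose negation overflows) always falls back to add/sub.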
    public final void movptr(Register dst, AMD64Address src) {
        movq(dst, src);
    }

    public final void movptr(AMD64Address dst, Register src) {
        movq(dst, src);
    }

    public final void movptr(AMD64Address dst, int src) {
        movslq(dst, src);
    }

    public final void cmpptr(Register src1, Register src2) {
        cmpq(src1, src2);
    }

    public final void cmpptr(Register src1, AMD64Address src2) {
        cmpq(src1, src2);
    }

    public final void decrementl(Register reg, int value) {
        if (value == Integer.MIN_VALUE) {
            subl(reg, value);
            return;
        }
        if (value < 0) {
            incrementl(reg, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && UseIncDec) {
            decl(reg);
        } else {
            subl(reg, value);
        }
    }

    public final void decrementl(AMD64Address dst, int value) {
        if (value == Integer.MIN_VALUE) {
            subl(dst, value);
            return;
        }
        if (value < 0) {
            incrementl(dst, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && UseIncDec) {
            decl(dst);
        } else {
            subl(dst, value);
        }
    }

    public final void incrementl(Register reg, int value) {
        if (value == Integer.MIN_VALUE) {
            addl(reg, value);
            return;
        }
        if (value < 0) {
            decrementl(reg, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && UseIncDec) {
            incl(reg);
        } else {
            addl(reg, value);
        }
    }

    public final void incrementl(AMD64Address dst, int value) {
        if (value == Integer.MIN_VALUE) {
            addl(dst, value);
            return;
        }
        if (value < 0) {
            decrementl(dst, -value);
            return;
        }
        if (value == 0) {
            return;
        }
        if (value == 1 && UseIncDec) {
            incl(dst);
        } else {
            addl(dst, value);
        }
    }

    public void movflt(Register dst, Register src) {
        assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
        if (UseXmmRegToRegMoveAll) {
            movaps(dst, src);
        } else {
            movss(dst, src);
        }
    }

    public void movflt(Register dst, AMD64Address src) {
        assert dst.getRegisterCategory().equals(AMD64.XMM);
        movss(dst, src);
    }

    public void movflt(AMD64Address dst, Register src) {
        assert src.getRegisterCategory().equals(AMD64.XMM);
        movss(dst, src);
    }

    public void movdbl(Register dst, Register src) {
        assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
        if (UseXmmRegToRegMoveAll) {
            movapd(dst, src);
        } else {
            movsd(dst, src);
        }
    }

    public void movdbl(Register dst, AMD64Address src) {
        assert dst.getRegisterCategory().equals(AMD64.XMM);
        if (UseXmmLoadAndClearUpper) {
            movsd(dst, src);
        } else {
            movlpd(dst, src);
        }
    }

    public void movdbl(AMD64Address dst, Register src) {
        assert src.getRegisterCategory().equals(AMD64.XMM);
        movsd(dst, src);
    }
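    // Note on the XMM move helpers above: register-to-register movflt/movdbl use movaps/movapd
    // (copying the full register) when the UseXmmRegToRegMoveAll option is set, and movss/movsd
    // otherwise; a movdbl load from memory uses movsd when UseXmmLoadAndClearUpper is set, and
    // movlpd otherwise.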
    /**
     * Non-atomic write of a 64-bit constant to memory. Do not use if the address might be a
     * volatile field!
     */
    public final void movlong(AMD64Address dst, long src) {
        if (NumUtil.isInt(src)) {
            AMD64MIOp.MOV.emit(this, OperandSize.QWORD, dst, (int) src);
        } else {
            // Split the store into two 32-bit moves: the low word at dst and the high word at dst + 4.
            AMD64Address high = new AMD64Address(dst.getBase(), dst.getIndex(), dst.getScale(), dst.getDisplacement() + 4);
            movl(dst, (int) (src & 0xFFFFFFFF));
            movl(high, (int) (src >> 32));
        }
    }

    public final void flog(Register dest, Register value, boolean base10) {
        if (base10) {
            fldlg2();
        } else {
            fldln2();
        }
        AMD64Address tmp = trigPrologue(value);
        fyl2x();
        trigEpilogue(dest, tmp);
    }

    public final void fsin(Register dest, Register value) {
        AMD64Address tmp = trigPrologue(value);
        fsin();
        trigEpilogue(dest, tmp);
    }

    public final void fcos(Register dest, Register value) {
        AMD64Address tmp = trigPrologue(value);
        fcos();
        trigEpilogue(dest, tmp);
    }

    public final void ftan(Register dest, Register value) {
        AMD64Address tmp = trigPrologue(value);
        fptan();
        fstp(0); // ftan pushes 1.0 in addition to the actual result, pop
        trigEpilogue(dest, tmp);
    }

    public final void fpop() {
        ffree(0);
        fincstp();
    }

    /**
     * Spills {@code value} to a freshly allocated stack slot and loads it onto the x87 stack.
     */
    private AMD64Address trigPrologue(Register value) {
        assert value.getRegisterCategory().equals(AMD64.XMM);
        AMD64Address tmp = new AMD64Address(AMD64.rsp);
        subq(AMD64.rsp, target.getSizeInBytes(Kind.Double));
        movdbl(tmp, value);
        fldd(tmp);
        return tmp;
    }

    /**
     * Stores the x87 result back through the stack slot into {@code dest} and releases the slot.
     */
    private void trigEpilogue(Register dest, AMD64Address tmp) {
        assert dest.getRegisterCategory().equals(AMD64.XMM);
        fstpd(tmp);
        movdbl(dest, tmp);
        addq(AMD64.rsp, target.getSizeInBytes(Kind.Double));
    }

    /**
     * Emit code to save a given set of callee save registers in the {@linkplain CalleeSaveLayout
     * CSA} within the frame.
     *
     * @param csl the description of the CSA
     * @param frameToCSA offset from the frame pointer to the CSA
     */
    public final void save(CalleeSaveLayout csl, int frameToCSA) {
        for (Register r : csl.registers) {
            int offset = csl.offsetOf(r);
            movq(new AMD64Address(frameRegister, frameToCSA + offset), r);
        }
    }

    /**
     * Emit code to restore a given set of callee save registers from the
     * {@linkplain CalleeSaveLayout CSA} within the frame.
     *
     * @param csl the description of the CSA
     * @param frameToCSA offset from the frame pointer to the CSA
     */
    public final void restore(CalleeSaveLayout csl, int frameToCSA) {
        for (Register r : csl.registers) {
            int offset = csl.offsetOf(r);
            movq(r, new AMD64Address(frameRegister, frameToCSA + offset));
        }
    }
}