/*
 * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package com.oracle.graal.hotspot.amd64;

import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
import static jdk.internal.jvmci.code.ValueUtil.*;
import jdk.internal.jvmci.code.*;
import jdk.internal.jvmci.common.*;
import jdk.internal.jvmci.hotspot.*;
import jdk.internal.jvmci.hotspot.HotSpotVMConfig.*;
import jdk.internal.jvmci.meta.*;

import com.oracle.graal.asm.*;
import com.oracle.graal.asm.amd64.*;
import com.oracle.graal.asm.amd64.AMD64Assembler.*;
import com.oracle.graal.compiler.common.*;
import com.oracle.graal.hotspot.*;
import com.oracle.graal.lir.*;
import com.oracle.graal.lir.StandardOp.MoveOp;
import com.oracle.graal.lir.StandardOp.StackStoreOp;
import com.oracle.graal.lir.amd64.*;
import com.oracle.graal.lir.asm.*;

public class AMD64HotSpotMove {

    /**
     * Loads a HotSpot object constant into a register or stack slot, either inlined directly into
     * the code stream (as a placeholder value to be patched by the VM) or via a RIP-relative load
     * from the data section.
     */
    public static final class HotSpotLoadObjectConstantOp extends AMD64LIRInstruction implements MoveOp {
        public static final LIRInstructionClass<HotSpotLoadObjectConstantOp> TYPE = LIRInstructionClass.create(HotSpotLoadObjectConstantOp.class);

        @Def({REG, STACK}) private AllocatableValue result;
        private final HotSpotObjectConstant input;

        public HotSpotLoadObjectConstantOp(AllocatableValue result, HotSpotObjectConstant input) {
            super(TYPE);
            this.result = result;
            this.input = input;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            boolean compressed = input.isCompressed();
            if (crb.target.inlineObjects) {
                crb.recordInlineDataInCode(input);
                if (isRegister(result)) {
                    if (compressed) {
                        masm.movl(asRegister(result), 0xDEADDEAD);
                    } else {
                        masm.movq(asRegister(result), 0xDEADDEADDEADDEADL);
                    }
                } else {
                    assert isStackSlot(result);
                    if (compressed) {
                        masm.movl((AMD64Address) crb.asAddress(result), 0xDEADDEAD);
                    } else {
                        throw JVMCIError.shouldNotReachHere("Cannot store 64-bit constants to memory");
                    }
                }
            } else {
                if (isRegister(result)) {
                    AMD64Address address = (AMD64Address) crb.recordDataReferenceInCode(input, compressed ? 4 : 8);
                    if (compressed) {
                        masm.movl(asRegister(result), address);
                    } else {
                        masm.movq(asRegister(result), address);
                    }
                } else {
                    throw JVMCIError.shouldNotReachHere("Cannot directly store data patch to memory");
                }
            }
        }

        public Value getInput() {
            return input;
        }

        public AllocatableValue getResult() {
            return result;
        }
    }

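    /**
     * Loads a HotSpot metaspace constant (e.g. a Klass* or Method* pointer) into a register or
     * stack slot, recording it as inline data so the VM can relocate the embedded value.
     */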
    public static final class HotSpotLoadMetaspaceConstantOp extends AMD64LIRInstruction implements MoveOp {
        public static final LIRInstructionClass<HotSpotLoadMetaspaceConstantOp> TYPE = LIRInstructionClass.create(HotSpotLoadMetaspaceConstantOp.class);

        @Def({REG, STACK}) private AllocatableValue result;
        private final HotSpotMetaspaceConstant input;

        public HotSpotLoadMetaspaceConstantOp(AllocatableValue result, HotSpotMetaspaceConstant input) {
            super(TYPE);
            this.result = result;
            this.input = input;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            boolean compressed = input.isCompressed();
            boolean isImmutable = GraalOptions.ImmutableCode.getValue();
            boolean generatePIC = GraalOptions.GeneratePIC.getValue();
            crb.recordInlineDataInCode(input);
            if (isRegister(result)) {
                if (compressed) {
                    if (isImmutable && generatePIC) {
                        Kind hostWordKind = HotSpotGraalRuntime.getHostWordKind();
                        int alignment = hostWordKind.getBitCount() / Byte.SIZE;
                        // recordDataReferenceInCode forces the mov to be rip-relative
                        masm.movl(asRegister(result), (AMD64Address) crb.recordDataReferenceInCode(JavaConstant.INT_0, alignment));
                    } else {
                        assert NumUtil.isInt(input.rawValue());
                        masm.movl(asRegister(result), (int) input.rawValue());
                    }
                } else {
                    if (isImmutable && generatePIC) {
                        Kind hostWordKind = HotSpotGraalRuntime.getHostWordKind();
                        int alignment = hostWordKind.getBitCount() / Byte.SIZE;
                        // recordDataReferenceInCode forces the mov to be rip-relative
                        masm.movq(asRegister(result), (AMD64Address) crb.recordDataReferenceInCode(JavaConstant.INT_0, alignment));
                    } else {
                        masm.movq(asRegister(result), input.rawValue());
                    }
                }
            } else {
                assert isStackSlot(result);
                if (compressed) {
                    if (isImmutable && generatePIC) {
                        throw JVMCIError.shouldNotReachHere("Unsupported operation offset(%rip) -> mem (mem -> mem)");
                    } else {
                        assert NumUtil.isInt(input.rawValue());
                        masm.movl((AMD64Address) crb.asAddress(result), (int) input.rawValue());
                    }
                } else {
                    throw JVMCIError.shouldNotReachHere("Cannot store 64-bit constants to memory");
                }
            }
        }

        public Value getInput() {
            return (Value) input;
        }

        public AllocatableValue getResult() {
            return result;
        }
    }

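    /**
     * Encodes an uncompressed pointer: subtracts the heap (or metaspace) base if the encoding uses
     * one, then shifts right by the encoding's shift amount. For nullable inputs, null is first
     * mapped to the base via cmov so that it encodes to 0.
     */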
    public static final class CompressPointer extends AMD64LIRInstruction {
        public static final LIRInstructionClass<CompressPointer> TYPE = LIRInstructionClass.create(CompressPointer.class);

        private final CompressEncoding encoding;
        private final boolean nonNull;

        @Def({REG, HINT}) protected AllocatableValue result;
        @Use({REG}) protected AllocatableValue input;
        @Alive({REG, ILLEGAL}) protected AllocatableValue baseRegister;

        public CompressPointer(AllocatableValue result, AllocatableValue input, AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull) {
            super(TYPE);
            this.result = result;
            this.input = input;
            this.baseRegister = baseRegister;
            this.encoding = encoding;
            this.nonNull = nonNull;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            AMD64Move.move(Kind.Long, crb, masm, result, input);

            Register resReg = asRegister(result);
            if (encoding.base != 0) {
                Register baseReg = asRegister(baseRegister);
                if (!nonNull) {
                    masm.testq(resReg, resReg);
                    masm.cmovq(ConditionFlag.Equal, resReg, baseReg);
                }
                masm.subq(resReg, baseReg);
            }

            if (encoding.shift != 0) {
                masm.shrq(resReg, encoding.shift);
            }
        }
    }

    /**
     * Moves {@code input} to {@code result} and additionally spills it to {@code stackSlot}; as
     * the name suggests, this is used to save the RBP register's value.
     */
    public static final class StoreRbpOp extends AMD64LIRInstruction implements StackStoreOp {
        public static final LIRInstructionClass<StoreRbpOp> TYPE = LIRInstructionClass.create(StoreRbpOp.class);

        @Def({REG, HINT}) protected AllocatableValue result;
        @Use({REG}) protected AllocatableValue input;
        @Def({STACK}) protected StackSlotValue stackSlot;

        protected StoreRbpOp(AllocatableValue result, AllocatableValue input, StackSlotValue stackSlot) {
            super(TYPE);
            assert result.getLIRKind().equals(input.getLIRKind()) && stackSlot.getLIRKind().equals(input.getLIRKind()) : String.format("result %s, input %s, stackSlot %s", result.getLIRKind(),
                            input.getLIRKind(), stackSlot.getLIRKind());
            this.result = result;
            this.input = input;
            this.stackSlot = stackSlot;
        }

        public Value getInput() {
            return input;
        }

        public AllocatableValue getResult() {
            return result;
        }

        public StackSlotValue getStackSlot() {
            return stackSlot;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            assert result.getPlatformKind() instanceof Kind : "Can only deal with Kind: " + result.getLIRKind();
            Kind kind = (Kind) result.getPlatformKind();
            AMD64Move.move(kind, crb, masm, result, input);
            AMD64Move.move(kind, crb, masm, stackSlot, input);
        }
    }

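    /**
     * Decodes a compressed pointer: shifts left by the encoding's shift amount, then adds the base
     * if the encoding uses one. For nullable inputs the base is only added when the shifted value
     * is non-zero, so a compressed null decodes back to null.
     */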
    public static final class UncompressPointer extends AMD64LIRInstruction {
        public static final LIRInstructionClass<UncompressPointer> TYPE = LIRInstructionClass.create(UncompressPointer.class);

        private final CompressEncoding encoding;
        private final boolean nonNull;

        @Def({REG, HINT}) protected AllocatableValue result;
        @Use({REG}) protected AllocatableValue input;
        @Alive({REG, ILLEGAL}) protected AllocatableValue baseRegister;

        public UncompressPointer(AllocatableValue result, AllocatableValue input, AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull) {
            super(TYPE);
            this.result = result;
            this.input = input;
            this.baseRegister = baseRegister;
            this.encoding = encoding;
            this.nonNull = nonNull;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            AMD64Move.move(Kind.Int, crb, masm, result, input);

            Register resReg = asRegister(result);
            if (encoding.shift != 0) {
                masm.shlq(resReg, encoding.shift);
            }

            if (encoding.base != 0) {
                if (nonNull) {
                    masm.addq(resReg, asRegister(baseRegister));
                } else {
                    if (encoding.shift == 0) {
                        // if encoding.shift != 0, the flags are already set by the shlq
                        masm.testq(resReg, resReg);
                    }

                    Label done = new Label();
                    masm.jccb(ConditionFlag.Equal, done);
                    masm.addq(resReg, asRegister(baseRegister));
                    masm.bind(done);
                }
            }
        }
    }

    /**
     * Emits code that loads a compressed klass pointer from {@code address} and decodes it into
     * {@code register}, using {@code scratch} to materialize the base when the encoding has one.
     */
    public static void decodeKlassPointer(AMD64MacroAssembler masm, Register register, Register scratch, AMD64Address address, CompressEncoding encoding) {
        masm.movl(register, address);
        if (encoding.shift != 0) {
            assert encoding.alignment == encoding.shift : "Decode algorithm is wrong";
            masm.shlq(register, encoding.alignment);
        }
        if (encoding.base != 0) {
            masm.movq(scratch, encoding.base);
            masm.addq(register, scratch);
        }
    }
}