/*
 * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package com.oracle.graal.lir.amd64;

import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
import static java.lang.Double.*;
import static java.lang.Float.*;
import static jdk.internal.jvmci.code.ValueUtil.*;

import jdk.internal.jvmci.amd64.*;
import jdk.internal.jvmci.code.*;
import jdk.internal.jvmci.common.*;
import jdk.internal.jvmci.meta.*;

import com.oracle.graal.asm.*;
import com.oracle.graal.asm.amd64.*;
import com.oracle.graal.asm.amd64.AMD64Assembler.*;
import com.oracle.graal.lir.*;
import com.oracle.graal.lir.StandardOp.MoveOp;
import com.oracle.graal.lir.StandardOp.NullCheck;
import com.oracle.graal.lir.asm.*;

public class AMD64Move {

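    /**
     * Base class for the move operations. The {@code moveKind} supplied by subclasses selects
     * the operand size of the emitted move; {@link Kind#Illegal} means the operand size is
     * unknown, in which case the whole 64-bit register is moved conservatively.
     */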
    private abstract static class AbstractMoveOp extends AMD64LIRInstruction implements MoveOp {
        public static final LIRInstructionClass<AbstractMoveOp> TYPE = LIRInstructionClass.create(AbstractMoveOp.class);

        private Kind moveKind;

        protected AbstractMoveOp(LIRInstructionClass<? extends AbstractMoveOp> c, Kind moveKind) {
            super(c);
            if (moveKind == Kind.Illegal) {
                // unknown operand size, conservatively move the whole register
                this.moveKind = Kind.Long;
            } else {
                this.moveKind = moveKind;
            }
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            move(moveKind, crb, masm, getResult(), getInput());
        }
    }

    @Opcode("MOVE")
    public static final class MoveToRegOp extends AbstractMoveOp {
        public static final LIRInstructionClass<MoveToRegOp> TYPE = LIRInstructionClass.create(MoveToRegOp.class);

        @Def({REG, HINT}) protected AllocatableValue result;
        @Use({REG, STACK, CONST}) protected Value input;

        public MoveToRegOp(Kind moveKind, AllocatableValue result, Value input) {
            super(TYPE, moveKind);
            this.result = result;
            this.input = input;
        }

        @Override
        public Value getInput() {
            return input;
        }

        @Override
        public AllocatableValue getResult() {
            return result;
        }
    }

    @Opcode("MOVE")
    public static final class MoveFromRegOp extends AbstractMoveOp {
        public static final LIRInstructionClass<MoveFromRegOp> TYPE = LIRInstructionClass.create(MoveFromRegOp.class);

        @Def({REG, STACK}) protected AllocatableValue result;
        @Use({REG, CONST, HINT}) protected Value input;

        public MoveFromRegOp(Kind moveKind, AllocatableValue result, Value input) {
            super(TYPE, moveKind);
            this.result = result;
            this.input = input;
        }

        @Override
        public Value getInput() {
            return input;
        }

        @Override
        public AllocatableValue getResult() {
            return result;
        }
    }

    @Opcode("STACKMOVE")
    public static final class AMD64StackMove extends AMD64LIRInstruction implements MoveOp {
        public static final LIRInstructionClass<AMD64StackMove> TYPE = LIRInstructionClass.create(AMD64StackMove.class);

        @Def({STACK}) protected AllocatableValue result;
        @Use({STACK, HINT}) protected Value input;
        @Alive({OperandFlag.STACK, OperandFlag.UNINITIALIZED}) private StackSlotValue backupSlot;

        private Register scratch;

        public AMD64StackMove(AllocatableValue result, Value input, Register scratch, StackSlotValue backupSlot) {
            super(TYPE);
            this.result = result;
            this.input = input;
            this.backupSlot = backupSlot;
            this.scratch = scratch;
        }

        @Override
        public Value getInput() {
            return input;
        }

        @Override
        public AllocatableValue getResult() {
            return result;
        }

        public Register getScratchRegister() {
            return scratch;
        }

        public StackSlotValue getBackupSlot() {
            return backupSlot;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            // backup scratch register
            move(backupSlot.getKind(), crb, masm, backupSlot, scratch.asValue(backupSlot.getLIRKind()));
            // move stack slot
            move(getInput().getKind(), crb, masm, scratch.asValue(getInput().getLIRKind()), getInput());
            move(getResult().getKind(), crb, masm, getResult(), scratch.asValue(getResult().getLIRKind()));
            // restore scratch register
            move(backupSlot.getKind(), crb, masm, scratch.asValue(backupSlot.getLIRKind()), backupSlot);
        }
    }

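    /**
     * Like {@link AMD64StackMove}, this op routes stack-to-stack moves through a scratch
     * register, since AMD64 has no memory-to-memory move. The batched variant saves and
     * restores the scratch register once for the whole sequence of moves instead of once per
     * move.
     */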
    @Opcode("MULTISTACKMOVE")
    public static final class AMD64MultiStackMove extends AMD64LIRInstruction {
        public static final LIRInstructionClass<AMD64MultiStackMove> TYPE = LIRInstructionClass.create(AMD64MultiStackMove.class);

        @Def({STACK}) protected AllocatableValue[] results;
        @Use({STACK}) protected Value[] inputs;
        @Alive({OperandFlag.STACK, OperandFlag.UNINITIALIZED}) private StackSlotValue backupSlot;

        private Register scratch;

        public AMD64MultiStackMove(AllocatableValue[] results, Value[] inputs, Register scratch, StackSlotValue backupSlot) {
            super(TYPE);
            this.results = results;
            this.inputs = inputs;
            this.backupSlot = backupSlot;
            this.scratch = scratch;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            // backup scratch register
            move(backupSlot.getKind(), crb, masm, backupSlot, scratch.asValue(backupSlot.getLIRKind()));
            for (int i = 0; i < results.length; i++) {
                Value input = inputs[i];
                AllocatableValue result = results[i];
                // move stack slot
                move(input.getKind(), crb, masm, scratch.asValue(input.getLIRKind()), input);
                move(result.getKind(), crb, masm, result, scratch.asValue(result.getLIRKind()));
            }
            // restore scratch register
            move(backupSlot.getKind(), crb, masm, scratch.asValue(backupSlot.getLIRKind()), backupSlot);
        }
    }

    @Opcode("STACKMOVE")
    public static final class AMD64PushPopStackMove extends AMD64LIRInstruction implements MoveOp {
        public static final LIRInstructionClass<AMD64PushPopStackMove> TYPE = LIRInstructionClass.create(AMD64PushPopStackMove.class);

        @Def({STACK}) protected AllocatableValue result;
        @Use({STACK, HINT}) protected Value input;
        private final OperandSize size;

        public AMD64PushPopStackMove(OperandSize size, AllocatableValue result, Value input) {
            super(TYPE);
            this.result = result;
            this.input = input;
            this.size = size;
        }

        @Override
        public Value getInput() {
            return input;
        }

        @Override
        public AllocatableValue getResult() {
            return result;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            AMD64MOp.PUSH.emit(masm, size, (AMD64Address) crb.asAddress(input));
            AMD64MOp.POP.emit(masm, size, (AMD64Address) crb.asAddress(result));
        }
    }

    public static final class LeaOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<LeaOp> TYPE = LIRInstructionClass.create(LeaOp.class);

        @Def({REG}) protected AllocatableValue result;
        @Use({COMPOSITE, UNINITIALIZED}) protected AMD64AddressValue address;

        public LeaOp(AllocatableValue result, AMD64AddressValue address) {
            super(TYPE);
            this.result = result;
            this.address = address;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            masm.leaq(asLongReg(result), address.toAddress());
        }
    }

    public static final class LeaDataOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<LeaDataOp> TYPE = LIRInstructionClass.create(LeaDataOp.class);

        @Def({REG}) protected AllocatableValue result;
        private final byte[] data;

        public LeaDataOp(AllocatableValue result, byte[] data) {
            super(TYPE);
            this.result = result;
            this.data = data;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            masm.leaq(asRegister(result), (AMD64Address) crb.recordDataReferenceInCode(data, 16));
        }
    }

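    /**
     * Materializes the address of a stack slot into a register via LEA, e.g. so the slot can
     * be handed to code that expects a pointer rather than a value.
     */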
    public static final class StackLeaOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<StackLeaOp> TYPE = LIRInstructionClass.create(StackLeaOp.class);

        @Def({REG}) protected AllocatableValue result;
        @Use({STACK, UNINITIALIZED}) protected StackSlotValue slot;

        public StackLeaOp(AllocatableValue result, StackSlotValue slot) {
            super(TYPE);
            assert isStackSlotValue(slot) : "Not a stack slot: " + slot;
            this.result = result;
            this.slot = slot;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            masm.leaq(asLongReg(result), (AMD64Address) crb.asAddress(slot));
        }
    }

    public static final class MembarOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<MembarOp> TYPE = LIRInstructionClass.create(MembarOp.class);

        private final int barriers;

        public MembarOp(final int barriers) {
            super(TYPE);
            this.barriers = barriers;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            masm.membar(barriers);
        }
    }

    public static final class NullCheckOp extends AMD64LIRInstruction implements NullCheck {
        public static final LIRInstructionClass<NullCheckOp> TYPE = LIRInstructionClass.create(NullCheckOp.class);

        @Use({COMPOSITE}) protected AMD64AddressValue address;
        @State protected LIRFrameState state;

        public NullCheckOp(AMD64AddressValue address, LIRFrameState state) {
            super(TYPE);
            this.address = address;
            this.state = state;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            crb.recordImplicitException(masm.position(), state);
            masm.nullCheck(address.toAddress());
        }

        public Value getCheckedValue() {
            return address.base;
        }

        public LIRFrameState getState() {
            return state;
        }
    }

    @Opcode("CAS")
    public static final class CompareAndSwapOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<CompareAndSwapOp> TYPE = LIRInstructionClass.create(CompareAndSwapOp.class);

        private final Kind accessKind;

        @Def protected AllocatableValue result;
        @Use({COMPOSITE}) protected AMD64AddressValue address;
        @Use protected AllocatableValue cmpValue;
        @Use protected AllocatableValue newValue;

        public CompareAndSwapOp(Kind accessKind, AllocatableValue result, AMD64AddressValue address, AllocatableValue cmpValue, AllocatableValue newValue) {
            super(TYPE);
            this.accessKind = accessKind;
            this.result = result;
            this.address = address;
            this.cmpValue = cmpValue;
            this.newValue = newValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            // CMPXCHG implicitly compares against and writes its result back to RAX
            assert asRegister(cmpValue).equals(AMD64.rax) && asRegister(result).equals(AMD64.rax);

            if (crb.target.isMP) {
                masm.lock();
            }
            switch (accessKind) {
                case Int:
                    masm.cmpxchgl(asRegister(newValue), address.toAddress());
                    break;
                case Long:
                case Object:
                    masm.cmpxchgq(asRegister(newValue), address.toAddress());
                    break;
                default:
                    throw JVMCIError.shouldNotReachHere();
            }
        }
    }

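    /**
     * Atomic get-and-add: {@code delta} is first moved into the result register, then
     * (LOCK) XADD atomically adds the register to the memory operand and leaves the memory's
     * previous value in the register. The address is {@code @Alive} rather than {@code @Use}
     * because the result register is written before the address is consumed, so the two must
     * not be assigned the same register.
     */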
    @Opcode("ATOMIC_READ_AND_ADD")
    public static final class AtomicReadAndAddOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<AtomicReadAndAddOp> TYPE = LIRInstructionClass.create(AtomicReadAndAddOp.class);

        private final Kind accessKind;

        @Def protected AllocatableValue result;
        @Alive({COMPOSITE}) protected AMD64AddressValue address;
        @Use protected AllocatableValue delta;

        public AtomicReadAndAddOp(Kind accessKind, AllocatableValue result, AMD64AddressValue address, AllocatableValue delta) {
            super(TYPE);
            this.accessKind = accessKind;
            this.result = result;
            this.address = address;
            this.delta = delta;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            move(accessKind, crb, masm, result, delta);
            if (crb.target.isMP) {
                masm.lock();
            }
            switch (accessKind) {
                case Int:
                    masm.xaddl(address.toAddress(), asRegister(result));
                    break;
                case Long:
                    masm.xaddq(address.toAddress(), asRegister(result));
                    break;
                default:
                    throw JVMCIError.shouldNotReachHere();
            }
        }
    }

    @Opcode("ATOMIC_READ_AND_WRITE")
    public static final class AtomicReadAndWriteOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<AtomicReadAndWriteOp> TYPE = LIRInstructionClass.create(AtomicReadAndWriteOp.class);

        private final Kind accessKind;

        @Def protected AllocatableValue result;
        @Alive({COMPOSITE}) protected AMD64AddressValue address;
        @Use protected AllocatableValue newValue;

        public AtomicReadAndWriteOp(Kind accessKind, AllocatableValue result, AMD64AddressValue address, AllocatableValue newValue) {
            super(TYPE);
            this.accessKind = accessKind;
            this.result = result;
            this.address = address;
            this.newValue = newValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            move(accessKind, crb, masm, result, newValue);
            // XCHG with a memory operand is implicitly locked, so no LOCK prefix is needed
            switch (accessKind) {
                case Int:
                    masm.xchgl(asRegister(result), address.toAddress());
                    break;
                case Long:
                case Object:
                    masm.xchgq(asRegister(result), address.toAddress());
                    break;
                default:
                    throw JVMCIError.shouldNotReachHere();
            }
        }
    }

    public static void move(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, Value input) {
        move(result.getKind(), crb, masm, result, input);
    }

    public static void move(Kind moveKind, CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, Value input) {
        if (isRegister(input)) {
            if (isRegister(result)) {
                reg2reg(moveKind, masm, result, input);
            } else if (isStackSlot(result)) {
                reg2stack(moveKind, crb, masm, result, input);
            } else {
                throw JVMCIError.shouldNotReachHere();
            }
        } else if (isStackSlot(input)) {
            if (isRegister(result)) {
                stack2reg(moveKind, crb, masm, result, input);
            } else {
                throw JVMCIError.shouldNotReachHere();
            }
        } else if (isConstant(input)) {
            if (isRegister(result)) {
                const2reg(crb, masm, result, (JavaConstant) input);
            } else if (isStackSlot(result)) {
                const2stack(crb, masm, result, (JavaConstant) input);
            } else {
                throw JVMCIError.shouldNotReachHere();
            }
        } else {
            throw JVMCIError.shouldNotReachHere();
        }
    }

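    /**
     * Register-to-register move. Sub-word kinds are widened to their stack kind, so byte and
     * short values are moved as full 32-bit words; a move from a register to itself emits
     * nothing.
     */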
    private static void reg2reg(Kind kind, AMD64MacroAssembler masm, Value result, Value input) {
        if (asRegister(input).equals(asRegister(result))) {
            return;
        }
        switch (kind.getStackKind()) {
            case Int:
                masm.movl(asRegister(result), asRegister(input));
                break;
            case Long:
                masm.movq(asRegister(result), asRegister(input));
                break;
            case Float:
                masm.movflt(asFloatReg(result), asFloatReg(input));
                break;
            case Double:
                masm.movdbl(asDoubleReg(result), asDoubleReg(input));
                break;
            case Object:
                masm.movq(asRegister(result), asRegister(input));
                break;
            default:
                throw JVMCIError.shouldNotReachHere("kind=" + result.getKind());
        }
    }

    private static void reg2stack(Kind kind, CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, Value input) {
        AMD64Address dest = (AMD64Address) crb.asAddress(result);
        switch (kind) {
            case Boolean:
            case Byte:
                masm.movb(dest, asRegister(input));
                break;
            case Short:
            case Char:
                masm.movw(dest, asRegister(input));
                break;
            case Int:
                masm.movl(dest, asRegister(input));
                break;
            case Long:
                masm.movq(dest, asRegister(input));
                break;
            case Float:
                masm.movflt(dest, asFloatReg(input));
                break;
            case Double:
                masm.movsd(dest, asDoubleReg(input));
                break;
            case Object:
                masm.movq(dest, asRegister(input));
                break;
            default:
                throw JVMCIError.shouldNotReachHere();
        }
    }

    private static void stack2reg(Kind kind, CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, Value input) {
        AMD64Address src = (AMD64Address) crb.asAddress(input);
        switch (kind) {
            case Boolean:
                masm.movzbl(asRegister(result), src);
                break;
            case Byte:
                masm.movsbl(asRegister(result), src);
                break;
            case Short:
                masm.movswl(asRegister(result), src);
                break;
            case Char:
                masm.movzwl(asRegister(result), src);
                break;
            case Int:
                masm.movl(asRegister(result), src);
                break;
            case Long:
                masm.movq(asRegister(result), src);
                break;
            case Float:
                masm.movflt(asFloatReg(result), src);
                break;
            case Double:
                masm.movdbl(asDoubleReg(result), src);
                break;
            case Object:
                masm.movq(asRegister(result), src);
                break;
            default:
                throw JVMCIError.shouldNotReachHere();
        }
    }

    private static void const2reg(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, JavaConstant input) {
        /*
         * Note: we use the kind of the input operand (and not the kind of the result operand)
         * because they do not always match. For example, an object constant can be loaded into
         * a long register when unsafe casts occurred (e.g., for a write barrier where
         * arithmetic operations are then performed on the pointer).
         */
        switch (input.getKind().getStackKind()) {
            case Int:
                if (crb.codeCache.needsDataPatch(input)) {
                    crb.recordInlineDataInCode(input);
                }
                // Do not optimize with an XOR as this instruction may be between
                // a CMP and a Jcc in which case the XOR will modify the condition
                // flags and interfere with the Jcc.
                masm.movl(asRegister(result), input.asInt());
                break;
            case Long:
                boolean patch = false;
                if (crb.codeCache.needsDataPatch(input)) {
                    patch = true;
                    crb.recordInlineDataInCode(input);
                }
                // Do not optimize with an XOR as this instruction may be between
                // a CMP and a Jcc in which case the XOR will modify the condition
                // flags and interfere with the Jcc.
                if (patch) {
                    masm.movq(asRegister(result), input.asLong());
                } else {
                    if (input.asLong() == (int) input.asLong()) {
                        // sign-extended to long
                        masm.movslq(asRegister(result), (int) input.asLong());
                    } else if ((input.asLong() & 0xFFFFFFFFL) == input.asLong()) {
                        // zero-extended to long
                        masm.movl(asRegister(result), (int) input.asLong());
                    } else {
                        masm.movq(asRegister(result), input.asLong());
                    }
                }
                break;
            case Float:
                // This is *not* the same as 'constant == 0.0f' in the case where constant is -0.0f
                if (Float.floatToRawIntBits(input.asFloat()) == Float.floatToRawIntBits(0.0f)) {
                    assert !crb.codeCache.needsDataPatch(input);
                    masm.xorps(asFloatReg(result), asFloatReg(result));
                } else {
                    masm.movflt(asFloatReg(result), (AMD64Address) crb.asFloatConstRef(input));
                }
                break;
            case Double:
                // This is *not* the same as 'constant == 0.0d' in the case where constant is -0.0d
                if (Double.doubleToRawLongBits(input.asDouble()) == Double.doubleToRawLongBits(0.0d)) {
                    assert !crb.codeCache.needsDataPatch(input);
                    masm.xorpd(asDoubleReg(result), asDoubleReg(result));
                } else {
                    masm.movdbl(asDoubleReg(result), (AMD64Address) crb.asDoubleConstRef(input));
                }
                break;
            case Object:
                // Do not optimize with an XOR as this instruction may be between
                // a CMP and a Jcc in which case the XOR will modify the condition
                // flags and interfere with the Jcc.
                if (input.isNull()) {
                    masm.movq(asRegister(result), 0x0L);
                } else if (crb.target.inlineObjects) {
                    crb.recordInlineDataInCode(input);
                    // placeholder immediate; the recorded patch replaces it with the object address
                    masm.movq(asRegister(result), 0xDEADDEADDEADDEADL);
                } else {
                    masm.movq(asRegister(result), (AMD64Address) crb.recordDataReferenceInCode(input, 0));
                }
                break;
            default:
                throw JVMCIError.shouldNotReachHere();
        }
    }

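    /**
     * Stores a constant directly into a stack slot. Float and double constants are written as
     * their raw bit patterns so the same integer store instructions serve all kinds. Only the
     * null object constant can be handled here; any other object constant may need patching
     * and therefore has to go through a register.
     */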
    private static void const2stack(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, JavaConstant input) {
        assert !crb.codeCache.needsDataPatch(input);
        AMD64Address dest = (AMD64Address) crb.asAddress(result);
        final long imm;
        switch (input.getKind().getStackKind()) {
            case Int:
                imm = input.asInt();
                break;
            case Long:
                imm = input.asLong();
                break;
            case Float:
                imm = floatToRawIntBits(input.asFloat());
                break;
            case Double:
                imm = doubleToRawLongBits(input.asDouble());
                break;
            case Object:
                if (input.isNull()) {
                    imm = 0;
                } else {
                    throw JVMCIError.shouldNotReachHere("Non-null object constants must be in a register");
                }
                break;
            default:
                throw JVMCIError.shouldNotReachHere();
        }
        switch (result.getKind()) {
            case Byte:
                assert NumUtil.isByte(imm) : "Is not in byte range: " + imm;
                AMD64MIOp.MOVB.emit(masm, OperandSize.BYTE, dest, (int) imm);
                break;
            case Short:
                assert NumUtil.isShort(imm) : "Is not in short range: " + imm;
                AMD64MIOp.MOV.emit(masm, OperandSize.WORD, dest, (int) imm);
                break;
            case Char:
                assert NumUtil.isUShort(imm) : "Is not in char range: " + imm;
                AMD64MIOp.MOV.emit(masm, OperandSize.WORD, dest, (int) imm);
                break;
            case Int:
            case Float:
                assert NumUtil.isInt(imm) : "Is not in int range: " + imm;
                masm.movl(dest, (int) imm);
                break;
            case Long:
            case Double:
            case Object:
                masm.movlong(dest, imm);
                break;
            default:
                throw JVMCIError.shouldNotReachHere("Unknown result Kind: " + result.getKind());
        }
    }
}