# HG changeset patch
# User Christian Humer
# Date 1377875185 -7200
# Node ID ff41fd80d562254992defbdf70b3bbdafb613c00
# Parent cb364a90ef62b3a9d023f52e7008cae8a869843f
# Parent 60937d54db2edd6d6cbfb13def4c0461e1626437
Merge.

diff -r cb364a90ef62 -r ff41fd80d562 graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/CodeCacheProvider.java
--- a/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/CodeCacheProvider.java	Fri Aug 30 17:00:26 2013 +0200
+++ b/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/CodeCacheProvider.java	Fri Aug 30 17:06:25 2013 +0200
@@ -89,12 +89,4 @@
      * Gets a description of the target architecture.
      */
     TargetDescription getTarget();
-
-    /**
-     * Returns the register the runtime uses for maintaining the heap base address. This is mainly
-     * utilized by runtimes which support compressed pointers.
-     *
-     * @return the register that keeps the heap base address
-     */
-    Register heapBaseRegister();
 }
diff -r cb364a90ef62 -r ff41fd80d562 graal/com.oracle.graal.api.meta/src/com/oracle/graal/api/meta/Constant.java
--- a/graal/com.oracle.graal.api.meta/src/com/oracle/graal/api/meta/Constant.java	Fri Aug 30 17:00:26 2013 +0200
+++ b/graal/com.oracle.graal.api.meta/src/com/oracle/graal/api/meta/Constant.java	Fri Aug 30 17:06:25 2013 +0200
@@ -454,4 +454,102 @@
             throw new IllegalArgumentException(kind.toString());
         }
     }
+
+    /**
+     * Returns the zero value for a given numeric kind.
+     */
+    public static Constant zero(Kind kind) {
+        switch (kind) {
+            case Byte:
+                return forByte((byte) 0);
+            case Char:
+                return forChar((char) 0);
+            case Double:
+                return DOUBLE_0;
+            case Float:
+                return FLOAT_0;
+            case Int:
+                return INT_0;
+            case Long:
+                return LONG_0;
+            case Short:
+                return forShort((short) 0);
+            default:
+                throw new IllegalArgumentException(kind.toString());
+        }
+    }
+
+    /**
+     * Returns the one value for a given numeric kind.
+     */
+    public static Constant one(Kind kind) {
+        switch (kind) {
+            case Byte:
+                return forByte((byte) 1);
+            case Char:
+                return forChar((char) 1);
+            case Double:
+                return DOUBLE_1;
+            case Float:
+                return FLOAT_1;
+            case Int:
+                return INT_1;
+            case Long:
+                return LONG_1;
+            case Short:
+                return forShort((short) 1);
+            default:
+                throw new IllegalArgumentException(kind.toString());
+        }
+    }
+
+    /**
+     * Adds two numeric constants.
+     */
+    public static Constant add(Constant x, Constant y) {
+        assert x.getKind() == y.getKind();
+        switch (x.getKind()) {
+            case Byte:
+                return forByte((byte) (x.asInt() + y.asInt()));
+            case Char:
+                return forChar((char) (x.asInt() + y.asInt()));
+            case Double:
+                return forDouble(x.asDouble() + y.asDouble());
+            case Float:
+                return forFloat(x.asFloat() + y.asFloat());
+            case Int:
+                return forInt(x.asInt() + y.asInt());
+            case Long:
+                return forLong(x.asLong() + y.asLong());
+            case Short:
+                return forShort((short) (x.asInt() + y.asInt()));
+            default:
+                throw new IllegalArgumentException(x.getKind().toString());
+        }
+    }
+
+    /**
+     * Multiplies two numeric constants.
+     */
+    public static Constant mul(Constant x, Constant y) {
+        assert x.getKind() == y.getKind();
+        switch (x.getKind()) {
+            case Byte:
+                return forByte((byte) (x.asInt() * y.asInt()));
+            case Char:
+                return forChar((char) (x.asInt() * y.asInt()));
+            case Double:
+                return forDouble(x.asDouble() * y.asDouble());
+            case Float:
+                return forFloat(x.asFloat() * y.asFloat());
+            case Int:
+                return forInt(x.asInt() * y.asInt());
+            case Long:
+                return forLong(x.asLong() * y.asLong());
+            case Short:
+                return forShort((short) (x.asInt() * y.asInt()));
+            default:
+                throw new IllegalArgumentException(x.getKind().toString());
+        }
+    }
 }
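
The new Constant helpers fold arithmetic per kind; note that the narrowing casts mean Byte, Char, and Short results wrap in their own width rather than in 32 bits. A minimal usage sketch against the API added above (illustrative only, not part of this patch):

    import com.oracle.graal.api.meta.Constant;
    import com.oracle.graal.api.meta.Kind;

    public class ConstantFoldExample {
        public static void main(String[] args) {
            // 2 + 3 folds to the Int constant 5.
            Constant five = Constant.add(Constant.forInt(2), Constant.forInt(3));
            System.out.println(five.asInt()); // 5

            // Byte arithmetic wraps in 8 bits: 127 + 1 == -128.
            Constant wrapped = Constant.add(Constant.forByte((byte) 127), Constant.one(Kind.Byte));
            System.out.println(wrapped.asInt()); // -128

            // zero/one give kind-correct identity elements, e.g. for reassociation.
            System.out.println(Constant.mul(five, Constant.one(Kind.Int)).asInt()); // 5
        }
    }
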
diff -r cb364a90ef62 -r ff41fd80d562 graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java	Fri Aug 30 17:00:26 2013 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java	Fri Aug 30 17:06:25 2013 +0200
@@ -244,10 +244,10 @@
         AMD64Address src = new AMD64Address(receiver, config.hubOffset);
         AMD64HotSpotLIRGenerator gen = (AMD64HotSpotLIRGenerator) lirGen;
-        HotSpotRuntime hr = ((HotSpotRuntime) gen.getRuntime());
-        if (hr.config.useCompressedKlassPointers) {
+        AMD64HotSpotRuntime hr = ((AMD64HotSpotRuntime) gen.getRuntime());
+        if (hr.useCompressedKlassPointers()) {
             Register register = r10;
-            AMD64Move.decodeKlassPointer(asm, register, hr.heapBaseRegister(), src, hr.config.narrowKlassBase, hr.config.narrowKlassShift, hr.config.logKlassAlignment);
+            AMD64HotSpotMove.decodeKlassPointer(asm, register, hr.heapBaseRegister(), src, config.narrowKlassBase, config.narrowKlassShift, config.logKlassAlignment);
             asm.cmpq(inlineCacheKlass, register);
         } else {
             asm.cmpq(inlineCacheKlass, src);
diff -r cb364a90ef62 -r ff41fd80d562 graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java	Fri Aug 30 17:00:26 2013 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java	Fri Aug 30 17:06:25 2013 +0200
@@ -38,6 +38,10 @@
 import com.oracle.graal.compiler.gen.*;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.hotspot.*;
+import com.oracle.graal.hotspot.amd64.AMD64HotSpotMove.CompareAndSwapCompressedOp;
+import com.oracle.graal.hotspot.amd64.AMD64HotSpotMove.LoadCompressedPointer;
+import com.oracle.graal.hotspot.amd64.AMD64HotSpotMove.StoreCompressedConstantOp;
+import com.oracle.graal.hotspot.amd64.AMD64HotSpotMove.StoreCompressedPointer;
 import com.oracle.graal.hotspot.meta.*;
 import com.oracle.graal.hotspot.nodes.*;
 import com.oracle.graal.hotspot.stubs.*;
@@ -46,12 +50,9 @@
 import com.oracle.graal.lir.StandardOp.PlaceholderOp;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.amd64.AMD64ControlFlow.CondMoveOp;
-import com.oracle.graal.lir.amd64.AMD64Move.CompareAndSwapCompressedOp;
 import com.oracle.graal.lir.amd64.AMD64Move.CompareAndSwapOp;
-import com.oracle.graal.lir.amd64.AMD64Move.LoadCompressedPointer;
 import com.oracle.graal.lir.amd64.AMD64Move.LoadOp;
 import com.oracle.graal.lir.amd64.AMD64Move.MoveFromRegOp;
-import com.oracle.graal.lir.amd64.AMD64Move.StoreCompressedPointer;
 import com.oracle.graal.lir.amd64.AMD64Move.StoreConstantOp;
 import com.oracle.graal.lir.amd64.AMD64Move.StoreOp;
 import com.oracle.graal.nodes.*;
@@ -427,12 +428,12 @@
          * algorithms may differ.
          */
         if (isCompressCandidate(access)) {
-            if (runtime().config.useCompressedOops && kind == Kind.Object) {
-                append(new LoadCompressedPointer(kind, result, runtime().heapBaseRegister().asValue(), loadAddress, access != null ? state(access) : null, runtime().config.narrowOopBase,
-                                runtime().config.narrowOopShift, runtime().config.logMinObjAlignment));
-            } else if (runtime().config.useCompressedKlassPointers && kind == Kind.Long) {
-                append(new LoadCompressedPointer(kind, result, runtime().heapBaseRegister().asValue(), loadAddress, access != null ? state(access) : null, runtime().config.narrowKlassBase,
-                                runtime().config.narrowKlassShift, runtime().config.logKlassAlignment));
+            if (runtime().useCompressedOops() && kind == Kind.Object) {
+                append(new LoadCompressedPointer(kind, result, runtime().heapBaseRegister().asValue(), loadAddress, access != null ? state(access) : null, getNarrowOopBase(), getNarrowOopShift(),
+                                getLogMinObjectAlignment()));
+            } else if (runtime().useCompressedKlassPointers() && kind == Kind.Long) {
+                append(new LoadCompressedPointer(kind, result, runtime().heapBaseRegister().asValue(), loadAddress, access != null ? state(access) : null, getNarrowKlassBase(), getNarrowKlassShift(),
+                                getLogKlassAlignment()));
             } else {
                 append(new LoadOp(kind, result, loadAddress, access != null ? state(access) : null));
             }
@@ -449,29 +450,29 @@
         if (isConstant(inputVal)) {
             Constant c = asConstant(inputVal);
             if (canStoreConstant(c)) {
-                if (inputVal.getKind() == Kind.Object) {
-                    append(new StoreConstantOp(kind, storeAddress, c, state, runtime().config.useCompressedOops && isCompressCandidate(access)));
-                } else if (inputVal.getKind() == Kind.Long) {
-                    append(new StoreConstantOp(kind, storeAddress, c, state, runtime().config.useCompressedKlassPointers && isCompressCandidate(access)));
+                if (inputVal.getKind() == Kind.Object && runtime().useCompressedOops() && isCompressCandidate(access)) {
+                    append(new StoreCompressedConstantOp(kind, storeAddress, c, state));
+                } else if (inputVal.getKind() == Kind.Long && runtime().useCompressedKlassPointers() && isCompressCandidate(access)) {
+                    append(new StoreCompressedConstantOp(kind, storeAddress, c, state));
                 } else {
-                    append(new StoreConstantOp(kind, storeAddress, c, state, false));
+                    append(new StoreConstantOp(kind, storeAddress, c, state));
                 }
                 return;
             }
         }
         Variable input = load(inputVal);
         if (isCompressCandidate(access)) {
-            if (runtime().config.useCompressedOops && kind == Kind.Object) {
+            if (runtime().useCompressedOops() && kind == Kind.Object) {
                 if (input.getKind() == Kind.Object) {
                     Variable scratch = newVariable(Kind.Long);
-                    append(new StoreCompressedPointer(kind, storeAddress, input, scratch, state, runtime().config.narrowOopBase, runtime().config.narrowOopShift, runtime().config.logMinObjAlignment));
+                    append(new StoreCompressedPointer(kind, storeAddress, input, scratch, state, getNarrowOopBase(), getNarrowOopShift(), getLogMinObjectAlignment()));
                 } else {
                     // the input oop is already compressed
                     append(new StoreOp(input.getKind(), storeAddress, input, state));
                 }
-            } else if (runtime().config.useCompressedKlassPointers && kind == Kind.Long) {
+            } else if (runtime().useCompressedKlassPointers() && kind == Kind.Long) {
                 Variable scratch = newVariable(Kind.Long);
-                append(new StoreCompressedPointer(kind, storeAddress, input, scratch, state, runtime().config.narrowKlassBase, runtime().config.narrowKlassShift, runtime().config.logKlassAlignment));
+                append(new StoreCompressedPointer(kind, storeAddress, input, scratch, state, getNarrowKlassBase(), getNarrowKlassShift(), getLogKlassAlignment()));
             } else {
                 append(new StoreOp(kind, storeAddress, input, state));
             }
@@ -480,6 +481,30 @@
         }
     }
 
+    private int getLogMinObjectAlignment() {
+        return runtime().config.logMinObjAlignment;
+    }
+
+    private int getNarrowOopShift() {
+        return runtime().config.narrowOopShift;
+    }
+
+    private long getNarrowOopBase() {
+        return runtime().config.narrowOopBase;
+    }
+
+    private int getLogKlassAlignment() {
+        return runtime().config.logKlassAlignment;
+    }
+
+    private int getNarrowKlassShift() {
+        return runtime().config.narrowKlassShift;
+    }
+
+    private long getNarrowKlassBase() {
+        return runtime().config.narrowKlassBase;
+    }
+
     @Override
     public void visitCompareAndSwap(LoweredCompareAndSwapNode node, Value address) {
         Kind kind = node.getNewValue().kind();
@@ -489,9 +514,9 @@
         AMD64AddressValue addressValue = asAddressValue(address);
         RegisterValue raxRes = AMD64.rax.asValue(kind);
         emitMove(raxRes, expected);
-        if (runtime().config.useCompressedOops && node.isCompressible()) {
+        if (runtime().useCompressedOops() && node.isCompressible()) {
             Variable scratch = newVariable(Kind.Long);
-            append(new CompareAndSwapCompressedOp(raxRes, addressValue, raxRes, newValue, scratch, runtime().config.narrowOopBase, runtime().config.narrowOopShift, runtime().config.logMinObjAlignment));
+            append(new CompareAndSwapCompressedOp(raxRes, addressValue, raxRes, newValue, scratch, getNarrowOopBase(), getNarrowOopShift(), getLogMinObjectAlignment()));
         } else {
             append(new CompareAndSwapOp(raxRes, addressValue, raxRes, newValue));
         }
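
With the compressed variants split out into AMD64HotSpotMove, emitStore now selects a dedicated compressed-constant op instead of threading a compressible flag through StoreConstantOp. A stand-alone sketch of that dispatch (hypothetical helper, illustrative only; the returned names are the real LIR ops):

    import com.oracle.graal.api.meta.Kind;

    class StoreOpSelection {
        // Mirrors the constant-store dispatch in emitStore above; returns the
        // name of the LIR op the generator would append.
        static String selectConstantStoreOp(Kind kind, boolean useCompressedOops, boolean useCompressedKlassPointers, boolean compressCandidate) {
            if (kind == Kind.Object && useCompressedOops && compressCandidate) {
                return "StoreCompressedConstantOp"; // 32-bit movl of a narrow oop (null only)
            }
            if (kind == Kind.Long && useCompressedKlassPointers && compressCandidate) {
                return "StoreCompressedConstantOp"; // 32-bit movl of a narrow klass pointer
            }
            return "StoreConstantOp"; // regular uncompressed store
        }
    }
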
diff -r cb364a90ef62 -r ff41fd80d562 graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotMove.java
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotMove.java	Fri Aug 30 17:06:25 2013 +0200
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.amd64;
+
+import static com.oracle.graal.api.code.ValueUtil.*;
+import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
+
+import com.oracle.graal.amd64.*;
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.asm.*;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.ConditionFlag;
+import com.oracle.graal.graph.*;
+import com.oracle.graal.hotspot.meta.*;
+import com.oracle.graal.lir.*;
+import com.oracle.graal.lir.amd64.*;
+import com.oracle.graal.lir.amd64.AMD64Move.LoadOp;
+import com.oracle.graal.lir.amd64.AMD64Move.StoreConstantOp;
+import com.oracle.graal.lir.asm.*;
+
+public class AMD64HotSpotMove {
+
+    public static class StoreCompressedConstantOp extends StoreConstantOp {
+
+        public StoreCompressedConstantOp(Kind kind, AMD64AddressValue address, Constant input, LIRFrameState state) {
+            super(kind, address, input, state);
+        }
+
+        @Override
+        public void emitMemAccess(AMD64MacroAssembler masm) {
+            if (kind == Kind.Long) {
+                if (NumUtil.isInt(input.asLong())) {
+                    masm.movl(address.toAddress(), (int) input.asLong());
+                } else {
+                    throw GraalInternalError.shouldNotReachHere("Cannot store 64-bit constants to memory");
+                }
+            } else if (kind == Kind.Object) {
+                if (input.isNull()) {
+                    masm.movl(address.toAddress(), 0);
+                } else {
+                    throw GraalInternalError.shouldNotReachHere("Cannot store 64-bit constants to memory");
+                }
+            } else {
+                throw GraalInternalError.shouldNotReachHere("Attempt to store compressed constant of wrong type.");
+            }
+        }
+    }
+
+    public static class LoadCompressedPointer extends LoadOp {
+
+        private long base;
+        private int shift;
+        private int alignment;
+        @Alive({REG}) protected AllocatableValue heapBaseRegister;
+
+        public LoadCompressedPointer(Kind kind, AllocatableValue result, AllocatableValue heapBaseRegister, AMD64AddressValue address, LIRFrameState state, long base, int shift, int alignment) {
+            super(kind, result, address, state);
+            this.base = base;
+            this.shift = shift;
+            this.alignment = alignment;
+            this.heapBaseRegister = heapBaseRegister;
+            assert kind == Kind.Object || kind == Kind.Long;
+        }
+
+        @Override
+        public void emitMemAccess(AMD64MacroAssembler masm) {
+            Register resRegister = asRegister(result);
+            masm.movl(resRegister, address.toAddress());
+            if (kind == Kind.Object) {
+                decodePointer(masm, resRegister, asRegister(heapBaseRegister), base, shift, alignment);
+            } else {
+                decodeKlassPointer(masm, resRegister, asRegister(heapBaseRegister), base, shift, alignment);
+            }
+        }
+    }
+
+    public static class StoreCompressedPointer extends AMD64LIRInstruction {
+
+        protected final Kind kind;
+        private long base;
+        private int shift;
+        private int alignment;
+        @Temp({REG}) private AllocatableValue scratch;
+        @Alive({REG}) protected AllocatableValue input;
+        @Alive({COMPOSITE}) protected AMD64AddressValue address;
+        @State protected LIRFrameState state;
+
+        public StoreCompressedPointer(Kind kind, AMD64AddressValue address, AllocatableValue input, AllocatableValue scratch, LIRFrameState state, long base, int shift, int alignment) {
+            this.base = base;
+            this.shift = shift;
+            this.alignment = alignment;
+            this.scratch = scratch;
+            this.kind = kind;
+            this.address = address;
+            this.state = state;
+            this.input = input;
+            assert kind == Kind.Object || kind == Kind.Long;
+        }
+
+        @Override
+        public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler masm) {
+            Register heapBase = ((HotSpotRuntime) tasm.runtime).heapBaseRegister();
+            masm.movq(asRegister(scratch), asRegister(input));
+            if (kind == Kind.Object) {
+                encodePointer(masm, asRegister(scratch), heapBase, base, shift, alignment);
+            } else {
+                encodeKlassPointer(masm, asRegister(scratch), heapBase, base, shift, alignment);
+            }
+            if (state != null) {
+                tasm.recordImplicitException(masm.codeBuffer.position(), state);
+            }
+            masm.movl(address.toAddress(), asRegister(scratch));
+        }
+    }
+
+    @Opcode("CAS")
+    public static class CompareAndSwapCompressedOp extends AMD64LIRInstruction {
+
+        @Def protected AllocatableValue result;
+        @Alive({COMPOSITE}) protected AMD64AddressValue address;
+        @Alive protected AllocatableValue cmpValue;
+        @Alive protected AllocatableValue newValue;
+        @Temp({REG}) protected AllocatableValue scratch;
+
+        private long base;
+        private int shift;
+        private int alignment;
+
+        public CompareAndSwapCompressedOp(AllocatableValue result, AMD64AddressValue address, AllocatableValue cmpValue, AllocatableValue newValue, AllocatableValue scratch, long base, int shift,
+                        int alignment) {
+            this.base = base;
+            this.shift = shift;
+            this.alignment = alignment;
+            this.scratch = scratch;
+            this.result = result;
+            this.address = address;
+            this.cmpValue = cmpValue;
+            this.newValue = newValue;
+            assert cmpValue.getKind() == Kind.Object;
+        }
+
+        @Override
+        public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler masm) {
+            compareAndSwapCompressed(tasm, masm, result, address, cmpValue, newValue, scratch, base, shift, alignment);
+        }
+    }
+
+    protected static void compareAndSwapCompressed(TargetMethodAssembler tasm, AMD64MacroAssembler masm, AllocatableValue result, AMD64AddressValue address, AllocatableValue cmpValue,
+                    AllocatableValue newValue, AllocatableValue scratch, long base, int shift, int alignment) {
+        assert AMD64.rax.equals(asRegister(cmpValue)) && AMD64.rax.equals(asRegister(result));
+        final Register scratchRegister = asRegister(scratch);
+        final Register cmpRegister = asRegister(cmpValue);
+        final Register newRegister = asRegister(newValue);
+        Register heapBase = ((HotSpotRuntime) tasm.runtime).heapBaseRegister();
+        encodePointer(masm, cmpRegister, heapBase, base, shift, alignment);
+        masm.movq(scratchRegister, newRegister);
+        encodePointer(masm, scratchRegister, heapBase, base, shift, alignment);
+        if (tasm.target.isMP) {
+            masm.lock();
+        }
+        masm.cmpxchgl(scratchRegister, address.toAddress());
+    }
+
+    private static void encodePointer(AMD64MacroAssembler masm, Register scratchRegister, Register heapBaseRegister, long base, int shift, int alignment) {
+        // If the base is zero, the uncompressed address has to be shifted right
+        // in order to be compressed.
+        if (base == 0) {
+            if (shift != 0) {
+                assert alignment == shift : "Encode algorithm is wrong";
+                masm.shrq(scratchRegister, alignment);
+            }
+        } else {
+            // Otherwise the heap base, which always resides in register r12, is
+            // subtracted, followed by a right shift.
+            masm.testq(scratchRegister, scratchRegister);
+            // If the stored reference is null, move the heap base into the scratch
+            // register first, so that the subtraction yields the compressed null (0).
+            masm.cmovq(ConditionFlag.Equal, scratchRegister, heapBaseRegister);
+            masm.subq(scratchRegister, heapBaseRegister);
+            masm.shrq(scratchRegister, alignment);
+        }
+    }
+
+    private static void decodePointer(AMD64MacroAssembler masm, Register resRegister, Register heapBaseRegister, long base, int shift, int alignment) {
+        // If the base is zero, the compressed address has to be shifted left
+        // in order to be uncompressed.
+        if (base == 0) {
+            if (shift != 0) {
+                assert alignment == shift : "Decode algorithm is wrong";
+                masm.shlq(resRegister, alignment);
+            }
+        } else {
+            Label done = new Label();
+            masm.shlq(resRegister, alignment);
+            masm.jccb(ConditionFlag.Equal, done);
+            // Otherwise the heap base is added to the shifted address.
+            masm.addq(resRegister, heapBaseRegister);
+            masm.bind(done);
+        }
+    }
+
+    private static void encodeKlassPointer(AMD64MacroAssembler masm, Register scratchRegister, Register heapBaseRegister, long base, int shift, int alignment) {
+        if (base != 0) {
+            masm.subq(scratchRegister, heapBaseRegister);
+        }
+        if (shift != 0) {
+            assert alignment == shift : "Encode algorithm is wrong";
+            masm.shrq(scratchRegister, alignment);
+        }
+    }
+
+    private static void decodeKlassPointer(AMD64MacroAssembler masm, Register resRegister, Register heapBaseRegister, long base, int shift, int alignment) {
+        if (shift != 0) {
+            assert alignment == shift : "Decode algorithm is wrong";
+            masm.shlq(resRegister, alignment);
+            if (base != 0) {
+                masm.addq(resRegister, heapBaseRegister);
+            }
+        } else {
+            assert base == 0 : "Sanity";
+        }
+    }
+
+    public static void decodeKlassPointer(AMD64MacroAssembler masm, Register register, Register heapBaseRegister, AMD64Address address, long narrowKlassBase, int narrowKlassShift,
+                    int logKlassAlignment) {
+        masm.movl(register, address);
+        decodeKlassPointer(masm, register, heapBaseRegister, narrowKlassBase, narrowKlassShift, logKlassAlignment);
+    }
+}
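
The encode/decode helpers above implement the standard compressed-oops arithmetic: compressed = (address - heapBase) >>> shift, with null mapping to 0 in both directions. A stand-alone model of that arithmetic (plain Java, no Graal types; the base and shift values are illustrative only):

    public class CompressedOopModel {
        // Illustrative values: a nonzero heap base and 8-byte object alignment (shift 3).
        static final long HEAP_BASE = 0x700000000L;
        static final int SHIFT = 3;

        // Mirrors encodePointer: null stays null (0); otherwise subtract the
        // heap base and shift right by the alignment.
        static int encode(long address) {
            return address == 0 ? 0 : (int) ((address - HEAP_BASE) >>> SHIFT);
        }

        // Mirrors decodePointer: 0 stays null; otherwise shift left and add the base.
        static long decode(int compressed) {
            return compressed == 0 ? 0 : HEAP_BASE + ((compressed & 0xFFFFFFFFL) << SHIFT);
        }

        public static void main(String[] args) {
            long oop = HEAP_BASE + 0x1234L * 8;
            System.out.println(decode(encode(oop)) == oop); // true: round-trip
            System.out.println(decode(encode(0)));          // 0: null round-trips
        }
    }
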
diff -r cb364a90ef62 -r ff41fd80d562 graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java	Fri Aug 30 17:00:26 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java	Fri Aug 30 17:06:25 2013 +0200
@@ -361,6 +361,12 @@
     public abstract Register threadRegister();
 
     /**
+     * Returns the register used by the runtime for maintaining the heap base address for
+     * compressed pointers.
+     */
+    public abstract Register heapBaseRegister();
+
+    /**
      * Gets the stack pointer register.
      */
     public abstract Register stackPointerRegister();
@@ -494,6 +500,14 @@
         return Array.getLength(array.asObject());
     }
 
+    public boolean useCompressedOops() {
+        return config.useCompressedOops;
+    }
+
+    public boolean useCompressedKlassPointers() {
+        return config.useCompressedKlassPointers;
+    }
+
     @Override
     public void lower(Node n, LoweringTool tool) {
         StructuredGraph graph = (StructuredGraph) n.graph();
@@ -879,13 +893,13 @@
     private FloatingReadNode createReadHub(StructuredGraph graph, Kind wordKind, ValueNode object, GuardingNode guard) {
         LocationNode location = ConstantLocationNode.create(FINAL_LOCATION, wordKind, config.hubOffset, graph);
         assert !object.isConstant() || object.asConstant().isNull();
-        return graph.add(new FloatingReadNode(object, location, null, StampFactory.forKind(wordKind()), guard, BarrierType.NONE, config.useCompressedKlassPointers));
+        return graph.add(new FloatingReadNode(object, location, null, StampFactory.forKind(wordKind()), guard, BarrierType.NONE, useCompressedKlassPointers()));
     }
 
     private WriteNode createWriteHub(StructuredGraph graph, Kind wordKind, ValueNode object, ValueNode value) {
         LocationNode location = ConstantLocationNode.create(ANY_LOCATION, wordKind, config.hubOffset, graph);
         assert !object.isConstant() || object.asConstant().isNull();
-        return graph.add(new WriteNode(object, value, location, BarrierType.NONE, config.useCompressedKlassPointers));
+        return graph.add(new WriteNode(object, value, location, BarrierType.NONE, useCompressedKlassPointers()));
     }
 
     private static BarrierType getFieldLoadBarrierType(HotSpotResolvedJavaField loadField) {
@@ -943,7 +957,7 @@
     }
 
     public int getScalingFactor(Kind kind) {
-        if (config.useCompressedOops && kind == Kind.Object) {
+        if (useCompressedOops() && kind == Kind.Object) {
             return this.graalRuntime.getTarget().arch.getSizeInBytes(Kind.Int);
         } else {
             return this.graalRuntime.getTarget().arch.getSizeInBytes(kind);
@@ -1150,7 +1164,7 @@
             case Int:
                 return Constant.forInt(base == null ? unsafe.getInt(displacement) : unsafe.getInt(base, displacement));
             case Long:
-                if (displacement == config().hubOffset && this.getGraalRuntime().getRuntime().config.useCompressedKlassPointers) {
+                if (displacement == config().hubOffset && useCompressedKlassPointers()) {
                     if (base == null) {
                         throw new GraalInternalError("Base of object must not be null");
                     } else {
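
getScalingFactor reflects that with compressed oops an Object array element occupies an int-sized 4 bytes rather than 8; every other kind keeps its natural size. A small worked example of the resulting element addressing (plain Java; the base offset is illustrative of a 64-bit HotSpot array layout):

    public class ScalingFactorExample {
        // Element offset = arrayBaseOffset + index * scalingFactor.
        static long elementOffset(long arrayBaseOffset, int index, int scalingFactor) {
            return arrayBaseOffset + (long) index * scalingFactor;
        }

        public static void main(String[] args) {
            long base = 16; // illustrative Object[] base offset
            // With compressed oops each reference slot is 4 bytes...
            System.out.println(elementOffset(base, 3, 4)); // 28
            // ...while uncompressed references take 8 bytes per slot.
            System.out.println(elementOffset(base, 3, 8)); // 40
        }
    }
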
diff -r cb364a90ef62 -r ff41fd80d562 graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java	Fri Aug 30 17:00:26 2013 +0200
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java	Fri Aug 30 17:06:25 2013 +0200
@@ -32,7 +32,6 @@
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.asm.*;
 import com.oracle.graal.asm.amd64.*;
-import com.oracle.graal.asm.amd64.AMD64Assembler.ConditionFlag;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.MoveOp;
@@ -117,34 +116,6 @@
         }
     }
 
-    public static class LoadCompressedPointer extends LoadOp {
-
-        private long base;
-        private int shift;
-        private int alignment;
-        @Alive({REG}) protected AllocatableValue heapBaseRegister;
-
-        public LoadCompressedPointer(Kind kind, AllocatableValue result, AllocatableValue heapBaseRegister, AMD64AddressValue address, LIRFrameState state, long base, int shift, int alignment) {
-            super(kind, result, address, state);
-            this.base = base;
-            this.shift = shift;
-            this.alignment = alignment;
-            this.heapBaseRegister = heapBaseRegister;
-            assert kind == Kind.Object || kind == Kind.Long;
-        }
-
-        @Override
-        public void emitMemAccess(AMD64MacroAssembler masm) {
-            Register resRegister = asRegister(result);
-            masm.movl(resRegister, address.toAddress());
-            if (kind == Kind.Object) {
-                decodePointer(masm, resRegister, asRegister(heapBaseRegister), base, shift, alignment);
-            } else {
-                decodeKlassPointer(masm, resRegister, asRegister(heapBaseRegister), base, shift, alignment);
-            }
-        }
-    }
-
     public static class LoadOp extends MemOp {
 
         @Def({REG}) protected AllocatableValue result;
@@ -188,44 +159,6 @@
         }
     }
 
-    public static class StoreCompressedPointer extends AMD64LIRInstruction {
-
-        protected final Kind kind;
-        private long base;
-        private int shift;
-        private int alignment;
-        @Temp({REG}) private AllocatableValue scratch;
-        @Alive({REG}) protected AllocatableValue input;
-        @Alive({COMPOSITE}) protected AMD64AddressValue address;
-        @State protected LIRFrameState state;
-
-        public StoreCompressedPointer(Kind kind, AMD64AddressValue address, AllocatableValue input, AllocatableValue scratch, LIRFrameState state, long base, int shift, int alignment) {
-            this.base = base;
-            this.shift = shift;
-            this.alignment = alignment;
-            this.scratch = scratch;
-            this.kind = kind;
-            this.address = address;
-            this.state = state;
-            this.input = input;
-            assert kind == Kind.Object || kind == Kind.Long;
-        }
-
-        @Override
-        public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler masm) {
-            masm.movq(asRegister(scratch), asRegister(input));
-            if (kind == Kind.Object) {
-                encodePointer(masm, asRegister(scratch), tasm.runtime.heapBaseRegister(), base, shift, alignment);
-            } else {
-                encodeKlassPointer(masm, asRegister(scratch), tasm.runtime.heapBaseRegister(), base, shift, alignment);
-            }
-            if (state != null) {
-                tasm.recordImplicitException(masm.codeBuffer.position(), state);
-            }
-            masm.movl(address.toAddress(), asRegister(scratch));
-        }
-    }
-
     public static class StoreOp extends MemOp {
 
         @Use({REG}) protected AllocatableValue input;
@@ -271,12 +204,10 @@
     public static class StoreConstantOp extends MemOp {
 
         protected final Constant input;
-        private final boolean compressible;
 
-        public StoreConstantOp(Kind kind, AMD64AddressValue address, Constant input, LIRFrameState state, boolean compressible) {
+        public StoreConstantOp(Kind kind, AMD64AddressValue address, Constant input, LIRFrameState state) {
             super(kind, address, state);
             this.input = input;
-            this.compressible = compressible;
         }
 
         @Override
@@ -295,11 +226,7 @@
                 break;
             case Long:
                 if (NumUtil.isInt(input.asLong())) {
-                    if (compressible) {
-                        masm.movl(address.toAddress(), (int) input.asLong());
-                    } else {
-                        masm.movslq(address.toAddress(), (int) input.asLong());
-                    }
+                    masm.movslq(address.toAddress(), (int) input.asLong());
                 } else {
                     throw GraalInternalError.shouldNotReachHere("Cannot store 64-bit constants to memory");
                 }
@@ -311,11 +238,7 @@
                 throw GraalInternalError.shouldNotReachHere("Cannot store 64-bit constants to memory");
             case Object:
                 if (input.isNull()) {
-                    if (compressible) {
-                        masm.movl(address.toAddress(), 0);
-                    } else {
-                        masm.movptr(address.toAddress(), 0);
-                    }
+                    masm.movptr(address.toAddress(), 0);
                 } else {
                     throw GraalInternalError.shouldNotReachHere("Cannot store 64-bit constants to memory");
                 }
@@ -410,38 +333,6 @@
         }
     }
 
-    @Opcode("CAS")
-    public static class CompareAndSwapCompressedOp extends AMD64LIRInstruction {
-
-        @Def protected AllocatableValue result;
-        @Alive({COMPOSITE}) protected AMD64AddressValue address;
-        @Alive protected AllocatableValue cmpValue;
-        @Alive protected AllocatableValue newValue;
-        @Temp({REG}) protected AllocatableValue scratch;
-
-        private long base;
-        private int shift;
-        private int alignment;
-
-        public CompareAndSwapCompressedOp(AllocatableValue result, AMD64AddressValue address, AllocatableValue cmpValue, AllocatableValue newValue, AllocatableValue scratch, long base, int shift,
-                        int alignment) {
-            this.base = base;
-            this.shift = shift;
-            this.alignment = alignment;
-            this.scratch = scratch;
-            this.result = result;
-            this.address = address;
-            this.cmpValue = cmpValue;
-            this.newValue = newValue;
-            assert cmpValue.getKind() == Kind.Object;
-        }
-
-        @Override
-        public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler masm) {
-            compareAndSwapCompressed(tasm, masm, result, address, cmpValue, newValue, scratch, base, shift, alignment);
-        }
-    }
-
     public static void move(TargetMethodAssembler tasm, AMD64MacroAssembler masm, Value result, Value input) {
         if (isRegister(input)) {
             if (isRegister(result)) {
@@ -650,85 +541,4 @@
             throw GraalInternalError.shouldNotReachHere();
         }
     }
-
-    protected static void compareAndSwapCompressed(TargetMethodAssembler tasm, AMD64MacroAssembler masm, AllocatableValue result, AMD64AddressValue address, AllocatableValue cmpValue,
-                    AllocatableValue newValue, AllocatableValue scratch, long base, int shift, int alignment) {
-        assert AMD64.rax.equals(asRegister(cmpValue)) && AMD64.rax.equals(asRegister(result));
-        final Register scratchRegister = asRegister(scratch);
-        final Register cmpRegister = asRegister(cmpValue);
-        final Register newRegister = asRegister(newValue);
-        encodePointer(masm, cmpRegister, tasm.runtime.heapBaseRegister(), base, shift, alignment);
-        masm.movq(scratchRegister, newRegister);
-        encodePointer(masm, scratchRegister, tasm.runtime.heapBaseRegister(), base, shift, alignment);
-        if (tasm.target.isMP) {
-            masm.lock();
-        }
-        masm.cmpxchgl(scratchRegister, address.toAddress());
-    }
-
-    private static void encodePointer(AMD64MacroAssembler masm, Register scratchRegister, Register heapBaseRegister, long base, int shift, int alignment) {
-        // If the base is zero, the uncompressed address has to be shifted right
-        // in order to be compressed.
-        if (base == 0) {
-            if (shift != 0) {
-                assert alignment == shift : "Encode algorithm is wrong";
-                masm.shrq(scratchRegister, alignment);
-            }
-        } else {
-            // Otherwise the heap base, which resides always in register 12, is subtracted
-            // followed by right shift.
-            masm.testq(scratchRegister, scratchRegister);
-            // If the stored reference is null, move the heap to scratch
-            // register and then calculate the compressed oop value.
-            masm.cmovq(ConditionFlag.Equal, scratchRegister, heapBaseRegister);
-            masm.subq(scratchRegister, heapBaseRegister);
-            masm.shrq(scratchRegister, alignment);
-        }
-    }
-
-    private static void decodePointer(AMD64MacroAssembler masm, Register resRegister, Register heapBaseRegister, long base, int shift, int alignment) {
-        // If the base is zero, the compressed address has to be shifted left
-        // in order to be uncompressed.
-        if (base == 0) {
-            if (shift != 0) {
-                assert alignment == shift : "Decode algorithm is wrong";
-                masm.shlq(resRegister, alignment);
-            }
-        } else {
-            Label done = new Label();
-            masm.shlq(resRegister, alignment);
-            masm.jccb(ConditionFlag.Equal, done);
-            // Otherwise the heap base is added to the shifted address.
-            masm.addq(resRegister, heapBaseRegister);
-            masm.bind(done);
-        }
-    }
-
-    private static void encodeKlassPointer(AMD64MacroAssembler masm, Register scratchRegister, Register heapBaseRegister, long base, int shift, int alignment) {
-        if (base != 0) {
-            masm.subq(scratchRegister, heapBaseRegister);
-        }
-        if (shift != 0) {
-            assert alignment == shift : "Encode algorithm is wrong";
-            masm.shrq(scratchRegister, alignment);
-        }
-    }
-
-    private static void decodeKlassPointer(AMD64MacroAssembler masm, Register resRegister, Register heapBaseRegister, long base, int shift, int alignment) {
-        if (shift != 0) {
-            assert alignment == shift : "Decode algorithm is wrong";
-            masm.shlq(resRegister, alignment);
-            if (base != 0) {
-                masm.addq(resRegister, heapBaseRegister);
-            }
-        } else {
-            assert base == 0 : "Sanity";
-        }
-    }
-
-    public static void decodeKlassPointer(AMD64MacroAssembler masm, Register register, Register heapBaseRegister, AMD64Address address, long narrowKlassBase, int narrowKlassShift,
-                    int logKlassAlignment) {
-        masm.movl(register, address);
-        decodeKlassPointer(masm, register, heapBaseRegister, narrowKlassBase, narrowKlassShift, logKlassAlignment);
-    }
 }
diff -r cb364a90ef62 -r ff41fd80d562 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/ConstantNode.java
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/ConstantNode.java	Fri Aug 30 17:00:26 2013 +0200
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/ConstantNode.java	Fri Aug 30 17:06:25 2013 +0200
@@ -77,6 +77,14 @@
     }
 
     /**
+     * Returns a node for a primitive constant.
+     */
+    public static ConstantNode forPrimitive(Constant constant, Graph graph) {
+        assert constant.getKind() != Kind.Object;
+        return forConstant(constant, null, graph);
+    }
+
+    /**
      * Returns a node for a double constant.
      *
      * @param d the double value for which to create the instruction
diff -r cb364a90ef62 -r ff41fd80d562 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/PhiNode.java
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/PhiNode.java	Fri Aug 30 17:00:26 2013 +0200
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/PhiNode.java	Fri Aug 30 17:06:25 2013 +0200
@@ -34,7 +34,7 @@
  * variable.
  */
 @NodeInfo(nameTemplate = "{p#type/s}Phi({i#values})")
-public final class PhiNode extends FloatingNode implements Canonicalizable, Node.IterableNodeType, GuardingNode {
+public class PhiNode extends FloatingNode implements Canonicalizable, Node.IterableNodeType, GuardingNode {
 
     public static enum PhiType {
         Value(null), // normal value phis
diff -r cb364a90ef62 -r ff41fd80d562 graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/BinaryNode.java
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/BinaryNode.java	Fri Aug 30 17:00:26 2013 +0200
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/BinaryNode.java	Fri Aug 30 17:06:25 2013 +0200
@@ -83,6 +83,57 @@
         }
     }
 
+    public static BinaryNode add(ValueNode x, ValueNode y) {
+        assert x.kind() == y.kind();
+        switch (x.kind()) {
+            case Byte:
+            case Char:
+            case Short:
+            case Int:
+            case Long:
+                return IntegerArithmeticNode.add(x, y);
+            case Float:
+            case Double:
+                return x.graph().unique(new FloatAddNode(x.kind(), x, y, false));
+            default:
+                throw GraalInternalError.shouldNotReachHere();
+        }
+    }
+
+    public static BinaryNode sub(ValueNode x, ValueNode y) {
+        assert x.kind() == y.kind();
+        switch (x.kind()) {
+            case Byte:
+            case Char:
+            case Short:
+            case Int:
+            case Long:
+                return IntegerArithmeticNode.sub(x, y);
+            case Float:
+            case Double:
+                return x.graph().unique(new FloatSubNode(x.kind(), x, y, false));
+            default:
+                throw GraalInternalError.shouldNotReachHere();
+        }
+    }
+
+    public static BinaryNode mul(ValueNode x, ValueNode y) {
+        assert x.kind() == y.kind();
+        switch (x.kind()) {
+            case Byte:
+            case Char:
+            case Short:
+            case Int:
+            case Long:
+                return IntegerArithmeticNode.mul(x, y);
+            case Float:
+            case Double:
+                return x.graph().unique(new FloatMulNode(x.kind(), x, y, false));
+            default:
+                throw GraalInternalError.shouldNotReachHere();
+        }
+    }
+
     public static boolean canTryReassociate(BinaryNode node) {
         return node instanceof IntegerAddNode || node instanceof IntegerSubNode || node instanceof IntegerMulNode || node instanceof AndNode || node instanceof OrNode || node instanceof XorNode;
     }
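
Taken together, the additions give kind-independent building blocks: BinaryNode.add/sub/mul dispatch to integer or float arithmetic nodes, while Constant.add/mul plus ConstantNode.forPrimitive allow folding when both inputs are constant. A hedged sketch of how a caller might combine them (hypothetical helper, not part of this patch):

    import com.oracle.graal.api.meta.Constant;
    import com.oracle.graal.api.meta.Kind;
    import com.oracle.graal.graph.Graph;
    import com.oracle.graal.nodes.ConstantNode;
    import com.oracle.graal.nodes.ValueNode;
    import com.oracle.graal.nodes.calc.BinaryNode;

    class AddOrFold {
        // Hypothetical helper: fold x + y when both are primitive constants,
        // otherwise emit the kind-appropriate arithmetic node.
        static ValueNode addOrFold(ValueNode x, ValueNode y, Graph graph) {
            if (x.isConstant() && y.isConstant() && x.kind() != Kind.Object) {
                Constant folded = Constant.add(x.asConstant(), y.asConstant());
                return ConstantNode.forPrimitive(folded, graph);
            }
            return BinaryNode.add(x, y);
        }
    }
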