Mercurial > hg > truffle
changeset 9441:ee3279c0f9a0
Merge
line wrap: on
line diff
--- a/graal/com.oracle.graal.amd64/src/com/oracle/graal/amd64/AMD64.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.amd64/src/com/oracle/graal/amd64/AMD64.java Tue Apr 30 19:50:12 2013 +0200 @@ -23,12 +23,12 @@ package com.oracle.graal.amd64; import static com.oracle.graal.api.code.MemoryBarriers.*; -import static com.oracle.graal.api.code.Register.RegisterFlag.*; +import static com.oracle.graal.api.code.Register.*; import java.nio.*; import com.oracle.graal.api.code.*; -import com.oracle.graal.api.code.Register.RegisterFlag; +import com.oracle.graal.api.code.Register.RegisterCategory; import com.oracle.graal.api.meta.*; /** @@ -36,26 +36,29 @@ */ public class AMD64 extends Architecture { + public static final RegisterCategory CPU = new RegisterCategory("CPU"); + public static final RegisterCategory XMM = new RegisterCategory("XMM"); + // @formatter:off // General purpose CPU registers - public static final Register rax = new Register(0, 0, 8, "rax", CPU, RegisterFlag.Byte); - public static final Register rcx = new Register(1, 1, 8, "rcx", CPU, RegisterFlag.Byte); - public static final Register rdx = new Register(2, 2, 8, "rdx", CPU, RegisterFlag.Byte); - public static final Register rbx = new Register(3, 3, 8, "rbx", CPU, RegisterFlag.Byte); - public static final Register rsp = new Register(4, 4, 8, "rsp", CPU, RegisterFlag.Byte); - public static final Register rbp = new Register(5, 5, 8, "rbp", CPU, RegisterFlag.Byte); - public static final Register rsi = new Register(6, 6, 8, "rsi", CPU, RegisterFlag.Byte); - public static final Register rdi = new Register(7, 7, 8, "rdi", CPU, RegisterFlag.Byte); + public static final Register rax = new Register(0, 0, "rax", CPU); + public static final Register rcx = new Register(1, 1, "rcx", CPU); + public static final Register rdx = new Register(2, 2, "rdx", CPU); + public static final Register rbx = new Register(3, 3, "rbx", CPU); + public static final Register rsp = new Register(4, 4, "rsp", CPU); + public 
static final Register rbp = new Register(5, 5, "rbp", CPU); + public static final Register rsi = new Register(6, 6, "rsi", CPU); + public static final Register rdi = new Register(7, 7, "rdi", CPU); - public static final Register r8 = new Register(8, 8, 8, "r8", CPU, RegisterFlag.Byte); - public static final Register r9 = new Register(9, 9, 8, "r9", CPU, RegisterFlag.Byte); - public static final Register r10 = new Register(10, 10, 8, "r10", CPU, RegisterFlag.Byte); - public static final Register r11 = new Register(11, 11, 8, "r11", CPU, RegisterFlag.Byte); - public static final Register r12 = new Register(12, 12, 8, "r12", CPU, RegisterFlag.Byte); - public static final Register r13 = new Register(13, 13, 8, "r13", CPU, RegisterFlag.Byte); - public static final Register r14 = new Register(14, 14, 8, "r14", CPU, RegisterFlag.Byte); - public static final Register r15 = new Register(15, 15, 8, "r15", CPU, RegisterFlag.Byte); + public static final Register r8 = new Register(8, 8, "r8", CPU); + public static final Register r9 = new Register(9, 9, "r9", CPU); + public static final Register r10 = new Register(10, 10, "r10", CPU); + public static final Register r11 = new Register(11, 11, "r11", CPU); + public static final Register r12 = new Register(12, 12, "r12", CPU); + public static final Register r13 = new Register(13, 13, "r13", CPU); + public static final Register r14 = new Register(14, 14, "r14", CPU); + public static final Register r15 = new Register(15, 15, "r15", CPU); public static final Register[] cpuRegisters = { rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi, @@ -63,23 +66,23 @@ }; // XMM registers - public static final Register xmm0 = new Register(16, 0, 8, "xmm0", FPU); - public static final Register xmm1 = new Register(17, 1, 8, "xmm1", FPU); - public static final Register xmm2 = new Register(18, 2, 8, "xmm2", FPU); - public static final Register xmm3 = new Register(19, 3, 8, "xmm3", FPU); - public static final Register xmm4 = new Register(20, 4, 8, "xmm4", FPU); - 
public static final Register xmm5 = new Register(21, 5, 8, "xmm5", FPU); - public static final Register xmm6 = new Register(22, 6, 8, "xmm6", FPU); - public static final Register xmm7 = new Register(23, 7, 8, "xmm7", FPU); + public static final Register xmm0 = new Register(16, 0, "xmm0", XMM); + public static final Register xmm1 = new Register(17, 1, "xmm1", XMM); + public static final Register xmm2 = new Register(18, 2, "xmm2", XMM); + public static final Register xmm3 = new Register(19, 3, "xmm3", XMM); + public static final Register xmm4 = new Register(20, 4, "xmm4", XMM); + public static final Register xmm5 = new Register(21, 5, "xmm5", XMM); + public static final Register xmm6 = new Register(22, 6, "xmm6", XMM); + public static final Register xmm7 = new Register(23, 7, "xmm7", XMM); - public static final Register xmm8 = new Register(24, 8, 8, "xmm8", FPU); - public static final Register xmm9 = new Register(25, 9, 8, "xmm9", FPU); - public static final Register xmm10 = new Register(26, 10, 8, "xmm10", FPU); - public static final Register xmm11 = new Register(27, 11, 8, "xmm11", FPU); - public static final Register xmm12 = new Register(28, 12, 8, "xmm12", FPU); - public static final Register xmm13 = new Register(29, 13, 8, "xmm13", FPU); - public static final Register xmm14 = new Register(30, 14, 8, "xmm14", FPU); - public static final Register xmm15 = new Register(31, 15, 8, "xmm15", FPU); + public static final Register xmm8 = new Register(24, 8, "xmm8", XMM); + public static final Register xmm9 = new Register(25, 9, "xmm9", XMM); + public static final Register xmm10 = new Register(26, 10, "xmm10", XMM); + public static final Register xmm11 = new Register(27, 11, "xmm11", XMM); + public static final Register xmm12 = new Register(28, 12, "xmm12", XMM); + public static final Register xmm13 = new Register(29, 13, "xmm13", XMM); + public static final Register xmm14 = new Register(30, 14, "xmm14", XMM); + public static final Register xmm15 = new Register(31, 15, 
"xmm15", XMM); public static final Register[] xmmRegisters = { xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, @@ -96,7 +99,7 @@ /** * Register used to construct an instruction-relative address. */ - public static final Register rip = new Register(32, -1, 0, "rip"); + public static final Register rip = new Register(32, -1, "rip", SPECIAL); public static final Register[] allRegisters = { rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi, @@ -124,54 +127,6 @@ } // @formatter:on - @Override - public int getMaxVectorLength(Kind kind) { - if (supportedAVXVersion > 0) { - switch (kind) { - case Boolean: - return 32; - case Byte: - return 32; - case Short: - return 16; - case Char: - return 16; - case Int: - return 8; - case Float: - return 8; - case Long: - return 4; - case Double: - return 4; - case Object: - return 4; - } - } else { - switch (kind) { - case Boolean: - return 16; - case Byte: - return 16; - case Short: - return 8; - case Char: - return 8; - case Int: - return 4; - case Float: - return 4; - case Long: - return 2; - case Double: - return 2; - case Object: - return 2; - } - } - return 1; - } - public int getSupportedSSEVersion() { return supportedSSEVersion; } @@ -179,4 +134,44 @@ public int getSupportedAVXVersion() { return supportedAVXVersion; } + + @Override + public boolean canStoreValue(RegisterCategory category, PlatformKind platformKind) { + if (!(platformKind instanceof Kind)) { + return false; + } + + Kind kind = (Kind) platformKind; + if (category == CPU) { + switch (kind) { + case Boolean: + case Byte: + case Char: + case Short: + case Int: + case Long: + case Object: + return true; + } + } else if (category == XMM) { + switch (kind) { + case Float: + case Double: + return true; + } + } + + return false; + } + + @Override + public PlatformKind getLargestStorableKind(RegisterCategory category) { + if (category == CPU) { + return Kind.Long; + } else if (category == XMM) { + return Kind.Double; + } else { + return Kind.Illegal; + } + } }
--- a/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/Architecture.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/Architecture.java Tue Apr 30 19:50:12 2013 +0200 @@ -24,6 +24,7 @@ import java.nio.*; +import com.oracle.graal.api.code.Register.RegisterCategory; import com.oracle.graal.api.meta.*; /** @@ -164,4 +165,40 @@ public int getMaxVectorLength(@SuppressWarnings("unused") Kind kind) { return 1; } + + /** + * Gets the size in bytes of the specified kind for this target. + * + * @param kind the kind for which to get the size + * + * @return the size in bytes of {@code kind} + */ + public int getSizeInBytes(PlatformKind kind) { + switch ((Kind) kind) { + case Boolean: + return 1; + case Byte: + return 1; + case Char: + return 2; + case Short: + return 2; + case Int: + return 4; + case Long: + return 8; + case Float: + return 4; + case Double: + return 8; + case Object: + return wordSize; + default: + return 0; + } + } + + public abstract boolean canStoreValue(RegisterCategory category, PlatformKind kind); + + public abstract PlatformKind getLargestStorableKind(RegisterCategory category); }
--- a/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/CalleeSaveLayout.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/CalleeSaveLayout.java Tue Apr 30 19:50:12 2013 +0200 @@ -24,6 +24,8 @@ import java.util.*; +import com.oracle.graal.api.meta.*; + /** * The callee save area (CSA) is a contiguous space in a stack frame used to save (and restore) the * values of the caller's registers. This class describes the layout of a CSA in terms of its @@ -69,7 +71,7 @@ * CSA * @param registers the registers that can be saved in the CSA */ - public CalleeSaveLayout(int frameOffsetToCSA, int size, int slotSize, Register... registers) { + public CalleeSaveLayout(Architecture architecture, int frameOffsetToCSA, int size, int slotSize, Register... registers) { this.frameOffsetToCSA = frameOffsetToCSA; assert slotSize == 0 || CodeUtil.isPowerOf2(slotSize); this.slotSize = slotSize; @@ -86,7 +88,8 @@ if (offset > maxOffset) { maxOffset = offset; } - offset += reg.spillSlotSize; + PlatformKind kind = architecture.getLargestStorableKind(reg.getRegisterCategory()); + offset += architecture.getSizeInBytes(kind); } if (size == -1) { this.size = offset; @@ -103,7 +106,8 @@ int index = offset / slotSize; regNumToIndex[reg.number] = index; indexToReg[index] = reg; - offset += reg.spillSlotSize; + PlatformKind kind = architecture.getLargestStorableKind(reg.getRegisterCategory()); + offset += architecture.getSizeInBytes(kind); } }
--- a/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/Register.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/Register.java Tue Apr 30 19:50:12 2013 +0200 @@ -34,18 +34,20 @@ private static final long serialVersionUID = -7213269157816016300L; + public static final RegisterCategory SPECIAL = new RegisterCategory("SPECIAL"); + /** * Invalid register. */ - public static final Register None = new Register(-1, -1, 0, "noreg"); + public static final Register None = new Register(-1, -1, "noreg", SPECIAL); /** * Frame pointer of the current method. All spill slots and outgoing stack-based arguments are * addressed relative to this register. */ - public static final Register Frame = new Register(-2, -2, 0, "framereg", RegisterFlag.CPU); + public static final Register Frame = new Register(-2, -2, "framereg", SPECIAL); - public static final Register CallerFrame = new Register(-3, -3, 0, "callerframereg", RegisterFlag.CPU); + public static final Register CallerFrame = new Register(-3, -3, "callerframereg", SPECIAL); /** * The identifier for this register that is unique across all the registers in a @@ -72,42 +74,32 @@ } /** - * The size of the stack slot used to spill the value of this register. + * A platform specific register category that describes which values can be stored in a + * register. */ - public final int spillSlotSize; - - /** - * The set of {@link RegisterFlag} values associated with this register. - */ - private final int flags; + private final RegisterCategory registerCategory; /** * An array of {@link RegisterValue} objects, for this register, with one entry per {@link Kind} * , indexed by {@link Kind#ordinal}. */ - private final RegisterValue[] values; + private final HashMap<PlatformKind, RegisterValue> values; /** - * Attributes that characterize a register in a useful way. - * + * A platform specific register type that describes which values can be stored in a register. 
*/ - public enum RegisterFlag { - /** - * Denotes an integral (i.e. non floating point) register. - */ - CPU, + public static class RegisterCategory { + + private String name; - /** - * Denotes a register whose lowest order byte can be addressed separately. - */ - Byte, + public RegisterCategory(String name) { + this.name = name; + } - /** - * Denotes a floating point register. - */ - FPU; - - public final int mask = 1 << (ordinal() + 1); + @Override + public String toString() { + return name; + } } /** @@ -115,33 +107,19 @@ * * @param number unique identifier for the register * @param encoding the target machine encoding for the register - * @param spillSlotSize the size of the stack slot used to spill the value of the register * @param name the mnemonic name for the register - * @param flags the set of {@link RegisterFlag} values for the register + * @param registerCategory the register category */ - public Register(int number, int encoding, int spillSlotSize, String name, RegisterFlag... flags) { + public Register(int number, int encoding, String name, RegisterCategory registerCategory) { this.number = number; this.name = name; - this.spillSlotSize = spillSlotSize; - this.flags = createMask(flags); + this.registerCategory = registerCategory; this.encoding = encoding; - - values = new RegisterValue[Kind.values().length]; - for (Kind kind : Kind.values()) { - values[kind.ordinal()] = new RegisterValue(kind, this); - } + this.values = new HashMap<>(); } - private static int createMask(RegisterFlag... 
flags) { - int result = 0; - for (RegisterFlag f : flags) { - result |= f.mask; - } - return result; - } - - public boolean isSet(RegisterFlag f) { - return (flags & f.mask) != 0; + public RegisterCategory getRegisterCategory() { + return registerCategory; } /** @@ -150,8 +128,14 @@ * @param kind the specified kind * @return the {@link RegisterValue} */ - public RegisterValue asValue(Kind kind) { - return values[kind.ordinal()]; + public RegisterValue asValue(PlatformKind kind) { + if (values.containsKey(kind)) { + return values.get(kind); + } else { + RegisterValue ret = new RegisterValue(kind, this); + values.put(kind, ret); + return ret; + } } /** @@ -173,29 +157,6 @@ } /** - * Determines if this a floating point register. - */ - public boolean isFpu() { - return isSet(RegisterFlag.FPU); - } - - /** - * Determines if this a general purpose register. - */ - public boolean isCpu() { - return isSet(RegisterFlag.CPU); - } - - /** - * Determines if this register has the {@link RegisterFlag#Byte} attribute set. - * - * @return {@code true} iff this register has the {@link RegisterFlag#Byte} attribute set. - */ - public boolean isByte() { - return isSet(RegisterFlag.Byte); - } - - /** * Gets a hash code for this register. * * @return the value of {@link #number} @@ -206,27 +167,6 @@ } /** - * Categorizes a set of registers by {@link RegisterFlag}. 
- * - * @param registers a list of registers to be categorized - * @return a map from each {@link RegisterFlag} constant to the list of registers for which the - * flag is {@linkplain #isSet(RegisterFlag) set} - */ - public static EnumMap<RegisterFlag, Register[]> categorize(Register[] registers) { - EnumMap<RegisterFlag, Register[]> result = new EnumMap<>(RegisterFlag.class); - for (RegisterFlag flag : RegisterFlag.values()) { - ArrayList<Register> list = new ArrayList<>(); - for (Register r : registers) { - if (r.isSet(flag)) { - list.add(r); - } - } - result.put(flag, list.toArray(new Register[list.size()])); - } - return result; - } - - /** * Gets the maximum register {@linkplain #number number} in a given set of registers. * * @param registers the set of registers to process
--- a/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/RegisterConfig.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/RegisterConfig.java Tue Apr 30 19:50:12 2013 +0200 @@ -22,10 +22,7 @@ */ package com.oracle.graal.api.code; -import java.util.*; - -import com.oracle.graal.api.code.CallingConvention.*; -import com.oracle.graal.api.code.Register.*; +import com.oracle.graal.api.code.CallingConvention.Type; import com.oracle.graal.api.meta.*; /** @@ -60,12 +57,11 @@ * given calling convention. * * @param type the type of calling convention - * @param flag specifies whether registers for {@linkplain RegisterFlag#CPU integral} or - * {@linkplain RegisterFlag#FPU floating point} parameters are being requested + * @param kind specifies what kind of registers is being requested * @return the ordered set of registers that may be used to pass parameters in a call conforming * to {@code type} */ - Register[] getCallingConventionRegisters(Type type, RegisterFlag flag); + Register[] getCallingConventionRegisters(Type type, Kind kind); /** * Gets the set of registers that can be used by the register allocator. @@ -73,16 +69,10 @@ Register[] getAllocatableRegisters(); /** - * Gets the set of registers that can be used by the register allocator, - * {@linkplain Register#categorize(Register[]) categorized} by register - * {@linkplain RegisterFlag flags}. - * - * @return a map from each {@link RegisterFlag} constant to the list of - * {@linkplain #getAllocatableRegisters() allocatable} registers for which the flag is - * set - * + * Gets the set of registers that can be used by the register allocator for a value of a + * particular kind. */ - EnumMap<RegisterFlag, Register[]> getCategorizedAllocatableRegisters(); + Register[] getAllocatableRegisters(PlatformKind kind); /** * Gets the registers whose values must be preserved by a method across any call it makes. 
@@ -102,7 +92,6 @@ * * @return an array where an element at index i holds the attributes of the register whose * number is i - * @see Register#categorize(Register[]) */ RegisterAttributes[] getAttributesMap();
--- a/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/RegisterValue.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/RegisterValue.java Tue Apr 30 19:50:12 2013 +0200 @@ -27,8 +27,8 @@ /** * Denotes a register that stores a value of a fixed kind. There is exactly one (canonical) instance * of {@link RegisterValue} for each ({@link Register}, {@link Kind}) pair. Use - * {@link Register#asValue(Kind)} to retrieve the canonical {@link RegisterValue} instance for a - * given (register,kind) pair. + * {@link Register#asValue(PlatformKind)} to retrieve the canonical {@link RegisterValue} instance + * for a given (register,kind) pair. */ public final class RegisterValue extends AllocatableValue { @@ -39,14 +39,14 @@ /** * Should only be called from {@link Register#Register} to ensure canonicalization. */ - protected RegisterValue(Kind kind, Register register) { + protected RegisterValue(PlatformKind kind, Register register) { super(kind); this.reg = register; } @Override public int hashCode() { - return (getRegister().number << 4) ^ getKind().ordinal(); + return (getRegister().number << 4) ^ getPlatformKind().hashCode(); } @Override
--- a/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/StackSlot.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/StackSlot.java Tue Apr 30 19:50:12 2013 +0200 @@ -22,7 +22,7 @@ */ package com.oracle.graal.api.code; -import static com.oracle.graal.api.meta.Kind.*; +import java.util.*; import com.oracle.graal.api.meta.*; @@ -46,25 +46,20 @@ * @param addFrameSize Specifies if the offset is relative to the stack pointer, or the * beginning of the frame (stack pointer + total frame size). */ - public static StackSlot get(Kind kind, int offset, boolean addFrameSize) { - assert kind.getStackKind() == kind; + public static StackSlot get(PlatformKind kind, int offset, boolean addFrameSize) { assert addFrameSize || offset >= 0; if (offset % CACHE_GRANULARITY == 0) { - StackSlot[][] cache; - int index = offset / CACHE_GRANULARITY; + StackSlot slot; if (!addFrameSize) { - cache = OUT_CACHE; + slot = OUT_CACHE.lookup(kind, offset); } else if (offset >= 0) { - cache = IN_CACHE; + slot = IN_CACHE.lookup(kind, offset); } else { - cache = SPILL_CACHE; - index = -index; + slot = SPILL_CACHE.lookup(kind, offset); } - StackSlot[] slots = cache[kind.ordinal()]; - if (index < slots.length) { - StackSlot slot = slots[index]; - assert slot.getKind() == kind && slot.offset == offset && slot.addFrameSize == addFrameSize; + if (slot != null) { + assert slot.getPlatformKind().equals(kind) && slot.offset == offset && slot.addFrameSize == addFrameSize; return slot; } } @@ -72,10 +67,10 @@ } /** - * Private constructor to enforce use of {@link #get(Kind, int, boolean)} so that a cache can be - * used. + * Private constructor to enforce use of {@link #get(PlatformKind, int, boolean)} so that a + * cache can be used. 
*/ - private StackSlot(Kind kind, int offset, boolean addFrameSize) { + private StackSlot(PlatformKind kind, int offset, boolean addFrameSize) { super(kind); this.offset = offset; this.addFrameSize = addFrameSize; @@ -107,7 +102,7 @@ @Override public int hashCode() { - return getKind().ordinal() ^ (offset << 4) ^ (addFrameSize ? 15 : 0); + return getPlatformKind().hashCode() ^ (offset << 4) ^ (addFrameSize ? 15 : 0); } @Override @@ -117,7 +112,7 @@ } if (o instanceof StackSlot) { StackSlot l = (StackSlot) o; - return l.getKind() == getKind() && l.offset == offset && l.addFrameSize == addFrameSize; + return l.getPlatformKind().equals(getPlatformKind()) && l.offset == offset && l.addFrameSize == addFrameSize; } return false; } @@ -139,7 +134,7 @@ public StackSlot asOutArg() { assert offset >= 0; if (addFrameSize) { - return get(getKind(), offset, false); + return get(getPlatformKind(), offset, false); } return this; } @@ -150,28 +145,48 @@ public StackSlot asInArg() { assert offset >= 0; if (!addFrameSize) { - return get(getKind(), offset, true); + return get(getPlatformKind(), offset, true); } return this; } - private static final int CACHE_GRANULARITY = 8; private static final int SPILL_CACHE_PER_KIND_SIZE = 100; private static final int PARAM_CACHE_PER_KIND_SIZE = 10; + private static final int CACHE_GRANULARITY = 8; - private static final StackSlot[][] SPILL_CACHE = makeCache(SPILL_CACHE_PER_KIND_SIZE, -1, true); - private static final StackSlot[][] IN_CACHE = makeCache(PARAM_CACHE_PER_KIND_SIZE, 1, true); - private static final StackSlot[][] OUT_CACHE = makeCache(PARAM_CACHE_PER_KIND_SIZE, 1, false); + private static class Cache extends HashMap<PlatformKind, StackSlot[]> { + + private static final long serialVersionUID = 4424132866289682843L; + + private final int cachePerKindSize; + private final int sign; + private final boolean addFrameSize; + + Cache(int cachePerKindSize, int sign, boolean addFrameSize) { + this.cachePerKindSize = cachePerKindSize; + 
this.sign = sign; + this.addFrameSize = addFrameSize; + } - private static StackSlot[][] makeCache(int cachePerKindSize, int sign, boolean addFrameSize) { - StackSlot[][] cache = new StackSlot[Kind.values().length][]; - for (Kind kind : new Kind[]{Illegal, Int, Long, Float, Double, Object}) { - StackSlot[] slots = new StackSlot[cachePerKindSize]; - for (int i = 0; i < cachePerKindSize; i++) { - slots[i] = new StackSlot(kind, sign * i * CACHE_GRANULARITY, addFrameSize); + StackSlot lookup(PlatformKind kind, int offset) { + int index = sign * offset / CACHE_GRANULARITY; + StackSlot[] slots = this.get(kind); + if (slots == null) { + slots = new StackSlot[cachePerKindSize]; + for (int i = 0; i < cachePerKindSize; i++) { + slots[i] = new StackSlot(kind, sign * i * CACHE_GRANULARITY, addFrameSize); + } + this.put(kind, slots); } - cache[kind.ordinal()] = slots; + if (index < slots.length) { + return slots[index]; + } else { + return null; + } } - return cache; } + + private static final Cache SPILL_CACHE = new Cache(SPILL_CACHE_PER_KIND_SIZE, -1, true); + private static final Cache IN_CACHE = new Cache(PARAM_CACHE_PER_KIND_SIZE, 1, true); + private static final Cache OUT_CACHE = new Cache(PARAM_CACHE_PER_KIND_SIZE, 1, false); }
--- a/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/TargetDescription.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/TargetDescription.java Tue Apr 30 19:50:12 2013 +0200 @@ -83,39 +83,6 @@ } /** - * Gets the size in bytes of the specified kind for this target. - * - * @param kind the kind for which to get the size - * @return the size in bytes of {@code kind} - */ - public int sizeInBytes(Kind kind) { - // Checkstyle: stop - switch (kind) { - case Boolean: - return 1; - case Byte: - return 1; - case Char: - return 2; - case Short: - return 2; - case Int: - return 4; - case Long: - return 8; - case Float: - return 4; - case Double: - return 8; - case Object: - return wordSize; - default: - return 0; - } - // Checkstyle: resume - } - - /** * Aligns the given frame size (without return instruction pointer) to the stack alignment size * and return the aligned size (without return instruction pointer). *
--- a/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/VirtualObject.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.api.code/src/com/oracle/graal/api/code/VirtualObject.java Tue Apr 30 19:50:12 2013 +0200 @@ -168,7 +168,7 @@ @Override public int hashCode() { - return getKind().ordinal() + type.hashCode(); + return getPlatformKind().hashCode() + type.hashCode(); } @Override
--- a/graal/com.oracle.graal.api.meta/src/com/oracle/graal/api/meta/AllocatableValue.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.api.meta/src/com/oracle/graal/api/meta/AllocatableValue.java Tue Apr 30 19:50:12 2013 +0200 @@ -32,8 +32,7 @@ public static final AllocatableValue[] NONE = {}; - public AllocatableValue(Kind kind) { - super(kind); + public AllocatableValue(PlatformKind platformKind) { + super(platformKind); } - }
--- a/graal/com.oracle.graal.api.meta/src/com/oracle/graal/api/meta/Kind.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.api.meta/src/com/oracle/graal/api/meta/Kind.java Tue Apr 30 19:50:12 2013 +0200 @@ -29,7 +29,7 @@ * {@link Kind#Int} for {@code int} and {@link Kind#Object} for all object types. A kind has a * single character short name, a Java name, and a set of flags further describing its behavior. */ -public enum Kind { +public enum Kind implements PlatformKind { /** The primitive boolean kind, represented as an int on the stack. */ Boolean('z', "boolean", true, java.lang.Boolean.TYPE, java.lang.Boolean.class),
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.api.meta/src/com/oracle/graal/api/meta/PlatformKind.java Tue Apr 30 19:50:12 2013 +0200 @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.api.meta; + +/** + * Represents a platform-specific low-level type for values. + */ +public interface PlatformKind { + + String name(); +}
--- a/graal/com.oracle.graal.api.meta/src/com/oracle/graal/api/meta/Value.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.api.meta/src/com/oracle/graal/api/meta/Value.java Tue Apr 30 19:50:12 2013 +0200 @@ -41,14 +41,20 @@ }; private final Kind kind; + private final PlatformKind platformKind; /** * Initializes a new value of the specified kind. * - * @param kind the kind + * @param platformKind the kind */ - protected Value(Kind kind) { - this.kind = kind; + protected Value(PlatformKind platformKind) { + this.platformKind = platformKind; + if (platformKind instanceof Kind) { + this.kind = (Kind) platformKind; + } else { + this.kind = Kind.Illegal; + } } /** @@ -65,4 +71,11 @@ public final Kind getKind() { return kind; } + + /** + * Returns the platform specific kind used to store this value. + */ + public final PlatformKind getPlatformKind() { + return platformKind; + } }
--- a/graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64Assembler.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64Assembler.java Tue Apr 30 19:50:12 2013 +0200 @@ -380,7 +380,7 @@ } public final void addsd(Register dst, Register src) { - assert dst.isFpu() && src.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM && src.getRegisterCategory() == AMD64.XMM; emitByte(0xF2); int encode = prefixAndEncode(dst.encoding, src.encoding); emitByte(0x0F); @@ -389,7 +389,7 @@ } public final void addsd(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0xF2); prefix(src, dst); emitByte(0x0F); @@ -398,7 +398,7 @@ } public final void addss(Register dst, Register src) { - assert dst.isFpu() && src.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM && src.getRegisterCategory() == AMD64.XMM; emitByte(0xF3); int encode = prefixAndEncode(dst.encoding, src.encoding); emitByte(0x0F); @@ -407,7 +407,7 @@ } public final void addss(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0xF3); prefix(src, dst); emitByte(0x0F); @@ -540,7 +540,7 @@ } public final void cvtsd2ss(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0xF2); prefix(src, dst); emitByte(0x0F); @@ -549,8 +549,8 @@ } public final void cvtsd2ss(Register dst, Register src) { - assert dst.isFpu(); - assert src.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; + assert src.getRegisterCategory() == AMD64.XMM; emitByte(0xF2); int encode = prefixAndEncode(dst.encoding, src.encoding); emitByte(0x0F); @@ -559,7 +559,7 @@ } public final void cvtsi2sdl(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0xF2); prefix(src, dst); emitByte(0x0F); @@ -568,7 +568,7 @@ } public final void 
cvtsi2sdl(Register dst, Register src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0xF2); int encode = prefixAndEncode(dst.encoding, src.encoding); emitByte(0x0F); @@ -577,7 +577,7 @@ } public final void cvtsi2ssl(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0xF3); prefix(src, dst); emitByte(0x0F); @@ -586,7 +586,7 @@ } public final void cvtsi2ssl(Register dst, Register src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0xF3); int encode = prefixAndEncode(dst.encoding, src.encoding); emitByte(0x0F); @@ -595,7 +595,7 @@ } public final void cvtss2sd(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0xF3); prefix(src, dst); emitByte(0x0F); @@ -604,8 +604,8 @@ } public final void cvtss2sd(Register dst, Register src) { - assert dst.isFpu(); - assert src.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; + assert src.getRegisterCategory() == AMD64.XMM; emitByte(0xF3); int encode = prefixAndEncode(dst.encoding, src.encoding); emitByte(0x0F); @@ -622,7 +622,7 @@ } public final void cvttsd2sil(Register dst, Register src) { - assert src.isFpu(); + assert src.getRegisterCategory() == AMD64.XMM; emitByte(0xF2); int encode = prefixAndEncode(dst.encoding, src.encoding); emitByte(0x0F); @@ -639,7 +639,7 @@ } public final void cvttss2sil(Register dst, Register src) { - assert src.isFpu(); + assert src.getRegisterCategory() == AMD64.XMM; emitByte(0xF3); int encode = prefixAndEncode(dst.encoding, src.encoding); emitByte(0x0F); @@ -654,7 +654,7 @@ } public final void divsd(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0xF2); prefix(src, dst); emitByte(0x0F); @@ -663,8 +663,8 @@ } public final void divsd(Register dst, Register src) { - assert dst.isFpu(); - assert src.isFpu(); + assert 
dst.getRegisterCategory() == AMD64.XMM; + assert src.getRegisterCategory() == AMD64.XMM; emitByte(0xF2); int encode = prefixAndEncode(dst.encoding, src.encoding); emitByte(0x0F); @@ -673,7 +673,7 @@ } public final void divss(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0xF3); prefix(src, dst); emitByte(0x0F); @@ -682,8 +682,8 @@ } public final void divss(Register dst, Register src) { - assert dst.isFpu(); - assert src.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; + assert src.getRegisterCategory() == AMD64.XMM; emitByte(0xF3); int encode = prefixAndEncode(dst.encoding, src.encoding); emitByte(0x0F); @@ -862,8 +862,8 @@ } public final void movapd(Register dst, Register src) { - assert dst.isFpu(); - assert src.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; + assert src.getRegisterCategory() == AMD64.XMM; int dstenc = dst.encoding; int srcenc = src.encoding; emitByte(0x66); @@ -887,8 +887,8 @@ } public final void movaps(Register dst, Register src) { - assert dst.isFpu(); - assert src.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; + assert src.getRegisterCategory() == AMD64.XMM; int dstenc = dst.encoding; int srcenc = src.encoding; if (dstenc < 8) { @@ -918,22 +918,22 @@ } public final void movb(AMD64Address dst, Register src) { - assert src.isByte() : "must have byte register"; + assert src.getRegisterCategory() == AMD64.CPU : "must have byte register"; prefix(dst, src); // , true) emitByte(0x88); emitOperandHelper(src, dst); } public final void movdl(Register dst, Register src) { - if (dst.isFpu()) { - assert !src.isFpu() : "does this hold?"; + if (dst.getRegisterCategory() == AMD64.XMM) { + assert src.getRegisterCategory() != AMD64.XMM : "does this hold?"; emitByte(0x66); int encode = prefixAndEncode(dst.encoding, src.encoding); emitByte(0x0F); emitByte(0x6E); emitByte(0xC0 | encode); - } else if (src.isFpu()) { - assert !dst.isFpu(); + } else if 
(src.getRegisterCategory() == AMD64.XMM) { + assert dst.getRegisterCategory() != AMD64.XMM; emitByte(0x66); // swap src/dst to get correct prefix int encode = prefixAndEncode(src.encoding, dst.encoding); @@ -981,7 +981,7 @@ * {@link AMD64MacroAssembler#movflt(Register, Register)}. */ public final void movlpd(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0x66); prefix(src, dst); emitByte(0x0F); @@ -990,7 +990,7 @@ } public final void movq(Register dst, AMD64Address src) { - if (dst.isFpu()) { + if (dst.getRegisterCategory() == AMD64.XMM) { emitByte(0xF3); prefixq(src, dst); emitByte(0x0F); @@ -1010,7 +1010,7 @@ } public final void movq(AMD64Address dst, Register src) { - if (src.isFpu()) { + if (src.getRegisterCategory() == AMD64.XMM) { emitByte(0x66); prefixq(dst, src); emitByte(0x0F); @@ -1038,8 +1038,8 @@ } public final void movsd(Register dst, Register src) { - assert dst.isFpu(); - assert src.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; + assert src.getRegisterCategory() == AMD64.XMM; emitByte(0xF2); int encode = prefixAndEncode(dst.encoding, src.encoding); emitByte(0x0F); @@ -1048,7 +1048,7 @@ } public final void movsd(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0xF2); prefix(src, dst); emitByte(0x0F); @@ -1057,7 +1057,7 @@ } public final void movsd(AMD64Address dst, Register src) { - assert src.isFpu(); + assert src.getRegisterCategory() == AMD64.XMM; emitByte(0xF2); prefix(dst, src); emitByte(0x0F); @@ -1066,8 +1066,8 @@ } public final void movss(Register dst, Register src) { - assert dst.isFpu(); - assert src.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; + assert src.getRegisterCategory() == AMD64.XMM; emitByte(0xF3); int encode = prefixAndEncode(dst.encoding, src.encoding); emitByte(0x0F); @@ -1076,7 +1076,7 @@ } public final void movss(Register dst, AMD64Address src) { - assert dst.isFpu(); + 
assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0xF3); prefix(src, dst); emitByte(0x0F); @@ -1085,7 +1085,7 @@ } public final void movss(AMD64Address dst, Register src) { - assert src.isFpu(); + assert src.getRegisterCategory() == AMD64.XMM; emitByte(0xF3); prefix(dst, src); emitByte(0x0F); @@ -1137,7 +1137,7 @@ } public final void mulsd(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0xF2); prefix(src, dst); emitByte(0x0F); @@ -1146,8 +1146,8 @@ } public final void mulsd(Register dst, Register src) { - assert dst.isFpu(); - assert src.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; + assert src.getRegisterCategory() == AMD64.XMM; emitByte(0xF2); int encode = prefixAndEncode(dst.encoding, src.encoding); @@ -1157,7 +1157,7 @@ } public final void mulss(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0xF3); prefix(src, dst); @@ -1167,8 +1167,8 @@ } public final void mulss(Register dst, Register src) { - assert dst.isFpu(); - assert src.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; + assert src.getRegisterCategory() == AMD64.XMM; emitByte(0xF3); int encode = prefixAndEncode(dst.encoding, src.encoding); emitByte(0x0F); @@ -1512,8 +1512,8 @@ } public final void sqrtsd(Register dst, Register src) { - assert dst.isFpu(); - assert src.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; + assert src.getRegisterCategory() == AMD64.XMM; // HMM Table D-1 says sse2 // assert is64 || target.supportsSSE(); emitByte(0xF2); @@ -1545,8 +1545,8 @@ } public final void subsd(Register dst, Register src) { - assert dst.isFpu(); - assert src.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; + assert src.getRegisterCategory() == AMD64.XMM; emitByte(0xF2); int encode = prefixAndEncode(dst.encoding, src.encoding); emitByte(0x0F); @@ -1555,7 +1555,7 @@ } public final void subsd(Register dst, AMD64Address src) { - assert 
dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0xF2); prefix(src, dst); @@ -1565,8 +1565,8 @@ } public final void subss(Register dst, Register src) { - assert dst.isFpu(); - assert src.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; + assert src.getRegisterCategory() == AMD64.XMM; emitByte(0xF3); int encode = prefixAndEncode(dst.encoding, src.encoding); emitByte(0x0F); @@ -1575,7 +1575,7 @@ } public final void subss(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0xF3); prefix(src, dst); @@ -1611,20 +1611,20 @@ } public final void ucomisd(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0x66); ucomiss(dst, src); } public final void ucomisd(Register dst, Register src) { - assert dst.isFpu(); - assert src.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; + assert src.getRegisterCategory() == AMD64.XMM; emitByte(0x66); ucomiss(dst, src); } public final void ucomiss(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; prefix(src, dst); emitByte(0x0F); @@ -1633,8 +1633,8 @@ } public final void ucomiss(Register dst, Register src) { - assert dst.isFpu(); - assert src.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; + assert src.getRegisterCategory() == AMD64.XMM; int encode = prefixAndEncode(dst.encoding, src.encoding); emitByte(0x0F); emitByte(0x2E); @@ -1668,7 +1668,7 @@ } public final void andps(Register dst, Register src) { - assert dst.isFpu() && src.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM && src.getRegisterCategory() == AMD64.XMM; int encode = prefixAndEncode(dst.encoding, src.encoding); emitByte(0x0F); emitByte(0x54); @@ -1676,7 +1676,7 @@ } public final void andps(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; prefix(src, dst); emitByte(0x0F); emitByte(0x54); 
@@ -1694,7 +1694,7 @@ } public final void orps(Register dst, Register src) { - assert dst.isFpu() && src.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM && src.getRegisterCategory() == AMD64.XMM; int encode = prefixAndEncode(dst.encoding, src.encoding); emitByte(0x0F); emitByte(0x56); @@ -1702,7 +1702,7 @@ } public final void orps(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; prefix(src, dst); emitByte(0x0F); emitByte(0x56); @@ -1720,7 +1720,7 @@ } public final void xorps(Register dst, Register src) { - assert dst.isFpu() && src.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM && src.getRegisterCategory() == AMD64.XMM; int encode = prefixAndEncode(dst.encoding, src.encoding); emitByte(0x0F); emitByte(0x57); @@ -1728,7 +1728,7 @@ } public final void xorps(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; prefix(src, dst); emitByte(0x0F); emitByte(0x57); @@ -2014,7 +2014,7 @@ } public final void cvtsi2sdq(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0xF2); prefixq(src, dst); emitByte(0x0F); @@ -2023,7 +2023,7 @@ } public final void cvtsi2sdq(Register dst, Register src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0xF2); int encode = prefixqAndEncode(dst.encoding, src.encoding); emitByte(0x0F); @@ -2032,7 +2032,7 @@ } public final void cvtsi2ssq(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0xF3); prefixq(src, dst); emitByte(0x0F); @@ -2041,7 +2041,7 @@ } public final void cvtsi2ssq(Register dst, Register src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; emitByte(0xF3); int encode = prefixqAndEncode(dst.encoding, src.encoding); emitByte(0x0F); @@ -2058,7 +2058,7 @@ } public final void cvttsd2siq(Register dst, Register src) { - assert 
src.isFpu(); + assert src.getRegisterCategory() == AMD64.XMM; emitByte(0xF2); int encode = prefixqAndEncode(dst.encoding, src.encoding); emitByte(0x0F); @@ -2075,7 +2075,7 @@ } public final void cvttss2siq(Register dst, Register src) { - assert src.isFpu(); + assert src.getRegisterCategory() == AMD64.XMM; emitByte(0xF3); int encode = prefixqAndEncode(dst.encoding, src.encoding); emitByte(0x0F); @@ -2154,13 +2154,12 @@ // table D-1 says MMX/SSE2 emitByte(0x66); - if (dst.isFpu()) { - assert dst.isFpu(); + if (dst.getRegisterCategory() == AMD64.XMM) { int encode = prefixqAndEncode(dst.encoding, src.encoding); emitByte(0x0F); emitByte(0x6E); emitByte(0xC0 | encode); - } else if (src.isFpu()) { + } else if (src.getRegisterCategory() == AMD64.XMM) { // swap src/dst to get correct prefix int encode = prefixqAndEncode(src.encoding, dst.encoding);
--- a/graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64MacroAssembler.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64MacroAssembler.java Tue Apr 30 19:50:12 2013 +0200 @@ -166,21 +166,12 @@ } } - public final void signExtendByte(Register reg) { - if (reg.isByte()) { - movsxb(reg, reg); - } else { - shll(reg, 24); - sarl(reg, 24); - } - } - public final void signExtendShort(Register reg) { movsxw(reg, reg); } public final void movflt(Register dst, Register src) { - assert dst.isFpu() && src.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM && src.getRegisterCategory() == AMD64.XMM; if (UseXmmRegToRegMoveAll) { movaps(dst, src); } else { @@ -189,17 +180,17 @@ } public final void movflt(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; movss(dst, src); } public final void movflt(AMD64Address dst, Register src) { - assert src.isFpu(); + assert src.getRegisterCategory() == AMD64.XMM; movss(dst, src); } public final void movdbl(Register dst, Register src) { - assert dst.isFpu() && src.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM && src.getRegisterCategory() == AMD64.XMM; if (UseXmmRegToRegMoveAll) { movapd(dst, src); } else { @@ -208,7 +199,7 @@ } public final void movdbl(Register dst, AMD64Address src) { - assert dst.isFpu(); + assert dst.getRegisterCategory() == AMD64.XMM; if (UseXmmLoadAndClearUpper) { movsd(dst, src); } else { @@ -227,7 +218,7 @@ } public final void flog(Register dest, Register value, boolean base10) { - assert dest.isFpu() && value.isFpu(); + assert dest.getRegisterCategory() == AMD64.XMM && value.getRegisterCategory() == AMD64.XMM; AMD64Address tmp = new AMD64Address(AMD64.rsp); if (base10) { @@ -262,7 +253,7 @@ } private AMD64Address trigPrologue(Register value) { - assert value.isFpu(); + assert value.getRegisterCategory() == AMD64.XMM; AMD64Address tmp = new AMD64Address(AMD64.rsp); 
subq(AMD64.rsp, 8); movsd(tmp, value); @@ -271,7 +262,7 @@ } private void trigEpilogue(Register dest, AMD64Address tmp) { - assert dest.isFpu(); + assert dest.getRegisterCategory() == AMD64.XMM; fstp(tmp); movsd(dest, tmp); addq(AMD64.rsp, 8);
--- a/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java Tue Apr 30 19:50:12 2013 +0200 @@ -87,11 +87,11 @@ private static final RegisterValue RDX_L = AMD64.rdx.asValue(Kind.Long); private static final RegisterValue RCX_I = AMD64.rcx.asValue(Kind.Int); - public static class AMD64SpillMoveFactory implements LIR.SpillMoveFactory { + private class AMD64SpillMoveFactory implements LIR.SpillMoveFactory { @Override public LIRInstruction createMove(AllocatableValue result, Value input) { - return AMD64LIRGenerator.createMove(result, input); + return AMD64LIRGenerator.this.createMove(result, input); } } @@ -143,7 +143,7 @@ return result; } - private static AMD64LIRInstruction createMove(AllocatableValue dst, Value src) { + protected AMD64LIRInstruction createMove(AllocatableValue dst, Value src) { if (src instanceof AMD64AddressValue) { return new LeaOp(dst, (AMD64AddressValue) src); } else if (isRegister(src) || isStackSlot(dst)) { @@ -525,7 +525,7 @@ } private void emitDivRem(AMD64Arithmetic op, Value a, Value b, LIRFrameState state) { - AllocatableValue rax = AMD64.rax.asValue(a.getKind()); + AllocatableValue rax = AMD64.rax.asValue(a.getPlatformKind()); emitMove(rax, a); append(new DivRemOp(op, rax, asAllocatable(b), state)); } @@ -554,12 +554,12 @@ emitDivRem(LDIV, a, b, state(deopting)); return emitMove(RAX_L); case Float: { - Variable result = newVariable(a.getKind()); + Variable result = newVariable(a.getPlatformKind()); append(new BinaryRegStack(FDIV, result, asAllocatable(a), asAllocatable(b))); return result; } case Double: { - Variable result = newVariable(a.getKind()); + Variable result = newVariable(a.getPlatformKind()); append(new BinaryRegStack(DDIV, result, asAllocatable(a), asAllocatable(b))); return result; } @@ -657,7 +657,7 @@ } private Variable emitShift(AMD64Arithmetic op, 
Value a, Value b) { - Variable result = newVariable(a.getKind()); + Variable result = newVariable(a.getPlatformKind()); AllocatableValue input = asAllocatable(a); if (isConstant(b)) { append(new BinaryRegConst(op, result, input, asConstant(b)));
--- a/graal/com.oracle.graal.compiler.ptx.test/src/com/oracle/graal/compiler/ptx/test/BasicPTXTest.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.compiler.ptx.test/src/com/oracle/graal/compiler/ptx/test/BasicPTXTest.java Tue Apr 30 19:50:12 2013 +0200 @@ -24,6 +24,7 @@ import java.lang.reflect.Method; +import org.junit.Ignore; import org.junit.Test; /** @@ -36,7 +37,12 @@ compile("testAddConst1I"); } - public static int testAddConst1I(int a) { + @Ignore + public void testAddInvoke() { + invoke(compile("testAddConst1I"), new Integer(42)); + } + + public int testAddConst1I(int a) { return a + 1; }
--- a/graal/com.oracle.graal.compiler.ptx.test/src/com/oracle/graal/compiler/ptx/test/PTXTestBase.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.compiler.ptx.test/src/com/oracle/graal/compiler/ptx/test/PTXTestBase.java Tue Apr 30 19:50:12 2013 +0200 @@ -24,33 +24,57 @@ import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*; -import com.oracle.graal.api.code.*; -import com.oracle.graal.api.runtime.*; -import com.oracle.graal.compiler.*; -import com.oracle.graal.compiler.ptx.*; -import com.oracle.graal.compiler.test.*; -import com.oracle.graal.debug.*; -import com.oracle.graal.java.*; -import com.oracle.graal.nodes.*; -import com.oracle.graal.phases.*; +import com.oracle.graal.api.code.CompilationResult; +import com.oracle.graal.api.code.SpeculationLog; +import com.oracle.graal.api.code.TargetDescription; +import com.oracle.graal.api.runtime.Graal; +import com.oracle.graal.compiler.GraalCompiler; +import com.oracle.graal.compiler.ptx.PTXBackend; +import com.oracle.graal.compiler.test.GraalCompilerTest; +import com.oracle.graal.debug.Debug; +import com.oracle.graal.java.GraphBuilderConfiguration; +import com.oracle.graal.java.GraphBuilderPhase; +import com.oracle.graal.nodes.StructuredGraph; +import com.oracle.graal.nodes.spi.GraalCodeCacheProvider; +import com.oracle.graal.phases.OptimisticOptimizations; +import com.oracle.graal.phases.PhasePlan; import com.oracle.graal.phases.PhasePlan.PhasePosition; -import com.oracle.graal.ptx.*; +import com.oracle.graal.ptx.PTX; public abstract class PTXTestBase extends GraalCompilerTest { + private StructuredGraph sg; + protected CompilationResult compile(String test) { StructuredGraph graph = parse(test); + sg = graph; Debug.dump(graph, "Graph"); TargetDescription target = new TargetDescription(new PTX(), true, 1, 0, true); - PTXBackend ptxBackend = new PTXBackend(Graal.getRequiredCapability(CodeCacheProvider.class), target); + PTXBackend ptxBackend = new 
PTXBackend(Graal.getRequiredCapability(GraalCodeCacheProvider.class), target); PhasePlan phasePlan = new PhasePlan(); GraphBuilderPhase graphBuilderPhase = new GraphBuilderPhase(runtime, GraphBuilderConfiguration.getDefault(), OptimisticOptimizations.NONE); phasePlan.addPhase(PhasePosition.AFTER_PARSING, graphBuilderPhase); phasePlan.addPhase(PhasePosition.AFTER_PARSING, new PTXPhase()); new PTXPhase().apply(graph); - CompilationResult result = GraalCompiler.compileMethod(runtime, graalRuntime().getReplacements(), ptxBackend, target, graph.method(), graph, null, phasePlan, OptimisticOptimizations.NONE, - new SpeculationLog()); + CompilationResult result = GraalCompiler.compileMethod(runtime, graalRuntime().getReplacements(), + ptxBackend, target, graph.method(), graph, null, phasePlan, + OptimisticOptimizations.NONE, new SpeculationLog()); return result; } + protected StructuredGraph getStructuredGraph() { + return sg; + } + + @SuppressWarnings("unused") + protected void invoke(CompilationResult result, Object... args) { + try { + // not quite yet - need multi-architecture Method changes from JDK-8013168 + // Object[] executeArgs = argsWithReceiver(this, args); + // InstalledCode installedCode = runtime.addMethod(getStructuredGraph().method(), result); + // installedCode.executeVarargs(executeArgs); + } catch (Throwable th) { + th.printStackTrace(); + } + } }
--- a/graal/com.oracle.graal.compiler.ptx/src/com/oracle/graal/compiler/ptx/PTXBackend.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.compiler.ptx/src/com/oracle/graal/compiler/ptx/PTXBackend.java Tue Apr 30 19:50:12 2013 +0200 @@ -50,9 +50,6 @@ @Override public void enter(TargetMethodAssembler tasm) { - Buffer codeBuffer = tasm.asm.codeBuffer; - codeBuffer.emitString(".version 1.4"); - codeBuffer.emitString(".target sm_10"); // codeBuffer.emitString(".address_size 32"); // PTX ISA version 2.3 } @@ -62,6 +59,11 @@ } @Override + protected AbstractAssembler createAssembler(FrameMap frameMap) { + return new PTXAssembler(target, frameMap.registerConfig); + } + + @Override public TargetMethodAssembler newAssembler(LIRGenerator lirGen, CompilationResult compilationResult) { // Omit the frame if the method: // - has no spill slots or other slots allocated during register allocation @@ -69,9 +71,9 @@ // - has no incoming arguments passed on the stack // - has no instructions with debug info FrameMap frameMap = lirGen.frameMap; - AbstractAssembler masm = new PTXAssembler(target, frameMap.registerConfig); + AbstractAssembler masm = createAssembler(frameMap); HotSpotFrameContext frameContext = new HotSpotFrameContext(); - TargetMethodAssembler tasm = new TargetMethodAssembler(target, runtime(), frameMap, masm, frameContext, compilationResult); + TargetMethodAssembler tasm = new PTXTargetMethodAssembler(target, runtime(), frameMap, masm, frameContext, compilationResult); tasm.setFrameSize(frameMap.frameSize()); return tasm; } @@ -81,6 +83,8 @@ // Emit the prologue final String name = method.getName(); Buffer codeBuffer = tasm.asm.codeBuffer; + codeBuffer.emitString(".version 1.4"); + codeBuffer.emitString(".target sm_10"); codeBuffer.emitString0(".entry " + name + " ("); codeBuffer.emitString("");
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.compiler.ptx/src/com/oracle/graal/compiler/ptx/PTXTargetMethodAssembler.java Tue Apr 30 19:50:12 2013 +0200 @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.graal.compiler.ptx; + +import com.oracle.graal.api.code.CodeCacheProvider; +import com.oracle.graal.api.code.CompilationResult; +import com.oracle.graal.api.code.TargetDescription; +import com.oracle.graal.asm.AbstractAssembler; +import com.oracle.graal.hotspot.HotSpotGraalRuntime; +import com.oracle.graal.hotspot.bridge.CompilerToGPU; +import com.oracle.graal.hotspot.meta.HotSpotMethod; +import com.oracle.graal.lir.FrameMap; +import com.oracle.graal.lir.asm.FrameContext; +import com.oracle.graal.lir.asm.TargetMethodAssembler; + +public class PTXTargetMethodAssembler extends TargetMethodAssembler { + + private static CompilerToGPU toGPU = HotSpotGraalRuntime.graalRuntime().getCompilerToGPU(); + private static boolean validDevice = toGPU.deviceInit(); + + // detach ?? + + public PTXTargetMethodAssembler(TargetDescription target, + CodeCacheProvider runtime, FrameMap frameMap, + AbstractAssembler asm, FrameContext frameContext, + CompilationResult compilationResult) { + super(target, runtime, frameMap, asm, frameContext, compilationResult); + } + + @Override + public CompilationResult finishTargetMethod(Object name, boolean isStub) { + CompilationResult graalCompile = super.finishTargetMethod(name, isStub); + + try { + if (validDevice) { + HotSpotMethod method = (HotSpotMethod) name; + toGPU.generateKernel(graalCompile.getTargetCode(), method.getName()); + } + } catch (Throwable th) { + th.printStackTrace(); + } + + return graalCompile; // for now + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/WriteBarrierVerificationTest.java Tue Apr 30 19:50:12 2013 +0200 @@ -0,0 +1,706 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.compiler.test; + +import java.util.*; + +import org.junit.*; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.api.meta.*; +import com.oracle.graal.debug.*; +import com.oracle.graal.hotspot.phases.*; +import com.oracle.graal.nodes.*; +import com.oracle.graal.nodes.extended.*; +import com.oracle.graal.nodes.spi.Lowerable.*; +import com.oracle.graal.phases.common.*; +import com.oracle.graal.phases.graph.*; +import com.oracle.graal.phases.graph.ReentrantNodeIterator.*; +import com.oracle.graal.phases.tiers.*; + +/** + * The following tests validate the write barrier verification phase. 
For every tested snippet, an + * array of write barrier indices and the total write barrier number are passed as parameters. The + * indices denote the barriers that will be manually removed. The write barrier verification phase + * runs after the write barrier removal and depending on the result an assertion might be generated. + * The tests anticipate the presence or not of an assertion generated by the verification phase. + */ +public class WriteBarrierVerificationTest extends GraalCompilerTest { + + public static int barrierIndex; + + public static class Container { + + public Container a; + public Container b; + } + + private static native void safepoint(); + + public static void test1Snippet() { + Container main = new Container(); + Container temp1 = new Container(); + Container temp2 = new Container(); + barrierIndex = 0; + safepoint(); + barrierIndex = 1; + main.a = temp1; + safepoint(); + barrierIndex = 2; + main.b = temp2; + safepoint(); + } + + @Test(expected = AssertionError.class) + public void test1() { + test("test1Snippet", 2, new int[]{1}); + } + + @Test(expected = AssertionError.class) + public void test2() { + test("test1Snippet", 2, new int[]{2}); + } + + public static void test2Snippet() { + Container main = new Container(); + Container temp1 = new Container(); + Container temp2 = new Container(); + barrierIndex = 0; + safepoint(); + barrierIndex = 1; + main.a = temp1; + barrierIndex = 2; + main.b = temp2; + safepoint(); + } + + @Test(expected = AssertionError.class) + public void test3() { + test("test2Snippet", 2, new int[]{1}); + } + + @Test + public void test4() { + test("test2Snippet", 2, new int[]{2}); + } + + public static void test3Snippet(boolean test) { + Container main = new Container(); + Container temp1 = new Container(); + Container temp2 = new Container(); + barrierIndex = 0; + safepoint(); + for (int i = 0; i < 10; i++) { + if (test) { + barrierIndex = 1; + main.a = temp1; + barrierIndex = 2; + main.b = temp2; + } else { + 
barrierIndex = 3; + main.a = temp1; + barrierIndex = 4; + main.b = temp2; + } + } + } + + @Test(expected = AssertionError.class) + public void test5() { + test("test3Snippet", 4, new int[]{1, 2}); + } + + @Test(expected = AssertionError.class) + public void test6() { + test("test3Snippet", 4, new int[]{3, 4}); + } + + @Test(expected = AssertionError.class) + public void test7() { + test("test3Snippet", 4, new int[]{1}); + } + + @Test + public void test8() { + test("test3Snippet", 4, new int[]{2}); + } + + @Test(expected = AssertionError.class) + public void test9() { + test("test3Snippet", 4, new int[]{3}); + } + + @Test + public void test10() { + test("test3Snippet", 4, new int[]{4}); + } + + public static void test4Snippet(boolean test) { + Container main = new Container(); + Container temp1 = new Container(); + Container temp2 = new Container(); + safepoint(); + barrierIndex = 1; + main.a = temp1; + for (int i = 0; i < 10; i++) { + if (test) { + barrierIndex = 2; + main.a = temp1; + barrierIndex = 3; + main.b = temp2; + } else { + barrierIndex = 4; + main.a = temp2; + barrierIndex = 5; + main.b = temp1; + } + } + } + + @Test(expected = AssertionError.class) + public void test11() { + test("test4Snippet", 5, new int[]{2, 3}); + } + + @Test(expected = AssertionError.class) + public void test12() { + test("test4Snippet", 5, new int[]{4, 5}); + } + + @Test(expected = AssertionError.class) + public void test13() { + test("test4Snippet", 5, new int[]{1}); + } + + public static void test5Snippet() { + Container main = new Container(); + Container temp1 = new Container(); + Container temp2 = new Container(); + safepoint(); + barrierIndex = 1; + main.a = temp1; + if (main.a == main.b) { + barrierIndex = 2; + main.a = temp1; + barrierIndex = 3; + main.b = temp2; + } else { + barrierIndex = 4; + main.a = temp2; + barrierIndex = 5; + main.b = temp1; + } + safepoint(); + } + + @Test(expected = AssertionError.class) + public void test14() { + test("test5Snippet", 5, new 
int[]{1}); + } + + @Test + public void test15() { + test("test5Snippet", 5, new int[]{2}); + } + + @Test + public void test16() { + test("test5Snippet", 5, new int[]{4}); + } + + @Test + public void test17() { + test("test5Snippet", 5, new int[]{3}); + } + + @Test + public void test18() { + test("test5Snippet", 5, new int[]{5}); + } + + @Test + public void test19() { + test("test5Snippet", 5, new int[]{2, 3}); + } + + @Test + public void test20() { + test("test5Snippet", 5, new int[]{4, 5}); + } + + public static void test6Snippet(boolean test) { + Container main = new Container(); + Container temp1 = new Container(); + Container temp2 = new Container(); + safepoint(); + barrierIndex = 1; + main.a = temp1; + if (test) { + barrierIndex = 2; + main.a = temp1; + barrierIndex = 3; + main.b = temp1.a.a; + } else { + barrierIndex = 4; + main.a = temp2; + barrierIndex = 5; + main.b = temp2.a.a; + } + safepoint(); + } + + @Test(expected = AssertionError.class) + public void test21() { + test("test6Snippet", 5, new int[]{1}); + } + + @Test(expected = AssertionError.class) + public void test22() { + test("test6Snippet", 5, new int[]{1, 2}); + } + + @Test(expected = AssertionError.class) + public void test23() { + test("test6Snippet", 5, new int[]{3}); + } + + @Test + public void test24() { + test("test6Snippet", 5, new int[]{4}); + } + + public static void test7Snippet(boolean test) { + Container main = new Container(); + Container temp1 = new Container(); + Container temp2 = new Container(); + safepoint(); + barrierIndex = 1; + main.a = temp1; + if (test) { + barrierIndex = 2; + main.a = temp1; + } + barrierIndex = 3; + main.b = temp2; + safepoint(); + } + + @Test + public void test25() { + test("test7Snippet", 3, new int[]{2}); + } + + @Test + public void test26() { + test("test7Snippet", 3, new int[]{3}); + } + + @Test + public void test27() { + test("test7Snippet", 3, new int[]{2, 3}); + } + + @Test(expected = AssertionError.class) + public void test28() { + 
test("test7Snippet", 3, new int[]{1}); + } + + public static void test8Snippet(boolean test) { + Container main = new Container(); + Container temp1 = new Container(); + Container temp2 = new Container(); + safepoint(); + if (test) { + barrierIndex = 1; + main.a = temp1; + } + barrierIndex = 2; + main.b = temp2; + safepoint(); + } + + @Test(expected = AssertionError.class) + public void test29() { + test("test8Snippet", 2, new int[]{1}); + } + + @Test(expected = AssertionError.class) + public void test30() { + test("test8Snippet", 2, new int[]{2}); + } + + @Test(expected = AssertionError.class) + public void test31() { + test("test8Snippet", 2, new int[]{1, 2}); + } + + public static void test9Snippet(boolean test) { + Container main1 = new Container(); + Container main2 = new Container(); + Container temp1 = new Container(); + Container temp2 = new Container(); + safepoint(); + if (test) { + barrierIndex = 1; + main1.a = temp1; + } else { + barrierIndex = 2; + main2.a = temp1; + } + barrierIndex = 3; + main1.b = temp2; + barrierIndex = 4; + main2.b = temp2; + safepoint(); + } + + @Test(expected = AssertionError.class) + public void test32() { + test("test9Snippet", 4, new int[]{1}); + } + + @Test(expected = AssertionError.class) + public void test33() { + test("test9Snippet", 4, new int[]{2}); + } + + @Test(expected = AssertionError.class) + public void test34() { + test("test9Snippet", 4, new int[]{3}); + } + + @Test(expected = AssertionError.class) + public void test35() { + test("test9Snippet", 4, new int[]{4}); + } + + @Test(expected = AssertionError.class) + public void test36() { + test("test9Snippet", 4, new int[]{1, 2}); + } + + @Test(expected = AssertionError.class) + public void test37() { + test("test9Snippet", 4, new int[]{3, 4}); + } + + public static void test10Snippet(boolean test) { + Container main1 = new Container(); + Container main2 = new Container(); + Container temp1 = new Container(); + Container temp2 = new Container(); + safepoint(); + if 
(test) { + barrierIndex = 1; + main1.a = temp1; + barrierIndex = 2; + main2.a = temp2; + } else { + barrierIndex = 3; + main2.a = temp1; + } + barrierIndex = 4; + main1.b = temp2; + barrierIndex = 5; + main2.b = temp2; + safepoint(); + } + + @Test(expected = AssertionError.class) + public void test38() { + test("test10Snippet", 5, new int[]{1}); + } + + @Test(expected = AssertionError.class) + public void test39() { + test("test10Snippet", 5, new int[]{2}); + } + + @Test(expected = AssertionError.class) + public void test40() { + test("test10Snippet", 5, new int[]{3}); + } + + @Test(expected = AssertionError.class) + public void test41() { + test("test10Snippet", 5, new int[]{4}); + } + + @Test + public void test42() { + test("test10Snippet", 5, new int[]{5}); + } + + @Test(expected = AssertionError.class) + public void test43() { + test("test10Snippet", 5, new int[]{1, 2}); + } + + @Test(expected = AssertionError.class) + public void test44() { + test("test10Snippet", 5, new int[]{1, 2, 3}); + } + + @Test(expected = AssertionError.class) + public void test45() { + test("test10Snippet", 5, new int[]{3, 4}); + } + + public static void test11Snippet(boolean test) { + Container main1 = new Container(); + Container main2 = new Container(); + Container main3 = new Container(); + Container temp1 = new Container(); + Container temp2 = new Container(); + safepoint(); + if (test) { + barrierIndex = 1; + main1.a = temp1; + barrierIndex = 2; + main3.a = temp1; + if (!test) { + barrierIndex = 3; + main2.a = temp2; + } else { + barrierIndex = 4; + main1.a = temp2; + barrierIndex = 5; + main3.a = temp2; + } + } else { + barrierIndex = 6; + main1.b = temp2; + for (int i = 0; i < 10; i++) { + barrierIndex = 7; + main3.a = temp1; + } + barrierIndex = 8; + main3.b = temp2; + } + barrierIndex = 9; + main1.b = temp2; + barrierIndex = 10; + main2.b = temp2; + barrierIndex = 11; + main3.b = temp2; + safepoint(); + } + + @Test(expected = AssertionError.class) + public void test46() { + 
test("test11Snippet", 11, new int[]{1}); + } + + @Test(expected = AssertionError.class) + public void test47() { + test("test11Snippet", 11, new int[]{2}); + } + + @Test(expected = AssertionError.class) + public void test48() { + test("test11Snippet", 11, new int[]{3}); + } + + @Test(expected = AssertionError.class) + public void test49() { + test("test11Snippet", 11, new int[]{6}); + } + + @Test(expected = AssertionError.class) + public void test50() { + test("test11Snippet", 11, new int[]{7}); + } + + @Test(expected = AssertionError.class) + public void test51() { + test("test11Snippet", 11, new int[]{8}); + } + + @Test(expected = AssertionError.class) + public void test52() { + test("test11Snippet", 11, new int[]{9}); + } + + @Test(expected = AssertionError.class) + public void test53() { + test("test11Snippet", 11, new int[]{10}); + } + + @Test + public void test54() { + test("test11Snippet", 11, new int[]{4}); + } + + @Test + public void test55() { + test("test11Snippet", 11, new int[]{5}); + } + + @Test + public void test56() { + test("test11Snippet", 11, new int[]{11}); + } + + public static void test12Snippet(boolean test) { + Container main = new Container(); + Container main1 = new Container(); + Container temp1 = new Container(); + Container temp2 = new Container(); + barrierIndex = 0; + safepoint(); + barrierIndex = 7; + main1.a = temp1; + for (int i = 0; i < 10; i++) { + if (test) { + barrierIndex = 1; + main.a = temp1; + barrierIndex = 2; + main.b = temp2; + } else { + barrierIndex = 3; + main.a = temp1; + barrierIndex = 4; + main.b = temp2; + } + } + barrierIndex = 5; + main.a = temp1; + barrierIndex = 6; + main.b = temp1; + barrierIndex = 8; + main1.b = temp1; + safepoint(); + } + + @Test(expected = AssertionError.class) + public void test57() { + test("test12Snippet", 8, new int[]{5}); + } + + @Test + public void test58() { + test("test12Snippet", 8, new int[]{6}); + } + + @Test(expected = AssertionError.class) + public void test59() { + 
test("test12Snippet", 8, new int[]{7}); + } + + @Test(expected = AssertionError.class) + public void test60() { + test("test12Snippet", 8, new int[]{8}); + } + + private void test(final String snippet, final int expectedBarriers, final int... removedBarrierIndices) { + Debug.scope("WriteBarrierVerificationTest", new DebugDumpScope(snippet), new Runnable() { + + public void run() { + final StructuredGraph graph = parse(snippet); + HighTierContext highTierContext = new HighTierContext(runtime(), new Assumptions(false), replacements); + MidTierContext midTierContext = new MidTierContext(runtime(), new Assumptions(false), replacements, runtime().getTarget()); + + new LoweringPhase(LoweringType.BEFORE_GUARDS).apply(graph, highTierContext); + new GuardLoweringPhase().apply(graph, midTierContext); + new SafepointInsertionPhase().apply(graph); + new WriteBarrierAdditionPhase().apply(graph); + + // First, the total number of expected barriers is checked. + final int barriers = graph.getNodes(SerialWriteBarrier.class).count(); + Assert.assertTrue(expectedBarriers == barriers); + + class State { + + boolean removeBarrier = false; + + } + + // Iterate over all write nodes and remove barriers according to input indices. + NodeIteratorClosure<State> closure = new NodeIteratorClosure<State>() { + + @Override + protected void processNode(FixedNode node, State currentState) { + if (node instanceof WriteNode) { + WriteNode write = (WriteNode) node; + Object obj = write.getLocationIdentities()[0]; + if (obj instanceof ResolvedJavaField) { + if (((ResolvedJavaField) obj).getName().equals("barrierIndex")) { + /* + * A "barrierIndex" variable was found and is checked against + * the input barrier array. + */ + if (eliminateBarrier(write.value().asConstant().asInt(), removedBarrierIndices)) { + currentState.removeBarrier = true; + } + } + } + } else if (node instanceof SerialWriteBarrier) { + // Remove flagged write barriers. 
+ if (currentState.removeBarrier) { + graph.removeFixed(((SerialWriteBarrier) node)); + currentState.removeBarrier = false; + } + } + } + + private boolean eliminateBarrier(int index, int[] map) { + for (int i = 0; i < map.length; i++) { + if (map[i] == index) { + return true; + } + } + return false; + } + + @Override + protected Map<LoopExitNode, State> processLoop(LoopBeginNode loop, State initialState) { + return ReentrantNodeIterator.processLoop(this, loop, initialState).exitStates; + } + + @Override + protected State merge(MergeNode merge, List<State> states) { + return new State(); + } + + @Override + protected State afterSplit(BeginNode node, State oldState) { + return new State(); + } + }; + + try { + ReentrantNodeIterator.apply(closure, graph.start(), new State(), null); + new WriteBarrierVerificationPhase().apply(graph); + } catch (AssertionError error) { + /* + * Catch assertion, test for expected one and re-throw in order to validate unit + * test. + */ + Assert.assertTrue(error.getMessage().equals("Write barrier must be present")); + throw new AssertionError(); + } + + } + }); + } +}
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/Interval.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/Interval.java Tue Apr 30 19:50:12 2013 +0200 @@ -430,7 +430,7 @@ /** * The kind of this interval. */ - private Kind kind; + private PlatformKind kind; /** * The head of the list of ranges describing this interval. This list is sorted by @@ -501,15 +501,15 @@ void assignLocation(AllocatableValue newLocation) { if (isRegister(newLocation)) { assert this.location == null : "cannot re-assign location for " + this; - if (newLocation.getKind() == Kind.Illegal && kind != Kind.Illegal) { + if (newLocation.getPlatformKind() == Kind.Illegal && kind != Kind.Illegal) { this.location = asRegister(newLocation).asValue(kind); return; } } else { assert this.location == null || isRegister(this.location) : "cannot re-assign location for " + this; assert isStackSlot(newLocation); - assert newLocation.getKind() != Kind.Illegal; - assert newLocation.getKind() == this.kind; + assert newLocation.getPlatformKind() != Kind.Illegal; + assert newLocation.getPlatformKind() == this.kind; } this.location = newLocation; } @@ -522,14 +522,13 @@ return location; } - public Kind kind() { + public PlatformKind kind() { assert !isRegister(operand) : "cannot access type for fixed interval"; return kind; } - void setKind(Kind kind) { + void setKind(PlatformKind kind) { assert isRegister(operand) || this.kind() == Kind.Illegal || this.kind() == kind : "overwriting existing type"; - assert kind == kind.getStackKind() || kind == Kind.Short : "these kinds should have int type registers"; this.kind = kind; }
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScan.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScan.java Tue Apr 30 19:50:12 2013 +0200 @@ -305,7 +305,7 @@ intervals = Arrays.copyOf(intervals, intervals.length * 2); } intervalsSize++; - Variable variable = new Variable(source.kind(), ir.nextVariable(), asVariable(source.operand).flag); + Variable variable = new Variable(source.kind(), ir.nextVariable()); assert variables.size() == variable.index; variables.add(variable); @@ -968,7 +968,7 @@ TTY.println(blockData.get(block).liveOut.toString()); } - void addUse(AllocatableValue operand, int from, int to, RegisterPriority registerPriority, Kind kind) { + void addUse(AllocatableValue operand, int from, int to, RegisterPriority registerPriority, PlatformKind kind) { if (!isProcessed(operand)) { return; } @@ -987,7 +987,7 @@ interval.addUsePos(to & ~1, registerPriority); } - void addTemp(AllocatableValue operand, int tempPos, RegisterPriority registerPriority, Kind kind) { + void addTemp(AllocatableValue operand, int tempPos, RegisterPriority registerPriority, PlatformKind kind) { if (!isProcessed(operand)) { return; } @@ -1008,7 +1008,7 @@ return !isRegister(operand) || attributes(asRegister(operand)).isAllocatable(); } - void addDef(AllocatableValue operand, int defPos, RegisterPriority registerPriority, Kind kind) { + void addDef(AllocatableValue operand, int defPos, RegisterPriority registerPriority, PlatformKind kind) { if (!isProcessed(operand)) { return; } @@ -1197,7 +1197,7 @@ @Override public Value doValue(Value operand, OperandMode mode, EnumSet<OperandFlag> flags) { if (isVariableOrRegister(operand)) { - addDef((AllocatableValue) operand, opId, registerPriorityOfOutputOperand(op), operand.getKind().getStackKind()); + addDef((AllocatableValue) operand, opId, registerPriorityOfOutputOperand(op), operand.getPlatformKind()); addRegisterHint(op, operand, mode, 
flags, true); } return operand; @@ -1208,7 +1208,7 @@ @Override public Value doValue(Value operand, OperandMode mode, EnumSet<OperandFlag> flags) { if (isVariableOrRegister(operand)) { - addTemp((AllocatableValue) operand, opId, RegisterPriority.MustHaveRegister, operand.getKind().getStackKind()); + addTemp((AllocatableValue) operand, opId, RegisterPriority.MustHaveRegister, operand.getPlatformKind()); addRegisterHint(op, operand, mode, flags, false); } return operand; @@ -1220,7 +1220,7 @@ public Value doValue(Value operand, OperandMode mode, EnumSet<OperandFlag> flags) { if (isVariableOrRegister(operand)) { RegisterPriority p = registerPriorityOfInputOperand(flags); - addUse((AllocatableValue) operand, blockFrom, opId + 1, p, operand.getKind().getStackKind()); + addUse((AllocatableValue) operand, blockFrom, opId + 1, p, operand.getPlatformKind()); addRegisterHint(op, operand, mode, flags, false); } return operand; @@ -1232,7 +1232,7 @@ public Value doValue(Value operand, OperandMode mode, EnumSet<OperandFlag> flags) { if (isVariableOrRegister(operand)) { RegisterPriority p = registerPriorityOfInputOperand(flags); - addUse((AllocatableValue) operand, blockFrom, opId, p, operand.getKind().getStackKind()); + addUse((AllocatableValue) operand, blockFrom, opId, p, operand.getPlatformKind()); addRegisterHint(op, operand, mode, flags, false); } return operand; @@ -1247,7 +1247,7 @@ @Override public Value doValue(Value operand) { - addUse((AllocatableValue) operand, blockFrom, opId + 1, RegisterPriority.None, operand.getKind().getStackKind()); + addUse((AllocatableValue) operand, blockFrom, opId + 1, RegisterPriority.None, operand.getPlatformKind()); return operand; } });
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScanWalker.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/LinearScanWalker.java Tue Apr 30 19:50:12 2013 +0200 @@ -29,7 +29,6 @@ import java.util.*; import com.oracle.graal.api.code.*; -import com.oracle.graal.api.code.Register.RegisterFlag; import com.oracle.graal.api.meta.*; import com.oracle.graal.compiler.alloc.Interval.RegisterBinding; import com.oracle.graal.compiler.alloc.Interval.RegisterPriority; @@ -811,8 +810,7 @@ } void initVarsForAlloc(Interval interval) { - EnumMap<RegisterFlag, Register[]> categorizedRegs = allocator.frameMap.registerConfig.getCategorizedAllocatableRegisters(); - availableRegs = categorizedRegs.get(asVariable(interval.operand).flag); + availableRegs = allocator.frameMap.registerConfig.getAllocatableRegisters(interval.kind()); } static boolean isMove(LIRInstruction op, Interval from, Interval to) {
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/MoveResolver.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/alloc/MoveResolver.java Tue Apr 30 19:50:12 2013 +0200 @@ -207,7 +207,7 @@ } private void insertMove(Value fromOpr, Interval toInterval) { - assert fromOpr.getKind() == toInterval.kind() : "move between different types"; + assert fromOpr.getPlatformKind() == toInterval.kind() : "move between different types"; assert insertIdx != -1 : "must setup insert position first"; AllocatableValue toOpr = toInterval.operand;
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/gen/LIRGenerator.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/gen/LIRGenerator.java Tue Apr 30 19:50:12 2013 +0200 @@ -142,23 +142,18 @@ /** * Creates a new {@linkplain Variable variable}. * - * @param kind The kind of the new variable. + * @param platformKind The kind of the new variable. * @return a new variable */ @Override - public Variable newVariable(Kind kind) { - Kind stackKind = kind.getStackKind(); - switch (stackKind) { - case Int: - case Long: - case Object: - return new Variable(stackKind, lir.nextVariable(), Register.RegisterFlag.CPU); - case Float: - case Double: - return new Variable(stackKind, lir.nextVariable(), Register.RegisterFlag.FPU); - default: - throw GraalInternalError.shouldNotReachHere(); + public Variable newVariable(PlatformKind platformKind) { + PlatformKind stackKind; + if (platformKind instanceof Kind) { + stackKind = ((Kind) platformKind).getStackKind(); + } else { + stackKind = platformKind; } + return new Variable(stackKind, lir.nextVariable()); } @Override
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/target/Backend.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/target/Backend.java Tue Apr 30 19:50:12 2013 +0200 @@ -24,6 +24,7 @@ import com.oracle.graal.api.code.*; import com.oracle.graal.api.meta.*; +import com.oracle.graal.asm.*; import com.oracle.graal.compiler.gen.*; import com.oracle.graal.lir.*; import com.oracle.graal.lir.asm.*; @@ -52,6 +53,8 @@ public abstract LIRGenerator newLIRGenerator(StructuredGraph graph, FrameMap frameMap, ResolvedJavaMethod method, LIR lir); + protected abstract AbstractAssembler createAssembler(FrameMap frameMap); + public abstract TargetMethodAssembler newAssembler(LIRGenerator lirGen, CompilationResult compilationResult); /**
--- a/graal/com.oracle.graal.hotspot.amd64.test/src/com/oracle/graal/hotspot/amd64/test/AMD64HotSpotFrameOmissionTest.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.hotspot.amd64.test/src/com/oracle/graal/hotspot/amd64/test/AMD64HotSpotFrameOmissionTest.java Tue Apr 30 19:50:12 2013 +0200 @@ -30,7 +30,6 @@ import org.junit.*; import com.oracle.graal.api.code.*; -import com.oracle.graal.api.code.Register.RegisterFlag; import com.oracle.graal.api.meta.*; import com.oracle.graal.api.runtime.*; import com.oracle.graal.asm.amd64.*; @@ -71,7 +70,7 @@ @Override public void generateCode(AMD64Assembler asm) { - Register arg = getArgumentRegister(0, RegisterFlag.CPU); + Register arg = getArgumentRegister(0, Kind.Int); asm.addl(arg, 5); asm.movl(rax, arg); asm.ret(0); @@ -89,7 +88,7 @@ @Override public void generateCode(AMD64Assembler asm) { - Register arg = getArgumentRegister(0, RegisterFlag.CPU); + Register arg = getArgumentRegister(0, Kind.Long); asm.addq(arg, 1); asm.movq(rax, arg); asm.ret(0); @@ -117,8 +116,8 @@ Assert.assertArrayEquals(expectedCode, actualCode); } - private Register getArgumentRegister(int index, RegisterFlag flag) { - Register[] regs = runtime.lookupRegisterConfig().getCallingConventionRegisters(CallingConvention.Type.JavaCall, flag); + private Register getArgumentRegister(int index, Kind kind) { + Register[] regs = runtime.lookupRegisterConfig().getCallingConventionRegisters(CallingConvention.Type.JavaCall, kind); return regs[index]; } }
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java Tue Apr 30 19:50:12 2013 +0200 @@ -140,6 +140,11 @@ } @Override + protected AbstractAssembler createAssembler(FrameMap frameMap) { + return new AMD64MacroAssembler(target, frameMap.registerConfig); + } + + @Override public TargetMethodAssembler newAssembler(LIRGenerator lirGen, CompilationResult compilationResult) { // Omit the frame if the method: // - has no spill slots or other slots allocated during register allocation @@ -151,7 +156,7 @@ LIR lir = gen.lir; boolean omitFrame = CanOmitFrame && !frameMap.frameNeedsAllocating() && !lir.hasArgInCallerFrame(); - AbstractAssembler masm = new AMD64MacroAssembler(target, frameMap.registerConfig); + AbstractAssembler masm = createAssembler(frameMap); HotSpotFrameContext frameContext = omitFrame ? null : new HotSpotFrameContext(); TargetMethodAssembler tasm = new TargetMethodAssembler(target, runtime(), frameMap, masm, frameContext, compilationResult); tasm.setFrameSize(frameMap.frameSize()); @@ -163,18 +168,14 @@ Stub stub = runtime().asStub(lirGen.method()); if (stub != null) { - List<AMD64RegisterPreservationOp> registerPreservations = new ArrayList<>(); - final Set<Register> definedRegisters = gatherDefinedRegisters(lir, registerPreservations); + final Set<Register> definedRegisters = gatherDefinedRegisters(lir); stub.initDefinedRegisters(definedRegisters); - // Eliminate unnecessary register preservation - for (AMD64RegisterPreservationOp op : registerPreservations) { - op.doNotPreserve(definedRegisters); - } - - // Record where preserved registers are saved - for (Map.Entry<LIRFrameState, AMD64RestoreRegistersOp> e : gen.calleeSaveInfo.entrySet()) { - e.getValue().describePreservation(e.getKey().debugInfo(), frameMap); + // Eliminate unnecessary register preservation and + // record 
where preserved registers are saved + for (Map.Entry<LIRFrameState, AMD64SaveRegistersOp> e : gen.calleeSaveInfo.entrySet()) { + AMD64SaveRegistersOp save = e.getValue(); + save.updateAndDescribePreservation(definedRegisters, e.getKey().debugInfo(), frameMap); } } @@ -185,10 +186,9 @@ * Finds all the registers that are defined by some given LIR. * * @param lir the LIR to examine - * @param registerPreservations register preservation operations in {@code lir} are added to this list * @return the registers that are defined by or used as temps for any instruction in {@code lir} */ - private static Set<Register> gatherDefinedRegisters(LIR lir, List<AMD64RegisterPreservationOp> registerPreservations) { + private static Set<Register> gatherDefinedRegisters(LIR lir) { final Set<Register> definedRegisters = new HashSet<>(); ValueProcedure defProc = new ValueProcedure() { @@ -203,11 +203,8 @@ }; for (Block block : lir.codeEmittingOrder()) { for (LIRInstruction op : lir.lir(block)) { - if (op instanceof AMD64RegisterPreservationOp) { - // Don't consider these ops as definitions - registerPreservations.add((AMD64RegisterPreservationOp) op); - } else if (op instanceof ParametersOp) { - // Don't consider these ops as definitions + if (op instanceof ParametersOp) { + // Don't consider this as a definition } else { op.forEachTemp(defProc); op.forEachOutput(defProc);
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotEpilogueOp.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotEpilogueOp.java Tue Apr 30 19:50:12 2013 +0200 @@ -24,7 +24,6 @@ import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*; -import com.oracle.graal.api.code.*; import com.oracle.graal.api.meta.*; import com.oracle.graal.lir.*; import com.oracle.graal.lir.amd64.*; @@ -39,7 +38,7 @@ * initial LIR generation is finished. Until then, we use a placeholder variable so that LIR * verification is successful. */ - private static final Variable PLACEHOLDER = new Variable(Kind.Long, Integer.MAX_VALUE, Register.RegisterFlag.CPU); + private static final Variable PLACEHOLDER = new Variable(Kind.Long, Integer.MAX_VALUE); @Use({REG, STACK}) protected AllocatableValue savedRbp = PLACEHOLDER; }
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotGraalRuntime.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotGraalRuntime.java Tue Apr 30 19:50:12 2013 +0200 @@ -30,9 +30,9 @@ /** * AMD64 specific implementation of {@link HotSpotGraalRuntime}. */ -final class AMD64HotSpotGraalRuntime extends HotSpotGraalRuntime { +public class AMD64HotSpotGraalRuntime extends HotSpotGraalRuntime { - private AMD64HotSpotGraalRuntime() { + protected AMD64HotSpotGraalRuntime() { } /** @@ -40,16 +40,25 @@ */ public static HotSpotGraalRuntime makeInstance() { if (graalRuntime() == null) { - setInstance(new AMD64HotSpotGraalRuntime()); + HotSpotGraalRuntimeFactory factory = findFactory("AMD64"); + if (factory != null) { + setInstance(factory.createRuntime()); + } else { + setInstance(new AMD64HotSpotGraalRuntime()); + } } return graalRuntime(); } + protected Architecture createArchitecture() { + return new AMD64(config.useSSE, config.useAVX); + } + @Override protected TargetDescription createTarget() { final int stackFrameAlignment = 16; final int implicitNullCheckLimit = 4096; - return new TargetDescription(new AMD64(config.useSSE, config.useAVX), true, stackFrameAlignment, implicitNullCheckLimit, true); + return new TargetDescription(createArchitecture(), true, stackFrameAlignment, implicitNullCheckLimit, true); } @Override
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java Tue Apr 30 19:50:12 2013 +0200 @@ -54,13 +54,13 @@ /** * LIR generator specialized for AMD64 HotSpot. */ -final class AMD64HotSpotLIRGenerator extends AMD64LIRGenerator implements HotSpotLIRGenerator { +public class AMD64HotSpotLIRGenerator extends AMD64LIRGenerator implements HotSpotLIRGenerator { private HotSpotRuntime runtime() { return (HotSpotRuntime) runtime; } - AMD64HotSpotLIRGenerator(StructuredGraph graph, CodeCacheProvider runtime, TargetDescription target, FrameMap frameMap, ResolvedJavaMethod method, LIR lir) { + protected AMD64HotSpotLIRGenerator(StructuredGraph graph, CodeCacheProvider runtime, TargetDescription target, FrameMap frameMap, ResolvedJavaMethod method, LIR lir) { super(graph, runtime, target, frameMap, method, lir); } @@ -176,7 +176,7 @@ * Map from debug infos that need to be updated with callee save information to the operations * that provide the information. */ - Map<LIRFrameState, AMD64RestoreRegistersOp> calleeSaveInfo = new HashMap<>(); + Map<LIRFrameState, AMD64SaveRegistersOp> calleeSaveInfo = new HashMap<>(); private LIRFrameState currentRuntimeCallInfo; @@ -193,29 +193,24 @@ public Variable emitCall(RuntimeCallTarget callTarget, CallingConvention cc, DeoptimizingNode info, Value... args) { boolean needsCalleeSave = ((HotSpotRuntimeCallTarget) callTarget).isCRuntimeCall(); - RegisterValue[] savedRegisters = null; + AMD64SaveRegistersOp save = null; StackSlot[] savedRegisterLocations = null; if (needsCalleeSave) { - Register returnReg = isRegister(cc.getReturn()) ? 
asRegister(cc.getReturn()) : null; - Set<Register> registers = new HashSet<>(Arrays.asList(frameMap.registerConfig.getAllocatableRegisters())); - if (returnReg != null) { - registers.remove(returnReg); - } - - savedRegisters = new RegisterValue[registers.size()]; + Register[] savedRegisters = frameMap.registerConfig.getAllocatableRegisters(); savedRegisterLocations = new StackSlot[savedRegisters.length]; - int savedRegisterIndex = 0; - for (Register reg : registers) { - assert reg.isCpu() || reg.isFpu(); - savedRegisters[savedRegisterIndex++] = reg.asValue(reg.isCpu() ? Kind.Long : Kind.Double); - } + AMD64LIRInstruction[] savingMoves = new AMD64LIRInstruction[savedRegisters.length]; + AMD64LIRInstruction[] restoringMoves = new AMD64LIRInstruction[savedRegisters.length]; + for (int i = 0; i < savedRegisters.length; i++) { + PlatformKind kind = target.arch.getLargestStorableKind(savedRegisters[i].getRegisterCategory()); + assert kind != Kind.Illegal; + StackSlot spillSlot = frameMap.allocateSpillSlot(kind); + savedRegisterLocations[i] = spillSlot; - append(new ParametersOp(savedRegisters)); - for (int i = 0; i < savedRegisters.length; i++) { - StackSlot spillSlot = frameMap.allocateSpillSlot(Kind.Long); - savedRegisterLocations[i] = spillSlot; + RegisterValue register = savedRegisters[i].asValue(kind); + savingMoves[i] = createMove(spillSlot, register); + restoringMoves[i] = createMove(register, spillSlot); } - AMD64SaveRegistersOp save = new AMD64SaveRegistersOp(savedRegisters, savedRegisterLocations); + save = new AMD64SaveRegistersOp(savingMoves, restoringMoves, savedRegisterLocations); append(save); Value thread = args[0]; @@ -231,8 +226,8 @@ AMD64HotSpotCRuntimeCallEpilogueOp op = new AMD64HotSpotCRuntimeCallEpilogueOp(thread); append(op); - AMD64RestoreRegistersOp restore = new AMD64RestoreRegistersOp(savedRegisterLocations.clone(), savedRegisters.clone()); - AMD64RestoreRegistersOp oldValue = calleeSaveInfo.put(currentRuntimeCallInfo, restore); + 
AMD64RestoreRegistersOp restore = new AMD64RestoreRegistersOp(savedRegisterLocations.clone(), save); + AMD64SaveRegistersOp oldValue = calleeSaveInfo.put(currentRuntimeCallInfo, save); assert oldValue == null; append(restore); }
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotRegisterConfig.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotRegisterConfig.java Tue Apr 30 19:50:12 2013 +0200 @@ -29,7 +29,6 @@ import com.oracle.graal.amd64.*; import com.oracle.graal.api.code.*; import com.oracle.graal.api.code.CallingConvention.Type; -import com.oracle.graal.api.code.Register.RegisterFlag; import com.oracle.graal.api.meta.*; import com.oracle.graal.graph.*; import com.oracle.graal.hotspot.*; @@ -38,25 +37,39 @@ // @formatter:off public class AMD64HotSpotRegisterConfig implements RegisterConfig { + private final Architecture architecture; + private final Register[] allocatable = initAllocatable(); - private final EnumMap<RegisterFlag, Register[]> categorized = Register.categorize(allocatable); + private final HashMap<PlatformKind, Register[]> categorized = new HashMap<>(); private final RegisterAttributes[] attributesMap; @Override public Register[] getAllocatableRegisters() { - return allocatable; + return allocatable.clone(); } - @Override - public EnumMap<RegisterFlag, Register[]> getCategorizedAllocatableRegisters() { - return categorized; + public Register[] getAllocatableRegisters(PlatformKind kind) { + if (categorized.containsKey(kind)) { + return categorized.get(kind); + } + + ArrayList<Register> list = new ArrayList<>(); + for (Register reg : getAllocatableRegisters()) { + if (architecture.canStoreValue(reg.getRegisterCategory(), kind)) { + list.add(reg); + } + } + + Register[] ret = list.toArray(new Register[0]); + categorized.put(kind, ret); + return ret; } @Override public RegisterAttributes[] getAttributesMap() { - return attributesMap; + return attributesMap.clone(); } private final Register[] javaGeneralParameterRegisters; @@ -93,7 +106,9 @@ return allocatable; } - public AMD64HotSpotRegisterConfig(HotSpotVMConfig config, boolean globalStubConfig) { + public 
AMD64HotSpotRegisterConfig(Architecture architecture, HotSpotVMConfig config, boolean globalStubConfig) { + this.architecture = architecture; + if (config.windowsOs) { javaGeneralParameterRegisters = new Register[] {rdx, r8, r9, rdi, rsi, rcx}; nativeGeneralParameterRegisters = new Register[] {rcx, rdx, r8, r9}; @@ -109,7 +124,7 @@ xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15 }; - csl = new CalleeSaveLayout(0, -1, 8, regs); + csl = new CalleeSaveLayout(architecture, 0, -1, 8, regs); } else { csl = null; } @@ -135,10 +150,11 @@ return callingConvention(javaGeneralParameterRegisters, returnType, parameterTypes, type, target, stackOnly); } - public Register[] getCallingConventionRegisters(Type type, RegisterFlag flag) { - if (flag == RegisterFlag.FPU) { + public Register[] getCallingConventionRegisters(Type type, Kind kind) { + if (architecture.canStoreValue(XMM, kind)) { return xmmParameterRegisters; } + assert architecture.canStoreValue(CPU, kind); return type == Type.NativeCall ? nativeGeneralParameterRegisters : javaGeneralParameterRegisters; } @@ -178,7 +194,7 @@ if (locations[i] == null) { locations[i] = StackSlot.get(kind.getStackKind(), currentStackOffset, !type.out); - currentStackOffset += Math.max(target.sizeInBytes(kind), target.wordSize); + currentStackOffset += Math.max(target.arch.getSizeInBytes(kind), target.wordSize); } }
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotRuntime.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotRuntime.java Tue Apr 30 19:50:12 2013 +0200 @@ -25,14 +25,11 @@ import static com.oracle.graal.amd64.AMD64.*; import static com.oracle.graal.compiler.amd64.AMD64LIRGenerator.*; import static com.oracle.graal.hotspot.amd64.AMD64DeoptimizeOp.*; +import static com.oracle.graal.hotspot.amd64.AMD64HotSpotBackend.*; import static com.oracle.graal.hotspot.amd64.AMD64HotSpotUnwindOp.*; import static com.oracle.graal.hotspot.nodes.IdentityHashCodeStubCall.*; import static com.oracle.graal.hotspot.nodes.MonitorEnterStubCall.*; import static com.oracle.graal.hotspot.nodes.MonitorExitStubCall.*; -import static com.oracle.graal.hotspot.nodes.NewArrayStubCall.*; -import static com.oracle.graal.hotspot.nodes.NewInstanceStubCall.*; -import static com.oracle.graal.hotspot.nodes.NewMultiArrayStubCall.*; -import static com.oracle.graal.hotspot.nodes.ThreadIsInterruptedStubCall.*; import static com.oracle.graal.hotspot.nodes.VMErrorNode.*; import static com.oracle.graal.hotspot.nodes.VerifyOopStubCall.*; import static com.oracle.graal.hotspot.nodes.WriteBarrierPostStubCall.*; @@ -41,9 +38,6 @@ import static com.oracle.graal.hotspot.replacements.AESCryptSubstitutions.EncryptBlockStubCall.*; import static com.oracle.graal.hotspot.replacements.CipherBlockChainingSubstitutions.DecryptAESCryptStubCall.*; import static com.oracle.graal.hotspot.replacements.CipherBlockChainingSubstitutions.EncryptAESCryptStubCall.*; -import static com.oracle.graal.hotspot.stubs.NewArrayStub.*; -import static com.oracle.graal.hotspot.stubs.NewInstanceStub.*; -import static com.oracle.graal.hotspot.stubs.NewMultiArrayStub.*; import com.oracle.graal.amd64.*; import com.oracle.graal.api.code.*; @@ -106,42 +100,6 @@ /* arg0: object */ javaCallingConvention(Kind.Object, /* arg1: lock */ word)); 
- addStubCall(NEW_ARRAY, - /* ret */ rax.asValue(Kind.Object), - /* arg0: hub */ rdx.asValue(word), - /* arg1: length */ rbx.asValue(Kind.Int)); - - addRuntimeCall(NEW_ARRAY_C, config.newArrayAddress, - /* temps */ null, - /* ret */ ret(Kind.Void), - /* arg0: thread */ nativeCallingConvention(word, - /* arg1: hub */ word, - /* arg2: length */ Kind.Int)); - - addStubCall(NEW_INSTANCE, - /* ret */ rax.asValue(Kind.Object), - /* arg0: hub */ rdx.asValue(word)); - - addRuntimeCall(NEW_INSTANCE_C, config.newInstanceAddress, - /* temps */ null, - /* ret */ ret(Kind.Void), - /* arg0: thread */ nativeCallingConvention(word, - /* arg1: hub */ word)); - - addStubCall(NEW_MULTI_ARRAY, - /* ret */ rax.asValue(Kind.Object), - /* arg0: hub */ rax.asValue(word), - /* arg1: rank */ rbx.asValue(Kind.Int), - /* arg2: dims */ rcx.asValue(word)); - - addRuntimeCall(NEW_MULTI_ARRAY_C, config.newMultiArrayAddress, - /* temps */ null, - /* ret */ ret(Kind.Void), - /* arg0: thread */ nativeCallingConvention(word, - /* arg1: hub */ word, - /* arg2: rank */ Kind.Int, - /* arg3: dims */ word)); - addRuntimeCall(VERIFY_OOP, config.verifyOopStub, /* temps */ null, /* ret */ ret(Kind.Void), @@ -159,12 +117,6 @@ /* ret */ rax.asValue(Kind.Int), /* arg0: obj */ javaCallingConvention(Kind.Object)); - addRuntimeCall(THREAD_IS_INTERRUPTED, config.threadIsInterruptedStub, - /* temps */ null, - /* ret */ rax.asValue(Kind.Boolean), - /* arg0: thread */ javaCallingConvention(Kind.Object, - /* arg1: clearInterrupted */ Kind.Boolean)); - addRuntimeCall(ENCRYPT_BLOCK, config.aescryptEncryptBlockStub, /* temps */ null, /* ret */ ret(Kind.Void), @@ -197,15 +149,15 @@ /* arg3: r */ word, /* arg4: inLength */ Kind.Int)); - addRuntimeCall(AMD64HotSpotBackend.EXCEPTION_HANDLER, config.handleExceptionStub, + addRuntimeCall(EXCEPTION_HANDLER, config.handleExceptionStub, /* temps */ null, /* ret */ ret(Kind.Void)); - addRuntimeCall(AMD64HotSpotBackend.DEOPT_HANDLER, config.handleDeoptStub, + 
addRuntimeCall(DEOPT_HANDLER, config.handleDeoptStub, /* temps */ null, /* ret */ ret(Kind.Void)); - addRuntimeCall(AMD64HotSpotBackend.IC_MISS_HANDLER, config.inlineCacheMissStub, + addRuntimeCall(IC_MISS_HANDLER, config.inlineCacheMissStub, /* temps */ null, /* ret */ ret(Kind.Void)); // @formatter:on @@ -241,6 +193,6 @@ @Override protected RegisterConfig createRegisterConfig(boolean globalStubConfig) { - return new AMD64HotSpotRegisterConfig(config, globalStubConfig); + return new AMD64HotSpotRegisterConfig(graalRuntime.getTarget().arch, config, globalStubConfig); } }
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotBackend.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotBackend.java Tue Apr 30 19:50:12 2013 +0200 @@ -24,6 +24,7 @@ import com.oracle.graal.api.code.*; import com.oracle.graal.api.meta.*; +import com.oracle.graal.asm.*; import com.oracle.graal.compiler.gen.*; import com.oracle.graal.compiler.sparc.*; import com.oracle.graal.hotspot.*; @@ -47,6 +48,12 @@ } @Override + protected AbstractAssembler createAssembler(FrameMap frameMap) { + // SPARC: Create assembler. + return null; + } + + @Override public TargetMethodAssembler newAssembler(LIRGenerator lirGen, CompilationResult compilationResult) { // SPARC: Create assembler. return null;
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotGraalRuntime.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotGraalRuntime.java Tue Apr 30 19:50:12 2013 +0200 @@ -84,6 +84,15 @@ runtime.compilerToVm = toVM; } + protected static HotSpotGraalRuntimeFactory findFactory(String architecture) { + for (HotSpotGraalRuntimeFactory factory : ServiceLoader.loadInstalled(HotSpotGraalRuntimeFactory.class)) { + if (factory.getArchitecture().equals(architecture) && factory.getName().equals(GraalOptions.GraalRuntime)) { + return factory; + } + } + return null; + } + private static Kind wordKind; /** @@ -114,8 +123,9 @@ return unsafe.getInt(object, offset); } - protected/* final */CompilerToVM compilerToVm; - protected/* final */VMToCompiler vmToCompiler; + protected/* final */CompilerToVM compilerToVm; + protected/* final */CompilerToGPU compilerToGpu; + protected/* final */VMToCompiler vmToCompiler; protected final HotSpotRuntime runtime; protected final TargetDescription target; @@ -128,12 +138,14 @@ private final HotSpotBackend backend; protected HotSpotGraalRuntime() { - CompilerToVM toVM = new CompilerToVMImpl(); + CompilerToVM toVM = new CompilerToVMImpl(); + CompilerToGPU toGPU = new CompilerToGPUImpl(); // initialize VmToCompiler VMToCompiler toCompiler = new VMToCompilerImpl(this); - compilerToVm = toVM; + compilerToVm = toVM; + compilerToGpu = toGPU; vmToCompiler = toCompiler; config = new HotSpotVMConfig(); compilerToVm.initializeConfiguration(config); @@ -215,6 +227,10 @@ return vmToCompiler; } + public CompilerToGPU getCompilerToGPU() { + return compilerToGpu; + } + public JavaType lookupType(String name, HotSpotResolvedObjectType accessingClass, boolean eagerResolve) { if (name.length() == 1 && vmToCompiler instanceof VMToCompilerImpl) { VMToCompilerImpl impl = (VMToCompilerImpl) vmToCompiler;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotGraalRuntimeFactory.java Tue Apr 30 19:50:12 2013 +0200 @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.hotspot; + +public interface HotSpotGraalRuntimeFactory { + + HotSpotGraalRuntime createRuntime(); + + String getArchitecture(); + + String getName(); +}
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotRuntimeCallTarget.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotRuntimeCallTarget.java Tue Apr 30 19:50:12 2013 +0200 @@ -57,8 +57,11 @@ private final CompilerToVM vm; - public HotSpotRuntimeCallTarget(Descriptor descriptor, long address, CallingConvention cc, CompilerToVM vm) { + private final boolean isCRuntimeCall; + + public HotSpotRuntimeCallTarget(Descriptor descriptor, long address, boolean isCRuntimeCall, CallingConvention cc, CompilerToVM vm) { this.address = address; + this.isCRuntimeCall = isCRuntimeCall; this.descriptor = descriptor; this.cc = cc; this.vm = vm; @@ -88,7 +91,7 @@ public void finalizeAddress(Backend backend) { if (address == 0) { - assert stub != null : "linkage without an address must be a stub"; + assert stub != null : "linkage without an address must be a stub - forgot to register a Stub associated with " + descriptor + "?"; InstalledCode code = stub.getCode(backend); AllocatableValue[] argumentLocations = new AllocatableValue[cc.getArgumentCount()]; @@ -118,7 +121,6 @@ * Determines if this is a link to a C/C++ function in the HotSpot runtime. */ public boolean isCRuntimeCall() { - HotSpotVMConfig config = HotSpotGraalRuntime.graalRuntime().getConfig(); - return address == config.newArrayAddress || address == config.newInstanceAddress || address == config.newMultiArrayAddress; + return isCRuntimeCall; } }
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotVMConfig.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotVMConfig.java Tue Apr 30 19:50:12 2013 +0200 @@ -356,7 +356,6 @@ public long deoptimizeStub; public long unwindExceptionStub; public long osrMigrationEndStub; - public long registerFinalizerStub; public long createNullPointerExceptionStub; public long createOutOfBoundsExceptionStub; public long javaTimeMillisStub; @@ -371,7 +370,6 @@ public long logPrintfStub; public long stubPrintfStub; public int deoptReasonNone; - public long threadIsInterruptedStub; public long identityHashCodeStub; public long aescryptEncryptBlockStub; public long aescryptDecryptBlockStub; @@ -381,6 +379,8 @@ public long newInstanceAddress; public long newArrayAddress; public long newMultiArrayAddress; + public long registerFinalizerAddress; + public long threadIsInterruptedAddress; public int deoptReasonNullCheck; public int deoptReasonRangeCheck;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/CompilerToGPU.java Tue Apr 30 19:50:12 2013 +0200 @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package com.oracle.graal.hotspot.bridge; + +import com.oracle.graal.api.code.InvalidInstalledCodeException; + +/** + * Calls from Java into the GPU. + */ +public interface CompilerToGPU { + + /** + * Attempts to initialize and create a valid context with the GPU. + * + * @return whether the GPU context has been initialized and is valid. + */ + boolean deviceInit(); + + /** + * Attempts to detach from a valid GPU context. + * + * @return whether the GPU context has been properly disposed. + */ + boolean deviceDetach(); + + /** + * Attempts to generate and return a bound function to the + * loaded method kernel on the GPU. 
+ * + * @param code the text or binary values for a method kernel + * @return the value of the bound kernel in GPU space. + */ + long generateKernel(byte[] code, String name) throws InvalidInstalledCodeException; +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/CompilerToGPUImpl.java Tue Apr 30 19:50:12 2013 +0200 @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package com.oracle.graal.hotspot.bridge; + +import com.oracle.graal.api.code.InvalidInstalledCodeException; + + +/** + * Entries into the HotSpot GPU interface from Java code. + */ +public class CompilerToGPUImpl implements CompilerToGPU { + + public native boolean deviceInit(); + + public native long generateKernel(byte[] code, String name) throws InvalidInstalledCodeException; + + public native boolean deviceDetach(); + +}
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotResolvedJavaMethod.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotResolvedJavaMethod.java Tue Apr 30 19:50:12 2013 +0200 @@ -54,6 +54,10 @@ private final HotSpotResolvedObjectType holder; private/* final */int codeSize; private/* final */int exceptionHandlerCount; + private boolean callerSensitive; + private boolean forceInline; + private boolean dontInline; + private boolean ignoredBySecurityStackWalk; private HotSpotSignature signature; private Boolean hasBalancedMonitors; private Map<Object, Object> compilerStorage; @@ -131,6 +135,43 @@ return graalRuntime().getCompilerToVM().initializeExceptionHandlers(metaspaceMethod, handlers); } + /** + * Returns true if this method has a CallerSensitive annotation. + * + * @return true if CallerSensitive annotation present, false otherwise + */ + public boolean isCallerSensitive() { + return callerSensitive; + } + + /** + * Returns true if this method has a ForceInline annotation. + * + * @return true if ForceInline annotation present, false otherwise + */ + public boolean isForceInline() { + return forceInline; + } + + /** + * Returns true if this method has a DontInline annotation. + * + * @return true if DontInline annotation present, false otherwise + */ + public boolean isDontInline() { + return dontInline; + } + + /** + * Returns true if this method is one of the special methods that is ignored by security stack + * walks. + * + * @return true if special method ignored by security stack walks, false otherwise + */ + public boolean ignoredBySecurityStackWalk() { + return ignoredBySecurityStackWalk; + } + public boolean hasBalancedMonitors() { if (hasBalancedMonitors == null) { hasBalancedMonitors = graalRuntime().getCompilerToVM().hasBalancedMonitors(metaspaceMethod);
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java Tue Apr 30 19:50:12 2013 +0200 @@ -25,7 +25,6 @@ import static com.oracle.graal.api.code.CallingConvention.Type.*; import static com.oracle.graal.api.code.DeoptimizationAction.*; import static com.oracle.graal.api.code.MemoryBarriers.*; -import static com.oracle.graal.api.code.Register.RegisterFlag.*; import static com.oracle.graal.api.meta.DeoptimizationReason.*; import static com.oracle.graal.api.meta.Value.*; import static com.oracle.graal.graph.UnsafeAccess.*; @@ -33,8 +32,13 @@ import static com.oracle.graal.hotspot.nodes.NewArrayStubCall.*; import static com.oracle.graal.hotspot.nodes.NewInstanceStubCall.*; import static com.oracle.graal.hotspot.nodes.NewMultiArrayStubCall.*; +import static com.oracle.graal.hotspot.nodes.ThreadIsInterruptedStubCall.*; import static com.oracle.graal.hotspot.replacements.SystemSubstitutions.*; -import static com.oracle.graal.hotspot.stubs.Stub.*; +import static com.oracle.graal.hotspot.stubs.NewArrayStub.*; +import static com.oracle.graal.hotspot.stubs.NewInstanceStub.*; +import static com.oracle.graal.hotspot.stubs.NewMultiArrayStub.*; +import static com.oracle.graal.hotspot.stubs.RegisterFinalizerStub.*; +import static com.oracle.graal.hotspot.stubs.ThreadIsInterruptedStub.*; import static com.oracle.graal.java.GraphBuilderPhase.RuntimeCalls.*; import static com.oracle.graal.nodes.java.RegisterFinalizerNode.*; import static com.oracle.graal.replacements.Log.*; @@ -51,7 +55,6 @@ import com.oracle.graal.api.code.CompilationResult.DataPatch; import com.oracle.graal.api.code.CompilationResult.Infopoint; import com.oracle.graal.api.code.CompilationResult.Mark; -import com.oracle.graal.api.code.Register.RegisterFlag; import com.oracle.graal.api.code.RuntimeCallTarget.Descriptor; import com.oracle.graal.api.meta.*; import 
com.oracle.graal.graph.*; @@ -187,13 +190,12 @@ int currentStackOffset = 0; for (int i = 0; i < arguments.length; i++) { Kind kind = arguments[i]; - RegisterFlag flag = kind == Kind.Float || kind == Kind.Double ? FPU : CPU; - Register[] ccRegs = globalStubRegConfig.getCallingConventionRegisters(type, flag); + Register[] ccRegs = globalStubRegConfig.getCallingConventionRegisters(type, kind); if (i < ccRegs.length) { result[i] = ccRegs[i].asValue(kind); } else { result[i] = StackSlot.get(kind.getStackKind(), currentStackOffset, false); - currentStackOffset += Math.max(target.sizeInBytes(kind), target.wordSize); + currentStackOffset += Math.max(target.arch.getSizeInBytes(kind), target.wordSize); } } return result; @@ -204,6 +206,7 @@ this.graalRuntime = graalRuntime; regConfig = createRegisterConfig(false); globalStubRegConfig = createRegisterConfig(true); + Kind word = graalRuntime.getTarget().wordKind; // @formatter:off @@ -212,11 +215,48 @@ /* ret */ ret(Kind.Void), /* arg0: long */ javaCallingConvention(Kind.Long)); - addRuntimeCall(REGISTER_FINALIZER, config.registerFinalizerStub, - /* temps */ null, + addStubCall(REGISTER_FINALIZER, /* ret */ ret(Kind.Void), /* arg0: object */ javaCallingConvention(Kind.Object)); + addCRuntimeCall(REGISTER_FINALIZER_C, config.registerFinalizerAddress, + /* ret */ ret(Kind.Void), + /* arg0: thread */ nativeCallingConvention(word, + /* arg1: object */ Kind.Object)); + + addStubCall(NEW_ARRAY, + /* ret */ ret(Kind.Object), + /* arg0: hub */ javaCallingConvention(word, + /* arg1: length */ Kind.Int)); + + addCRuntimeCall(NEW_ARRAY_C, config.newArrayAddress, + /* ret */ ret(Kind.Void), + /* arg0: thread */ nativeCallingConvention(word, + /* arg1: hub */ word, + /* arg2: length */ Kind.Int)); + + addStubCall(NEW_INSTANCE, + /* ret */ ret(Kind.Object), + /* arg0: hub */ javaCallingConvention(word)); + + addCRuntimeCall(NEW_INSTANCE_C, config.newInstanceAddress, + /* ret */ ret(Kind.Void), + /* arg0: thread */ 
nativeCallingConvention(word, + /* arg1: hub */ word)); + + addStubCall(NEW_MULTI_ARRAY, + /* ret */ ret(Kind.Object), + /* arg0: hub */ javaCallingConvention(word, + /* arg1: rank */ Kind.Int, + /* arg2: dims */ word)); + + addCRuntimeCall(NEW_MULTI_ARRAY_C, config.newMultiArrayAddress, + /* ret */ ret(Kind.Void), + /* arg0: thread */ nativeCallingConvention(word, + /* arg1: hub */ word, + /* arg2: rank */ Kind.Int, + /* arg3: dims */ word)); + addRuntimeCall(CREATE_NULL_POINTER_EXCEPTION, config.createNullPointerExceptionStub, /* temps */ null, /* ret */ ret(Kind.Object)); @@ -277,6 +317,18 @@ /* ret */ ret(Kind.Void), /* arg0: object */ javaCallingConvention(Kind.Object, /* arg1: flags */ Kind.Int)); + + addStubCall(THREAD_IS_INTERRUPTED, + /* ret */ ret(Kind.Boolean), + /* arg0: thread */ javaCallingConvention(Kind.Object, + /* arg1: clearInterrupted */ Kind.Boolean)); + + addCRuntimeCall(THREAD_IS_INTERRUPTED_C, config.threadIsInterruptedAddress, + /* ret */ ret(Kind.Boolean), + /* arg0: thread */ nativeCallingConvention(word, + /* arg1: receiverThread */ Kind.Object, + /* arg1: clearInterrupted */ Kind.Boolean)); + // @formatter:on } @@ -291,6 +343,14 @@ return addRuntimeCall(descriptor, 0L, null, ret, args); } + protected RuntimeCallTarget addCRuntimeCall(Descriptor descriptor, long address, AllocatableValue ret, AllocatableValue... args) { + return addRuntimeCall(descriptor, address, true, null, ret, args); + } + + protected RuntimeCallTarget addRuntimeCall(Descriptor descriptor, long address, Register[] tempRegs, AllocatableValue ret, AllocatableValue... args) { + return addRuntimeCall(descriptor, address, false, tempRegs, ret, args); + } + /** * Registers the details for linking a runtime call. 
* @@ -300,7 +360,7 @@ * @param ret where the call returns its result * @param args where arguments are passed to the call */ - protected RuntimeCallTarget addRuntimeCall(Descriptor descriptor, long address, Register[] tempRegs, AllocatableValue ret, AllocatableValue... args) { + protected RuntimeCallTarget addRuntimeCall(Descriptor descriptor, long address, boolean isCRuntimeCall, Register[] tempRegs, AllocatableValue ret, AllocatableValue... args) { AllocatableValue[] temps = tempRegs == null || tempRegs.length == 0 ? AllocatableValue.NONE : new AllocatableValue[tempRegs.length]; for (int i = 0; i < temps.length; i++) { temps[i] = tempRegs[i].asValue(); @@ -311,7 +371,7 @@ for (int i = 0; i < argTypes.length; i++) { assert checkAssignable(argTypes[i], args[i]) : descriptor + " incompatible with argument location " + i + ": " + args[i]; } - HotSpotRuntimeCallTarget runtimeCall = new HotSpotRuntimeCallTarget(descriptor, address, new CallingConvention(temps, 0, ret, args), graalRuntime.getCompilerToVM()); + HotSpotRuntimeCallTarget runtimeCall = new HotSpotRuntimeCallTarget(descriptor, address, isCRuntimeCall, new CallingConvention(temps, 0, ret, args), graalRuntime.getCompilerToVM()); runtimeCalls.put(descriptor, runtimeCall); return runtimeCall; } @@ -349,6 +409,9 @@ replacements.registerSubstitutions(AESCryptSubstitutions.class); replacements.registerSubstitutions(CipherBlockChainingSubstitutions.class); } + if (GraalOptions.IntrinsifyReflectionMethods) { + replacements.registerSubstitutions(ReflectionSubstitutions.class); + } checkcastSnippets = new CheckCastSnippets.Templates(this, replacements, graalRuntime.getTarget()); instanceofSnippets = new InstanceOfSnippets.Templates(this, replacements, graalRuntime.getTarget()); @@ -361,6 +424,8 @@ registerStub(new NewInstanceStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(NEW_INSTANCE))); registerStub(new NewArrayStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(NEW_ARRAY))); 
registerStub(new NewMultiArrayStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(NEW_MULTI_ARRAY))); + registerStub(new RegisterFinalizerStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(REGISTER_FINALIZER))); + registerStub(new ThreadIsInterruptedStub(this, replacements, graalRuntime.getTarget(), runtimeCalls.get(THREAD_IS_INTERRUPTED))); } private void registerStub(Stub stub) { @@ -786,7 +851,7 @@ } private IndexedLocationNode createArrayLocation(Graph graph, Kind elementKind, ValueNode index) { - int scale = this.graalRuntime.getTarget().sizeInBytes(elementKind); + int scale = this.graalRuntime.getTarget().arch.getSizeInBytes(elementKind); return IndexedLocationNode.create(LocationNode.getArrayLocation(elementKind), elementKind, getArrayBaseOffset(elementKind), index, graph, scale); }
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/ThreadIsInterruptedStubCall.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/ThreadIsInterruptedStubCall.java Tue Apr 30 19:50:12 2013 +0200 @@ -30,12 +30,13 @@ import com.oracle.graal.compiler.gen.*; import com.oracle.graal.compiler.target.*; import com.oracle.graal.graph.*; +import com.oracle.graal.hotspot.stubs.*; import com.oracle.graal.lir.*; import com.oracle.graal.nodes.*; import com.oracle.graal.nodes.type.*; /** - * Node implementing a call to HotSpot's ThreadIsInterrupted stub. + * Node implementing a call to {@link ThreadIsInterruptedStub}. */ public class ThreadIsInterruptedStubCall extends DeoptimizingStubCall implements LIRGenLowerable {
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/phases/WriteBarrierVerificationPhase.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/phases/WriteBarrierVerificationPhase.java Tue Apr 30 19:50:12 2013 +0200 @@ -31,193 +31,94 @@ import com.oracle.graal.nodes.extended.WriteNode.WriteBarrierType; import com.oracle.graal.nodes.java.*; import com.oracle.graal.phases.*; -import com.oracle.graal.phases.graph.*; +/** + * Verification phase that checks if, for every write, at least one write barrier is present at all + * paths leading to the previous safepoint. For every write, necessitating a write barrier, a + * bottom-up traversal of the graph is performed up to the previous safepoints via all possible + * paths. If, for a certain path, no write barrier satisfying the processed write is found, an + * assertion is generated. + */ public class WriteBarrierVerificationPhase extends Phase { - private class MemoryMap implements MergeableState<MemoryMap> { - - private IdentityHashMap<Object, LinkedList<LocationNode>> lastMemorySnapshot; - private IdentityHashMap<Object, LinkedList<SerialWriteBarrier>> lastWriteBarrierSnapshot; - - public MemoryMap(MemoryMap memoryMap) { - lastMemorySnapshot = new IdentityHashMap<>(memoryMap.lastMemorySnapshot); - lastWriteBarrierSnapshot = new IdentityHashMap<>(memoryMap.lastWriteBarrierSnapshot); - } - - public MemoryMap() { - lastMemorySnapshot = new IdentityHashMap<>(); - lastWriteBarrierSnapshot = new IdentityHashMap<>(); - } - - @Override - public String toString() { - return "Map=" + lastMemorySnapshot.toString(); - } - - @Override - public boolean merge(MergeNode merge, List<MemoryMap> withStates) { - if (withStates.size() == 0) { - return true; - } + @Override + protected void run(StructuredGraph graph) { + processWrites(graph); + } - for (MemoryMap other : withStates) { - for (Object otherObject : other.lastMemorySnapshot.keySet()) { - LinkedList<LocationNode> 
currentLocations = lastMemorySnapshot.get(otherObject); - LinkedList<LocationNode> otherLocations = other.lastMemorySnapshot.get(otherObject); - if (otherLocations != null) { - if (currentLocations == null) { - currentLocations = new LinkedList<>(); - } - for (LocationNode location : otherLocations) { - if (!currentLocations.contains(location)) { - currentLocations.add(location); - } - } - } - } - for (Object otherObject : other.lastWriteBarrierSnapshot.keySet()) { - LinkedList<SerialWriteBarrier> currentWriteBarriers = lastWriteBarrierSnapshot.get(otherObject); - LinkedList<SerialWriteBarrier> otherWriteBarriers = other.lastWriteBarrierSnapshot.get(otherObject); - if (otherWriteBarriers != null) { - if (currentWriteBarriers == null) { - currentWriteBarriers = new LinkedList<>(); - } - for (SerialWriteBarrier barrier : otherWriteBarriers) { - if (!currentWriteBarriers.contains(barrier)) { - currentWriteBarriers.add(barrier); - } - } - } - } + private static void processWrites(StructuredGraph graph) { + for (Node node : graph.getNodes()) { + if (isObjectWrite(node)) { + validateWrite(node); } - return true; - } - - @Override - public void loopBegin(LoopBeginNode loopBegin) { - } - - @Override - public void loopEnds(LoopBeginNode loopBegin, List<MemoryMap> loopEndStates) { - } - - @Override - public void afterSplit(AbstractBeginNode node) { - } - - @Override - public MemoryMap clone() { - return new MemoryMap(this); } } - @Override - protected void run(StructuredGraph graph) { - new PostOrderNodeIterator<MemoryMap>(graph.start(), new MemoryMap()) { - - @Override - protected void node(FixedNode node) { - processNode(node, state); - } - }.apply(); - } - - private static void processNode(FixedNode node, MemoryMap state) { - if (node instanceof WriteNode) { - processWriteNode((WriteNode) node, state); - } else if (node instanceof CompareAndSwapNode) { - processCASNode((CompareAndSwapNode) node, state); - } else if (node instanceof SerialWriteBarrier) { - 
processWriteBarrier((SerialWriteBarrier) node, state); - } else if ((node instanceof DeoptimizingNode)) { - if (((DeoptimizingNode) node).canDeoptimize()) { - validateWriteBarriers(state); - processSafepoint(state); + private static void validateWrite(Node write) { + /* + * The currently validated write is checked in order to discover if it has an appropriate + * attached write barrier. + */ + if (hasAttachedBarrier(write)) { + return; + } + NodeFlood frontier = write.graph().createNodeFlood(); + expandFrontier(frontier, write); + Iterator<Node> iterator = frontier.iterator(); + while (iterator.hasNext()) { + Node currentNode = iterator.next(); + assert !isSafepoint(currentNode) : "Write barrier must be present"; + if (!(currentNode instanceof SerialWriteBarrier) || ((currentNode instanceof SerialWriteBarrier) && !validateBarrier(write, (SerialWriteBarrier) currentNode))) { + expandFrontier(frontier, currentNode); } } } - private static void processWriteNode(WriteNode node, MemoryMap state) { - if (node.getWriteBarrierType() != WriteBarrierType.NONE) { - LinkedList<LocationNode> locations = state.lastMemorySnapshot.get(node.object()); - if (locations == null) { - locations = new LinkedList<>(); - locations.add(node.location()); - state.lastMemorySnapshot.put(node.object(), locations); - } else if ((node.getWriteBarrierType() == WriteBarrierType.PRECISE) && !locations.contains(node.location())) { - locations.add(node.location()); - } - } + private static boolean hasAttachedBarrier(Node node) { + return (((FixedWithNextNode) node).next() instanceof SerialWriteBarrier) && validateBarrier(node, (SerialWriteBarrier) ((FixedWithNextNode) node).next()); } - private static void processCASNode(CompareAndSwapNode node, MemoryMap state) { - if (node.getWriteBarrierType() != WriteBarrierType.NONE) { - LinkedList<LocationNode> locations = state.lastMemorySnapshot.get(node.object()); - if (locations == null) { - locations = new LinkedList<>(); - 
locations.add(node.getLocation()); - state.lastMemorySnapshot.put(node.object(), locations); - } else if ((node.getWriteBarrierType() == WriteBarrierType.PRECISE) && !locations.contains(node.getLocation())) { - locations.add(node.getLocation()); + private static boolean isObjectWrite(Node node) { + if ((node instanceof WriteNode && (((WriteNode) node).getWriteBarrierType() != WriteBarrierType.NONE)) || + (node instanceof CompareAndSwapNode && (((CompareAndSwapNode) node).getWriteBarrierType() != WriteBarrierType.NONE))) { + return true; + } + return false; + } + + private static void expandFrontier(NodeFlood frontier, Node node) { + for (Node previousNode : node.cfgPredecessors()) { + if (previousNode != null) { + frontier.add(previousNode); } } } - private static void processWriteBarrier(SerialWriteBarrier currentBarrier, MemoryMap state) { - LinkedList<SerialWriteBarrier> writeBarriers = state.lastWriteBarrierSnapshot.get(currentBarrier.getObject()); - if (writeBarriers == null) { - writeBarriers = new LinkedList<>(); - writeBarriers.add(currentBarrier); - state.lastWriteBarrierSnapshot.put(currentBarrier.getObject(), writeBarriers); - } else if (currentBarrier.usePrecise()) { - boolean found = false; - for (SerialWriteBarrier barrier : writeBarriers) { - if (barrier.getLocation() == currentBarrier.getLocation()) { - found = true; - break; - } - } - if (!found) { - writeBarriers.add(currentBarrier); - } - } + private static boolean isSafepoint(Node node) { + /* + * LoopBegin nodes are also treated as safepoints since a bottom-up analysis is performed + * and loop safepoints are placed before LoopEnd nodes. Possible elimination of write + * barriers inside loops, derived from writes outside loops, can not be permitted. 
+ */ + return ((node instanceof DeoptimizingNode) && ((DeoptimizingNode) node).canDeoptimize()) || (node instanceof LoopBeginNode); } - private static void validateWriteBarriers(MemoryMap state) { - Set<Object> objects = state.lastMemorySnapshot.keySet(); - for (Object write : objects) { - LinkedList<SerialWriteBarrier> writeBarriers = state.lastWriteBarrierSnapshot.get(write); - if (writeBarriers == null) { - throw new GraalInternalError("Failed to find any write barrier at safepoint for written object"); - } - /* - * Check the first write barrier of the object to determine if it is precise or not. If - * it is not, the validation for this object has passed (since we had a hit in the write - * barrier hashmap), otherwise we have to ensure the presence of write barriers for - * every written location. - */ - final boolean precise = writeBarriers.getFirst().usePrecise(); - if (precise) { - LinkedList<LocationNode> locations = state.lastMemorySnapshot.get(write); - for (LocationNode location : locations) { - boolean found = false; - for (SerialWriteBarrier barrier : writeBarriers) { - if (location == barrier.getLocation()) { - found = true; - break; - } - } - if (!found) { - throw new GraalInternalError("Failed to find write barrier at safepoint for precise written object"); - } - } - } + private static boolean validateBarrier(Node write, SerialWriteBarrier barrier) { + ValueNode writtenObject = null; + LocationNode writtenLocation = null; + if (write instanceof WriteNode) { + writtenObject = ((WriteNode) write).object(); + writtenLocation = ((WriteNode) write).location(); + } else if (write instanceof CompareAndSwapNode) { + writtenObject = ((CompareAndSwapNode) write).object(); + writtenLocation = ((CompareAndSwapNode) write).getLocation(); + } else { + assert false : "Node must be of type requiring a write barrier"; } - } - private static void processSafepoint(MemoryMap state) { - state.lastMemorySnapshot.clear(); - state.lastWriteBarrierSnapshot.clear(); + if 
((barrier.getObject() == writtenObject) && (!barrier.usePrecise() || (barrier.usePrecise() && barrier.getLocation() == writtenLocation))) { + return true; + } + return false; } }
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/NewObjectSnippets.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/NewObjectSnippets.java Tue Apr 30 19:50:12 2013 +0200 @@ -261,7 +261,7 @@ final int alignment = target.wordSize; final int headerSize = HotSpotRuntime.getArrayBaseOffset(elementKind); final Integer length = lengthNode.isConstant() ? Integer.valueOf(lengthNode.asConstant().asInt()) : null; - int log2ElementSize = CodeUtil.log2(target.sizeInBytes(elementKind)); + int log2ElementSize = CodeUtil.log2(target.arch.getSizeInBytes(elementKind)); if (!useTLAB) { ConstantNode zero = ConstantNode.defaultForKind(target.wordKind, graph); /*
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/ReflectionGetCallerClassNode.java Tue Apr 30 19:50:12 2013 +0200 @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.graal.hotspot.replacements; + +import com.oracle.graal.api.meta.*; +import com.oracle.graal.graph.*; +import com.oracle.graal.hotspot.meta.*; +import com.oracle.graal.nodes.*; +import com.oracle.graal.nodes.spi.*; +import com.oracle.graal.phases.*; +import com.oracle.graal.replacements.nodes.*; + +public class ReflectionGetCallerClassNode extends MacroNode implements Canonicalizable, Lowerable { + + public ReflectionGetCallerClassNode(Invoke invoke) { + super(invoke); + } + + @Override + public ValueNode canonical(CanonicalizerTool tool) { + ConstantNode callerClassNode = getCallerClassNode(tool.runtime()); + if (callerClassNode != null) { + return callerClassNode; + } + return this; + } + + @Override + public void lower(LoweringTool tool, LoweringType loweringType) { + StructuredGraph graph = (StructuredGraph) graph(); + + ConstantNode callerClassNode = getCallerClassNode(tool.getRuntime()); + + if (callerClassNode != null) { + graph.replaceFixedWithFloating(this, callerClassNode); + } else { + graph.replaceFixedWithFixed(this, createInvoke()); + } + } + + /** + * If inlining is deep enough this method returns a {@link ConstantNode} of the caller class by + * walking the the stack. + * + * @param runtime + * @return ConstantNode of the caller class, or null + */ + private ConstantNode getCallerClassNode(MetaAccessProvider runtime) { + if (!GraalOptions.IntrinsifyReflectionMethods) { + return null; + } + + // Walk back up the frame states to find the caller at the required depth. + FrameState state = stateAfter(); + + // Cf. JVM_GetCallerClass + // NOTE: Start the loop at depth 1 because the current frame state does + // not include the Reflection.getCallerClass() frame. 
+ for (int n = 1; state != null; state = state.outerFrameState(), n++) { + HotSpotResolvedJavaMethod method = (HotSpotResolvedJavaMethod) state.method(); + switch (n) { + case 0: + throw GraalInternalError.shouldNotReachHere("current frame state does not include the Reflection.getCallerClass frame"); + case 1: + // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass). + if (!method.isCallerSensitive()) { + return null; // bail-out; let JVM_GetCallerClass do the work + } + break; + default: + if (!method.ignoredBySecurityStackWalk()) { + // We have reached the desired frame; return the holder class. + HotSpotResolvedObjectType callerClass = (HotSpotResolvedObjectType) method.getDeclaringClass(); + return ConstantNode.forObject(callerClass.mirror(), runtime, graph()); + } + break; + } + } + return null; // bail-out; let JVM_GetCallerClass do the work + } + +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/ReflectionSubstitutions.java Tue Apr 30 19:50:12 2013 +0200 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.hotspot.replacements; + +import com.oracle.graal.api.replacements.*; +import com.oracle.graal.nodes.spi.MacroSubstitution; + +/** + * Substitutions for {@link sun.reflect.Reflection} methods. + */ +@ClassSubstitution(sun.reflect.Reflection.class) +public class ReflectionSubstitutions { + + @MacroSubstitution(macro = ReflectionGetCallerClassNode.class, optional = true) + public static native Class<?> getCallerClass(); + +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/CRuntimeStub.java Tue Apr 30 19:50:12 2013 +0200 @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.hotspot.stubs; + +import com.oracle.graal.api.code.*; +import com.oracle.graal.hotspot.*; +import com.oracle.graal.hotspot.meta.*; +import com.oracle.graal.nodes.spi.*; +import com.oracle.graal.replacements.SnippetTemplate.Arguments; +import com.oracle.graal.replacements.SnippetTemplate.SnippetInfo; + +/** + * Base class for a stub that saves registers around a C runtime call. 
+ */ +public abstract class CRuntimeStub extends Stub { + + public CRuntimeStub(final HotSpotRuntime runtime, Replacements replacements, TargetDescription target, HotSpotRuntimeCallTarget linkage) { + super(runtime, replacements, target, linkage); + } + + @Override + protected Arguments makeArguments(SnippetInfo stub) { + Arguments args = new Arguments(stub); + for (int i = 0; i < stub.getParameterCount(); i++) { + args.add(stub.getParameterName(i), null); + } + return args; + } +}
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewArrayStub.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewArrayStub.java Tue Apr 30 19:50:12 2013 +0200 @@ -46,14 +46,14 @@ /** * Stub implementing the fast path for TLAB refill during instance class allocation. This stub is - * called from the {@linkplain NewObjectSnippets inline} allocation code when TLAB allocation fails. - * If this stub fails to refill the TLAB or allocate the object, it calls out to the HotSpot C++ - * runtime to complete the allocation. + * called via {@link NewArrayStubCall} from the {@linkplain NewObjectSnippets inline} allocation + * code when TLAB allocation fails. If this stub fails to refill the TLAB or allocate the object, it + * calls out to the HotSpot C++ runtime to complete the allocation. */ public class NewArrayStub extends Stub { public NewArrayStub(final HotSpotRuntime runtime, Replacements replacements, TargetDescription target, HotSpotRuntimeCallTarget linkage) { - super(runtime, replacements, target, linkage, "newArray"); + super(runtime, replacements, target, linkage); } @Override @@ -116,7 +116,7 @@ return verifyOop(getAndClearObjectResult(thread())); } - public static final Descriptor NEW_ARRAY_C = new Descriptor("new_array_c", false, void.class, Word.class, Word.class, int.class); + public static final Descriptor NEW_ARRAY_C = descriptorFor(NewArrayStub.class, "newArrayC", false); @NodeIntrinsic(CRuntimeCall.class) public static native void newArrayC(@ConstantNodeParameter Descriptor newArrayC, Word thread, Word hub, int length);
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewInstanceStub.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewInstanceStub.java Tue Apr 30 19:50:12 2013 +0200 @@ -48,14 +48,14 @@ /** * Stub implementing the fast path for TLAB refill during instance class allocation. This stub is - * called from the {@linkplain NewObjectSnippets inline} allocation code when TLAB allocation fails. - * If this stub fails to refill the TLAB or allocate the object, it calls out to the HotSpot C++ - * runtime for to complete the allocation. + * called via {@link NewInstanceStubCall} from the {@linkplain NewObjectSnippets inline} allocation + * code when TLAB allocation fails. If this stub fails to refill the TLAB or allocate the object, it + * calls out to the HotSpot C++ runtime for to complete the allocation. */ public class NewInstanceStub extends Stub { public NewInstanceStub(final HotSpotRuntime runtime, Replacements replacements, TargetDescription target, HotSpotRuntimeCallTarget linkage) { - super(runtime, replacements, target, linkage, "newInstance"); + super(runtime, replacements, target, linkage); } @Override @@ -231,7 +231,7 @@ return Boolean.getBoolean("graal.newInstanceStub.forceSlowPath"); } - public static final Descriptor NEW_INSTANCE_C = new Descriptor("new_instance_c", false, void.class, Word.class, Word.class); + public static final Descriptor NEW_INSTANCE_C = descriptorFor(NewInstanceStub.class, "newInstanceC", false); @NodeIntrinsic(CRuntimeCall.class) public static native void newInstanceC(@ConstantNodeParameter Descriptor newInstanceC, Word thread, Word hub);
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewMultiArrayStub.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewMultiArrayStub.java Tue Apr 30 19:50:12 2013 +0200 @@ -22,8 +22,6 @@ */ package com.oracle.graal.hotspot.stubs; -import static com.oracle.graal.api.code.DeoptimizationAction.*; -import static com.oracle.graal.api.meta.DeoptimizationReason.*; import static com.oracle.graal.hotspot.replacements.HotSpotSnippetUtils.*; import com.oracle.graal.api.code.RuntimeCallTarget.Descriptor; @@ -35,37 +33,25 @@ import com.oracle.graal.hotspot.nodes.*; import com.oracle.graal.nodes.spi.*; import com.oracle.graal.replacements.*; -import com.oracle.graal.replacements.SnippetTemplate.Arguments; -import com.oracle.graal.replacements.SnippetTemplate.SnippetInfo; import com.oracle.graal.word.*; -public class NewMultiArrayStub extends Stub { +/** + * Stub called from {@link NewMultiArrayStubCall}. + */ +public class NewMultiArrayStub extends CRuntimeStub { public NewMultiArrayStub(final HotSpotRuntime runtime, Replacements replacements, TargetDescription target, HotSpotRuntimeCallTarget linkage) { - super(runtime, replacements, target, linkage, "newMultiArray"); - } - - @Override - protected Arguments makeArguments(SnippetInfo stub) { - Arguments args = new Arguments(stub); - args.add("hub", null); - args.add("rank", null); - args.add("dims", null); - return args; + super(runtime, replacements, target, linkage); } @Snippet private static Object newMultiArray(Word hub, int rank, Word dims) { newMultiArrayC(NEW_MULTI_ARRAY_C, thread(), hub, rank, dims); - - if (clearPendingException(thread())) { - getAndClearObjectResult(thread()); - DeoptimizeCallerNode.deopt(InvalidateReprofile, RuntimeConstraint); - } + handlePendingException(true); return verifyOop(getAndClearObjectResult(thread())); } - public static final Descriptor NEW_MULTI_ARRAY_C = new Descriptor("new_multi_array_c", false, 
void.class, Word.class, Word.class, int.class, Word.class); + public static final Descriptor NEW_MULTI_ARRAY_C = descriptorFor(NewMultiArrayStub.class, "newMultiArrayC", false); @NodeIntrinsic(CRuntimeCall.class) public static native void newMultiArrayC(@ConstantNodeParameter Descriptor newArrayC, Word thread, Word hub, int rank, Word dims);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/RegisterFinalizerStub.java Tue Apr 30 19:50:12 2013 +0200 @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.hotspot.stubs; + +import static com.oracle.graal.hotspot.replacements.HotSpotSnippetUtils.*; + +import com.oracle.graal.api.code.RuntimeCallTarget.Descriptor; +import com.oracle.graal.api.code.*; +import com.oracle.graal.graph.Node.ConstantNodeParameter; +import com.oracle.graal.graph.Node.NodeIntrinsic; +import com.oracle.graal.hotspot.*; +import com.oracle.graal.hotspot.meta.*; +import com.oracle.graal.hotspot.nodes.*; +import com.oracle.graal.nodes.java.*; +import com.oracle.graal.nodes.spi.*; +import com.oracle.graal.replacements.*; +import com.oracle.graal.word.*; + +/** + * Stub called from {@link RegisterFinalizerNode}. 
+ */ +public class RegisterFinalizerStub extends CRuntimeStub { + + public RegisterFinalizerStub(final HotSpotRuntime runtime, Replacements replacements, TargetDescription target, HotSpotRuntimeCallTarget linkage) { + super(runtime, replacements, target, linkage); + } + + @Snippet + private static void registerFinalizer(Object object) { + registerFinalizerC(REGISTER_FINALIZER_C, thread(), object); + handlePendingException(false); + } + + public static final Descriptor REGISTER_FINALIZER_C = descriptorFor(RegisterFinalizerStub.class, "registerFinalizerC", false); + + @NodeIntrinsic(CRuntimeCall.class) + public static native void registerFinalizerC(@ConstantNodeParameter Descriptor registerFinalizerC, Word thread, Object object); +}
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/Stub.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/Stub.java Tue Apr 30 19:50:12 2013 +0200 @@ -22,8 +22,12 @@ */ package com.oracle.graal.hotspot.stubs; +import static com.oracle.graal.api.code.DeoptimizationAction.*; +import static com.oracle.graal.api.meta.DeoptimizationReason.*; import static com.oracle.graal.hotspot.nodes.CStringNode.*; +import static com.oracle.graal.hotspot.replacements.HotSpotSnippetUtils.*; +import java.lang.reflect.*; import java.util.*; import java.util.concurrent.*; @@ -39,6 +43,7 @@ import com.oracle.graal.graph.Node.NodeIntrinsic; import com.oracle.graal.hotspot.*; import com.oracle.graal.hotspot.meta.*; +import com.oracle.graal.hotspot.nodes.*; import com.oracle.graal.java.*; import com.oracle.graal.nodes.*; import com.oracle.graal.nodes.extended.*; @@ -99,9 +104,9 @@ * * @param linkage linkage details for a call to the stub */ - public Stub(HotSpotRuntime runtime, Replacements replacements, TargetDescription target, HotSpotRuntimeCallTarget linkage, String methodName) { + public Stub(HotSpotRuntime runtime, Replacements replacements, TargetDescription target, HotSpotRuntimeCallTarget linkage) { super(runtime, replacements, target); - this.stubInfo = snippet(getClass(), methodName); + this.stubInfo = snippet(getClass(), null); this.linkage = linkage; } @@ -135,6 +140,28 @@ } /** + * Looks for a {@link CRuntimeCall} node intrinsic named {@code name} in {@code stubClass} and + * returns a {@link Descriptor} based on its signature and the value of {@code hasSideEffect}. 
+ */ + protected static <T extends Stub> Descriptor descriptorFor(Class<T> stubClass, String name, boolean hasSideEffect) { + Method found = null; + for (Method method : stubClass.getDeclaredMethods()) { + if (Modifier.isStatic(method.getModifiers()) && method.getAnnotation(NodeIntrinsic.class) != null && method.getName().equals(name)) { + if (method.getAnnotation(NodeIntrinsic.class).value() == CRuntimeCall.class) { + assert found == null : "found more than one C runtime call named " + name + " in " + stubClass; + assert method.getParameterTypes().length != 0 && method.getParameterTypes()[0] == Descriptor.class : "first parameter of C runtime call '" + name + "' in " + stubClass + + " must be of type " + Descriptor.class.getSimpleName(); + found = method; + } + } + } + assert found != null : "could not find C runtime call named " + name + " in " + stubClass; + List<Class<?>> paramList = Arrays.asList(found.getParameterTypes()); + Class[] cCallTypes = paramList.subList(1, paramList.size()).toArray(new Class[paramList.size() - 1]); + return new Descriptor(name, hasSideEffect, found.getReturnType(), cCallTypes); + } + + /** * Gets the code for this stub, compiling it first if necessary. */ public synchronized InstalledCode getCode(final Backend backend) { @@ -166,6 +193,8 @@ if (Debug.isDumpEnabled()) { Debug.dump(new Object[]{compResult, installedCode}, "After code installation"); } + // TTY.println(getMethod().toString()); + // TTY.println(runtime().disassemble(installedCode)); return installedCode; } }); @@ -200,6 +229,15 @@ } } + static void handlePendingException(boolean isObjectResult) { + if (clearPendingException(thread())) { + if (isObjectResult) { + getAndClearObjectResult(thread()); + } + DeoptimizeCallerNode.deopt(InvalidateReprofile, RuntimeConstraint); + } + } + public static final Descriptor STUB_PRINTF = new Descriptor("stubPrintf", false, void.class, Word.class, long.class, long.class, long.class); @NodeIntrinsic(RuntimeCallNode.class)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/ThreadIsInterruptedStub.java Tue Apr 30 19:50:12 2013 +0200 @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.hotspot.stubs; + +import static com.oracle.graal.hotspot.replacements.HotSpotSnippetUtils.*; + +import com.oracle.graal.api.code.RuntimeCallTarget.Descriptor; +import com.oracle.graal.api.code.*; +import com.oracle.graal.graph.Node.ConstantNodeParameter; +import com.oracle.graal.graph.Node.NodeIntrinsic; +import com.oracle.graal.hotspot.*; +import com.oracle.graal.hotspot.meta.*; +import com.oracle.graal.hotspot.nodes.*; +import com.oracle.graal.nodes.spi.*; +import com.oracle.graal.replacements.*; +import com.oracle.graal.word.*; + +/** + * Stub called from {@link ThreadIsInterruptedStubCall}. 
+ */ +public class ThreadIsInterruptedStub extends CRuntimeStub { + + public ThreadIsInterruptedStub(final HotSpotRuntime runtime, Replacements replacements, TargetDescription target, HotSpotRuntimeCallTarget linkage) { + super(runtime, replacements, target, linkage); + } + + @Snippet + private static boolean threadIsInterrupted(Thread receiverThread, boolean clearIsInterrupted) { + boolean result = threadIsInterruptedC(THREAD_IS_INTERRUPTED_C, thread(), receiverThread, clearIsInterrupted); + handlePendingException(false); + return result; + } + + public static final Descriptor THREAD_IS_INTERRUPTED_C = descriptorFor(ThreadIsInterruptedStub.class, "threadIsInterruptedC", false); + + @NodeIntrinsic(CRuntimeCall.class) + public static native boolean threadIsInterruptedC(@ConstantNodeParameter Descriptor newArrayC, Word thread, Thread receiverThread, boolean clearIsInterrupted); +}
--- a/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPhase.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPhase.java Tue Apr 30 19:50:12 2013 +0200 @@ -679,11 +679,7 @@ } private void genGoto() { - double probability = profilingInfo.getBranchTakenProbability(bci()); - if (probability < 0) { - probability = 1; - } - appendGoto(createTarget(probability, currentBlock.successors.get(0), frameState)); + appendGoto(createTarget(1, currentBlock.successors.get(0), frameState)); assert currentBlock.numNormalSuccessors() == 1; }
--- a/graal/com.oracle.graal.jtt/src/com/oracle/graal/jtt/JTTTest.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.jtt/src/com/oracle/graal/jtt/JTTTest.java Tue Apr 30 19:50:12 2013 +0200 @@ -60,9 +60,9 @@ Object[] args = argsWithReceiver(receiver, argsToBind); JavaType[] parameterTypes = signatureToTypes(runtime.lookupJavaMethod(m)); assert parameterTypes.length == args.length; - for (int i = 0; i < argsToBind.length; i++) { + for (int i = 0; i < args.length; i++) { LocalNode local = graph.getLocal(i); - Constant c = Constant.forBoxed(parameterTypes[i].getKind(), argsToBind[i]); + Constant c = Constant.forBoxed(parameterTypes[i].getKind(), args[i]); ConstantNode replacement = ConstantNode.forConstant(c, runtime, graph); local.replaceAtUsages(replacement); } @@ -99,8 +99,10 @@ Result expect = executeExpected(method, receiver, args); test(method, expect, receiver, args); - this.argsToBind = args; - test(method, expect, receiver, args); - this.argsToBind = null; + if (args.length > 0) { + this.argsToBind = args; + test(method, expect, receiver, args); + this.argsToBind = null; + } } }
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64AddressValue.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64AddressValue.java Tue Apr 30 19:50:12 2013 +0200 @@ -41,11 +41,11 @@ protected final Scale scale; protected final int displacement; - public AMD64AddressValue(Kind kind, AllocatableValue base, int displacement) { + public AMD64AddressValue(PlatformKind kind, AllocatableValue base, int displacement) { this(kind, base, Value.ILLEGAL, Scale.Times1, displacement); } - public AMD64AddressValue(Kind kind, AllocatableValue base, AllocatableValue index, Scale scale, int displacement) { + public AMD64AddressValue(PlatformKind kind, AllocatableValue base, AllocatableValue index, Scale scale, int displacement) { super(kind); this.base = base; this.index = index; @@ -91,13 +91,13 @@ public boolean equals(Object obj) { if (obj instanceof AMD64AddressValue) { AMD64AddressValue addr = (AMD64AddressValue) obj; - return getKind() == addr.getKind() && displacement == addr.displacement && base.equals(addr.base) && scale == addr.scale && index.equals(addr.index); + return getPlatformKind() == addr.getPlatformKind() && displacement == addr.displacement && base.equals(addr.base) && scale == addr.scale && index.equals(addr.index); } return false; } @Override public int hashCode() { - return base.hashCode() ^ index.hashCode() ^ (displacement << 4) ^ (scale.value << 8) ^ (getKind().ordinal() << 12); + return base.hashCode() ^ index.hashCode() ^ (displacement << 4) ^ (scale.value << 8) ^ getPlatformKind().hashCode(); } }
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Arithmetic.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Arithmetic.java Tue Apr 30 19:50:12 2013 +0200 @@ -255,8 +255,8 @@ public DivRemOp(AMD64Arithmetic opcode, AllocatableValue x, AllocatableValue y, LIRFrameState state) { this.opcode = opcode; - this.divResult = AMD64.rax.asValue(x.getKind()); - this.remResult = AMD64.rdx.asValue(x.getKind()); + this.divResult = AMD64.rax.asValue(x.getPlatformKind()); + this.remResult = AMD64.rdx.asValue(x.getPlatformKind()); this.x = x; this.y = y; this.state = state;
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64RegisterPreservationOp.java Mon Apr 29 00:25:30 2013 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,63 +0,0 @@ -/* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.lir.amd64; - -import java.util.*; - -import com.oracle.graal.api.code.*; -import com.oracle.graal.api.meta.*; -import com.oracle.graal.asm.amd64.*; -import com.oracle.graal.lir.asm.*; - -/** - * Base class for the operations that save or restore registers around another operation that may - * potentially destroy any register (e.g., a call). 
- */ -public abstract class AMD64RegisterPreservationOp extends AMD64LIRInstruction { - - protected static void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler masm, Value[] dst, Value[] src) { - for (int i = 0; i < dst.length; i++) { - if (dst[i] != null) { - AMD64Move.move(tasm, masm, dst[i], src[i]); - } else { - assert src[i] == null; - } - } - } - - protected static void doNotPreserve(Set<Register> registers, RegisterValue[] registerValues, StackSlot[] slots) { - for (int i = 0; i < slots.length; i++) { - if (registerValues[i] != null) { - if (registers.contains(registerValues[i].getRegister())) { - registerValues[i] = null; - slots[i] = null; - } - } - } - } - - /** - * Records that no registers in {@code registers} need to be preserved. - */ - public abstract void doNotPreserve(Set<Register> registers); -}
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64RestoreRegistersOp.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64RestoreRegistersOp.java Tue Apr 30 19:50:12 2013 +0200 @@ -24,11 +24,8 @@ import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*; -import java.util.*; - import com.oracle.graal.api.code.*; import com.oracle.graal.asm.amd64.*; -import com.oracle.graal.lir.*; import com.oracle.graal.lir.LIRInstruction.Opcode; import com.oracle.graal.lir.asm.*; @@ -36,49 +33,29 @@ * Restores registers from stack slots. */ @Opcode("RESTORE_REGISTER") -public final class AMD64RestoreRegistersOp extends AMD64RegisterPreservationOp { +public final class AMD64RestoreRegistersOp extends AMD64LIRInstruction { + + /** + * The slots from which the registers are restored. + */ + @Use(STACK) protected final StackSlot[] slots; - @Use(STACK) protected StackSlot[] src; - @Def(REG) protected RegisterValue[] dst; + /** + * The operation that saved the registers restored by this operation. + */ + private final AMD64SaveRegistersOp save; - public AMD64RestoreRegistersOp(StackSlot[] src, RegisterValue[] dst) { - this.src = src; - this.dst = dst; + public AMD64RestoreRegistersOp(StackSlot[] source, AMD64SaveRegistersOp save) { + this.slots = source; + this.save = save; } @Override public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler masm) { - emitCode(tasm, masm, dst, src); - } - - @Override - public void doNotPreserve(Set<Register> registers) { - doNotPreserve(registers, dst, src); - } - - /** - * Updates {@code debugInfo} with a description of where each preserved register is saved. 
- */ - public void describePreservation(DebugInfo debugInfo, FrameMap frameMap) { - int preserved = 0; - for (RegisterValue r : dst) { - if (r != null) { - preserved++; + for (AMD64LIRInstruction restoringMove : save.restoringMoves) { + if (restoringMove != null) { + restoringMove.emitCode(tasm, masm); } } - if (preserved != 0) { - Register[] keys = new Register[preserved]; - int[] values = new int[keys.length]; - int mapIndex = 0; - for (int i = 0; i < src.length; i++) { - if (dst[i] != null) { - keys[mapIndex] = dst[i].getRegister(); - values[mapIndex] = frameMap.indexForStackSlot(src[i]); - mapIndex++; - } - } - assert mapIndex == preserved; - debugInfo.setCalleeSaveInfo(new RegisterSaveLayout(keys, values)); - } } }
--- a/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64SaveRegistersOp.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64SaveRegistersOp.java Tue Apr 30 19:50:12 2013 +0200 @@ -28,31 +28,78 @@ import com.oracle.graal.api.code.*; import com.oracle.graal.asm.amd64.*; +import com.oracle.graal.lir.*; import com.oracle.graal.lir.LIRInstruction.Opcode; +import com.oracle.graal.lir.StandardOp.MoveOp; import com.oracle.graal.lir.asm.*; /** * Saves registers to stack slots. */ @Opcode("SAVE_REGISTER") -public final class AMD64SaveRegistersOp extends AMD64RegisterPreservationOp { +public final class AMD64SaveRegistersOp extends AMD64LIRInstruction { + + /** + * The move instructions for saving the registers. + */ + protected final AMD64LIRInstruction[] savingMoves; - @Use(REG) protected RegisterValue[] src; - @Def(STACK) protected StackSlot[] dst; + /** + * The move instructions for restoring the registers. + */ + protected final AMD64LIRInstruction[] restoringMoves; - public AMD64SaveRegistersOp(RegisterValue[] src, StackSlot[] dst) { - this.src = src; - this.dst = dst; + /** + * The slots to which the registers are saved. 
+ */ + @Def(STACK) protected final StackSlot[] slots; + + public AMD64SaveRegistersOp(AMD64LIRInstruction[] savingMoves, AMD64LIRInstruction[] restoringMoves, StackSlot[] slots) { + this.savingMoves = savingMoves; + this.restoringMoves = restoringMoves; + this.slots = slots; } @Override public void emitCode(TargetMethodAssembler tasm, AMD64MacroAssembler masm) { - emitCode(tasm, masm, dst, src); + for (AMD64LIRInstruction savingMove : savingMoves) { + if (savingMove != null) { + savingMove.emitCode(tasm, masm); + } + } } - @Override - public void doNotPreserve(Set<Register> registers) { - doNotPreserve(registers, src, dst); + /** + * Prunes the set of registers saved by this operation to exclude those in {@code notSaved} and + * updates {@code debugInfo} with a {@linkplain DebugInfo#getCalleeSaveInfo() description} of + * where each preserved register is saved. + */ + public void updateAndDescribePreservation(Set<Register> notSaved, DebugInfo debugInfo, FrameMap frameMap) { + int preserved = 0; + for (int i = 0; i < savingMoves.length; i++) { + if (savingMoves[i] != null) { + Register register = ValueUtil.asRegister(((MoveOp) savingMoves[i]).getInput()); + if (notSaved.contains(register)) { + savingMoves[i] = null; + restoringMoves[i] = null; + } else { + preserved++; + } + } + } + if (preserved != 0) { + Register[] keys = new Register[preserved]; + int[] values = new int[keys.length]; + int mapIndex = 0; + for (int i = 0; i < savingMoves.length; i++) { + if (savingMoves[i] != null) { + keys[mapIndex] = ValueUtil.asRegister(((MoveOp) savingMoves[i]).getInput()); + values[mapIndex] = frameMap.indexForStackSlot(slots[i]); + mapIndex++; + } + } + assert mapIndex == preserved; + debugInfo.setCalleeSaveInfo(new RegisterSaveLayout(keys, values)); + } } - }
--- a/graal/com.oracle.graal.lir.ptx/src/com/oracle/graal/lir/ptx/PTXAddressValue.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.lir.ptx/src/com/oracle/graal/lir/ptx/PTXAddressValue.java Tue Apr 30 19:50:12 2013 +0200 @@ -48,7 +48,7 @@ * @param kind the kind of the value being addressed * @param base the base register */ - public PTXAddressValue(Kind kind, AllocatableValue base) { + public PTXAddressValue(PlatformKind kind, AllocatableValue base) { this(kind, base, 0); } @@ -60,7 +60,7 @@ * @param base the base register * @param displacement the displacement */ - public PTXAddressValue(Kind kind, AllocatableValue base, long displacement) { + public PTXAddressValue(PlatformKind kind, AllocatableValue base, long displacement) { super(kind); this.base = base; this.displacement = displacement; @@ -95,13 +95,13 @@ public boolean equals(Object obj) { if (obj instanceof PTXAddressValue) { PTXAddressValue addr = (PTXAddressValue) obj; - return getKind() == addr.getKind() && displacement == addr.displacement && base.equals(addr.base); + return getPlatformKind() == addr.getPlatformKind() && displacement == addr.displacement && base.equals(addr.base); } return false; } @Override public int hashCode() { - return base.hashCode() ^ ((int) displacement << 4) ^ (getKind().ordinal() << 12); + return base.hashCode() ^ ((int) displacement << 4) ^ getPlatformKind().hashCode(); } }
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/CompositeValue.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/CompositeValue.java Tue Apr 30 19:50:12 2013 +0200 @@ -45,7 +45,7 @@ private final CompositeValueClass valueClass; - public CompositeValue(Kind kind) { + public CompositeValue(PlatformKind kind) { super(kind); valueClass = CompositeValueClass.get(getClass()); }
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/FrameMap.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/FrameMap.java Tue Apr 30 19:50:12 2013 +0200 @@ -197,7 +197,7 @@ // Without this, frameNeedsAllocating() would never return true. int total = 0; for (StackSlot s : freedSlots) { - total += target.sizeInBytes(s.getKind()); + total += target.arch.getSizeInBytes(s.getKind()); } int initialSpillSize = returnAddressSize() + calleeSaveAreaSize(); if (total == spillSize - initialSpillSize) { @@ -266,7 +266,7 @@ hasOutgoingStackArguments = hasOutgoingStackArguments || argsSize > 0; } - private StackSlot getSlot(Kind kind, int additionalOffset) { + private StackSlot getSlot(PlatformKind kind, int additionalOffset) { return StackSlot.get(kind, -spillSize + additionalOffset, true); } @@ -277,12 +277,12 @@ * @param kind The kind of the spill slot to be reserved. * @return A spill slot denoting the reserved memory area. */ - public StackSlot allocateSpillSlot(Kind kind) { + public StackSlot allocateSpillSlot(PlatformKind kind) { assert frameSize == -1 : "frame size must not yet be fixed"; if (freedSlots != null) { for (Iterator<StackSlot> iter = freedSlots.iterator(); iter.hasNext();) { StackSlot s = iter.next(); - if (s.getKind() == kind) { + if (s.getPlatformKind() == kind) { iter.remove(); if (freedSlots.isEmpty()) { freedSlots = null; @@ -291,7 +291,7 @@ } } } - int size = target.sizeInBytes(kind); + int size = target.arch.getSizeInBytes(kind); spillSize = NumUtil.roundUp(spillSize + size, size); return getSlot(kind, 0); } @@ -299,8 +299,8 @@ private Set<StackSlot> freedSlots; /** - * Frees a spill slot that was obtained via {@link #allocateSpillSlot(Kind)} such that it can be - * reused for the next allocation request for the same kind of slot. 
+ * Frees a spill slot that was obtained via {@link #allocateSpillSlot(PlatformKind)} such that + * it can be reused for the next allocation request for the same kind of slot. */ public void freeSpillSlot(StackSlot slot) { if (freedSlots == null) {
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/Variable.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/Variable.java Tue Apr 30 19:50:12 2013 +0200 @@ -39,22 +39,15 @@ public final int index; /** - * The type of register that this variable needs to get assigned. - */ - public final Register.RegisterFlag flag; - - /** * Creates a new variable. * * @param kind * @param index */ - public Variable(Kind kind, int index, Register.RegisterFlag flag) { + public Variable(PlatformKind kind, int index) { super(kind); - assert kind == kind.getStackKind() : "Variables can be only created for stack kinds"; assert index >= 0; this.index = index; - this.flag = flag; } @Override
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/FixedGuardNode.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/FixedGuardNode.java Tue Apr 30 19:50:12 2013 +0200 @@ -101,7 +101,8 @@ } @Override - public Negatable negate() { + public Negatable negate(LogicNode cond) { + assert cond == condition(); negated = !negated; return this; }
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/GuardNode.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/GuardNode.java Tue Apr 30 19:50:12 2013 +0200 @@ -104,7 +104,8 @@ } @Override - public Negatable negate() { + public Negatable negate(LogicNode cond) { + assert cond == condition(); negated = !negated; return this; }
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/IfNode.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/IfNode.java Tue Apr 30 19:50:12 2013 +0200 @@ -109,7 +109,8 @@ } @Override - public Negatable negate() { + public Negatable negate(LogicNode cond) { + assert cond == condition(); AbstractBeginNode trueSucc = trueSuccessor(); AbstractBeginNode falseSucc = falseSuccessor(); setTrueSuccessor(null);
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/LogicNode.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/LogicNode.java Tue Apr 30 19:50:12 2013 +0200 @@ -40,7 +40,7 @@ public void negateUsages() { for (Node n : usages().snapshot()) { assert n instanceof Negatable; - ((Negatable) n).negate(); + ((Negatable) n).negate(this); } }
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/ConditionalNode.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/calc/ConditionalNode.java Tue Apr 30 19:50:12 2013 +0200 @@ -94,7 +94,8 @@ } @Override - public Negatable negate() { + public Negatable negate(LogicNode cond) { + assert condition() == cond; ConditionalNode replacement = graph().unique(new ConditionalNode(condition, falseValue(), trueValue())); ((StructuredGraph) graph()).replaceFloating(this, replacement); return replacement;
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/UnsafeCastNode.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/UnsafeCastNode.java Tue Apr 30 19:50:12 2013 +0200 @@ -90,7 +90,7 @@ @Override public void generate(LIRGeneratorTool generator) { if (kind() != object().kind()) { - assert generator.target().sizeInBytes(kind()) == generator.target().sizeInBytes(object().kind()) : "unsafe cast cannot be used to change the size of a value"; + assert generator.target().arch.getSizeInBytes(kind()) == generator.target().arch.getSizeInBytes(object().kind()) : "unsafe cast cannot be used to change the size of a value"; AllocatableValue result = generator.newVariable(kind()); generator.emitMove(result, generator.operand(object())); generator.setResult(this, result);
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/spi/LIRGeneratorTool.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/spi/LIRGeneratorTool.java Tue Apr 30 19:50:12 2013 +0200 @@ -49,7 +49,7 @@ Value operand(ValueNode object); - AllocatableValue newVariable(Kind kind); + AllocatableValue newVariable(PlatformKind kind); Value setResult(ValueNode x, Value operand);
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/spi/Negatable.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/spi/Negatable.java Tue Apr 30 19:50:12 2013 +0200 @@ -28,15 +28,15 @@ * This interface marks a node as being able to negate its effect, this is intended for nodes that * depend on a BooleanNode condition. The canonical representation of has, for example, no way to * represent a != b. If such an expression appears during canonicalization the negated expression - * will be created (a == b) and the usages will be negated, using this interface's {@link #negate()} - * method. + * will be created (a == b) and the usages will be negated, using this interface's + * {@link #negate(LogicNode)} method. */ public interface Negatable { /** * Tells this node that a condition it depends has been negated, and that it thus needs to - * invert its own effect. For example, an {@link IfNode} would switch its true and false - * successors. + * invert the effects of this condition. For example, an {@link IfNode} would switch its true + * and false successors. */ - Negatable negate(); + Negatable negate(LogicNode condition); }
--- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/GraalOptions.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/GraalOptions.java Tue Apr 30 19:50:12 2013 +0200 @@ -37,6 +37,7 @@ public static int Threads = 4; public static String CompilerConfiguration = "basic"; + public static String GraalRuntime = "basic"; // inlining settings public static boolean Inline = true; @@ -222,6 +223,7 @@ public static boolean IntrinsifyUnsafeMethods = true; public static boolean IntrinsifyMathMethods = true; public static boolean IntrinsifyAESMethods = true; + public static boolean IntrinsifyReflectionMethods = true; public static boolean IntrinsifyInstalledCodeMethods = true; public static boolean IntrinsifyCallSiteTarget = true; /**
--- a/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/schedule/SchedulePhase.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.phases/src/com/oracle/graal/phases/schedule/SchedulePhase.java Tue Apr 30 19:50:12 2013 +0200 @@ -171,7 +171,7 @@ @Override protected void run(StructuredGraph graph) { - cfg = ControlFlowGraph.compute(graph, true, true, true, false); + cfg = ControlFlowGraph.compute(graph, true, true, true, true); earliestCache = graph.createNodeMap(); blockToNodesMap = new BlockMap<>(cfg);
--- a/graal/com.oracle.graal.ptx/src/com/oracle/graal/ptx/PTX.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.ptx/src/com/oracle/graal/ptx/PTX.java Tue Apr 30 19:50:12 2013 +0200 @@ -23,18 +23,21 @@ package com.oracle.graal.ptx; import static com.oracle.graal.api.code.MemoryBarriers.*; -import static com.oracle.graal.api.code.Register.RegisterFlag.*; import java.nio.*; import com.oracle.graal.api.code.*; -import com.oracle.graal.api.code.Register.*; +import com.oracle.graal.api.code.Register.RegisterCategory; +import com.oracle.graal.api.meta.*; /** * Represents the PTX architecture. */ public class PTX extends Architecture { + public static final RegisterCategory CPU = new RegisterCategory("CPU"); + public static final RegisterCategory FPU = new RegisterCategory("FPU"); + // @formatter:off /* @@ -49,23 +52,23 @@ */ // General purpose registers - public static final Register r0 = new Register(0, 0, 8, "r0", CPU, RegisterFlag.Byte); - public static final Register r1 = new Register(1, 1, 8, "r1", CPU, RegisterFlag.Byte); - public static final Register r2 = new Register(2, 2, 8, "r2", CPU, RegisterFlag.Byte); - public static final Register r3 = new Register(3, 3, 8, "r3", CPU, RegisterFlag.Byte); - public static final Register r4 = new Register(4, 4, 8, "r4", CPU, RegisterFlag.Byte); - public static final Register r5 = new Register(5, 5, 8, "r5", CPU, RegisterFlag.Byte); - public static final Register r6 = new Register(6, 6, 8, "r6", CPU, RegisterFlag.Byte); - public static final Register r7 = new Register(7, 7, 8, "r7", CPU, RegisterFlag.Byte); + public static final Register r0 = new Register(0, 0, "r0", CPU); + public static final Register r1 = new Register(1, 1, "r1", CPU); + public static final Register r2 = new Register(2, 2, "r2", CPU); + public static final Register r3 = new Register(3, 3, "r3", CPU); + public static final Register r4 = new Register(4, 4, "r4", CPU); + public static final Register r5 = new Register(5, 5, "r5", CPU); + public 
static final Register r6 = new Register(6, 6, "r6", CPU); + public static final Register r7 = new Register(7, 7, "r7", CPU); - public static final Register r8 = new Register(8, 8, 8, "r8", CPU, RegisterFlag.Byte); - public static final Register r9 = new Register(9, 9, 8, "r9", CPU, RegisterFlag.Byte); - public static final Register r10 = new Register(10, 10, 8, "r10", CPU, RegisterFlag.Byte); - public static final Register r11 = new Register(11, 11, 8, "r11", CPU, RegisterFlag.Byte); - public static final Register r12 = new Register(12, 12, 8, "r12", CPU, RegisterFlag.Byte); - public static final Register r13 = new Register(13, 13, 8, "r13", CPU, RegisterFlag.Byte); - public static final Register r14 = new Register(14, 14, 8, "r14", CPU, RegisterFlag.Byte); - public static final Register r15 = new Register(15, 15, 8, "r15", CPU, RegisterFlag.Byte); + public static final Register r8 = new Register(8, 8, "r8", CPU); + public static final Register r9 = new Register(9, 9, "r9", CPU); + public static final Register r10 = new Register(10, 10, "r10", CPU); + public static final Register r11 = new Register(11, 11, "r11", CPU); + public static final Register r12 = new Register(12, 12, "r12", CPU); + public static final Register r13 = new Register(13, 13, "r13", CPU); + public static final Register r14 = new Register(14, 14, "r14", CPU); + public static final Register r15 = new Register(15, 15, "r15", CPU); public static final Register[] gprRegisters = { r0, r1, r2, r3, r4, r5, r6, r7, @@ -73,23 +76,23 @@ }; // Floating point registers - public static final Register f0 = new Register(16, 0, 8, "f0", FPU); - public static final Register f1 = new Register(17, 1, 8, "f1", FPU); - public static final Register f2 = new Register(18, 2, 8, "f2", FPU); - public static final Register f3 = new Register(19, 3, 8, "f3", FPU); - public static final Register f4 = new Register(20, 4, 8, "f4", FPU); - public static final Register f5 = new Register(21, 5, 8, "f5", FPU); - public static final 
Register f6 = new Register(22, 6, 8, "f6", FPU); - public static final Register f7 = new Register(23, 7, 8, "f7", FPU); + public static final Register f0 = new Register(16, 0, "f0", FPU); + public static final Register f1 = new Register(17, 1, "f1", FPU); + public static final Register f2 = new Register(18, 2, "f2", FPU); + public static final Register f3 = new Register(19, 3, "f3", FPU); + public static final Register f4 = new Register(20, 4, "f4", FPU); + public static final Register f5 = new Register(21, 5, "f5", FPU); + public static final Register f6 = new Register(22, 6, "f6", FPU); + public static final Register f7 = new Register(23, 7, "f7", FPU); - public static final Register f8 = new Register(24, 8, 8, "f8", FPU); - public static final Register f9 = new Register(25, 9, 8, "f9", FPU); - public static final Register f10 = new Register(26, 10, 8, "f10", FPU); - public static final Register f11 = new Register(27, 11, 8, "f11", FPU); - public static final Register f12 = new Register(28, 12, 8, "f12", FPU); - public static final Register f13 = new Register(29, 13, 8, "f13", FPU); - public static final Register f14 = new Register(30, 14, 8, "f14", FPU); - public static final Register f15 = new Register(31, 15, 8, "f15", FPU); + public static final Register f8 = new Register(24, 8, "f8", FPU); + public static final Register f9 = new Register(25, 9, "f9", FPU); + public static final Register f10 = new Register(26, 10, "f10", FPU); + public static final Register f11 = new Register(27, 11, "f11", FPU); + public static final Register f12 = new Register(28, 12, "f12", FPU); + public static final Register f13 = new Register(29, 13, "f13", FPU); + public static final Register f14 = new Register(30, 14, "f14", FPU); + public static final Register f15 = new Register(31, 15, "f15", FPU); public static final Register[] fpuRegisters = { f0, f1, f2, f3, f4, f5, f6, f7, @@ -116,4 +119,44 @@ 8); } // @formatter:on + + @Override + public boolean canStoreValue(RegisterCategory 
category, PlatformKind platformKind) { + if (!(platformKind instanceof Kind)) { + return false; + } + + Kind kind = (Kind) platformKind; + if (category == CPU) { + switch (kind) { + case Boolean: + case Byte: + case Char: + case Short: + case Int: + case Long: + case Object: + return true; + } + } else if (category == FPU) { + switch (kind) { + case Float: + case Double: + return true; + } + } + + return false; + } + + @Override + public PlatformKind getLargestStorableKind(RegisterCategory category) { + if (category == CPU) { + return Kind.Long; + } else if (category == FPU) { + return Kind.Double; + } else { + return Kind.Illegal; + } + } }
--- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/SnippetTemplate.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/SnippetTemplate.java Tue Apr 30 19:50:12 2013 +0200 @@ -116,6 +116,10 @@ public boolean isVarargsParameter(int paramIdx) { return varargsParameters[paramIdx]; } + + public String getParameterName(int paramIdx) { + return names[paramIdx]; + } } /** @@ -283,16 +287,20 @@ this.templates = new ConcurrentHashMap<>(); } + /** + * Finds the method in {@code declaringClass} annotated with {@link Snippet} named + * {@code methodName}. If {@code methodName} is null, then there must be exactly one snippet + * method in {@code declaringClass}. + */ protected SnippetInfo snippet(Class<? extends Snippets> declaringClass, String methodName) { Method found = null; for (Method method : declaringClass.getDeclaredMethods()) { - if (method.getAnnotation(Snippet.class) != null && method.getName().equals(methodName)) { - assert found == null : "found more than one @" + Snippet.class.getSimpleName() + " method " + methodName + " in " + declaringClass; + if (method.getAnnotation(Snippet.class) != null && (methodName == null || method.getName().equals(methodName))) { + assert found == null : "found more than one @" + Snippet.class.getSimpleName() + " method in " + declaringClass + (methodName == null ? "" : " named " + methodName); found = method; } } - assert found != null : "did not find @" + Snippet.class.getSimpleName() + " method " + methodName + " in " + declaringClass; - + assert found != null : "did not find @" + Snippet.class.getSimpleName() + " method in " + declaringClass + (methodName == null ? "" : " named " + methodName); return new SnippetInfo(runtime.lookupJavaMethod(found)); }
--- a/graal/com.oracle.graal.sparc/src/com/oracle/graal/sparc/SPARC.java Mon Apr 29 00:25:30 2013 +0200 +++ b/graal/com.oracle.graal.sparc/src/com/oracle/graal/sparc/SPARC.java Tue Apr 30 19:50:12 2013 +0200 @@ -27,6 +27,8 @@ import java.nio.*; import com.oracle.graal.api.code.*; +import com.oracle.graal.api.code.Register.RegisterCategory; +import com.oracle.graal.api.meta.*; /** * Represents the SPARC architecture. @@ -39,4 +41,16 @@ super("AMD64", 8, ByteOrder.LITTLE_ENDIAN, null, LOAD_STORE | STORE_STORE, 1, 0, 8); // SPARC: Fix architecture parameters. } + + @Override + public boolean canStoreValue(RegisterCategory category, PlatformKind kind) { + // TODO Auto-generated method stub + return false; + } + + @Override + public PlatformKind getLargestStorableKind(RegisterCategory category) { + // TODO Auto-generated method stub + return null; + } }
--- a/make/bsd/makefiles/buildtree.make Mon Apr 29 00:25:30 2013 +0200 +++ b/make/bsd/makefiles/buildtree.make Tue Apr 30 19:50:12 2013 +0200 @@ -243,7 +243,9 @@ echo "$(call gamma-path,altsrc,os/$(OS_FAMILY)/vm) \\"; \ echo "$(call gamma-path,commonsrc,os/$(OS_FAMILY)/vm) \\"; \ echo "$(call gamma-path,altsrc,os/posix/vm) \\"; \ - echo "$(call gamma-path,commonsrc,os/posix/vm)"; \ + echo "$(call gamma-path,commonsrc,os/posix/vm) \\"; \ + echo "$(call gamma-path,altsrc,gpu/ptx) \\"; \ + echo "$(call gamma-path,commonsrc,gpu/ptx)"; \ echo; \ echo "Src_Dirs_I = \\"; \ echo "$(call gamma-path,altsrc,share/vm/prims) \\"; \ @@ -259,7 +261,9 @@ echo "$(call gamma-path,altsrc,os/$(OS_FAMILY)/vm) \\"; \ echo "$(call gamma-path,commonsrc,os/$(OS_FAMILY)/vm) \\"; \ echo "$(call gamma-path,altsrc,os/posix/vm) \\"; \ - echo "$(call gamma-path,commonsrc,os/posix/vm)"; \ + echo "$(call gamma-path,commonsrc,os/posix/vm) \\"; \ + echo "$(call gamma-path,altsrc,gpu) \\"; \ + echo "$(call gamma-path,commonsrc,gpu)"; \ [ -n "$(CFLAGS_BROWSE)" ] && \ echo && echo "CFLAGS_BROWSE = $(CFLAGS_BROWSE)"; \ [ -n "$(HOTSPOT_EXTRA_SYSDEFS)" ] && \
--- a/make/bsd/makefiles/launcher.make Mon Apr 29 00:25:30 2013 +0200 +++ b/make/bsd/makefiles/launcher.make Tue Apr 30 19:50:12 2013 +0200 @@ -67,7 +67,7 @@ # framework libraries. ifeq ($(OS_VENDOR),Darwin) - LFLAGS_LAUNCHER += -framework CoreFoundation + LFLAGS_LAUNCHER += -framework CoreFoundation -framework ApplicationServices endif LIBS_LAUNCHER += -l$(JVM) $(LIBS)
--- a/make/bsd/makefiles/vm.make Mon Apr 29 00:25:30 2013 +0200 +++ b/make/bsd/makefiles/vm.make Tue Apr 30 19:50:12 2013 +0200 @@ -128,6 +128,10 @@ LIBS += -lm -pthread +ifeq ($(OS_VENDOR),Darwin) + LIBS += -framework ApplicationServices -framework IOKit +endif + # By default, link the *.o into the library, not the executable. LINK_INTO$(LINK_INTO) = LIBJVM @@ -157,6 +161,7 @@ SOURCE_PATHS+=$(HS_COMMON_SRC)/os/posix/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm +SOURCE_PATHS+=$(HS_COMMON_SRC)/gpu/ptx ifndef JAVASE_EMBEDDED SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \ @@ -179,7 +184,9 @@ SHARK_PATHS := $(GAMMADIR)/src/share/vm/shark GRAAL_PATHS += $(call altsrc,$(HS_COMMON_SRC)/share/vm/graal) +GRAAL_PATHS += $(call altsrc,$(HS_COMMON_SRC)/gpu/ptx) GRAAL_PATHS += $(HS_COMMON_SRC)/share/vm/graal +GRAAL_PATHS += $(HS_COMMON_SRC)/gpu/ptx # Include dirs per type. Src_Dirs/CORE := $(CORE_PATHS)
--- a/make/linux/makefiles/buildtree.make Mon Apr 29 00:25:30 2013 +0200 +++ b/make/linux/makefiles/buildtree.make Tue Apr 30 19:50:12 2013 +0200 @@ -238,7 +238,9 @@ echo "$(call gamma-path,altsrc,os/$(OS_FAMILY)/vm) \\"; \ echo "$(call gamma-path,commonsrc,os/$(OS_FAMILY)/vm) \\"; \ echo "$(call gamma-path,altsrc,os/posix/vm) \\"; \ - echo "$(call gamma-path,commonsrc,os/posix/vm)"; \ + echo "$(call gamma-path,commonsrc,os/posix/vm) \\"; \ + echo "$(call gamma-path,altsrc,gpu/ptx) \\"; \ + echo "$(call gamma-path,commonsrc,gpu/ptx)"; \ echo; \ echo "Src_Dirs_I = \\"; \ echo "$(call gamma-path,altsrc,share/vm/prims) \\"; \ @@ -253,8 +255,9 @@ echo "$(call gamma-path,commonsrc,os_cpu/$(OS_FAMILY)_$(SRCARCH)/vm) \\"; \ echo "$(call gamma-path,altsrc,os/$(OS_FAMILY)/vm) \\"; \ echo "$(call gamma-path,commonsrc,os/$(OS_FAMILY)/vm) \\"; \ - echo "$(call gamma-path,altsrc,os/posix/vm) \\"; \ - echo "$(call gamma-path,commonsrc,os/posix/vm)"; \ + echo "$(call gamma-path,commonsrc,os/posix/vm) \\"; \ + echo "$(call gamma-path,altsrc,gpu) \\"; \ + echo "$(call gamma-path,commonsrc,gpu)"; \ [ -n "$(CFLAGS_BROWSE)" ] && \ echo && echo "CFLAGS_BROWSE = $(CFLAGS_BROWSE)"; \ [ -n "$(ENABLE_FULL_DEBUG_SYMBOLS)" ] && \
--- a/make/linux/makefiles/vm.make Mon Apr 29 00:25:30 2013 +0200 +++ b/make/linux/makefiles/vm.make Tue Apr 30 19:50:12 2013 +0200 @@ -157,6 +157,7 @@ SOURCE_PATHS+=$(HS_COMMON_SRC)/os/posix/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm +SOURCE_PATHS+=$(HS_COMMON_SRC)/gpu/ptx ifndef JAVASE_EMBEDDED ifneq (${ARCH},arm) @@ -181,7 +182,9 @@ SHARK_PATHS := $(GAMMADIR)/src/share/vm/shark GRAAL_PATHS += $(call altsrc,$(HS_COMMON_SRC)/share/vm/graal) +GRAAL_PATHS += $(call altsrc,$(HS_COMMON_SRC)/gpu/ptx) GRAAL_PATHS += $(HS_COMMON_SRC)/share/vm/graal +GRAAL_PATHS += $(HS_COMMON_SRC)/gpu/ptx # Include dirs per type. Src_Dirs/CORE := $(CORE_PATHS)
--- a/mx/projects Mon Apr 29 00:25:30 2013 +0200 +++ b/mx/projects Tue Apr 30 19:50:12 2013 +0200 @@ -295,7 +295,7 @@ # graal.compiler.ptx project@com.oracle.graal.compiler.ptx@subDir=graal project@com.oracle.graal.compiler.ptx@sourceDirs=src -project@com.oracle.graal.compiler.ptx@dependencies=com.oracle.graal.compiler,com.oracle.graal.lir.ptx +project@com.oracle.graal.compiler.ptx@dependencies=com.oracle.graal.lir.ptx,com.oracle.graal.hotspot project@com.oracle.graal.compiler.ptx@checkstyle=com.oracle.graal.graph project@com.oracle.graal.compiler.ptx@javaCompliance=1.7
--- a/src/cpu/x86/vm/graalRuntime_x86.cpp Mon Apr 29 00:25:30 2013 +0200 +++ b/src/cpu/x86/vm/graalRuntime_x86.cpp Tue Apr 30 19:50:12 2013 +0200 @@ -811,27 +811,6 @@ OopMapSet* oop_maps = NULL; switch (id) { - case register_finalizer_id: - { - __ set_info("register_finalizer", dont_gc_arguments); - - // This is called via call_runtime so the arguments - // will be place in C abi locations - __ verify_oop(j_rarg0); - __ enter(); - OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */); - int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), j_rarg0); - oop_maps = new OopMapSet(); - oop_maps->add_gc_map(call_offset, oop_map); - - // Now restore all the live registers - restore_live_registers(sasm); - - __ leave(); - __ ret(0); - } - break; - case handle_exception_nofpu_id: { GraalStubFrame f(sasm, "handle_exception", dont_gc_arguments); oop_maps = generate_handle_exception(id, sasm); @@ -1069,8 +1048,7 @@ } __ ret(0); break; - } - + } case identity_hash_code_id: { Register obj = j_rarg0; // Incoming @@ -1085,21 +1063,6 @@ __ ret(0); break; } - case thread_is_interrupted_id: { - Register thread = j_rarg0; - Register clear_interrupted = j_rarg1; - - __ set_info("identity_hash_code", dont_gc_arguments); - __ enter(); - OopMap* map = save_live_registers(sasm, 1); - int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, thread_is_interrupted), thread, clear_interrupted); - oop_maps = new OopMapSet(); - oop_maps->add_gc_map(call_offset, map); - restore_live_registers_except_rax(sasm); - __ leave(); - __ ret(0); - break; - } default: { GraalStubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/gpu/ptx/gpu_ptx.cpp Tue Apr 30 19:50:12 2013 +0200 @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + + +#include "runtime/gpu.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/ostream.hpp" + +void * gpu::Ptx::_device_context; + +gpu::Ptx::cuda_cu_init_func_t gpu::Ptx::_cuda_cu_init; +gpu::Ptx::cuda_cu_ctx_create_func_t gpu::Ptx::_cuda_cu_ctx_create; +gpu::Ptx::cuda_cu_ctx_detach_func_t gpu::Ptx::_cuda_cu_ctx_detach; +gpu::Ptx::cuda_cu_ctx_synchronize_func_t gpu::Ptx::_cuda_cu_ctx_synchronize; +gpu::Ptx::cuda_cu_device_get_count_func_t gpu::Ptx::_cuda_cu_device_get_count; +gpu::Ptx::cuda_cu_device_get_name_func_t gpu::Ptx::_cuda_cu_device_get_name; +gpu::Ptx::cuda_cu_device_get_func_t gpu::Ptx::_cuda_cu_device_get; +gpu::Ptx::cuda_cu_device_compute_capability_func_t gpu::Ptx::_cuda_cu_device_compute_capability; +gpu::Ptx::cuda_cu_launch_kernel_func_t gpu::Ptx::_cuda_cu_launch_kernel; +gpu::Ptx::cuda_cu_module_get_function_func_t gpu::Ptx::_cuda_cu_module_get_function; +gpu::Ptx::cuda_cu_module_load_data_ex_func_t gpu::Ptx::_cuda_cu_module_load_data_ex; + +void gpu::probe_linkage() { +#ifdef __APPLE__ + set_gpu_linkage(gpu::Ptx::probe_linkage_apple()); +#else + set_gpu_linkage(false); +#endif +} + +void gpu::initialize_gpu() { + if (gpu::has_gpu_linkage()) { + set_initialized(gpu::Ptx::initialize_gpu()); + } +} + +void gpu::generate_kernel(unsigned char *code, int code_len, const char *name) { + if (gpu::has_gpu_linkage()) { + gpu::Ptx::generate_kernel(code, code_len, name); + } +} + +#define __CUDA_API_VERSION 5000 + +bool gpu::Ptx::initialize_gpu() { + int status = _cuda_cu_init(0, __CUDA_API_VERSION); + if (TraceWarpLoading) { + tty->print_cr("gpu_ptx::_cuda_cu_init: %d", status); + } + + int device_count = 0; + status = _cuda_cu_device_get_count(&device_count); + if (TraceWarpLoading) { + tty->print_cr("gpu_ptx::_cuda_cu_device_get_count(%d): %d", device_count, status); + } + + int device_id = 0, cu_device = 0; + status = _cuda_cu_device_get(&cu_device, device_id); + if (TraceWarpLoading) { + 
tty->print_cr("gpu_ptx::_cuda_cu_device_get(%d): %d", cu_device, status); + } + + int major, minor; + status = _cuda_cu_device_compute_capability(&major, &minor, cu_device); + if (TraceWarpLoading) { + tty->print_cr("gpu_ptx::_cuda_cu_device_compute_capability(major %d, minor %d): %d", + major, minor, status); + } + + char device_name[256]; + status = _cuda_cu_device_get_name(device_name, 256, cu_device); + if (TraceWarpLoading) { + tty->print_cr("gpu_ptx::_cuda_cu_device_get_name(%s): %d", device_name, status); + } + + status = _cuda_cu_ctx_create(&_device_context, 0, cu_device); + if (TraceWarpLoading) { + tty->print_cr("gpu_ptx::_cuda_cu_ctx_create(%x): %d", _device_context, status); + } + + return status == 0; // CUDA_SUCCESS +} + +void gpu::Ptx::generate_kernel(unsigned char *code, int code_len, const char *name) { + + void *cu_module; + const unsigned int jit_num_options = 3; + int *jit_options = new int[jit_num_options]; + void **jit_option_values = new void *[jit_num_options]; + + jit_options[0] = 4; // CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES + int jit_log_buffer_size = 1024; + jit_option_values[0] = (void *)(size_t)jit_log_buffer_size; + + jit_options[1] = 3; // CU_JIT_INFO_LOG_BUFFER + char *jit_log_buffer = new char[jit_log_buffer_size]; + jit_option_values[1] = jit_log_buffer; + + jit_options[2] = 0; // CU_JIT_MAX_REGISTERS + int jit_register_count = 32; + jit_option_values[2] = (void *)(size_t)jit_register_count; + + int status = _cuda_cu_module_load_data_ex(&cu_module, code, + jit_num_options, jit_options, (void **)jit_option_values); + if (TraceWarpLoading) { + tty->print_cr("gpu_ptx::_cuda_cu_module_load_data_ex(%x): %d", cu_module, status); + tty->print_cr("gpu_ptx::jit_log_buffer\n%s", jit_log_buffer); + } + + void *cu_function; + + status = _cuda_cu_module_get_function(&cu_function, cu_module, name); + if (TraceWarpLoading) { + tty->print_cr("gpu_ptx::_cuda_cu_module_get_function(%s):%x %d", name, cu_function, status); + } +} + + +#ifdef __APPLE__ 
+bool gpu::Ptx::probe_linkage_apple() { + void *handle = dlopen("/usr/local/cuda/lib/libcuda.dylib", RTLD_LAZY); + if (handle != NULL) { + _cuda_cu_init = + CAST_TO_FN_PTR(cuda_cu_init_func_t, dlsym(handle, "cuInit")); + _cuda_cu_ctx_create = + CAST_TO_FN_PTR(cuda_cu_ctx_create_func_t, dlsym(handle, "cuCtxCreate")); + _cuda_cu_ctx_detach = + CAST_TO_FN_PTR(cuda_cu_ctx_detach_func_t, dlsym(handle, "cuCtxDetach")); + _cuda_cu_ctx_synchronize = + CAST_TO_FN_PTR(cuda_cu_ctx_synchronize_func_t, dlsym(handle, "cuCtxSynchronize")); + _cuda_cu_device_get_count = + CAST_TO_FN_PTR(cuda_cu_device_get_count_func_t, dlsym(handle, "cuDeviceGetCount")); + _cuda_cu_device_get_name = + CAST_TO_FN_PTR(cuda_cu_device_get_name_func_t, dlsym(handle, "cuDeviceGetName")); + _cuda_cu_device_get = + CAST_TO_FN_PTR(cuda_cu_device_get_func_t, dlsym(handle, "cuDeviceGet")); + _cuda_cu_device_compute_capability = + CAST_TO_FN_PTR(cuda_cu_device_compute_capability_func_t, dlsym(handle, "cuDeviceComputeCapability")); + _cuda_cu_module_get_function = + CAST_TO_FN_PTR(cuda_cu_module_get_function_func_t, dlsym(handle, "cuModuleGetFunction")); + _cuda_cu_module_load_data_ex = + CAST_TO_FN_PTR(cuda_cu_module_load_data_ex_func_t, dlsym(handle, "cuModuleLoadDataEx")); + return true; + } + return false; +} +#endif \ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/gpu/ptx/gpu_ptx.hpp Tue Apr 30 19:50:12 2013 +0200 @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef GPU_PTX_HPP +#define GPU_PTX_HPP + +class Ptx { + friend class gpu; + + protected: + static void probe_linkage(); +#ifdef __APPLE__ + static bool probe_linkage_apple(); +#endif + static bool initialize_gpu(); + static void generate_kernel(unsigned char *code, int code_len, const char *name); + +private: + typedef int (*cuda_cu_init_func_t)(unsigned int, int); + typedef int (*cuda_cu_ctx_create_func_t)(void *, int, int); + typedef int (*cuda_cu_ctx_detach_func_t)(int *); + typedef int (*cuda_cu_ctx_synchronize_func_t)(int *); + typedef int (*cuda_cu_device_get_count_func_t)(int *); + typedef int (*cuda_cu_device_get_name_func_t)(char *, int, int); + typedef int (*cuda_cu_device_get_func_t)(int *, int); + typedef int (*cuda_cu_device_compute_capability_func_t)(int *, int *, int); + typedef int (*cuda_cu_launch_kernel_func_t)(int *, int *, int); + typedef int (*cuda_cu_module_get_function_func_t)(void *, void *, const char *); + typedef int (*cuda_cu_module_load_data_ex_func_t)(void *, void *, unsigned int, int *, void **); + + static cuda_cu_init_func_t _cuda_cu_init; + static cuda_cu_ctx_create_func_t _cuda_cu_ctx_create; + static cuda_cu_ctx_detach_func_t _cuda_cu_ctx_detach; + static cuda_cu_ctx_synchronize_func_t _cuda_cu_ctx_synchronize; + static cuda_cu_device_get_count_func_t _cuda_cu_device_get_count; + static cuda_cu_device_get_name_func_t _cuda_cu_device_get_name; + static cuda_cu_device_get_func_t _cuda_cu_device_get; + static cuda_cu_device_compute_capability_func_t _cuda_cu_device_compute_capability; + static cuda_cu_launch_kernel_func_t _cuda_cu_launch_kernel; + static cuda_cu_module_get_function_func_t _cuda_cu_module_get_function; + static cuda_cu_module_load_data_ex_func_t _cuda_cu_module_load_data_ex; + +protected: + static void * _device_context; +}; + +#endif // GPU_PTX_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/os/bsd/vm/gpu_bsd.cpp Tue Apr 30 19:50:12 2013 +0200 @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "runtime/gpu.hpp" +#include "utilities/ostream.hpp" + +#ifdef __APPLE__ +#include <CoreGraphics/CoreGraphics.h> +#include <IOKit/IOKitLib.h> +#endif + +void gpu::probe_gpu() { +#ifdef __APPLE__ + set_available(gpu::Bsd::probe_gpu_apple()); + if (TraceWarpLoading) { + tty->print_cr("gpu_bsd::probe_gpu(APPLE): %d", gpu::is_available()); + } +#else + if (TraceWarpLoading) { + tty->print_cr("gpu_bsd::probe_gpu(not APPLE)"); + } + set_available(false); +#endif +} + +#ifdef __APPLE__ +/* + * This is rudimentary at best, but until we decide on a CUDA Compiler Compatibility + * level, this will have to suffice. 
+ */ +bool gpu::Bsd::probe_gpu_apple() { + CGError err = CGDisplayNoErr; + CGDisplayCount displayCount = 0; + CFDataRef vendorID, deviceID, model; + CGDirectDisplayID *displays; + IOOptionBits options = kIORegistryIterateRecursively | kIORegistryIterateParents; + io_registry_entry_t displayPort; + + err = CGGetActiveDisplayList(0, NULL, &displayCount); + displays = (CGDirectDisplayID *)calloc((size_t)displayCount, sizeof(CGDirectDisplayID)); + err = CGGetActiveDisplayList(displayCount, displays, &displayCount); + + for (CGDisplayCount i = 0; i < displayCount; i++) { + displayPort = CGDisplayIOServicePort(displays[i]); + vendorID = (CFDataRef)IORegistryEntrySearchCFProperty(displayPort, kIOServicePlane, CFSTR("vendor-id"), + kCFAllocatorDefault, options); + deviceID = (CFDataRef)IORegistryEntrySearchCFProperty(displayPort, kIOServicePlane, CFSTR("device-id"), + kCFAllocatorDefault, options); + model = (CFDataRef)IORegistryEntrySearchCFProperty(displayPort, kIOServicePlane, CFSTR("model"), + kCFAllocatorDefault, options); + if (TraceWarpLoading) { + tty->print_cr("vendor: 0x%08X", *((UInt32*)CFDataGetBytePtr(vendorID))); + tty->print_cr("device: 0x%08X", *((UInt32*)CFDataGetBytePtr(deviceID))); + tty->print_cr("model: %s", CFDataGetBytePtr(model)); + } + UInt32 vendor = *((UInt32*)CFDataGetBytePtr(vendorID)); + if (vendor != 0x10DE) { + return false; + } else { + /* + * see https://developer.nvidia.com/cuda-gpus + * see http://en.wikipedia.org/wiki/CUDA#Supported_GPUs + * see http://www.pcidatabase.com/reports.php?type=csv + * + * Only supporting GK104, GK106, GK107 and GK110 GPUs for now, + * which is CUDA Computer Capability 3.0 and greater. + */ + switch (*((UInt32*)CFDataGetBytePtr(deviceID))) { + case 0x11C0: + return true; // NVIDIA GeForce GTX 660 + default: + return false; + } + } + } + return false; +} +#endif \ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/os/bsd/vm/gpu_bsd.hpp Tue Apr 30 19:50:12 2013 +0200 @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_BSD_VM_GPU_BSD_HPP +#define OS_BSD_VM_GPU_BSD_HPP + + +class Bsd { + friend class gpu; + + protected: + static bool probe_gpu(); +#ifdef __APPLE__ + static bool probe_gpu_apple(); +#endif +}; + +#endif // OS_BSD_VM_GPU_BSD_HPP
--- a/src/share/vm/graal/graalCompiler.cpp Mon Apr 29 00:25:30 2013 +0200 +++ b/src/share/vm/graal/graalCompiler.cpp Tue Apr 30 19:50:12 2013 +0200 @@ -28,6 +28,7 @@ #include "graal/graalJavaAccess.hpp" #include "graal/graalVMToCompiler.hpp" #include "graal/graalCompilerToVM.hpp" +#include "graal/graalCompilerToGPU.hpp" #include "graal/graalEnv.hpp" #include "graal/graalRuntime.hpp" #include "runtime/arguments.hpp" @@ -65,6 +66,13 @@ vm_abort(false); } env->RegisterNatives(klass, CompilerToVM_methods, CompilerToVM_methods_count()); + + klass = env->FindClass("com/oracle/graal/hotspot/bridge/CompilerToGPUImpl"); + if (klass == NULL) { + tty->print_cr("graal CompilerToGPUImpl class not found"); + vm_abort(false); + } + env->RegisterNatives(klass, CompilerToGPU_methods, CompilerToGPU_methods_count()); ResourceMark rm; HandleMark hm;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/graal/graalCompilerToGPU.cpp Tue Apr 30 19:50:12 2013 +0200 @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" + +#include "graal/graalCompiler.hpp" +#include "graal/graalEnv.hpp" +#include "runtime/gpu.hpp" + + +// Entry to native method implementation that transitions current thread to '_thread_in_vm'. +#define C2V_VMENTRY(result_type, name, signature) \ + JNIEXPORT result_type JNICALL c2v_ ## name signature { \ + TRACE_graal_3("CompilerToGPU::" #name); \ + GRAAL_VM_ENTRY_MARK; \ + +// Entry to native method implementation that calls a JNI function +// and hence cannot transition current thread to '_thread_in_vm'. 
+#define C2V_ENTRY(result_type, name, signature) \ + JNIEXPORT result_type JNICALL c2v_ ## name signature { \ + TRACE_graal_3("CompilerToGPU::" #name); \ + +#define C2V_END } + + +C2V_VMENTRY(jlong, generateKernel, (JNIEnv *env, jobject, jbyteArray code, jstring name)) + if (gpu::is_available() == false || gpu::has_gpu_linkage() == false && gpu::is_initialized()) { + tty->print_cr("generateKernel - not available / no linkage / not initialized"); + return 0; + } + jboolean is_copy; + jbyte *bytes = env->GetByteArrayElements(code, &is_copy); + jint len = env->GetArrayLength(code); + const char *namestr = env->GetStringUTFChars(name, &is_copy); + gpu::generate_kernel((unsigned char *)bytes, len, namestr); + env->ReleaseByteArrayElements(code, bytes, 0); + env->ReleaseStringUTFChars(name, namestr); + + return 42; +C2V_END + +C2V_VMENTRY(jboolean, deviceInit, (JNIEnv *env, jobject)) + if (gpu::is_available() == false || gpu::has_gpu_linkage() == false) { + tty->print_cr("deviceInit - not available / no linkage"); + return false; + } + if (gpu::is_initialized()) { + tty->print_cr("deviceInit - already initialized"); + return true; + } + gpu::initialize_gpu(); + return gpu::is_initialized(); +C2V_END + +C2V_VMENTRY(jboolean, deviceDetach, (JNIEnv *env, jobject)) +return true; +C2V_END + + +#define CC (char*) /*cast a literal from (const char*)*/ +#define FN_PTR(f) CAST_FROM_FN_PTR(void*, &(c2v_ ## f)) + +#define RESOLVED_TYPE "Lcom/oracle/graal/api/meta/ResolvedJavaType;" +#define TYPE "Lcom/oracle/graal/api/meta/JavaType;" +#define METHOD "Lcom/oracle/graal/api/meta/JavaMethod;" +#define FIELD "Lcom/oracle/graal/api/meta/JavaField;" +#define SIGNATURE "Lcom/oracle/graal/api/meta/Signature;" +#define CONSTANT_POOL "Lcom/oracle/graal/api/meta/ConstantPool;" +#define CONSTANT "Lcom/oracle/graal/api/meta/Constant;" +#define KIND "Lcom/oracle/graal/api/meta/Kind;" +#define LOCAL "Lcom/oracle/graal/api/meta/Local;" +#define RUNTIME_CALL 
"Lcom/oracle/graal/api/code/RuntimeCall;" +#define EXCEPTION_HANDLERS "[Lcom/oracle/graal/api/meta/ExceptionHandler;" +#define REFLECT_METHOD "Ljava/lang/reflect/Method;" +#define REFLECT_CONSTRUCTOR "Ljava/lang/reflect/Constructor;" +#define REFLECT_FIELD "Ljava/lang/reflect/Field;" +#define STRING "Ljava/lang/String;" +#define OBJECT "Ljava/lang/Object;" +#define CLASS "Ljava/lang/Class;" +#define STACK_TRACE_ELEMENT "Ljava/lang/StackTraceElement;" +#define HS_RESOLVED_TYPE "Lcom/oracle/graal/hotspot/meta/HotSpotResolvedObjectType;" +#define HS_RESOLVED_JAVA_TYPE "Lcom/oracle/graal/hotspot/meta/HotSpotResolvedJavaType;" +#define HS_RESOLVED_METHOD "Lcom/oracle/graal/hotspot/meta/HotSpotResolvedJavaMethod;" +#define HS_RESOLVED_FIELD "Lcom/oracle/graal/hotspot/meta/HotSpotResolvedJavaField;" +#define HS_COMP_RESULT "Lcom/oracle/graal/hotspot/HotSpotCompilationResult;" +#define HS_CONFIG "Lcom/oracle/graal/hotspot/HotSpotVMConfig;" +#define HS_METHOD "Lcom/oracle/graal/hotspot/meta/HotSpotMethod;" +#define HS_INSTALLED_CODE "Lcom/oracle/graal/hotspot/meta/HotSpotInstalledCode;" +#define METHOD_DATA "Lcom/oracle/graal/hotspot/meta/HotSpotMethodData;" +#define METASPACE_METHOD "J" +#define METASPACE_METHOD_DATA "J" +#define NMETHOD "J" +#define GPUSPACE_METHOD "J" + +JNINativeMethod CompilerToGPU_methods[] = { + {CC"generateKernel", CC"([B" STRING ")"GPUSPACE_METHOD, FN_PTR(generateKernel)}, + {CC"deviceInit", CC"()Z", FN_PTR(deviceInit)}, + {CC"deviceDetach", CC"()Z", FN_PTR(deviceDetach)}, +}; + +int CompilerToGPU_methods_count() { + return sizeof(CompilerToGPU_methods) / sizeof(JNINativeMethod); +} +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/graal/graalCompilerToGPU.hpp Tue Apr 30 19:50:12 2013 +0200 @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_VM_GRAAL_GRAAL_COMPILER_TO_GPU_HPP +#define SHARE_VM_GRAAL_GRAAL_COMPILER_TO_GPU_HPP + +#include "prims/jni.h" + +extern JNINativeMethod CompilerToGPU_methods[]; +int CompilerToGPU_methods_count(); + + +#endif // SHARE_VM_GRAAL_GRAAL_COMPILER_TO_GPU_HPP
--- a/src/share/vm/graal/graalCompilerToVM.cpp Mon Apr 29 00:25:30 2013 +0200 +++ b/src/share/vm/graal/graalCompilerToVM.cpp Tue Apr 30 19:50:12 2013 +0200 @@ -276,6 +276,10 @@ HotSpotResolvedJavaMethod::set_name(hotspot_method, name()); HotSpotResolvedJavaMethod::set_codeSize(hotspot_method, method->code_size()); HotSpotResolvedJavaMethod::set_exceptionHandlerCount(hotspot_method, method->exception_table_length()); + HotSpotResolvedJavaMethod::set_callerSensitive(hotspot_method, method->caller_sensitive()); + HotSpotResolvedJavaMethod::set_forceInline(hotspot_method, method->force_inline()); + HotSpotResolvedJavaMethod::set_dontInline(hotspot_method, method->dont_inline()); + HotSpotResolvedJavaMethod::set_ignoredBySecurityStackWalk(hotspot_method, method->is_ignored_by_security_stack_walk()); C2V_END C2V_VMENTRY(jboolean, isMethodCompilable,(JNIEnv *, jobject, jlong metaspace_method)) @@ -751,7 +755,6 @@ set_address("wbPostCallStub", GraalRuntime::entry_for(GraalRuntime::wb_post_call_id)); set_address("identityHashCodeStub", GraalRuntime::entry_for(GraalRuntime::identity_hash_code_id)); - set_address("threadIsInterruptedStub", GraalRuntime::entry_for(GraalRuntime::thread_is_interrupted_id)); set_address("inlineCacheMissStub", SharedRuntime::get_ic_miss_stub()); set_address("handleExceptionStub", GraalRuntime::entry_for(GraalRuntime::handle_exception_nofpu_id)); set_address("handleDeoptStub", SharedRuntime::deopt_blob()->unpack()); @@ -762,7 +765,6 @@ set_address("deoptimizeStub", SharedRuntime::deopt_blob()->uncommon_trap()); set_address("unwindExceptionStub", GraalRuntime::entry_for(GraalRuntime::unwind_exception_call_id)); set_address("osrMigrationEndStub", GraalRuntime::entry_for(GraalRuntime::OSR_migration_end_id)); - set_address("registerFinalizerStub", GraalRuntime::entry_for(GraalRuntime::register_finalizer_id)); set_address("createNullPointerExceptionStub", GraalRuntime::entry_for(GraalRuntime::create_null_pointer_exception_id)); 
set_address("createOutOfBoundsExceptionStub", GraalRuntime::entry_for(GraalRuntime::create_out_of_bounds_exception_id)); set_address("javaTimeMillisStub", CAST_FROM_FN_PTR(address, os::javaTimeMillis)); @@ -784,6 +786,8 @@ set_address("newInstanceAddress", GraalRuntime::new_instance); set_address("newArrayAddress", GraalRuntime::new_array); set_address("newMultiArrayAddress", GraalRuntime::new_multi_array); + set_address("registerFinalizerAddress", SharedRuntime::register_finalizer); + set_address("threadIsInterruptedAddress", GraalRuntime::thread_is_interrupted); set_int("deoptReasonNone", Deoptimization::Reason_none); set_int("deoptReasonNullCheck", Deoptimization::Reason_null_check);
--- a/src/share/vm/graal/graalJavaAccess.hpp Mon Apr 29 00:25:30 2013 +0200 +++ b/src/share/vm/graal/graalJavaAccess.hpp Tue Apr 30 19:50:12 2013 +0200 @@ -58,6 +58,10 @@ long_field(HotSpotResolvedJavaMethod, metaspaceMethod) \ int_field(HotSpotResolvedJavaMethod, codeSize) \ int_field(HotSpotResolvedJavaMethod, exceptionHandlerCount) \ + boolean_field(HotSpotResolvedJavaMethod, callerSensitive) \ + boolean_field(HotSpotResolvedJavaMethod, forceInline) \ + boolean_field(HotSpotResolvedJavaMethod, dontInline) \ + boolean_field(HotSpotResolvedJavaMethod, ignoredBySecurityStackWalk) \ end_class \ start_class(HotSpotMethodData) \ long_field(HotSpotMethodData, metaspaceMethodData) \
--- a/src/share/vm/graal/graalRuntime.hpp Mon Apr 29 00:25:30 2013 +0200 +++ b/src/share/vm/graal/graalRuntime.hpp Tue Apr 30 19:50:12 2013 +0200 @@ -81,7 +81,6 @@ // runtime routines needed by code code generated // by Graal. #define GRAAL_STUBS(stub, last_entry) \ - stub(register_finalizer) \ stub(handle_exception_nofpu) /* optimized version that does not preserve fpu registers */ \ stub(unwind_exception_call) \ stub(OSR_migration_end) \ @@ -98,7 +97,6 @@ stub(stub_printf) \ stub(log_primitive) \ stub(identity_hash_code) \ - stub(thread_is_interrupted) \ stub(wb_pre_call) \ stub(wb_post_call) \ last_entry(number_of_ids) @@ -146,7 +144,6 @@ static void wb_post_call(JavaThread* thread, oopDesc* obj, void* card); static jint identity_hash_code(JavaThread* thread, oopDesc* objd); - static jboolean thread_is_interrupted(JavaThread* thread, oopDesc* obj, jboolean clear_interrupte); // Note: Must be kept in sync with constants in com.oracle.graal.replacements.Log enum { @@ -160,6 +157,7 @@ static void new_instance(JavaThread* thread, Klass* klass); static void new_array(JavaThread* thread, Klass* klass, jint length); static void new_multi_array(JavaThread* thread, Klass* klass, int rank, jint* dims); + static jboolean thread_is_interrupted(JavaThread* thread, oopDesc* obj, jboolean clear_interrupte); // initialization static void initialize(BufferBlob* blob);
--- a/src/share/vm/runtime/globals.hpp Mon Apr 29 00:25:30 2013 +0200 +++ b/src/share/vm/runtime/globals.hpp Tue Apr 30 19:50:12 2013 +0200 @@ -3691,6 +3691,9 @@ product(bool , AllowNonVirtualCalls, false, \ "Obey the ACC_SUPER flag and allow invokenonvirtual calls") \ \ + develop(bool, TraceWarpLoading, false, \ + "trace external GPU warp loading") \ + \ experimental(uintx, ArrayAllocatorMallocLimit, \ SOLARIS_ONLY(64*K) NOT_SOLARIS(max_uintx), \ "Allocation less than this value will be allocated " \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/runtime/gpu.cpp	Tue Apr 30 19:50:12 2013 +0200 @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "runtime/gpu.hpp" +#include "ptx/gpu_ptx.hpp" + +bool gpu::_available = false; // does the hardware exist? +bool gpu::_gpu_linkage = false; // is the driver library to access the GPU installed? +bool gpu::_initialized = false; // is the GPU device initialized? + +void gpu::init() { +#ifdef TARGET_OS_FAMILY_bsd + gpu::probe_gpu(); +#endif + // need multi-gpu TARGET ifdef + gpu::probe_linkage(); } +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/runtime/gpu.hpp Tue Apr 30 19:50:12 2013 +0200 @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_RUNTIME_GPU_HPP +#define SHARE_VM_RUNTIME_GPU_HPP + +#include "runtime/atomic.hpp" + +// gpu defines the interface to the graphics processor; this includes traditional +// GPU services such as graphics kernel load and execute. 
+ + +class gpu: AllStatic { +public: + static void init(void); + + static void probe_gpu(); + + static void probe_linkage(); + + static void initialize_gpu(); + + static void generate_kernel(unsigned char *code, int code_len, const char *name); + + static void set_available(bool value) { + _available = value; + } + + static bool is_available() { return _available; } + + static void set_initialized(bool value) { + _initialized = value; + } + + static bool is_initialized() { return _initialized; } + + static void set_gpu_linkage(bool value) { + _gpu_linkage = value; + } + + static bool has_gpu_linkage() { return _gpu_linkage; } + +protected: + static bool _available; + static bool _gpu_linkage; + static bool _initialized; + + // Platform dependent stuff +#ifdef TARGET_OS_FAMILY_linux +#endif +#ifdef TARGET_OS_FAMILY_solaris +#endif +#ifdef TARGET_OS_FAMILY_windows +#endif +#ifdef TARGET_OS_FAMILY_bsd +# include "gpu_bsd.hpp" +#endif + +# include "ptx/gpu_ptx.hpp" + +}; + + +#endif // SHARE_VM_RUNTIME_GPU_HPP
--- a/src/share/vm/runtime/thread.cpp Mon Apr 29 00:25:30 2013 +0200 +++ b/src/share/vm/runtime/thread.cpp Tue Apr 30 19:50:12 2013 +0200 @@ -54,6 +54,7 @@ #include "runtime/deoptimization.hpp" #include "runtime/fprofiler.hpp" #include "runtime/frame.inline.hpp" +#include "runtime/gpu.hpp" #include "runtime/init.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/java.hpp" @@ -3307,6 +3308,9 @@ // Initialize the os module before using TLS os::init(); + // probe for warp capability + gpu::init(); + // Initialize system properties. Arguments::init_system_properties();