# HG changeset patch # User xdono # Date 1241717417 25200 # Node ID 7a485bc4da16083973ac101f3882c993e31d740c # Parent 5d4dd2f5f6a10bb5350406930fd6c28e9cd6dcff# Parent 53d9bf689e80fcc76b221bbe6c5d58e08b80cbc6 Merge diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 .hgignore --- a/.hgignore Fri Apr 17 15:50:12 2009 +0100 +++ b/.hgignore Thu May 07 10:30:17 2009 -0700 @@ -1,7 +1,7 @@ ^build/ ^dist/ ^nbproject/private/ -^src/share/tools/hsdis/bin/ +^src/share/tools/hsdis/build/ ^src/share/tools/IdealGraphVisualizer/[a-zA-Z0-9]*/build/ ^src/share/tools/IdealGraphVisualizer/build/ ^src/share/tools/IdealGraphVisualizer/dist/ diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 .hgtags --- a/.hgtags Fri Apr 17 15:50:12 2009 +0100 +++ b/.hgtags Thu May 07 10:30:17 2009 -0700 @@ -30,3 +30,5 @@ 032c6af894dae8d939b3dd31d82042549e7793e0 jdk7-b53 fafab5d5349c7c066d677538db67a1ee0fb33bd2 jdk7-b54 f8e839c086152da70d6ec5913ba6f9f509282e8d jdk7-b55 +a3fd9e40ff2e854f6169eb6d09d491a28634d04f jdk7-b56 +f4cbf78110c726919f46b59a3b054c54c7e889b4 jdk7-b57 diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java --- a/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java Fri Apr 17 15:50:12 2009 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java Thu May 07 10:30:17 2009 -0700 @@ -142,34 +142,35 @@ // from jvm.h public static final long JVM_RECOGNIZED_CLASS_MODIFIERS = (JVM_ACC_PUBLIC | - JVM_ACC_FINAL | - JVM_ACC_SUPER | - JVM_ACC_INTERFACE | - JVM_ACC_ABSTRACT | - JVM_ACC_ANNOTATION | - JVM_ACC_SYNTHETIC); + JVM_ACC_FINAL | + JVM_ACC_SUPER | + JVM_ACC_INTERFACE | + JVM_ACC_ABSTRACT | + JVM_ACC_ANNOTATION | + JVM_ACC_ENUM | + JVM_ACC_SYNTHETIC); public static final long JVM_RECOGNIZED_FIELD_MODIFIERS = (JVM_ACC_PUBLIC | - JVM_ACC_PRIVATE | - JVM_ACC_PROTECTED | - JVM_ACC_STATIC | - JVM_ACC_FINAL | - JVM_ACC_VOLATILE | - JVM_ACC_TRANSIENT | - JVM_ACC_ENUM | - JVM_ACC_SYNTHETIC); + JVM_ACC_PRIVATE | + JVM_ACC_PROTECTED | + 
JVM_ACC_STATIC | + JVM_ACC_FINAL | + JVM_ACC_VOLATILE | + JVM_ACC_TRANSIENT | + JVM_ACC_ENUM | + JVM_ACC_SYNTHETIC); public static final long JVM_RECOGNIZED_METHOD_MODIFIERS = (JVM_ACC_PUBLIC | - JVM_ACC_PRIVATE | - JVM_ACC_PROTECTED | - JVM_ACC_STATIC | - JVM_ACC_FINAL | - JVM_ACC_SYNCHRONIZED | - JVM_ACC_BRIDGE | - JVM_ACC_VARARGS | - JVM_ACC_NATIVE | - JVM_ACC_ABSTRACT | - JVM_ACC_STRICT | - JVM_ACC_SYNTHETIC); + JVM_ACC_PRIVATE | + JVM_ACC_PROTECTED | + JVM_ACC_STATIC | + JVM_ACC_FINAL | + JVM_ACC_SYNCHRONIZED | + JVM_ACC_BRIDGE | + JVM_ACC_VARARGS | + JVM_ACC_NATIVE | + JVM_ACC_ABSTRACT | + JVM_ACC_STRICT | + JVM_ACC_SYNTHETIC); } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 agent/src/share/classes/sun/jvm/hotspot/runtime/JavaThread.java --- a/agent/src/share/classes/sun/jvm/hotspot/runtime/JavaThread.java Fri Apr 17 15:50:12 2009 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/JavaThread.java Thu May 07 10:30:17 2009 -0700 @@ -48,6 +48,8 @@ private static AddressField lastJavaPCField; private static CIntegerField threadStateField; private static AddressField osThreadField; + private static AddressField stackBaseField; + private static CIntegerField stackSizeField; private static JavaThreadPDAccess access; @@ -83,6 +85,8 @@ lastJavaPCField = anchorType.getAddressField("_last_Java_pc"); threadStateField = type.getCIntegerField("_thread_state"); osThreadField = type.getAddressField("_osthread"); + stackBaseField = type.getAddressField("_stack_base"); + stackSizeField = type.getCIntegerField("_stack_size"); UNINITIALIZED = db.lookupIntConstant("_thread_uninitialized").intValue(); NEW = db.lookupIntConstant("_thread_new").intValue(); @@ -312,6 +316,14 @@ return (OSThread) VMObjectFactory.newObject(OSThread.class, osThreadField.getValue(addr)); } + public Address getStackBase() { + return stackBaseField.getValue(); + } + + public long getStackSize() { + return stackSizeField.getValue(); + } + /** Gets the Java-side thread object for this JavaThread */ public 
Oop getThreadObj() { return VM.getVM().getObjectHeap().newOop(threadObjField.getValue(addr)); @@ -345,11 +357,18 @@ if (Assert.ASSERTS_ENABLED) { Assert.that(VM.getVM().isDebugging(), "Not yet implemented for non-debugging system"); } - Address highest = highestLock(); Address sp = lastSPDbg(); + Address stackBase = getStackBase(); // Be robust - if ((highest == null) || (sp == null)) return false; - return (highest.greaterThanOrEqual(a) && sp.lessThanOrEqual(a)); + if (sp == null) return false; + return stackBase.greaterThanOrEqual(a) && sp.lessThanOrEqual(a); + } + + public boolean isLockOwned(Address a) { + Address stackBase = getStackBase(); + Address stackLimit = stackBase.addOffsetTo(-getStackSize()); + + return stackBase.greaterThanOrEqual(a) && stackLimit.lessThanOrEqual(a); // FIXME: should traverse MonitorArray/MonitorChunks as in VM } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 agent/src/share/classes/sun/jvm/hotspot/runtime/Thread.java --- a/agent/src/share/classes/sun/jvm/hotspot/runtime/Thread.java Fri Apr 17 15:50:12 2009 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/Thread.java Thu May 07 10:30:17 2009 -0700 @@ -38,7 +38,6 @@ private static int HAS_ASYNC_EXCEPTION; private static AddressField activeHandlesField; - private static AddressField highestLockField; private static AddressField currentPendingMonitorField; private static AddressField currentWaitingMonitorField; @@ -60,7 +59,6 @@ tlabFieldOffset = type.getField("_tlab").getOffset(); activeHandlesField = type.getAddressField("_active_handles"); - highestLockField = type.getAddressField("_highest_lock"); currentPendingMonitorField = type.getAddressField("_current_pending_monitor"); currentWaitingMonitorField = type.getAddressField("_current_waiting_monitor"); } @@ -121,10 +119,6 @@ // pending exception } - public Address highestLock() { - return highestLockField.getValue(addr); - } - public ObjectMonitor getCurrentPendingMonitor() { Address monitorAddr = 
currentPendingMonitorField.getValue(addr); if (monitorAddr == null) { diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 agent/src/share/classes/sun/jvm/hotspot/runtime/Threads.java --- a/agent/src/share/classes/sun/jvm/hotspot/runtime/Threads.java Fri Apr 17 15:50:12 2009 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/Threads.java Thu May 07 10:30:17 2009 -0700 @@ -164,20 +164,11 @@ } } - long leastDiff = 0; - boolean leastDiffInitialized = false; - JavaThread theOwner = null; for (JavaThread thread = first(); thread != null; thread = thread.next()) { - Address addr = thread.highestLock(); - if (addr == null || addr.lessThan(o)) continue; - long diff = addr.minus(o); - if (!leastDiffInitialized || diff < leastDiff) { - leastDiffInitialized = true; - leastDiff = diff; - theOwner = thread; - } + if (thread.isLockOwned(o)) + return thread; } - return theOwner; + return null; } public JavaThread owningThreadFromMonitor(ObjectMonitor monitor) { diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java Fri Apr 17 15:50:12 2009 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java Thu May 07 10:30:17 2009 -0700 @@ -89,29 +89,6 @@ // update the code buffer hotspot specific bytecode with the jvm bytecode code[bci] = (byte) (0xFF & bytecode); - // RewriteFrequentPairs - if(hotspotcode == Bytecodes._fast_iaccess_0 || - hotspotcode == Bytecodes._fast_aaccess_0 || - hotspotcode == Bytecodes._fast_faccess_0) { - // rewrite next bytecode as _getfield - bci++; - code[bci] = (byte) (0xFF & Bytecodes._getfield); - bytecode = Bytecodes._getfield; - hotspotcode = Bytecodes._getfield; - } else if (hotspotcode == Bytecodes._fast_iload2) { - // rewrite next bytecode as _iload - bci++; - code[bci] = (byte) (0xFF & Bytecodes._iload); - bytecode = Bytecodes._iload; - hotspotcode = Bytecodes._iload; - } else if (hotspotcode == 
Bytecodes._fast_icaload) { - // rewrite next bytecode as _caload - bci++; - code[bci] = (byte) (0xFF & Bytecodes._caload); - bytecode = Bytecodes._caload; - bytecode = Bytecodes._caload; - } - short cpoolIndex = 0; switch (bytecode) { // bytecodes with ConstantPoolCache index diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java Fri Apr 17 15:50:12 2009 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java Thu May 07 10:30:17 2009 -0700 @@ -59,8 +59,14 @@ SystemDictionary dict = VM.getVM().getSystemDictionary(); dict.classesDo(new SystemDictionary.ClassVisitor() { public void visit(Klass k) { - if (k instanceof InstanceKlass) - dumpKlass((InstanceKlass) k); + if (k instanceof InstanceKlass) { + try { + dumpKlass((InstanceKlass) k); + } catch (Exception e) { + System.out.println(k.getName().asString()); + e.printStackTrace(); + } + } } }); } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java --- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java Fri Apr 17 15:50:12 2009 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java Thu May 07 10:30:17 2009 -0700 @@ -40,7 +40,6 @@ protected InstanceKlass klass; protected DataOutputStream dos; protected ConstantPool cpool; - protected boolean is15Format; // Map between class name to index of type CONSTANT_Class protected Map classToIndex = new HashMap(); @@ -73,7 +72,6 @@ klass = kls; dos = new DataOutputStream(os); cpool = klass.getConstants(); - is15Format = is15ClassFile(); } public void write() throws IOException { @@ -82,7 +80,7 @@ // write magic dos.writeInt(0xCAFEBABE); - writeVersion(is15Format); + writeVersion(); writeConstantPool(); writeClassAccessFlags(); writeThisClass(); @@ -96,43 +94,14 @@ dos.flush(); } - protected boolean is15ClassFile() { - // if klass has generic 
signature, then it is 1.5 class file. - if (klass.getGenericSignature() != null) { - return true; - } - - // if atleast one method has generic signature - // , then we have 1.5 class file. - ObjArray methods = klass.getMethods(); - final int numMethods = (int) methods.getLength(); - for (int m = 0; m < numMethods; m++) { - Method curMethod = (Method) methods.getObjAt(m); - if (curMethod.getGenericSignature() != null) { - return true; - } - } - - // if atleast one field has non-zero generic signature index, then we have - // 1.5 class file - TypeArray fields = klass.getFields(); - final int numFields = (int) fields.getLength(); - for (int f = 0; f < numFields; f += InstanceKlass.NEXT_OFFSET) { - short genSigIndex = fields.getShortAt(f + InstanceKlass.GENERIC_SIGNATURE_INDEX_OFFSET); - if (genSigIndex != (short)0) return true; - } - - return false; + protected void writeVersion() throws IOException { + dos.writeShort((short)klass.minorVersion()); + dos.writeShort((short)klass.majorVersion()); } - protected void writeVersion(boolean is15Format) throws IOException { - if (is15Format) { - dos.writeShort(MINOR_VERSION); - dos.writeShort(MAJOR_VERSION); - } else { - dos.writeShort(MINOR_VERSION_OLD); - dos.writeShort(MAJOR_VERSION_OLD); - } + protected void writeIndex(int index) throws IOException { + if (index == 0) throw new InternalError(); + dos.writeShort(index); } protected void writeConstantPool() throws IOException { @@ -392,8 +361,8 @@ if (DEBUG) debugMessage("\tfield name = " + nameIndex + ", signature = " + signatureIndex); short fieldAttributeCount = 0; - boolean isSyn = isSynthetic(accessFlags); - if (isSyn) + boolean hasSyn = hasSyntheticAttribute(accessFlags); + if (hasSyn) fieldAttributeCount++; short initvalIndex = fields.getShortAt(index + InstanceKlass.INITVAL_INDEX_OFFSET); @@ -407,18 +376,18 @@ dos.writeShort(fieldAttributeCount); // write synthetic, if applicable - if (isSyn) + if (hasSyn) writeSynthetic(); if (initvalIndex != 0) { - 
dos.writeShort(_constantValueIndex); + writeIndex(_constantValueIndex); dos.writeInt(2); dos.writeShort(initvalIndex); if (DEBUG) debugMessage("\tfield init value = " + initvalIndex); } if (genSigIndex != 0) { - dos.writeShort(_signatureIndex); + writeIndex(_signatureIndex); dos.writeInt(2); dos.writeShort(genSigIndex); if (DEBUG) debugMessage("\tfield generic signature index " + genSigIndex); @@ -430,8 +399,13 @@ return (accessFlags & (short) JVM_ACC_SYNTHETIC) != 0; } + protected boolean hasSyntheticAttribute(short accessFlags) { + // Check if flags have the attribute and if the constant pool contains an entry for it. + return isSynthetic(accessFlags) && _syntheticIndex != 0; + } + protected void writeSynthetic() throws IOException { - dos.writeShort(_syntheticIndex); + writeIndex(_syntheticIndex); dos.writeInt(0); } @@ -459,8 +433,8 @@ short methodAttributeCount = 0; - final boolean isSyn = isSynthetic((short)accessFlags); - if (isSyn) + final boolean hasSyn = hasSyntheticAttribute((short)accessFlags); + if (hasSyn) methodAttributeCount++; final boolean hasCheckedExceptions = m.hasCheckedExceptions(); @@ -478,27 +452,11 @@ dos.writeShort(methodAttributeCount); if (DEBUG) debugMessage("\tmethod attribute count = " + methodAttributeCount); - if (isSyn) { + if (hasSyn) { if (DEBUG) debugMessage("\tmethod is synthetic"); writeSynthetic(); } - if (hasCheckedExceptions) { - CheckedExceptionElement[] exceptions = m.getCheckedExceptions(); - dos.writeShort(_exceptionsIndex); - - int attrSize = 2 /* number_of_exceptions */ + - exceptions.length * 2 /* exception_index */; - dos.writeInt(attrSize); - dos.writeShort(exceptions.length); - if (DEBUG) debugMessage("\tmethod has " + exceptions.length - + " checked exception(s)"); - for (int e = 0; e < exceptions.length; e++) { - short cpIndex = (short) exceptions[e].getClassCPIndex(); - dos.writeShort(cpIndex); - } - } - if (isCodeAvailable) { byte[] code = m.getByteCode(); short codeAttrCount = 0; @@ -574,7 +532,7 @@ // start 
writing Code - dos.writeShort(_codeIndex); + writeIndex(_codeIndex); dos.writeInt(codeSize); if (DEBUG) debugMessage("\tcode attribute length = " + codeSize); @@ -608,7 +566,7 @@ // write LineNumberTable, if available. if (hasLineNumberTable) { - dos.writeShort(_lineNumberTableIndex); + writeIndex(_lineNumberTableIndex); dos.writeInt(lineNumberAttrLen); dos.writeShort((short) lineNumberTable.length); for (int l = 0; l < lineNumberTable.length; l++) { @@ -619,7 +577,7 @@ // write LocalVariableTable, if available. if (hasLocalVariableTable) { - dos.writeShort((short) _localVariableTableIndex); + writeIndex((short) _localVariableTableIndex); dos.writeInt(localVarAttrLen); dos.writeShort((short) localVariableTable.length); for (int l = 0; l < localVariableTable.length; l++) { @@ -632,6 +590,22 @@ } } + if (hasCheckedExceptions) { + CheckedExceptionElement[] exceptions = m.getCheckedExceptions(); + writeIndex(_exceptionsIndex); + + int attrSize = 2 /* number_of_exceptions */ + + exceptions.length * 2 /* exception_index */; + dos.writeInt(attrSize); + dos.writeShort(exceptions.length); + if (DEBUG) debugMessage("\tmethod has " + exceptions.length + + " checked exception(s)"); + for (int e = 0; e < exceptions.length; e++) { + short cpIndex = (short) exceptions[e].getClassCPIndex(); + dos.writeShort(cpIndex); + } + } + if (isGeneric) { writeGenericSignature(m.getGenericSignature().asString()); } @@ -643,7 +617,7 @@ } protected void writeGenericSignature(String signature) throws IOException { - dos.writeShort(_signatureIndex); + writeIndex(_signatureIndex); if (DEBUG) debugMessage("signature attribute = " + _signatureIndex); dos.writeInt(2); Short index = (Short) utf8ToIndex.get(signature); @@ -653,12 +627,12 @@ protected void writeClassAttributes() throws IOException { final long flags = klass.getAccessFlags(); - final boolean isSyn = isSynthetic((short) flags); + final boolean hasSyn = hasSyntheticAttribute((short) flags); // check for source file short 
classAttributeCount = 0; - if (isSyn) + if (hasSyn) classAttributeCount++; Symbol sourceFileName = klass.getSourceFileName(); @@ -677,12 +651,12 @@ dos.writeShort(classAttributeCount); if (DEBUG) debugMessage("class attribute count = " + classAttributeCount); - if (isSyn) + if (hasSyn) writeSynthetic(); // write SourceFile, if any if (sourceFileName != null) { - dos.writeShort(_sourceFileIndex); + writeIndex(_sourceFileIndex); if (DEBUG) debugMessage("source file attribute = " + _sourceFileIndex); dos.writeInt(2); Short index = (Short) utf8ToIndex.get(sourceFileName.asString()); @@ -697,7 +671,7 @@ // write inner classes, if any if (numInnerClasses != 0) { - dos.writeShort(_innerClassesIndex); + writeIndex(_innerClassesIndex); final int innerAttrLen = 2 /* number_of_inner_classes */ + numInnerClasses * ( 2 /* inner_class_info_index */ + diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 make/hotspot_version --- a/make/hotspot_version Fri Apr 17 15:50:12 2009 +0100 +++ b/make/hotspot_version Thu May 07 10:30:17 2009 -0700 @@ -33,9 +33,9 @@ # Don't put quotes (fail windows build). 
HOTSPOT_VM_COPYRIGHT=Copyright 2009 -HS_MAJOR_VER=15 +HS_MAJOR_VER=16 HS_MINOR_VER=0 -HS_BUILD_NUMBER=05 +HS_BUILD_NUMBER=02 JDK_MAJOR_VER=1 JDK_MINOR_VER=7 diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 make/jprt.config --- a/make/jprt.config Fri Apr 17 15:50:12 2009 +0100 +++ b/make/jprt.config Thu May 07 10:30:17 2009 -0700 @@ -73,6 +73,7 @@ else if [ "${JPRT_JOB_PRODUCT_RELEASE}" = "jdk6" -o \ "${JPRT_JOB_PRODUCT_RELEASE}" = "jdk6u10" -o \ + "${JPRT_JOB_PRODUCT_RELEASE}" = "jdk6u14" -o \ "${JPRT_JOB_PRODUCT_RELEASE}" = "jdk6perf" ] ; then # All jdk6 builds use SS11 compiler_name=SS11 diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 make/jprt.properties --- a/make/jprt.properties Fri Apr 17 15:50:12 2009 +0100 +++ b/make/jprt.properties Thu May 07 10:30:17 2009 -0700 @@ -46,24 +46,28 @@ jprt.my.solaris.sparc.jdk6=solaris_sparc_5.8 jprt.my.solaris.sparc.jdk6perf=solaris_sparc_5.8 jprt.my.solaris.sparc.jdk6u10=solaris_sparc_5.8 +jprt.my.solaris.sparc.jdk6u14=solaris_sparc_5.8 jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}} jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10 jprt.my.solaris.sparcv9.jdk6=solaris_sparcv9_5.8 jprt.my.solaris.sparcv9.jdk6perf=solaris_sparcv9_5.8 jprt.my.solaris.sparcv9.jdk6u10=solaris_sparcv9_5.8 +jprt.my.solaris.sparcv9.jdk6u14=solaris_sparcv9_5.8 jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}} jprt.my.solaris.i586.jdk7=solaris_i586_5.10 jprt.my.solaris.i586.jdk6=solaris_i586_5.8 jprt.my.solaris.i586.jdk6perf=solaris_i586_5.8 jprt.my.solaris.i586.jdk6u10=solaris_i586_5.8 +jprt.my.solaris.i586.jdk6u14=solaris_i586_5.8 jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}} jprt.my.solaris.x64.jdk7=solaris_x64_5.10 jprt.my.solaris.x64.jdk6=solaris_x64_5.10 jprt.my.solaris.x64.jdk6perf=solaris_x64_5.10 jprt.my.solaris.x64.jdk6u10=solaris_x64_5.10 +jprt.my.solaris.x64.jdk6u14=solaris_x64_5.10 jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}} 
jprt.my.linux.i586=linux_i586 diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 make/solaris/makefiles/sparcWorks.make --- a/make/solaris/makefiles/sparcWorks.make Fri Apr 17 15:50:12 2009 +0100 +++ b/make/solaris/makefiles/sparcWorks.make Thu May 07 10:30:17 2009 -0700 @@ -46,7 +46,7 @@ $(shell $(CC) -V 2>&1 | sed -n 's/^.*[ ,\t]C[ ,\t]\([1-9]\.[0-9][0-9]*\).*/\1/p') # Pick which compiler is validated -ifeq ($(JDK_MINOR_VERSION),6) +ifeq ($(JRE_RELEASE_VER),1.6.0) # Validated compiler for JDK6 is SS11 (5.8) VALIDATED_COMPILER_REV := 5.8 VALIDATED_C_COMPILER_REV := 5.8 @@ -101,18 +101,9 @@ # New architecture options started in SS12 (5.9), we need both styles to build. # The older arch options for SS11 (5.8) or older and also for /usr/ccs/bin/as. -# Note: SS12 default for 32bit sparc is now the same as v8plus, so the -# settings below have changed all SS12 32bit sparc builds to be v8plus. -# The older SS11 (5.8) settings have remained as they always have been. -ifeq ($(TYPE),COMPILER2) - ARCHFLAG_OLD/sparc = -xarch=v8plus -else - ifeq ($(TYPE),TIERED) - ARCHFLAG_OLD/sparc = -xarch=v8plus - else - ARCHFLAG_OLD/sparc = -xarch=v8 - endif -endif +# Note: default for 32bit sparc is now the same as v8plus, so the +# settings below have changed all 32bit sparc builds to be v8plus. +ARCHFLAG_OLD/sparc = -xarch=v8plus ARCHFLAG_NEW/sparc = -m32 -xarch=sparc ARCHFLAG_OLD/sparcv9 = -xarch=v9 ARCHFLAG_NEW/sparcv9 = -m64 -xarch=sparc diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 make/windows/makefiles/vm.make --- a/make/windows/makefiles/vm.make Fri Apr 17 15:50:12 2009 +0100 +++ b/make/windows/makefiles/vm.make Thu May 07 10:30:17 2009 -0700 @@ -55,10 +55,16 @@ CPP_FLAGS=$(CPP_FLAGS) /D "COMPILER1" /D "COMPILER2" !endif +!if "$(BUILDARCH)" == "i486" +HOTSPOT_LIB_ARCH=i386 +!else +HOTSPOT_LIB_ARCH=$(BUILDARCH) +!endif + # The following variables are defined in the generated local.make file. 
CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_RELEASE_VERSION=\"$(HS_BUILD_VER)\"" CPP_FLAGS=$(CPP_FLAGS) /D "JRE_RELEASE_VERSION=\"$(JRE_RELEASE_VER)\"" -CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_LIB_ARCH=\"$(BUILDARCH)\"" +CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_LIB_ARCH=\"$(HOTSPOT_LIB_ARCH)\"" CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_BUILD_TARGET=\"$(BUILD_FLAVOR)\"" CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_BUILD_USER=\"$(BuildUser)\"" CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_VM_DISTRO=\"$(HOTSPOT_VM_DISTRO)\"" diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/assembler_sparc.cpp --- a/src/cpu/sparc/vm/assembler_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/assembler_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -25,24 +25,36 @@ #include "incls/_precompiled.incl" #include "incls/_assembler_sparc.cpp.incl" -// Implementation of Address - -Address::Address( addr_type t, int which ) { - switch (t) { - case extra_in_argument: - case extra_out_argument: - _base = t == extra_in_argument ? FP : SP; - _hi = 0; -// Warning: In LP64 mode, _disp will occupy more than 10 bits. -// This is inconsistent with the other constructors but op -// codes such as ld or ldx, only access disp() to get their -// simm13 argument. - _disp = ((which - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS; - break; - default: - ShouldNotReachHere(); - break; +// Convert the raw encoding form into the form expected by the +// constructor for Address. 
+Address Address::make_raw(int base, int index, int scale, int disp, bool disp_is_oop) { + assert(scale == 0, "not supported"); + RelocationHolder rspec; + if (disp_is_oop) { + rspec = Relocation::spec_simple(relocInfo::oop_type); } + + Register rindex = as_Register(index); + if (rindex != G0) { + Address madr(as_Register(base), rindex); + madr._rspec = rspec; + return madr; + } else { + Address madr(as_Register(base), disp); + madr._rspec = rspec; + return madr; + } +} + +Address Argument::address_in_frame() const { + // Warning: In LP64 mode disp will occupy more than 10 bits, but + // op codes such as ld or ldx, only access disp() to get + // their simm13 argument. + int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS; + if (is_in()) + return Address(FP, disp); // In argument. + else + return Address(SP, disp); // Out argument. } static const char* argumentNames[][2] = { @@ -614,16 +626,17 @@ } // This code sequence is relocatable to any address, even on LP64. -void MacroAssembler::jumpl( Address& a, Register d, int offset, const char* file, int line ) { +void MacroAssembler::jumpl(AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) { assert_not_delayed(); // Force fixed length sethi because NativeJump and NativeFarCall don't handle // variable length instruction streams. - sethi(a, /*ForceRelocatable=*/ true); + patchable_sethi(addrlit, temp); + Address a(temp, addrlit.low10() + offset); // Add the offset to the displacement. if (TraceJumps) { #ifndef PRODUCT // Must do the add here so relocation can find the remainder of the // value to be relocated. 
- add(a.base(), a.disp() + offset, a.base(), a.rspec(offset)); + add(a.base(), a.disp(), a.base(), addrlit.rspec(offset)); save_frame(0); verify_thread(); ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0); @@ -652,15 +665,15 @@ restore(); jmpl(a.base(), G0, d); #else - jmpl(a, d, offset); + jmpl(a.base(), a.disp(), d); #endif /* PRODUCT */ } else { - jmpl(a, d, offset); + jmpl(a.base(), a.disp(), d); } } -void MacroAssembler::jump( Address& a, int offset, const char* file, int line ) { - jumpl( a, G0, offset, file, line ); +void MacroAssembler::jump(AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) { + jumpl(addrlit, temp, G0, offset, file, line); } @@ -678,7 +691,8 @@ st_ptr(savePtr.as_register(), savePtr.address_in_frame()); } // return the address of the first memory slot - add(inArg.address_in_frame(), d); + Address a = inArg.address_in_frame(); + add(a.base(), a.disp(), d); } // Conditional breakpoint (for assertion checks in assembly code) @@ -702,7 +716,6 @@ // offset to write to within the page. This minimizes bus traffic // due to cache line collision. 
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) { - Address mem_serialize_page(tmp1, os::get_memory_serialize_page()); srl(thread, os::get_serialize_page_shift_count(), tmp2); if (Assembler::is_simm13(os::vm_page_size())) { and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2); @@ -711,7 +724,7 @@ set((os::vm_page_size() - sizeof(int)), tmp1); and3(tmp2, tmp1, tmp2); } - load_address(mem_serialize_page); + set(os::get_memory_serialize_page(), tmp1); st(G0, tmp1, tmp2); } @@ -830,10 +843,10 @@ mov(G3, L2); // avoid clobbering G3 also mov(G4, L5); // avoid clobbering G4 #ifdef ASSERT - Address last_get_thread_addr(L3, (address)&last_get_thread); - sethi(last_get_thread_addr); + AddressLiteral last_get_thread_addrlit(&last_get_thread); + set(last_get_thread_addrlit, L3); inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call - st_ptr(L4, last_get_thread_addr); + st_ptr(L4, L3, 0); #endif call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type); delayed()->nop(); @@ -919,13 +932,9 @@ // %%% maybe get rid of [re]set_last_Java_frame void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) { assert_not_delayed(); - Address flags(G2_thread, - 0, - in_bytes(JavaThread::frame_anchor_offset()) + - in_bytes(JavaFrameAnchor::flags_offset())); - Address pc_addr(G2_thread, - 0, - in_bytes(JavaThread::last_Java_pc_offset())); + Address flags(G2_thread, JavaThread::frame_anchor_offset() + + JavaFrameAnchor::flags_offset()); + Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset()); // Always set last_Java_pc and flags first because once last_Java_sp is visible // has_last_Java_frame is true and users will look at the rest of the fields. 
@@ -977,22 +986,18 @@ #endif // ASSERT assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame"); add( last_java_sp, STACK_BIAS, G4_scratch ); - st_ptr(G4_scratch, Address(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset()))); + st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset()); #else - st_ptr(last_java_sp, Address(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset()))); + st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset()); #endif // _LP64 } void MacroAssembler::reset_last_Java_frame(void) { assert_not_delayed(); - Address sp_addr(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset())); - Address pc_addr(G2_thread, - 0, - in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::last_Java_pc_offset())); - Address flags(G2_thread, - 0, - in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset())); + Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset()); + Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()); + Address flags (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset()); #ifdef ASSERT // check that it WAS previously set @@ -1063,7 +1068,7 @@ check_and_handle_popframe(scratch_reg); check_and_handle_earlyret(scratch_reg); - Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset())); + Address exception_addr(G2_thread, Thread::pending_exception_offset()); ld_ptr(exception_addr, scratch_reg); br_null(scratch_reg,false,pt,L); delayed()->nop(); @@ -1186,7 +1191,7 @@ void MacroAssembler::get_vm_result(Register oop_result) { verify_thread(); - Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset())); + Address vm_result_addr(G2_thread, JavaThread::vm_result_offset()); ld_ptr( vm_result_addr, oop_result); st_ptr(G0, vm_result_addr); verify_oop(oop_result); @@ -1195,7 +1200,7 @@ void MacroAssembler::get_vm_result_2(Register 
oop_result) { verify_thread(); - Address vm_result_addr_2(G2_thread, 0, in_bytes(JavaThread::vm_result_2_offset())); + Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset()); ld_ptr(vm_result_addr_2, oop_result); st_ptr(G0, vm_result_addr_2); verify_oop(oop_result); @@ -1206,7 +1211,7 @@ // leave it undisturbed. void MacroAssembler::set_vm_result(Register oop_result) { verify_thread(); - Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset())); + Address vm_result_addr(G2_thread, JavaThread::vm_result_offset()); verify_oop(oop_result); # ifdef ASSERT @@ -1234,81 +1239,78 @@ #else srl(obj, CardTableModRefBS::card_shift, obj); #endif - assert( tmp != obj, "need separate temp reg"); - Address rs(tmp, (address)byte_map_base); - load_address(rs); - stb(G0, rs.base(), obj); + assert(tmp != obj, "need separate temp reg"); + set((address) byte_map_base, tmp); + stb(G0, tmp, obj); } -// %%% Note: The following six instructions have been moved, -// unchanged, from assembler_sparc.inline.hpp. -// They will be refactored at a later date. 
- -void MacroAssembler::sethi(intptr_t imm22a, - Register d, - bool ForceRelocatable, - RelocationHolder const& rspec) { - Address adr( d, (address)imm22a, rspec ); - MacroAssembler::sethi( adr, ForceRelocatable ); -} - - -void MacroAssembler::sethi(Address& a, bool ForceRelocatable) { + +void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) { address save_pc; int shiftcnt; - // if addr of local, do not need to load it - assert(a.base() != FP && a.base() != SP, "just use ld or st for locals"); #ifdef _LP64 # ifdef CHECK_DELAY - assert_not_delayed( (char *)"cannot put two instructions in delay slot" ); + assert_not_delayed((char*) "cannot put two instructions in delay slot"); # endif v9_dep(); -// ForceRelocatable = 1; save_pc = pc(); - if (a.hi32() == 0 && a.low32() >= 0) { - Assembler::sethi(a.low32(), a.base(), a.rspec()); + + int msb32 = (int) (addrlit.value() >> 32); + int lsb32 = (int) (addrlit.value()); + + if (msb32 == 0 && lsb32 >= 0) { + Assembler::sethi(lsb32, d, addrlit.rspec()); } - else if (a.hi32() == -1) { - Assembler::sethi(~a.low32(), a.base(), a.rspec()); - xor3(a.base(), ~low10(~0), a.base()); + else if (msb32 == -1) { + Assembler::sethi(~lsb32, d, addrlit.rspec()); + xor3(d, ~low10(~0), d); } else { - Assembler::sethi(a.hi32(), a.base(), a.rspec() ); // 22 - if ( a.hi32() & 0x3ff ) // Any bits? - or3( a.base(), a.hi32() & 0x3ff ,a.base() ); // High 32 bits are now in low 32 - if ( a.low32() & 0xFFFFFC00 ) { // done? - if( (a.low32() >> 20) & 0xfff ) { // Any bits set? - sllx(a.base(), 12, a.base()); // Make room for next 12 bits - or3( a.base(), (a.low32() >> 20) & 0xfff,a.base() ); // Or in next 12 - shiftcnt = 0; // We already shifted + Assembler::sethi(msb32, d, addrlit.rspec()); // msb 22-bits + if (msb32 & 0x3ff) // Any bits? + or3(d, msb32 & 0x3ff, d); // msb 32-bits are now in lsb 32 + if (lsb32 & 0xFFFFFC00) { // done? + if ((lsb32 >> 20) & 0xfff) { // Any bits set? 
+ sllx(d, 12, d); // Make room for next 12 bits + or3(d, (lsb32 >> 20) & 0xfff, d); // Or in next 12 + shiftcnt = 0; // We already shifted } else shiftcnt = 12; - if( (a.low32() >> 10) & 0x3ff ) { - sllx(a.base(), shiftcnt+10, a.base());// Make room for last 10 bits - or3( a.base(), (a.low32() >> 10) & 0x3ff,a.base() ); // Or in next 10 + if ((lsb32 >> 10) & 0x3ff) { + sllx(d, shiftcnt + 10, d); // Make room for last 10 bits + or3(d, (lsb32 >> 10) & 0x3ff, d); // Or in next 10 shiftcnt = 0; } else shiftcnt = 10; - sllx(a.base(), shiftcnt+10 , a.base()); // Shift leaving disp field 0'd + sllx(d, shiftcnt + 10, d); // Shift leaving disp field 0'd } else - sllx( a.base(), 32, a.base() ); + sllx(d, 32, d); } - // Pad out the instruction sequence so it can be - // patched later. - if ( ForceRelocatable || (a.rtype() != relocInfo::none && - a.rtype() != relocInfo::runtime_call_type) ) { - while ( pc() < (save_pc + (7 * BytesPerInstWord )) ) + // Pad out the instruction sequence so it can be patched later. 
+ if (ForceRelocatable || (addrlit.rtype() != relocInfo::none && + addrlit.rtype() != relocInfo::runtime_call_type)) { + while (pc() < (save_pc + (7 * BytesPerInstWord))) nop(); } #else - Assembler::sethi(a.hi(), a.base(), a.rspec()); + Assembler::sethi(addrlit.value(), d, addrlit.rspec()); #endif - +} + + +void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) { + internal_sethi(addrlit, d, false); } + +void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) { + internal_sethi(addrlit, d, true); +} + + int MacroAssembler::size_of_sethi(address a, bool worst_case) { #ifdef _LP64 if (worst_case) return 7; @@ -1339,61 +1341,50 @@ return size_of_sethi(NULL, true) + 1; } -void MacroAssembler::set(intptr_t value, Register d, - RelocationHolder const& rspec) { - Address val( d, (address)value, rspec); - - if ( rspec.type() == relocInfo::none ) { + +void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) { + intptr_t value = addrlit.value(); + + if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) { // can optimize - if (-4096 <= value && value <= 4095) { + if (-4096 <= value && value <= 4095) { or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended) return; } if (inv_hi22(hi22(value)) == value) { - sethi(val); + sethi(addrlit, d); return; } } - assert_not_delayed( (char *)"cannot put two instructions in delay slot" ); - sethi( val ); - if (rspec.type() != relocInfo::none || (value & 0x3ff) != 0) { - add( d, value & 0x3ff, d, rspec); + assert_not_delayed((char*) "cannot put two instructions in delay slot"); + internal_sethi(addrlit, d, ForceRelocatable); + if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) { + add(d, addrlit.low10(), d, addrlit.rspec()); } } -void MacroAssembler::setsw(int value, Register d, - RelocationHolder const& rspec) { - Address val( d, (address)value, rspec); - if ( rspec.type() == relocInfo::none 
) { - // can optimize - if (-4096 <= value && value <= 4095) { - or3(G0, value, d); - return; - } - if (inv_hi22(hi22(value)) == value) { - sethi( val ); -#ifndef _LP64 - if ( value < 0 ) { - assert_not_delayed(); - sra (d, G0, d); - } -#endif - return; - } - } - assert_not_delayed(); - sethi( val ); - add( d, value & 0x3ff, d, rspec); - - // (A negative value could be loaded in 2 insns with sethi/xor, - // but it would take a more complex relocation.) -#ifndef _LP64 - if ( value < 0) - sra(d, G0, d); -#endif +void MacroAssembler::set(const AddressLiteral& al, Register d) { + internal_set(al, d, false); +} + +void MacroAssembler::set(intptr_t value, Register d) { + AddressLiteral al(value); + internal_set(al, d, false); } -// %%% End of moved six set instructions. +void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) { + AddressLiteral al(addr, rspec); + internal_set(al, d, false); +} + +void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) { + internal_set(al, d, true); +} + +void MacroAssembler::patchable_set(intptr_t value, Register d) { + AddressLiteral al(value); + internal_set(al, d, true); +} void MacroAssembler::set64(jlong value, Register d, Register tmp) { @@ -1512,17 +1503,17 @@ } -Address MacroAssembler::allocate_oop_address(jobject obj, Register d) { +AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) { assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); int oop_index = oop_recorder()->allocate_index(obj); - return Address(d, address(obj), oop_Relocation::spec(oop_index)); + return AddressLiteral(obj, oop_Relocation::spec(oop_index)); } -Address MacroAssembler::constant_oop_address(jobject obj, Register d) { +AddressLiteral MacroAssembler::constant_oop_address(jobject obj) { assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); int oop_index = oop_recorder()->find_index(obj); - return Address(d, address(obj), oop_Relocation::spec(oop_index)); + return 
AddressLiteral(obj, oop_Relocation::spec(oop_index)); } void MacroAssembler::set_narrow_oop(jobject obj, Register d) { @@ -1682,7 +1673,7 @@ sprintf(real_msg, "%s%s(%s:%d)", msg, buffer, file, line); // Call indirectly to solve generation ordering problem - Address a(O7, (address)StubRoutines::verify_oop_subroutine_entry_address()); + AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address()); // Make some space on stack above the current register window. // Enough to hold 8 64-bit registers. @@ -1718,7 +1709,7 @@ sprintf(real_msg, "%s at SP+%d (%s:%d)", msg, addr.disp(), file, line); // Call indirectly to solve generation ordering problem - Address a(O7, (address)StubRoutines::verify_oop_subroutine_entry_address()); + AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address()); // Make some space on stack above the current register window. // Enough to hold 8 64-bit registers. @@ -1772,11 +1763,7 @@ { // count number of verifies Register O2_adr = O2; Register O3_accum = O3; - Address count_addr( O2_adr, (address) StubRoutines::verify_oop_count_addr() ); - sethi(count_addr); - ld(count_addr, O3_accum); - inc(O3_accum); - st(O3_accum, count_addr); + inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum); } Register O2_mask = O2; @@ -1870,8 +1857,8 @@ assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet"); // call indirectly to solve generation ordering problem - Address a(O5, (address)StubRoutines::Sparc::stop_subroutine_entry_address()); - load_ptr_contents(a, O5); + AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address()); + load_ptr_contents(al, O5); jmpl(O5, 0, O7); delayed()->nop(); } @@ -1891,7 +1878,7 @@ assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet"); // call indirectly to solve generation ordering problem - Address a(O5, (address)StubRoutines::Sparc::stop_subroutine_entry_address()); + AddressLiteral 
a(StubRoutines::Sparc::stop_subroutine_entry_address()); load_ptr_contents(a, O5); jmpl(O5, 0, O7); delayed()->nop(); @@ -2003,7 +1990,7 @@ subcc( Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words? Label no_extras; br( negative, true, pt, no_extras ); // if neg, clear reg - delayed()->set( 0, Rresult); // annuled, so only if taken + delayed()->set(0, Rresult); // annuled, so only if taken bind( no_extras ); } @@ -2623,7 +2610,7 @@ return RegisterOrConstant(value + offset); // load indirectly to solve generation ordering problem - Address a(tmp, (address) delayed_value_addr); + AddressLiteral a(delayed_value_addr); load_ptr_contents(a, tmp); #ifdef ASSERT @@ -3029,6 +3016,58 @@ +void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg, + Register temp_reg, + Label& wrong_method_type) { + assert_different_registers(mtype_reg, mh_reg, temp_reg); + // compare method type against that of the receiver + RegisterOrConstant mhtype_offset = delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg); + ld_ptr(mh_reg, mhtype_offset, temp_reg); + cmp(temp_reg, mtype_reg); + br(Assembler::notEqual, false, Assembler::pn, wrong_method_type); + delayed()->nop(); +} + + +void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg) { + assert(mh_reg == G3_method_handle, "caller must put MH object in G3"); + assert_different_registers(mh_reg, temp_reg); + + // pick out the interpreted side of the handler + ld_ptr(mh_reg, delayed_value(java_dyn_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg); + + // off we go... + ld_ptr(temp_reg, MethodHandleEntry::from_interpreted_entry_offset_in_bytes(), temp_reg); + jmp(temp_reg, 0); + + // for the various stubs which take control at this point, + // see MethodHandles::generate_method_handle_stub + + // (Can any caller use this delay slot? If so, add an option for supression.) 
+ delayed()->nop(); +} + +RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot, + int extra_slot_offset) { + // cf. TemplateTable::prepare_invoke(), if (load_receiver). + int stackElementSize = Interpreter::stackElementWords() * wordSize; + int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); + int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); + assert(offset1 - offset == stackElementSize, "correct arithmetic"); + if (arg_slot.is_constant()) { + offset += arg_slot.as_constant() * stackElementSize; + return offset; + } else { + Register temp = arg_slot.as_register(); + sll_ptr(temp, exact_log2(stackElementSize), temp); + if (offset != 0) + add(temp, offset, temp); + return temp; + } +} + + + void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg, Label& done, Label* slow_case, @@ -3055,21 +3094,21 @@ delayed()->nop(); load_klass(obj_reg, temp_reg); - ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg); + ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg); or3(G2_thread, temp_reg, temp_reg); xor3(mark_reg, temp_reg, temp_reg); andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg); if (counters != NULL) { cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg); // Reload mark_reg as we may need it later - ld_ptr(Address(obj_reg, 0, oopDesc::mark_offset_in_bytes()), mark_reg); + ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg); } brx(Assembler::equal, true, Assembler::pt, done); delayed()->nop(); Label try_revoke_bias; Label try_rebias; - Address mark_addr = Address(obj_reg, 0, oopDesc::mark_offset_in_bytes()); + Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes()); assert(mark_addr.disp() == 0, "cas must take a zero 
displacement"); // At this point we know that the header has the bias pattern and @@ -3133,7 +3172,7 @@ // FIXME: due to a lack of registers we currently blow away the age // bits in this situation. Should attempt to preserve them. load_klass(obj_reg, temp_reg); - ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg); + ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg); or3(G2_thread, temp_reg, temp_reg); casn(mark_addr.base(), mark_reg, temp_reg); // If the biasing toward our thread failed, this means that @@ -3164,7 +3203,7 @@ // FIXME: due to a lack of registers we currently blow away the age // bits in this situation. Should attempt to preserve them. load_klass(obj_reg, temp_reg); - ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg); + ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg); casn(mark_addr.base(), mark_reg, temp_reg); // Fall through to the normal CAS-based lock, because no matter what // the result of the above CAS, some thread must have succeeded in @@ -3231,7 +3270,7 @@ Register Rbox, Register Rscratch, BiasedLockingCounters* counters, bool try_bias) { - Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes()); + Address mark_addr(Roop, oopDesc::mark_offset_in_bytes()); verify_oop(Roop); Label done ; @@ -3334,7 +3373,7 @@ // If m->owner != null goto IsLocked // Pessimistic form: Test-and-CAS vs CAS // The optimistic form avoids RTS->RTO cache line upgrades. 
- ld_ptr (Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ; + ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch); andcc (Rscratch, Rscratch, G0) ; brx (Assembler::notZero, false, Assembler::pn, done) ; delayed()->nop() ; @@ -3430,7 +3469,7 @@ // Test-and-CAS vs CAS // Pessimistic form avoids futile (doomed) CAS attempts // The optimistic form avoids RTS->RTO cache line upgrades. - ld_ptr (Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ; + ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch); andcc (Rscratch, Rscratch, G0) ; brx (Assembler::notZero, false, Assembler::pn, done) ; delayed()->nop() ; @@ -3456,7 +3495,7 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark, Register Rbox, Register Rscratch, bool try_bias) { - Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes()); + Address mark_addr(Roop, oopDesc::mark_offset_in_bytes()); Label done ; @@ -3516,14 +3555,14 @@ // Note that we use 1-0 locking by default for the inflated case. We // close the resultant (and rare) race by having contented threads in // monitorenter periodically poll _owner. 
- ld_ptr (Address(Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ; - ld_ptr (Address(Rmark, 0, ObjectMonitor::recursions_offset_in_bytes()-2), Rbox) ; + ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch); + ld_ptr (Rmark, ObjectMonitor::recursions_offset_in_bytes() - 2, Rbox); xor3 (Rscratch, G2_thread, Rscratch) ; orcc (Rbox, Rscratch, Rbox) ; brx (Assembler::notZero, false, Assembler::pn, done) ; delayed()-> - ld_ptr (Address (Rmark, 0, ObjectMonitor::EntryList_offset_in_bytes()-2), Rscratch) ; - ld_ptr (Address (Rmark, 0, ObjectMonitor::cxq_offset_in_bytes()-2), Rbox) ; + ld_ptr (Rmark, ObjectMonitor::EntryList_offset_in_bytes() - 2, Rscratch); + ld_ptr (Rmark, ObjectMonitor::cxq_offset_in_bytes() - 2, Rbox); orcc (Rbox, Rscratch, G0) ; if (EmitSync & 65536) { Label LSucc ; @@ -3531,12 +3570,12 @@ delayed()->nop() ; br (Assembler::always, false, Assembler::pt, done) ; delayed()-> - st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ; + st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2); bind (LSucc) ; - st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ; + st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2); if (os::is_MP()) { membar (StoreLoad) ; } - ld_ptr (Address (Rmark, 0, ObjectMonitor::succ_offset_in_bytes()-2), Rscratch) ; + ld_ptr (Rmark, ObjectMonitor::succ_offset_in_bytes() - 2, Rscratch); andcc (Rscratch, Rscratch, G0) ; brx (Assembler::notZero, false, Assembler::pt, done) ; delayed()-> andcc (G0, G0, G0) ; @@ -3554,7 +3593,7 @@ delayed()->nop() ; br (Assembler::always, false, Assembler::pt, done) ; delayed()-> - st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ; + st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2); } bind (LStacked) ; @@ -3953,20 +3992,26 @@ bind(L); } -void MacroAssembler::inc_counter(address counter_ptr, Register Rtmp1, Register Rtmp2) { - Address counter_addr(Rtmp1, counter_ptr); - 
load_contents(counter_addr, Rtmp2); +void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) { + AddressLiteral addrlit(counter_addr); + sethi(addrlit, Rtmp1); // Move hi22 bits into temporary register. + Address addr(Rtmp1, addrlit.low10()); // Build an address with low10 bits. + ld(addr, Rtmp2); inc(Rtmp2); - store_contents(Rtmp2, counter_addr); + st(Rtmp2, addr); +} + +void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) { + inc_counter((address) counter_addr, Rtmp1, Rtmp2); } SkipIfEqual::SkipIfEqual( MacroAssembler* masm, Register temp, const bool* flag_addr, Assembler::Condition condition) { _masm = masm; - Address flag(temp, (address)flag_addr, relocInfo::none); - _masm->sethi(flag); - _masm->ldub(flag, temp); + AddressLiteral flag(flag_addr); + _masm->sethi(flag, temp); + _masm->ldub(temp, flag.low10(), temp); _masm->tst(temp); _masm->br(condition, false, Assembler::pt, _label); _masm->delayed()->nop(); @@ -4281,8 +4326,8 @@ #else masm.srl(O0, CardTableModRefBS::card_shift, O0); #endif - Address rs(O1, (address)byte_map_base); - masm.load_address(rs); // O1 := + AddressLiteral addrlit(byte_map_base); + masm.set(addrlit, O1); // O1 := masm.ldub(O0, O1, O2); // O2 := [O0 + O1] masm.br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt, @@ -4442,10 +4487,9 @@ #else post_filter_masm->srl(store_addr, CardTableModRefBS::card_shift, store_addr); #endif - assert( tmp != store_addr, "need separate temp reg"); - Address rs(tmp, (address)bs->byte_map_base); - load_address(rs); - stb(G0, rs.base(), store_addr); + assert(tmp != store_addr, "need separate temp reg"); + set(bs->byte_map_base, tmp); + stb(G0, tmp, store_addr); } bind(filtered); @@ -4464,24 +4508,6 @@ card_table_write(bs->byte_map_base, tmp, store_addr); } -// Loading values by size and signed-ness -void MacroAssembler::load_sized_value(Register s1, RegisterOrConstant s2, Register d, - int size_in_bytes, bool is_signed) { - switch 
(size_in_bytes ^ (is_signed ? -1 : 0)) { - case ~8: // fall through: - case 8: ld_long( s1, s2, d ); break; - case ~4: ldsw( s1, s2, d ); break; - case 4: lduw( s1, s2, d ); break; - case ~2: ldsh( s1, s2, d ); break; - case 2: lduh( s1, s2, d ); break; - case ~1: ldsb( s1, s2, d ); break; - case 1: ldub( s1, s2, d ); break; - default: ShouldNotReachHere(); - } -} - - - void MacroAssembler::load_klass(Register src_oop, Register klass) { // The number of bytes in this code is used by // MachCallDynamicJavaNode::ret_addr_offset() @@ -4511,12 +4537,12 @@ } } -void MacroAssembler::load_heap_oop(const Address& s, Register d, int offset) { +void MacroAssembler::load_heap_oop(const Address& s, Register d) { if (UseCompressedOops) { - lduw(s, d, offset); + lduw(s, d); decode_heap_oop(d); } else { - ld_ptr(s, d, offset); + ld_ptr(s, d); } } @@ -4662,7 +4688,7 @@ void MacroAssembler::reinit_heapbase() { if (UseCompressedOops) { // call indirectly to solve generation ordering problem - Address base(G6_heapbase, (address)Universe::narrow_oop_base_addr()); + AddressLiteral base(Universe::narrow_oop_base_addr()); load_ptr_contents(base, G6_heapbase); } } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/assembler_sparc.hpp --- a/src/cpu/sparc/vm/assembler_sparc.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/assembler_sparc.hpp Thu May 07 10:30:17 2009 -0700 @@ -84,6 +84,10 @@ REGISTER_DECLARATION(Register, Gtemp , G5); +// JSR 292 fixed register usages: +REGISTER_DECLARATION(Register, G5_method_type , G5); +REGISTER_DECLARATION(Register, G3_method_handle , G3); + // The compiler requires that G5_megamorphic_method is G5_inline_cache_klass, // because a single patchable "set" instruction (NativeMovConstReg, // or NativeMovConstPatching for compiler1) instruction @@ -91,9 +95,13 @@ // call site is an inline cache or is megamorphic. See the function // CompiledIC::set_to_megamorphic. 
// -// On the other hand, G5_inline_cache_klass must differ from G5_method, -// because both registers are needed for an inline cache that calls -// an interpreted method. +// If a inline cache targets an interpreted method, then the +// G5 register will be used twice during the call. First, +// the call site will be patched to load a compiledICHolder +// into G5. (This is an ordered pair of ic_klass, method.) +// The c2i adapter will first check the ic_klass, then load +// G5_method with the method part of the pair just before +// jumping into the interpreter. // // Note that G5_method is only the method-self for the interpreter, // and is logically unrelated to G5_megamorphic_method. @@ -266,21 +274,90 @@ class Address VALUE_OBJ_CLASS_SPEC { private: - Register _base; -#ifdef _LP64 - int _hi32; // bits 63::32 - int _low32; // bits 31::0 + Register _base; // Base register. + RegisterOrConstant _index_or_disp; // Index register or constant displacement. + RelocationHolder _rspec; + + public: + Address() : _base(noreg), _index_or_disp(noreg) {} + + Address(Register base, RegisterOrConstant index_or_disp) + : _base(base), + _index_or_disp(index_or_disp) { + } + + Address(Register base, Register index) + : _base(base), + _index_or_disp(index) { + } + + Address(Register base, int disp) + : _base(base), + _index_or_disp(disp) { + } + +#ifdef ASSERT + // ByteSize is only a class when ASSERT is defined, otherwise it's an int. 
+ Address(Register base, ByteSize disp) + : _base(base), + _index_or_disp(in_bytes(disp)) { + } #endif - int _hi; - int _disp; - RelocationHolder _rspec; - - RelocationHolder rspec_from_rtype(relocInfo::relocType rt, address a = NULL) { - switch (rt) { + + // accessors + Register base() const { return _base; } + Register index() const { return _index_or_disp.as_register(); } + int disp() const { return _index_or_disp.as_constant(); } + + bool has_index() const { return _index_or_disp.is_register(); } + bool has_disp() const { return _index_or_disp.is_constant(); } + + const relocInfo::relocType rtype() { return _rspec.type(); } + const RelocationHolder& rspec() { return _rspec; } + + RelocationHolder rspec(int offset) const { + return offset == 0 ? _rspec : _rspec.plus(offset); + } + + inline bool is_simm13(int offset = 0); // check disp+offset for overflow + + Address plus_disp(int plusdisp) const { // bump disp by a small amount + assert(_index_or_disp.is_constant(), "must have a displacement"); + Address a(base(), disp() + plusdisp); + return a; + } + + Address after_save() const { + Address a = (*this); + a._base = a._base->after_save(); + return a; + } + + Address after_restore() const { + Address a = (*this); + a._base = a._base->after_restore(); + return a; + } + + // Convert the raw encoding form into the form expected by the + // constructor for Address. 
+ static Address make_raw(int base, int index, int scale, int disp, bool disp_is_oop); + + friend class Assembler; +}; + + +class AddressLiteral VALUE_OBJ_CLASS_SPEC { + private: + address _address; + RelocationHolder _rspec; + + RelocationHolder rspec_from_rtype(relocInfo::relocType rtype, address addr) { + switch (rtype) { case relocInfo::external_word_type: - return external_word_Relocation::spec(a); + return external_word_Relocation::spec(addr); case relocInfo::internal_word_type: - return internal_word_Relocation::spec(a); + return internal_word_Relocation::spec(addr); #ifdef _LP64 case relocInfo::opt_virtual_call_type: return opt_virtual_call_Relocation::spec(); @@ -297,127 +374,86 @@ } } + protected: + // creation + AddressLiteral() : _address(NULL), _rspec(NULL) {} + public: - Address(Register b, address a, relocInfo::relocType rt = relocInfo::none) - : _rspec(rspec_from_rtype(rt, a)) - { - _base = b; -#ifdef _LP64 - _hi32 = (intptr_t)a >> 32; // top 32 bits in 64 bit word - _low32 = (intptr_t)a & ~0; // low 32 bits in 64 bit word -#endif - _hi = (intptr_t)a & ~0x3ff; // top 22 bits in low word - _disp = (intptr_t)a & 0x3ff; // bottom 10 bits - } - - Address(Register b, address a, RelocationHolder const& rspec) - : _rspec(rspec) - { - _base = b; + AddressLiteral(address addr, RelocationHolder const& rspec) + : _address(addr), + _rspec(rspec) {} + + // Some constructors to avoid casting at the call site. + AddressLiteral(jobject obj, RelocationHolder const& rspec) + : _address((address) obj), + _rspec(rspec) {} + + AddressLiteral(intptr_t value, RelocationHolder const& rspec) + : _address((address) value), + _rspec(rspec) {} + + AddressLiteral(address addr, relocInfo::relocType rtype = relocInfo::none) + : _address((address) addr), + _rspec(rspec_from_rtype(rtype, (address) addr)) {} + + // Some constructors to avoid casting at the call site. 
+ AddressLiteral(address* addr, relocInfo::relocType rtype = relocInfo::none) + : _address((address) addr), + _rspec(rspec_from_rtype(rtype, (address) addr)) {} + + AddressLiteral(bool* addr, relocInfo::relocType rtype = relocInfo::none) + : _address((address) addr), + _rspec(rspec_from_rtype(rtype, (address) addr)) {} + + AddressLiteral(const bool* addr, relocInfo::relocType rtype = relocInfo::none) + : _address((address) addr), + _rspec(rspec_from_rtype(rtype, (address) addr)) {} + + AddressLiteral(signed char* addr, relocInfo::relocType rtype = relocInfo::none) + : _address((address) addr), + _rspec(rspec_from_rtype(rtype, (address) addr)) {} + + AddressLiteral(int* addr, relocInfo::relocType rtype = relocInfo::none) + : _address((address) addr), + _rspec(rspec_from_rtype(rtype, (address) addr)) {} + + AddressLiteral(intptr_t addr, relocInfo::relocType rtype = relocInfo::none) + : _address((address) addr), + _rspec(rspec_from_rtype(rtype, (address) addr)) {} + #ifdef _LP64 - _hi32 = (intptr_t)a >> 32; // top 32 bits in 64 bit word - _low32 = (intptr_t)a & ~0; // low 32 bits in 64 bit word -#endif - _hi = (intptr_t)a & ~0x3ff; // top 22 bits - _disp = (intptr_t)a & 0x3ff; // bottom 10 bits - } - - Address(Register b, intptr_t h, intptr_t d, RelocationHolder const& rspec = RelocationHolder()) - : _rspec(rspec) - { - _base = b; -#ifdef _LP64 -// [RGV] Put in Assert to force me to check usage of this constructor - assert( h == 0, "Check usage of this constructor" ); - _hi32 = h; - _low32 = d; - _hi = h; - _disp = d; -#else - _hi = h; - _disp = d; + // 32-bit complains about a multiple declaration for int*. 
+ AddressLiteral(intptr_t* addr, relocInfo::relocType rtype = relocInfo::none) + : _address((address) addr), + _rspec(rspec_from_rtype(rtype, (address) addr)) {} #endif - } - - Address() - : _rspec(RelocationHolder()) - { - _base = G0; -#ifdef _LP64 - _hi32 = 0; - _low32 = 0; -#endif - _hi = 0; - _disp = 0; - } - - // fancier constructors - - enum addr_type { - extra_in_argument, // in the In registers - extra_out_argument // in the Outs - }; - - Address( addr_type, int ); - - // accessors - - Register base() const { return _base; } -#ifdef _LP64 - int hi32() const { return _hi32; } - int low32() const { return _low32; } -#endif - int hi() const { return _hi; } - int disp() const { return _disp; } -#ifdef _LP64 - intptr_t value() const { return ((intptr_t)_hi32 << 32) | - (intptr_t)(uint32_t)_low32; } -#else - int value() const { return _hi | _disp; } -#endif - const relocInfo::relocType rtype() { return _rspec.type(); } - const RelocationHolder& rspec() { return _rspec; } - - RelocationHolder rspec(int offset) const { + + AddressLiteral(oop addr, relocInfo::relocType rtype = relocInfo::none) + : _address((address) addr), + _rspec(rspec_from_rtype(rtype, (address) addr)) {} + + AddressLiteral(float* addr, relocInfo::relocType rtype = relocInfo::none) + : _address((address) addr), + _rspec(rspec_from_rtype(rtype, (address) addr)) {} + + AddressLiteral(double* addr, relocInfo::relocType rtype = relocInfo::none) + : _address((address) addr), + _rspec(rspec_from_rtype(rtype, (address) addr)) {} + + intptr_t value() const { return (intptr_t) _address; } + int low10() const; + + const relocInfo::relocType rtype() const { return _rspec.type(); } + const RelocationHolder& rspec() const { return _rspec; } + + RelocationHolder rspec(int offset) const { return offset == 0 ? 
_rspec : _rspec.plus(offset); } - - inline bool is_simm13(int offset = 0); // check disp+offset for overflow - - Address plus_disp(int disp) const { // bump disp by a small amount - Address a = (*this); - a._disp += disp; - return a; - } - - Address split_disp() const { // deal with disp overflow - Address a = (*this); - int hi_disp = _disp & ~0x3ff; - if (hi_disp != 0) { - a._disp -= hi_disp; - a._hi += hi_disp; - } - return a; - } - - Address after_save() const { - Address a = (*this); - a._base = a._base->after_save(); - return a; - } - - Address after_restore() const { - Address a = (*this); - a._base = a._base->after_restore(); - return a; - } - - friend class Assembler; }; inline Address RegisterImpl::address_in_saved_window() const { - return (Address(SP, 0, (sp_offset_in_saved_window() * wordSize) + STACK_BIAS)); + return (Address(SP, (sp_offset_in_saved_window() * wordSize) + STACK_BIAS)); } @@ -487,11 +523,7 @@ // When applied to a register-based argument, give the corresponding address // into the 6-word area "into which callee may store register arguments" // (This is a different place than the corresponding register-save area location.) - Address address_in_frame() const { - return Address( is_in() ? 
Address::extra_in_argument - : Address::extra_out_argument, - _number ); - } + Address address_in_frame() const; // debugging const char* name() const; @@ -513,6 +545,7 @@ friend class AbstractAssembler; + friend class AddressLiteral; // code patchers need various routines like inv_wdisp() friend class NativeInstruction; @@ -1085,11 +1118,11 @@ // pp 135 (addc was addx in v8) - inline void add( Register s1, Register s2, Register d ); - inline void add( Register s1, int simm13a, Register d, relocInfo::relocType rtype = relocInfo::none); - inline void add( Register s1, int simm13a, Register d, RelocationHolder const& rspec); - inline void add( Register s1, RegisterOrConstant s2, Register d, int offset = 0); - inline void add( const Address& a, Register d, int offset = 0); + inline void add(Register s1, Register s2, Register d ); + inline void add(Register s1, int simm13a, Register d, relocInfo::relocType rtype = relocInfo::none); + inline void add(Register s1, int simm13a, Register d, RelocationHolder const& rspec); + inline void add(Register s1, RegisterOrConstant s2, Register d, int offset = 0); + inline void add(const Address& a, Register d, int offset = 0) { add( a.base(), a.disp() + offset, d, a.rspec(offset)); } void addcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); } void addcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } @@ -1244,14 +1277,12 @@ void jmpl( Register s1, Register s2, Register d ); void jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec = RelocationHolder() ); - inline void jmpl( Address& a, Register d, int offset = 0); - // 171 - inline void ldf( FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d ); - inline void ldf( FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d ); - - inline void ldf( 
FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset = 0); + inline void ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d); + inline void ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec = RelocationHolder()); + + inline void ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset = 0); inline void ldfsr( Register s1, Register s2 ); @@ -1295,15 +1326,20 @@ inline void ldd( Register s1, Register s2, Register d ); inline void ldd( Register s1, int simm13a, Register d); - inline void ldsb( const Address& a, Register d, int offset = 0 ); - inline void ldsh( const Address& a, Register d, int offset = 0 ); - inline void ldsw( const Address& a, Register d, int offset = 0 ); - inline void ldub( const Address& a, Register d, int offset = 0 ); - inline void lduh( const Address& a, Register d, int offset = 0 ); - inline void lduw( const Address& a, Register d, int offset = 0 ); - inline void ldx( const Address& a, Register d, int offset = 0 ); - inline void ld( const Address& a, Register d, int offset = 0 ); - inline void ldd( const Address& a, Register d, int offset = 0 ); +#ifdef ASSERT + // ByteSize is only a class when ASSERT is defined, otherwise it's an int. 
+ inline void ld( Register s1, ByteSize simm13a, Register d); +#endif + + inline void ldsb(const Address& a, Register d, int offset = 0); + inline void ldsh(const Address& a, Register d, int offset = 0); + inline void ldsw(const Address& a, Register d, int offset = 0); + inline void ldub(const Address& a, Register d, int offset = 0); + inline void lduh(const Address& a, Register d, int offset = 0); + inline void lduw(const Address& a, Register d, int offset = 0); + inline void ldx( const Address& a, Register d, int offset = 0); + inline void ld( const Address& a, Register d, int offset = 0); + inline void ldd( const Address& a, Register d, int offset = 0); inline void ldub( Register s1, RegisterOrConstant s2, Register d ); inline void ldsb( Register s1, RegisterOrConstant s2, Register d ); @@ -1528,6 +1564,11 @@ inline void std( Register d, Register s1, Register s2 ); inline void std( Register d, Register s1, int simm13a); +#ifdef ASSERT + // ByteSize is only a class when ASSERT is defined, otherwise it's an int. 
+ inline void st( Register d, Register s1, ByteSize simm13a); +#endif + inline void stb( Register d, const Address& a, int offset = 0 ); inline void sth( Register d, const Address& a, int offset = 0 ); inline void stw( Register d, const Address& a, int offset = 0 ); @@ -1676,8 +1717,8 @@ #define JMP2(r1, r2) jmp(r1, r2, __FILE__, __LINE__) #define JMP(r1, off) jmp(r1, off, __FILE__, __LINE__) -#define JUMP(a, off) jump(a, off, __FILE__, __LINE__) -#define JUMPL(a, d, off) jumpl(a, d, off, __FILE__, __LINE__) +#define JUMP(a, temp, off) jump(a, temp, off, __FILE__, __LINE__) +#define JUMPL(a, temp, d, off) jumpl(a, temp, d, off, __FILE__, __LINE__) class MacroAssembler: public Assembler { @@ -1822,17 +1863,26 @@ #endif // sethi Macro handles optimizations and relocations - void sethi( Address& a, bool ForceRelocatable = false ); - void sethi( intptr_t imm22a, Register d, bool ForceRelocatable = false, RelocationHolder const& rspec = RelocationHolder()); +private: + void internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable); +public: + void sethi(const AddressLiteral& addrlit, Register d); + void patchable_sethi(const AddressLiteral& addrlit, Register d); // compute the size of a sethi/set static int size_of_sethi( address a, bool worst_case = false ); static int worst_case_size_of_set(); // set may be either setsw or setuw (high 32 bits may be zero or sign) - void set( intptr_t value, Register d, RelocationHolder const& rspec = RelocationHolder() ); - void setsw( int value, Register d, RelocationHolder const& rspec = RelocationHolder() ); - void set64( jlong value, Register d, Register tmp); +private: + void internal_set(const AddressLiteral& al, Register d, bool ForceRelocatable); +public: + void set(const AddressLiteral& addrlit, Register d); + void set(intptr_t value, Register d); + void set(address addr, Register d, RelocationHolder const& rspec); + void patchable_set(const AddressLiteral& addrlit, Register d); + void 
patchable_set(intptr_t value, Register d); + void set64(jlong value, Register d, Register tmp); // sign-extend 32 to 64 inline void signx( Register s, Register d ) { sra( s, G0, d); } @@ -1922,23 +1972,22 @@ inline void mov( int simm13a, Register d) { or3( G0, simm13a, d); } // address pseudos: make these names unlike instruction names to avoid confusion - inline void split_disp( Address& a, Register temp ); inline intptr_t load_pc_address( Register reg, int bytes_to_skip ); - inline void load_address( Address& a, int offset = 0 ); - inline void load_contents( Address& a, Register d, int offset = 0 ); - inline void load_ptr_contents( Address& a, Register d, int offset = 0 ); - inline void store_contents( Register s, Address& a, int offset = 0 ); - inline void store_ptr_contents( Register s, Address& a, int offset = 0 ); - inline void jumpl_to( Address& a, Register d, int offset = 0 ); - inline void jump_to( Address& a, int offset = 0 ); + inline void load_contents(AddressLiteral& addrlit, Register d, int offset = 0); + inline void load_ptr_contents(AddressLiteral& addrlit, Register d, int offset = 0); + inline void store_contents(Register s, AddressLiteral& addrlit, Register temp, int offset = 0); + inline void store_ptr_contents(Register s, AddressLiteral& addrlit, Register temp, int offset = 0); + inline void jumpl_to(AddressLiteral& addrlit, Register temp, Register d, int offset = 0); + inline void jump_to(AddressLiteral& addrlit, Register temp, int offset = 0); + inline void jump_indirect_to(Address& a, Register temp, int ld_offset = 0, int jmp_offset = 0); // ring buffer traceable jumps void jmp2( Register r1, Register r2, const char* file, int line ); void jmp ( Register r1, int offset, const char* file, int line ); - void jumpl( Address& a, Register d, int offset, const char* file, int line ); - void jump ( Address& a, int offset, const char* file, int line ); + void jumpl(AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int 
line); + void jump (AddressLiteral& addrlit, Register temp, int offset, const char* file, int line); // argument pseudos: @@ -1963,29 +2012,31 @@ // Functions for isolating 64 bit loads for LP64 // ld_ptr will perform ld for 32 bit VM's and ldx for 64 bit VM's // st_ptr will perform st for 32 bit VM's and stx for 64 bit VM's - inline void ld_ptr( Register s1, Register s2, Register d ); - inline void ld_ptr( Register s1, int simm13a, Register d); - inline void ld_ptr( Register s1, RegisterOrConstant s2, Register d ); - inline void ld_ptr( const Address& a, Register d, int offset = 0 ); - inline void st_ptr( Register d, Register s1, Register s2 ); - inline void st_ptr( Register d, Register s1, int simm13a); - inline void st_ptr( Register d, Register s1, RegisterOrConstant s2 ); - inline void st_ptr( Register d, const Address& a, int offset = 0 ); + inline void ld_ptr(Register s1, Register s2, Register d); + inline void ld_ptr(Register s1, int simm13a, Register d); + inline void ld_ptr(Register s1, RegisterOrConstant s2, Register d); + inline void ld_ptr(const Address& a, Register d, int offset = 0); + inline void st_ptr(Register d, Register s1, Register s2); + inline void st_ptr(Register d, Register s1, int simm13a); + inline void st_ptr(Register d, Register s1, RegisterOrConstant s2); + inline void st_ptr(Register d, const Address& a, int offset = 0); + +#ifdef ASSERT + // ByteSize is only a class when ASSERT is defined, otherwise it's an int. 
+ inline void ld_ptr(Register s1, ByteSize simm13a, Register d); + inline void st_ptr(Register d, Register s1, ByteSize simm13a); +#endif // ld_long will perform ld for 32 bit VM's and ldx for 64 bit VM's // st_long will perform st for 32 bit VM's and stx for 64 bit VM's - inline void ld_long( Register s1, Register s2, Register d ); - inline void ld_long( Register s1, int simm13a, Register d ); - inline void ld_long( Register s1, RegisterOrConstant s2, Register d ); - inline void ld_long( const Address& a, Register d, int offset = 0 ); - inline void st_long( Register d, Register s1, Register s2 ); - inline void st_long( Register d, Register s1, int simm13a ); - inline void st_long( Register d, Register s1, RegisterOrConstant s2 ); - inline void st_long( Register d, const Address& a, int offset = 0 ); - - // Loading values by size and signed-ness - void load_sized_value(Register s1, RegisterOrConstant s2, Register d, - int size_in_bytes, bool is_signed); + inline void ld_long(Register s1, Register s2, Register d); + inline void ld_long(Register s1, int simm13a, Register d); + inline void ld_long(Register s1, RegisterOrConstant s2, Register d); + inline void ld_long(const Address& a, Register d, int offset = 0); + inline void st_long(Register d, Register s1, Register s2); + inline void st_long(Register d, Register s1, int simm13a); + inline void st_long(Register d, Register s1, RegisterOrConstant s2); + inline void st_long(Register d, const Address& a, int offset = 0); // Helpers for address formation. // They update the dest in place, whether it is a register or constant. 
@@ -2040,8 +2091,8 @@ // These are idioms to flag the need for care with accessing bools but on // this platform we assume byte size - inline void stbool( Register d, const Address& a, int offset = 0 ) { stb(d, a, offset); } - inline void ldbool( const Address& a, Register d, int offset = 0 ) { ldsb( a, d, offset ); } + inline void stbool(Register d, const Address& a) { stb(d, a); } + inline void ldbool(const Address& a, Register d) { ldsb(a, d); } inline void tstbool( Register s ) { tst(s); } inline void movbool( bool boolconst, Register d) { mov( (int) boolconst, d); } @@ -2051,7 +2102,7 @@ void store_klass_gap(Register s, Register dst_oop); // oop manipulations - void load_heap_oop(const Address& s, Register d, int offset = 0); + void load_heap_oop(const Address& s, Register d); void load_heap_oop(Register s1, Register s2, Register d); void load_heap_oop(Register s1, int simm13a, Register d); void store_heap_oop(Register d, Register s1, Register s2); @@ -2181,11 +2232,11 @@ void print_CPU_state(); // oops in code - Address allocate_oop_address( jobject obj, Register d ); // allocate_index - Address constant_oop_address( jobject obj, Register d ); // find_index - inline void set_oop ( jobject obj, Register d ); // uses allocate_oop_address - inline void set_oop_constant( jobject obj, Register d ); // uses constant_oop_address - inline void set_oop ( Address obj_addr ); // same as load_address + AddressLiteral allocate_oop_address(jobject obj); // allocate_index + AddressLiteral constant_oop_address(jobject obj); // find_index + inline void set_oop (jobject obj, Register d); // uses allocate_oop_address + inline void set_oop_constant (jobject obj, Register d); // uses constant_oop_address + inline void set_oop (AddressLiteral& obj_addr, Register d); // same as load_address void set_narrow_oop( jobject obj, Register d ); @@ -2366,6 +2417,16 @@ Register temp2_reg, Label& L_success); + // method handles (JSR 292) + void check_method_handle_type(Register mtype_reg, 
Register mh_reg, + Register temp_reg, + Label& wrong_method_type); + void jump_to_method_handle_entry(Register mh_reg, Register temp_reg); + // offset relative to Gargs of argument at tos[arg_slot]. + // (arg_slot == 0 means the last argument, not the first). + RegisterOrConstant argument_offset(RegisterOrConstant arg_slot, + int extra_slot_offset = 0); + // Stack overflow checking @@ -2391,7 +2452,8 @@ // Conditionally (non-atomically) increments passed counter address, preserving condition codes. void cond_inc(Condition cond, address counter_addr, Register Rtemp1, Register Rtemp2); // Unconditional increment. - void inc_counter(address counter_addr, Register Rtemp1, Register Rtemp2); + void inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2); + void inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2); #undef VIRTUAL diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/assembler_sparc.inline.hpp --- a/src/cpu/sparc/vm/assembler_sparc.inline.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/assembler_sparc.inline.hpp Thu May 07 10:30:17 2009 -0700 @@ -38,6 +38,11 @@ inline bool Address::is_simm13(int offset) { return Assembler::is_simm13(disp() + offset); } +inline int AddressLiteral::low10() const { + return Assembler::low10(value()); +} + + // inlines for SPARC assembler -- dmu 5/97 inline void Assembler::check_delay() { @@ -63,10 +68,9 @@ } -inline void Assembler::add( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); } -inline void Assembler::add( Register s1, int simm13a, Register d, relocInfo::relocType rtype ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rtype ); } -inline void Assembler::add( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec ); } -inline void Assembler::add( const Address& a, 
Register d, int offset) { add( a.base(), a.disp() + offset, d, a.rspec(offset)); } +inline void Assembler::add(Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); } +inline void Assembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rtype ); } +inline void Assembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec ); } inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt); has_delay_slot(); } inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { bpr( c, a, p, s1, target(L)); } @@ -95,13 +99,10 @@ inline void Assembler::jmpl( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); } inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); has_delay_slot(); } -inline void Assembler::jmpl( Address& a, Register d, int offset) { jmpl( a.base(), a.disp() + offset, d, a.rspec(offset)); } - +inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); } +inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); } -inline void 
Assembler::ldf( FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); } -inline void Assembler::ldf( FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); } - -inline void Assembler::ldf( FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) { relocate(a.rspec(offset)); ldf( w, a.base(), a.disp() + offset, d); } +inline void Assembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) { relocate(a.rspec(offset)); ldf( w, a.base(), a.disp() + offset, d); } inline void Assembler::ldfsr( Register s1, Register s2) { v9_dep(); emit_long( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); } inline void Assembler::ldfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } @@ -136,49 +137,68 @@ #ifdef _LP64 // Make all 32 bit loads signed so 64 bit registers maintain proper sign -inline void Assembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); } -inline void Assembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); } +inline void Assembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); } +inline void Assembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); } #else -inline void Assembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); } -inline void Assembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); } +inline void Assembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); } +inline void Assembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); } +#endif + +#ifdef ASSERT + // ByteSize is only a class when ASSERT is defined, otherwise it's an int. 
+# ifdef _LP64 +inline void Assembler::ld( Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); } +# else +inline void Assembler::ld( Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); } +# endif #endif -inline void Assembler::ldub( Register s1, RegisterOrConstant s2, Register d) { - if (s2.is_register()) ldsb(s1, s2.as_register(), d); - else ldsb(s1, s2.as_constant(), d); +inline void Assembler::ld( const Address& a, Register d, int offset) { + if (a.has_index()) { assert(offset == 0, ""); ld( a.base(), a.index(), d); } + else { ld( a.base(), a.disp() + offset, d); } } -inline void Assembler::ldsb( Register s1, RegisterOrConstant s2, Register d) { - if (s2.is_register()) ldsb(s1, s2.as_register(), d); - else ldsb(s1, s2.as_constant(), d); +inline void Assembler::ldsb(const Address& a, Register d, int offset) { + if (a.has_index()) { assert(offset == 0, ""); ldsb(a.base(), a.index(), d); } + else { ldsb(a.base(), a.disp() + offset, d); } } -inline void Assembler::lduh( Register s1, RegisterOrConstant s2, Register d) { - if (s2.is_register()) ldsh(s1, s2.as_register(), d); - else ldsh(s1, s2.as_constant(), d); +inline void Assembler::ldsh(const Address& a, Register d, int offset) { + if (a.has_index()) { assert(offset == 0, ""); ldsh(a.base(), a.index(), d); } + else { ldsh(a.base(), a.disp() + offset, d); } } -inline void Assembler::ldsh( Register s1, RegisterOrConstant s2, Register d) { - if (s2.is_register()) ldsh(s1, s2.as_register(), d); - else ldsh(s1, s2.as_constant(), d); +inline void Assembler::ldsw(const Address& a, Register d, int offset) { + if (a.has_index()) { assert(offset == 0, ""); ldsw(a.base(), a.index(), d); } + else { ldsw(a.base(), a.disp() + offset, d); } +} +inline void Assembler::ldub(const Address& a, Register d, int offset) { + if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(), d); } + else { ldub(a.base(), a.disp() + offset, d); } } -inline void Assembler::lduw( 
Register s1, RegisterOrConstant s2, Register d) { - if (s2.is_register()) ldsw(s1, s2.as_register(), d); - else ldsw(s1, s2.as_constant(), d); +inline void Assembler::lduh(const Address& a, Register d, int offset) { + if (a.has_index()) { assert(offset == 0, ""); lduh(a.base(), a.index(), d); } + else { lduh(a.base(), a.disp() + offset, d); } } -inline void Assembler::ldsw( Register s1, RegisterOrConstant s2, Register d) { - if (s2.is_register()) ldsw(s1, s2.as_register(), d); - else ldsw(s1, s2.as_constant(), d); +inline void Assembler::lduw(const Address& a, Register d, int offset) { + if (a.has_index()) { assert(offset == 0, ""); lduw(a.base(), a.index(), d); } + else { lduw(a.base(), a.disp() + offset, d); } +} +inline void Assembler::ldd( const Address& a, Register d, int offset) { + if (a.has_index()) { assert(offset == 0, ""); ldd( a.base(), a.index(), d); } + else { ldd( a.base(), a.disp() + offset, d); } } -inline void Assembler::ldx( Register s1, RegisterOrConstant s2, Register d) { - if (s2.is_register()) ldx(s1, s2.as_register(), d); - else ldx(s1, s2.as_constant(), d); +inline void Assembler::ldx( const Address& a, Register d, int offset) { + if (a.has_index()) { assert(offset == 0, ""); ldx( a.base(), a.index(), d); } + else { ldx( a.base(), a.disp() + offset, d); } } -inline void Assembler::ld( Register s1, RegisterOrConstant s2, Register d) { - if (s2.is_register()) ld(s1, s2.as_register(), d); - else ld(s1, s2.as_constant(), d); -} -inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) { - if (s2.is_register()) ldd(s1, s2.as_register(), d); - else ldd(s1, s2.as_constant(), d); -} + +inline void Assembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); } +inline void Assembler::ldsb(Register s1, RegisterOrConstant s2, Register d) { ldsb(Address(s1, s2), d); } +inline void Assembler::lduh(Register s1, RegisterOrConstant s2, Register d) { lduh(Address(s1, s2), d); } +inline void 
Assembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); } +inline void Assembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); } +inline void Assembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); } +inline void Assembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); } +inline void Assembler::ld( Register s1, RegisterOrConstant s2, Register d) { ld( Address(s1, s2), d); } +inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); } // form effective addresses this way: inline void Assembler::add( Register s1, RegisterOrConstant s2, Register d, int offset) { @@ -187,17 +207,6 @@ if (offset != 0) add(d, offset, d); } -inline void Assembler::ld( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); ld( a.base(), a.disp() + offset, d ); } -inline void Assembler::ldsb( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); ldsb( a.base(), a.disp() + offset, d ); } -inline void Assembler::ldsh( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); ldsh( a.base(), a.disp() + offset, d ); } -inline void Assembler::ldsw( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); ldsw( a.base(), a.disp() + offset, d ); } -inline void Assembler::ldub( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); ldub( a.base(), a.disp() + offset, d ); } -inline void Assembler::lduh( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); lduh( a.base(), a.disp() + offset, d ); } -inline void Assembler::lduw( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); lduw( a.base(), a.disp() + offset, d ); } -inline void Assembler::ldd( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); ldd( a.base(), a.disp() + offset, d ); } -inline void Assembler::ldx( const 
Address& a, Register d, int offset ) { relocate(a.rspec(offset)); ldx( a.base(), a.disp() + offset, d ); } - - inline void Assembler::ldstub( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); } inline void Assembler::ldstub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } @@ -240,36 +249,44 @@ inline void Assembler::std( Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); } inline void Assembler::std( Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } -inline void Assembler::st( Register d, Register s1, Register s2) { stw(d, s1, s2); } -inline void Assembler::st( Register d, Register s1, int simm13a) { stw(d, s1, simm13a); } +inline void Assembler::st( Register d, Register s1, Register s2) { stw(d, s1, s2); } +inline void Assembler::st( Register d, Register s1, int simm13a) { stw(d, s1, simm13a); } + +#ifdef ASSERT +// ByteSize is only a class when ASSERT is defined, otherwise it's an int. 
+inline void Assembler::st( Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); } +#endif -inline void Assembler::stb( Register d, Register s1, RegisterOrConstant s2) { - if (s2.is_register()) stb(d, s1, s2.as_register()); - else stb(d, s1, s2.as_constant()); +inline void Assembler::stb(Register d, const Address& a, int offset) { + if (a.has_index()) { assert(offset == 0, ""); stb(d, a.base(), a.index() ); } + else { stb(d, a.base(), a.disp() + offset); } } -inline void Assembler::sth( Register d, Register s1, RegisterOrConstant s2) { - if (s2.is_register()) sth(d, s1, s2.as_register()); - else sth(d, s1, s2.as_constant()); +inline void Assembler::sth(Register d, const Address& a, int offset) { + if (a.has_index()) { assert(offset == 0, ""); sth(d, a.base(), a.index() ); } + else { sth(d, a.base(), a.disp() + offset); } } -inline void Assembler::stx( Register d, Register s1, RegisterOrConstant s2) { - if (s2.is_register()) stx(d, s1, s2.as_register()); - else stx(d, s1, s2.as_constant()); +inline void Assembler::stw(Register d, const Address& a, int offset) { + if (a.has_index()) { assert(offset == 0, ""); stw(d, a.base(), a.index() ); } + else { stw(d, a.base(), a.disp() + offset); } +} +inline void Assembler::st( Register d, const Address& a, int offset) { + if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index() ); } + else { st( d, a.base(), a.disp() + offset); } } -inline void Assembler::std( Register d, Register s1, RegisterOrConstant s2) { - if (s2.is_register()) std(d, s1, s2.as_register()); - else std(d, s1, s2.as_constant()); +inline void Assembler::std(Register d, const Address& a, int offset) { + if (a.has_index()) { assert(offset == 0, ""); std(d, a.base(), a.index() ); } + else { std(d, a.base(), a.disp() + offset); } } -inline void Assembler::st( Register d, Register s1, RegisterOrConstant s2) { - if (s2.is_register()) st(d, s1, s2.as_register()); - else st(d, s1, s2.as_constant()); +inline void 
Assembler::stx(Register d, const Address& a, int offset) { + if (a.has_index()) { assert(offset == 0, ""); stx(d, a.base(), a.index() ); } + else { stx(d, a.base(), a.disp() + offset); } } -inline void Assembler::stb( Register d, const Address& a, int offset) { relocate(a.rspec(offset)); stb( d, a.base(), a.disp() + offset); } -inline void Assembler::sth( Register d, const Address& a, int offset) { relocate(a.rspec(offset)); sth( d, a.base(), a.disp() + offset); } -inline void Assembler::stw( Register d, const Address& a, int offset) { relocate(a.rspec(offset)); stw( d, a.base(), a.disp() + offset); } -inline void Assembler::st( Register d, const Address& a, int offset) { relocate(a.rspec(offset)); st( d, a.base(), a.disp() + offset); } -inline void Assembler::std( Register d, const Address& a, int offset) { relocate(a.rspec(offset)); std( d, a.base(), a.disp() + offset); } -inline void Assembler::stx( Register d, const Address& a, int offset) { relocate(a.rspec(offset)); stx( d, a.base(), a.disp() + offset); } +inline void Assembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); } +inline void Assembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); } +inline void Assembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); } +inline void Assembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); } +inline void Assembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); } // v8 p 99 @@ -294,39 +311,46 @@ // Use the right loads/stores for the platform inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) { #ifdef _LP64 - Assembler::ldx( s1, s2, d); + Assembler::ldx(s1, s2, d); #else - Assembler::ld( s1, s2, d); + Assembler::ld( s1, s2, d); #endif } inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) { #ifdef _LP64 - Assembler::ldx( s1, simm13a, d); + Assembler::ldx(s1, 
simm13a, d); #else - Assembler::ld( s1, simm13a, d); + Assembler::ld( s1, simm13a, d); #endif } +#ifdef ASSERT +// ByteSize is only a class when ASSERT is defined, otherwise it's an int. +inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) { + ld_ptr(s1, in_bytes(simm13a), d); +} +#endif + inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) { #ifdef _LP64 - Assembler::ldx( s1, s2, d); + Assembler::ldx(s1, s2, d); #else - Assembler::ld( s1, s2, d); + Assembler::ld( s1, s2, d); #endif } -inline void MacroAssembler::ld_ptr( const Address& a, Register d, int offset ) { +inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) { #ifdef _LP64 - Assembler::ldx( a, d, offset ); + Assembler::ldx(a, d, offset); #else - Assembler::ld( a, d, offset ); + Assembler::ld( a, d, offset); #endif } inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) { #ifdef _LP64 - Assembler::stx( d, s1, s2); + Assembler::stx(d, s1, s2); #else Assembler::st( d, s1, s2); #endif @@ -334,25 +358,32 @@ inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) { #ifdef _LP64 - Assembler::stx( d, s1, simm13a); + Assembler::stx(d, s1, simm13a); #else Assembler::st( d, s1, simm13a); #endif } +#ifdef ASSERT +// ByteSize is only a class when ASSERT is defined, otherwise it's an int. 
+inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) { + st_ptr(d, s1, in_bytes(simm13a)); +} +#endif + inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) { #ifdef _LP64 - Assembler::stx( d, s1, s2); + Assembler::stx(d, s1, s2); #else Assembler::st( d, s1, s2); #endif } -inline void MacroAssembler::st_ptr( Register d, const Address& a, int offset) { +inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) { #ifdef _LP64 - Assembler::stx( d, a, offset); + Assembler::stx(d, a, offset); #else - Assembler::st( d, a, offset); + Assembler::st( d, a, offset); #endif } @@ -381,11 +412,11 @@ #endif } -inline void MacroAssembler::ld_long( const Address& a, Register d, int offset ) { +inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) { #ifdef _LP64 - Assembler::ldx(a, d, offset ); + Assembler::ldx(a, d, offset); #else - Assembler::ldd(a, d, offset ); + Assembler::ldd(a, d, offset); #endif } @@ -427,7 +458,7 @@ #ifdef _LP64 Assembler::sllx(s1, s2, d); #else - Assembler::sll(s1, s2, d); + Assembler::sll( s1, s2, d); #endif } @@ -435,7 +466,7 @@ #ifdef _LP64 Assembler::sllx(s1, imm6a, d); #else - Assembler::sll(s1, imm6a, d); + Assembler::sll( s1, imm6a, d); #endif } @@ -443,7 +474,7 @@ #ifdef _LP64 Assembler::srlx(s1, s2, d); #else - Assembler::srl(s1, s2, d); + Assembler::srl( s1, s2, d); #endif } @@ -451,7 +482,7 @@ #ifdef _LP64 Assembler::srlx(s1, imm6a, d); #else - Assembler::srl(s1, imm6a, d); + Assembler::srl( s1, imm6a, d); #endif } @@ -541,9 +572,8 @@ disp = (intptr_t)d - (intptr_t)pc(); if ( disp != (intptr_t)(int32_t)disp ) { relocate(rt); - Address dest(O7, (address)d); - sethi(dest, /*ForceRelocatable=*/ true); - jmpl(dest, O7); + AddressLiteral dest(d); + jumpl_to(dest, O7, O7); } else { Assembler::call( d, rt ); @@ -603,87 +633,72 @@ return thepc; } -inline void MacroAssembler::load_address( Address& a, int offset ) { + +inline void 
MacroAssembler::load_contents(AddressLiteral& addrlit, Register d, int offset) { assert_not_delayed(); -#ifdef _LP64 - sethi(a); - add(a, a.base(), offset); -#else - if (a.hi() == 0 && a.rtype() == relocInfo::none) { - set(a.disp() + offset, a.base()); - } - else { - sethi(a); - add(a, a.base(), offset); - } -#endif -} - - -inline void MacroAssembler::split_disp( Address& a, Register temp ) { - assert_not_delayed(); - a = a.split_disp(); - Assembler::sethi(a.hi(), temp, a.rspec()); - add(a.base(), temp, a.base()); + sethi(addrlit, d); + ld(d, addrlit.low10() + offset, d); } -inline void MacroAssembler::load_contents( Address& a, Register d, int offset ) { +inline void MacroAssembler::load_ptr_contents(AddressLiteral& addrlit, Register d, int offset) { assert_not_delayed(); - sethi(a); - ld(a, d, offset); + sethi(addrlit, d); + ld_ptr(d, addrlit.low10() + offset, d); } -inline void MacroAssembler::load_ptr_contents( Address& a, Register d, int offset ) { +inline void MacroAssembler::store_contents(Register s, AddressLiteral& addrlit, Register temp, int offset) { assert_not_delayed(); - sethi(a); - ld_ptr(a, d, offset); + sethi(addrlit, temp); + st(s, temp, addrlit.low10() + offset); } -inline void MacroAssembler::store_contents( Register s, Address& a, int offset ) { +inline void MacroAssembler::store_ptr_contents(Register s, AddressLiteral& addrlit, Register temp, int offset) { assert_not_delayed(); - sethi(a); - st(s, a, offset); -} - - -inline void MacroAssembler::store_ptr_contents( Register s, Address& a, int offset ) { - assert_not_delayed(); - sethi(a); - st_ptr(s, a, offset); + sethi(addrlit, temp); + st_ptr(s, temp, addrlit.low10() + offset); } // This code sequence is relocatable to any address, even on LP64. 
-inline void MacroAssembler::jumpl_to( Address& a, Register d, int offset ) { +inline void MacroAssembler::jumpl_to(AddressLiteral& addrlit, Register temp, Register d, int offset) { assert_not_delayed(); // Force fixed length sethi because NativeJump and NativeFarCall don't handle // variable length instruction streams. - sethi(a, /*ForceRelocatable=*/ true); - jmpl(a, d, offset); + patchable_sethi(addrlit, temp); + jmpl(temp, addrlit.low10() + offset, d); } -inline void MacroAssembler::jump_to( Address& a, int offset ) { - jumpl_to( a, G0, offset ); +inline void MacroAssembler::jump_to(AddressLiteral& addrlit, Register temp, int offset) { + jumpl_to(addrlit, temp, G0, offset); } -inline void MacroAssembler::set_oop( jobject obj, Register d ) { - set_oop(allocate_oop_address(obj, d)); +inline void MacroAssembler::jump_indirect_to(Address& a, Register temp, + int ld_offset, int jmp_offset) { + assert_not_delayed(); + //sethi(al); // sethi is caller responsibility for this one + ld_ptr(a, temp, ld_offset); + jmp(temp, jmp_offset); } -inline void MacroAssembler::set_oop_constant( jobject obj, Register d ) { - set_oop(constant_oop_address(obj, d)); +inline void MacroAssembler::set_oop(jobject obj, Register d) { + set_oop(allocate_oop_address(obj), d); } -inline void MacroAssembler::set_oop( Address obj_addr ) { - assert(obj_addr.rspec().type()==relocInfo::oop_type, "must be an oop reloc"); - load_address(obj_addr); +inline void MacroAssembler::set_oop_constant(jobject obj, Register d) { + set_oop(constant_oop_address(obj), d); +} + + +inline void MacroAssembler::set_oop(AddressLiteral& obj_addr, Register d) { + assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc"); + set(obj_addr, d); } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp --- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * 
Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -277,10 +277,11 @@ if (_id == load_klass_id) { // produce a copy of the load klass instruction for use by the being initialized case +#ifdef ASSERT address start = __ pc(); - Address addr = Address(_obj, address(NULL), oop_Relocation::spec(_oop_index)); - __ sethi(addr, true); - __ add(addr, _obj, 0); +#endif + AddressLiteral addrlit(NULL, oop_Relocation::spec(_oop_index)); + __ patchable_set(addrlit, _obj); #ifdef ASSERT for (int i = 0; i < _bytes_to_copy; i++) { diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/c1_FrameMap_sparc.cpp --- a/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -327,7 +327,7 @@ Address FrameMap::make_new_address(ByteSize sp_offset) const { - return Address(SP, 0, STACK_BIAS + in_bytes(sp_offset)); + return Address(SP, STACK_BIAS + in_bytes(sp_offset)); } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -196,7 +196,7 @@ // verify the interpreter's monitor has a non-null object { Label L; - __ ld_ptr(Address(OSR_buf, 0, slot_offset + BasicObjectLock::obj_offset_in_bytes()), O7); + __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7); __ cmp(G0, O7); __ br(Assembler::notEqual, false, Assembler::pt, L); __ delayed()->nop(); @@ -205,9 +205,9 @@ } #endif // ASSERT // Copy the lock field into the compiled activation. - __ ld_ptr(Address(OSR_buf, 0, slot_offset + BasicObjectLock::lock_offset_in_bytes()), O7); + __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes(), O7); __ st_ptr(O7, frame_map()->address_for_monitor_lock(i)); - __ ld_ptr(Address(OSR_buf, 0, slot_offset + BasicObjectLock::obj_offset_in_bytes()), O7); + __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7); __ st_ptr(O7, frame_map()->address_for_monitor_object(i)); } } @@ -238,21 +238,21 @@ int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position int count_offset = java_lang_String:: count_offset_in_bytes(); - __ ld_ptr(Address(str0, 0, value_offset), tmp0); - __ ld(Address(str0, 0, offset_offset), tmp2); + __ ld_ptr(str0, value_offset, tmp0); + __ ld(str0, offset_offset, tmp2); __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0); - __ ld(Address(str0, 0, count_offset), str0); + __ ld(str0, count_offset, str0); __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2); // str1 may be null 
add_debug_info_for_null_check_here(info); - __ ld_ptr(Address(str1, 0, value_offset), tmp1); + __ ld_ptr(str1, value_offset, tmp1); __ add(tmp0, tmp2, tmp0); - __ ld(Address(str1, 0, offset_offset), tmp2); + __ ld(str1, offset_offset, tmp2); __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1); - __ ld(Address(str1, 0, count_offset), str1); + __ ld(str1, count_offset, str1); __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2); __ subcc(str0, str1, O7); __ add(tmp1, tmp2, tmp1); @@ -412,9 +412,9 @@ #endif // ASSERT compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset()); - Address deopt_blob(G3_scratch, SharedRuntime::deopt_blob()->unpack()); - - __ JUMP(deopt_blob, 0); // sethi;jmp + AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack()); + + __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp __ delayed()->nop(); assert(code_offset() - offset <= deopt_handler_size, "overflow"); @@ -441,13 +441,12 @@ int oop_index = __ oop_recorder()->allocate_index((jobject)NULL); PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, oop_index); - Address addr = Address(reg, address(NULL), oop_Relocation::spec(oop_index)); - assert(addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc"); + AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index)); + assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc"); // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the // NULL will be dynamically patched later and the patched value may be large. 
We must // therefore generate the sethi/add as a placeholders - __ sethi(addr, true); - __ add(addr, reg, 0); + __ patchable_set(addrlit, reg); patching_epilog(patch, lir_patch_normal, reg, info); } @@ -706,7 +705,7 @@ void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) { add_debug_info_for_null_check_here(info); - __ ld_ptr(Address(O0, 0, oopDesc::klass_offset_in_bytes()), G3_scratch); + __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch); if (__ is_simm13(vtable_offset) ) { __ ld_ptr(G3_scratch, vtable_offset, G5_method); } else { @@ -715,7 +714,7 @@ // ld_ptr, set_hi, set __ ld_ptr(G3_scratch, G5_method, G5_method); } - __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3_scratch); + __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3_scratch); __ callr(G3_scratch, G0); // the peephole pass fills the delay slot } @@ -738,8 +737,7 @@ default : ShouldNotReachHere(); } } else { - __ sethi(disp & ~0x3ff, O7, true); - __ add(O7, disp & 0x3ff, O7); + __ set(disp, O7); if (info != NULL) add_debug_info_for_null_check_here(info); load_offset = code_offset(); switch(ld_type) { @@ -775,8 +773,7 @@ default : ShouldNotReachHere(); } } else { - __ sethi(offset & ~0x3ff, O7, true); - __ add(O7, offset & 0x3ff, O7); + __ set(offset, O7); if (info != NULL) add_debug_info_for_null_check_here(info); switch (type) { case T_BOOLEAN: // fall through @@ -813,8 +810,7 @@ __ ldf(w, s, disp, d); } } else { - __ sethi(disp & ~0x3ff, O7, true); - __ add(O7, disp & 0x3ff, O7); + __ set(disp, O7); if (info != NULL) add_debug_info_for_null_check_here(info); __ ldf(w, s, O7, d); } @@ -839,8 +835,7 @@ __ stf(w, value, base, offset); } } else { - __ sethi(offset & ~0x3ff, O7, true); - __ add(O7, offset & 0x3ff, O7); + __ set(offset, O7); if (info != NULL) add_debug_info_for_null_check_here(info); __ stf(w, value, O7, base); } @@ -852,8 +847,7 @@ if (!Assembler::is_simm13(offset + (type == T_LONG) ? 
wordSize : 0)) { assert(!unaligned, "can't handle this"); // for offsets larger than a simm13 we setup the offset in O7 - __ sethi(offset & ~0x3ff, O7, true); - __ add(O7, offset & 0x3ff, O7); + __ set(offset, O7); store_offset = store(from_reg, base, O7, type); } else { if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register()); @@ -937,8 +931,7 @@ assert(base != O7, "destroying register"); assert(!unaligned, "can't handle this"); // for offsets larger than a simm13 we setup the offset in O7 - __ sethi(offset & ~0x3ff, O7, true); - __ add(O7, offset & 0x3ff, O7); + __ set(offset, O7); load_offset = load(base, O7, to_reg, type); } else { load_offset = code_offset(); @@ -1213,7 +1206,7 @@ assert(to_reg->is_single_fpu(), "wrong register kind"); __ set(con, O7); - Address temp_slot(SP, 0, (frame::register_save_words * wordSize) + STACK_BIAS); + Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS); __ st(O7, temp_slot); __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg()); } @@ -1238,8 +1231,8 @@ } else { ShouldNotReachHere(); assert(to_reg->is_double_fpu(), "wrong register kind"); - Address temp_slot_lo(SP, 0, ((frame::register_save_words ) * wordSize) + STACK_BIAS); - Address temp_slot_hi(SP, 0, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS); + Address temp_slot_lo(SP, ((frame::register_save_words ) * wordSize) + STACK_BIAS); + Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS); __ set(low(con), O7); __ st(O7, temp_slot_lo); __ set(high(con), O7); @@ -1267,17 +1260,16 @@ break; } RelocationHolder rspec = internal_word_Relocation::spec(const_addr); + AddressLiteral const_addrlit(const_addr, rspec); if (to_reg->is_single_fpu()) { - __ sethi( (intx)const_addr & ~0x3ff, O7, true, rspec); + __ patchable_sethi(const_addrlit, O7); __ relocate(rspec); - - int offset = (intx)const_addr & 0x3ff; - __ ldf (FloatRegisterImpl::S, O7, offset, 
to_reg->as_float_reg()); + __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg()); } else { assert(to_reg->is_single_cpu(), "Must be a cpu register."); - __ set((intx)const_addr, O7, rspec); + __ set(const_addrlit, O7); load(O7, 0, to_reg->as_register(), T_INT); } } @@ -1293,10 +1285,10 @@ RelocationHolder rspec = internal_word_Relocation::spec(const_addr); if (to_reg->is_double_fpu()) { - __ sethi( (intx)const_addr & ~0x3ff, O7, true, rspec); - int offset = (intx)const_addr & 0x3ff; + AddressLiteral const_addrlit(const_addr, rspec); + __ patchable_sethi(const_addrlit, O7); __ relocate(rspec); - __ ldf (FloatRegisterImpl::D, O7, offset, to_reg->as_double_reg()); + __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg()); } else { assert(to_reg->is_double_cpu(), "Must be a long register."); #ifdef _LP64 @@ -1317,7 +1309,7 @@ Address LIR_Assembler::as_Address(LIR_Address* addr) { Register reg = addr->base()->as_register(); - return Address(reg, 0, addr->disp()); + return Address(reg, addr->disp()); } @@ -1360,13 +1352,13 @@ Address LIR_Assembler::as_Address_hi(LIR_Address* addr) { Address base = as_Address(addr); - return Address(base.base(), 0, base.disp() + hi_word_offset_in_bytes); + return Address(base.base(), base.disp() + hi_word_offset_in_bytes); } Address LIR_Assembler::as_Address_lo(LIR_Address* addr) { Address base = as_Address(addr); - return Address(base.base(), 0, base.disp() + lo_word_offset_in_bytes); + return Address(base.base(), base.disp() + lo_word_offset_in_bytes); } @@ -1396,8 +1388,7 @@ if (addr->index()->is_illegal()) { if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) { if (needs_patching) { - __ sethi(0, O7, true); - __ add(O7, 0, O7); + __ patchable_set(0, O7); } else { __ set(disp_value, O7); } @@ -1544,8 +1535,7 @@ if (addr->index()->is_illegal()) { if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) { 
if (needs_patching) { - __ sethi(0, O7, true); - __ add(O7, 0, O7); + __ patchable_set(0, O7); } else { __ set(disp_value, O7); } @@ -1627,8 +1617,8 @@ __ set_oop(NULL, G5); // must be set to -1 at code generation time - Address a(G3, (address)-1); - __ jump_to(a, 0); + AddressLiteral addrlit(-1); + __ jump_to(addrlit, G3); __ delayed()->nop(); assert(__ offset() - start <= call_stub_size, "stub too big"); @@ -2063,7 +2053,7 @@ address pc_for_athrow = __ pc(); int pc_for_athrow_offset = __ offset(); RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow); - __ set((intptr_t)pc_for_athrow, Oissuing_pc, rspec); + __ set(pc_for_athrow, Oissuing_pc, rspec); add_call_info(pc_for_athrow_offset, info); // for exception handler __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type); @@ -2451,7 +2441,7 @@ } - Address flags_addr(mdo, 0, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias); + Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias); __ ldub(flags_addr, data_val); __ or3(data_val, BitData::null_seen_byte_constant(), data_val); __ stb(data_val, flags_addr); @@ -2738,7 +2728,7 @@ __ add(mdo, O7, mdo); } - Address counter_addr(mdo, 0, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); + Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); __ lduw(counter_addr, tmp1); __ add(tmp1, DataLayout::counter_increment, tmp1); __ stw(tmp1, counter_addr); @@ -2764,8 +2754,8 @@ for (i = 0; i < VirtualCallData::row_limit(); i++) { ciKlass* receiver = vc_data->receiver(i); if (known_klass->equals(receiver)) { - Address data_addr(mdo, 0, md->byte_offset_of_slot(data, - VirtualCallData::receiver_count_offset(i)) - + Address data_addr(mdo, md->byte_offset_of_slot(data, + VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias); __ lduw(data_addr, tmp1); __ add(tmp1, 
DataLayout::counter_increment, tmp1); @@ -2782,11 +2772,11 @@ for (i = 0; i < VirtualCallData::row_limit(); i++) { ciKlass* receiver = vc_data->receiver(i); if (receiver == NULL) { - Address recv_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - + Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - mdo_offset_bias); jobject2reg(known_klass->encoding(), tmp1); __ st_ptr(tmp1, recv_addr); - Address data_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - + Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias); __ lduw(data_addr, tmp1); __ add(tmp1, DataLayout::counter_increment, tmp1); @@ -2795,20 +2785,20 @@ } } } else { - load(Address(recv, 0, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT); + load(Address(recv, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT); Label update_done; uint i; for (i = 0; i < VirtualCallData::row_limit(); i++) { Label next_test; // See if the receiver is receiver[n]. 
- Address receiver_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - + Address receiver_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - mdo_offset_bias); __ ld_ptr(receiver_addr, tmp1); __ verify_oop(tmp1); __ cmp(recv, tmp1); __ brx(Assembler::notEqual, false, Assembler::pt, next_test); __ delayed()->nop(); - Address data_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - + Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias); __ lduw(data_addr, tmp1); __ add(tmp1, DataLayout::counter_increment, tmp1); @@ -2821,7 +2811,7 @@ // Didn't find receiver; find next empty slot and fill it in for (i = 0; i < VirtualCallData::row_limit(); i++) { Label next_test; - Address recv_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - + Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - mdo_offset_bias); load(recv_addr, tmp1, T_OBJECT); __ tst(tmp1); @@ -2829,8 +2819,8 @@ __ delayed()->nop(); __ st_ptr(recv, recv_addr); __ set(DataLayout::counter_increment, tmp1); - __ st_ptr(tmp1, Address(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - - mdo_offset_bias)); + __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - + mdo_offset_bias); if (i < (VirtualCallData::row_limit() - 1)) { __ br(Assembler::always, false, Assembler::pt, update_done); __ delayed()->nop(); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp --- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,13 +29,13 @@ Label L; const Register temp_reg = G3_scratch; // Note: needs more testing of out-of-line vs. inline slow case - Address ic_miss(temp_reg, SharedRuntime::get_ic_miss_stub()); verify_oop(receiver); ld_ptr(receiver, oopDesc::klass_offset_in_bytes(), temp_reg); cmp(temp_reg, iCache); brx(Assembler::equal, true, Assembler::pt, L); delayed()->nop(); - jump_to(ic_miss, 0); + AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub()); + jump_to(ic_miss, temp_reg); delayed()->nop(); align(CodeEntryAlignment); bind(L); @@ -84,7 +84,7 @@ Label done; - Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes()); + Address mark_addr(Roop, oopDesc::mark_offset_in_bytes()); // The following move must be the first instruction of emitted since debug // information may be generated for it. @@ -132,7 +132,7 @@ Label done; - Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes()); + Address mark_addr(Roop, oopDesc::mark_offset_in_bytes()); assert(mark_addr.disp() == 0, "cas must take a zero displacement"); if (UseBiasedLocking) { @@ -370,7 +370,7 @@ void C1_MacroAssembler::verify_stack_oop(int stack_offset) { if (!VerifyOops) return; - verify_oop_addr(Address(SP, 0, stack_offset + STACK_BIAS)); + verify_oop_addr(Address(SP, stack_offset + STACK_BIAS)); } void C1_MacroAssembler::verify_not_null_oop(Register r) { diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/c1_Runtime1_sparc.cpp --- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -57,13 +57,13 @@ // check for pending exceptions { Label L; - Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset())); + Address exception_addr(G2_thread, Thread::pending_exception_offset()); ld_ptr(exception_addr, Gtemp); br_null(Gtemp, false, pt, L); delayed()->nop(); - Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset())); + Address vm_result_addr(G2_thread, JavaThread::vm_result_offset()); st_ptr(G0, vm_result_addr); - Address vm_result_addr_2(G2_thread, 0, in_bytes(JavaThread::vm_result_2_offset())); + Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset()); st_ptr(G0, vm_result_addr_2); if (frame_size() == no_frame_size) { @@ -73,8 +73,8 @@ } else if (_stub_id == Runtime1::forward_exception_id) { should_not_reach_here(); } else { - Address exc(G4, Runtime1::entry_for(Runtime1::forward_exception_id)); - jump_to(exc, 0); + AddressLiteral exc(Runtime1::entry_for(Runtime1::forward_exception_id)); + jump_to(exc, G4); delayed()->nop(); } bind(L); @@ -85,7 +85,7 @@ get_vm_result (oop_result1); } else { // be a little paranoid and clear the result - Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset())); + Address vm_result_addr(G2_thread, JavaThread::vm_result_offset()); st_ptr(G0, vm_result_addr); } @@ -93,7 +93,7 @@ get_vm_result_2(oop_result2); } else { // be a little paranoid and clear the result - Address vm_result_addr_2(G2_thread, 0, in_bytes(JavaThread::vm_result_2_offset())); + Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset()); st_ptr(G0, vm_result_addr_2); } @@ -479,8 +479,8 @@ Register G4_length = G4; // Incoming Register O0_obj = O0; // Outgoing - Address klass_lh(G5_klass, 0, ((klassOopDesc::header_size() * HeapWordSize) - + Klass::layout_helper_offset_in_bytes())); + Address klass_lh(G5_klass, ((klassOopDesc::header_size() * HeapWordSize) + + 
Klass::layout_helper_offset_in_bytes())); assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); assert(Klass::_lh_header_size_mask == 0xFF, "bytewise"); // Use this offset to pick out an individual byte of the layout_helper: @@ -902,8 +902,8 @@ __ srl(addr, CardTableModRefBS::card_shift, addr); #endif - Address rs(cardtable, (address)byte_map_base); - __ load_address(rs); // cardtable := + AddressLiteral rs(byte_map_base); + __ set(rs, cardtable); // cardtable := __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable] __ br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt, @@ -1022,8 +1022,8 @@ __ restore(); - Address exc(G4, Runtime1::entry_for(Runtime1::unwind_exception_id)); - __ jump_to(exc, 0); + AddressLiteral exc(Runtime1::entry_for(Runtime1::unwind_exception_id)); + __ jump_to(exc, G4); __ delayed()->nop(); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/cppInterpreter_sparc.cpp --- a/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2007-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2007-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1017,6 +1017,7 @@ const int slop_factor = 2*wordSize; const int fixed_size = ((sizeof(BytecodeInterpreter) + slop_factor) >> LogBytesPerWord) + // what is the slop factor? + //6815692//methodOopDesc::extra_stack_words() + // extra push slots for MH adapters frame::memory_parameter_word_sp_offset + // register save area + param window (native ? 
frame::interpreter_frame_extra_outgoing_argument_words : 0); // JNI, class @@ -1163,6 +1164,9 @@ __ st_ptr(O2, XXX_STATE(_stack)); // PREPUSH __ lduh(max_stack, O3); // Full size expression stack + guarantee(!EnableMethodHandles, "no support yet for java.dyn.MethodHandle"); //6815692 + //6815692//if (EnableMethodHandles) + //6815692// __ inc(O3, methodOopDesc::extra_stack_entries()); __ sll(O3, LogBytesPerWord, O3); __ sub(O2, O3, O3); // __ sub(O3, wordSize, O3); // so prepush doesn't look out of bounds @@ -2017,7 +2021,9 @@ const int fixed_size = sizeof(BytecodeInterpreter)/wordSize + // interpreter state object frame::memory_parameter_word_sp_offset; // register save area + param window + const int extra_stack = 0; //6815692//methodOopDesc::extra_stack_entries(); return (round_to(max_stack + + extra_stack + slop_factor + fixed_size + monitor_size + @@ -2104,7 +2110,8 @@ // Need +1 here because stack_base points to the word just above the first expr stack entry // and stack_limit is supposed to point to the word just below the last expr stack entry. // See generate_compute_interpreter_state. - to_fill->_stack_limit = stack_base - (method->max_stack() + 1); + int extra_stack = 0; //6815692//methodOopDesc::extra_stack_entries(); + to_fill->_stack_limit = stack_base - (method->max_stack() + 1 + extra_stack); to_fill->_monitor_base = (BasicObjectLock*) monitor_base; // sparc specific diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/dump_sparc.cpp --- a/src/cpu/sparc/vm/dump_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/dump_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2004-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2004-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -106,8 +106,7 @@ __ and3(L0, 255, L4); // Isolate L3 = method offset;. 
__ sll(L4, LogBytesPerWord, L4); __ ld_ptr(L3, L4, L4); // Get address of correct virtual method - Address method(L4, 0); - __ jmpl(method, G0); // Jump to correct method. + __ jmpl(L4, 0, G0); // Jump to correct method. __ delayed()->restore(); // Restore registers. __ flush(); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/icBuffer_sparc.cpp --- a/src/cpu/sparc/vm/icBuffer_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/icBuffer_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,14 +46,13 @@ // (1) the oop is old (i.e., doesn't matter for scavenges) // (2) these ICStubs are removed *before* a GC happens, so the roots disappear assert(cached_oop == NULL || cached_oop->is_perm(), "must be old oop"); - Address cached_oop_addr(G5_inline_cache_reg, address(cached_oop)); - // Force the sethi to generate the fixed sequence so next_instruction_address works - masm->sethi(cached_oop_addr, true /* ForceRelocatable */ ); - masm->add(cached_oop_addr, G5_inline_cache_reg); + AddressLiteral cached_oop_addrlit(cached_oop, relocInfo::none); + // Force the set to generate the fixed sequence so next_instruction_address works + masm->patchable_set(cached_oop_addrlit, G5_inline_cache_reg); assert(G3_scratch != G5_method, "Do not clobber the method oop in the transition stub"); assert(G3_scratch != G5_inline_cache_reg, "Do not clobber the inline cache register in the transition stub"); - Address entry(G3_scratch, entry_point); - masm->JUMP(entry, 0); + AddressLiteral entry(entry_point); + masm->JUMP(entry, G3_scratch, 0); masm->delayed()->nop(); masm->flush(); } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/interp_masm_sparc.cpp --- 
a/src/cpu/sparc/vm/interp_masm_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,8 +35,8 @@ // This file specializes the assember with interpreter-specific macros -const Address InterpreterMacroAssembler::l_tmp( FP, 0, (frame::interpreter_frame_l_scratch_fp_offset * wordSize ) + STACK_BIAS); -const Address InterpreterMacroAssembler::d_tmp( FP, 0, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS); +const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS); +const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS); #else // CC_INTERP #ifndef STATE @@ -78,14 +78,12 @@ sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress);// get entry addr #else - ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode + ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode // dispatch table to use - Address tbl(G3_scratch, (address)Interpreter::dispatch_table(state)); - - sethi(tbl); - sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize - add(tbl, tbl.base(), 0); - ld_ptr( G3_scratch, Lbyte_code, IdispatchAddress); // get entry addr + AddressLiteral tbl(Interpreter::dispatch_table(state)); + sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize + set(tbl, G3_scratch); // compute addr of table + ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress); // get entry addr #endif } @@ -165,8 +163,7 @@ Label L; // Check the "pending popframe condition" flag in the current thread - 
Address popframe_condition_addr(G2_thread, 0, in_bytes(JavaThread::popframe_condition_offset())); - ld(popframe_condition_addr, scratch_reg); + ld(G2_thread, JavaThread::popframe_condition_offset(), scratch_reg); // Initiate popframe handling only if it is not already being processed. If the flag // has the popframe_processing bit set, it means that this code is called *during* popframe @@ -192,11 +189,10 @@ void InterpreterMacroAssembler::load_earlyret_value(TosState state) { Register thr_state = G4_scratch; - ld_ptr(Address(G2_thread, 0, in_bytes(JavaThread::jvmti_thread_state_offset())), - thr_state); - const Address tos_addr(thr_state, 0, in_bytes(JvmtiThreadState::earlyret_tos_offset())); - const Address oop_addr(thr_state, 0, in_bytes(JvmtiThreadState::earlyret_oop_offset())); - const Address val_addr(thr_state, 0, in_bytes(JvmtiThreadState::earlyret_value_offset())); + ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state); + const Address tos_addr(thr_state, JvmtiThreadState::earlyret_tos_offset()); + const Address oop_addr(thr_state, JvmtiThreadState::earlyret_oop_offset()); + const Address val_addr(thr_state, JvmtiThreadState::earlyret_value_offset()); switch (state) { case ltos: ld_long(val_addr, Otos_l); break; case atos: ld_ptr(oop_addr, Otos_l); @@ -222,8 +218,7 @@ if (JvmtiExport::can_force_early_return()) { Label L; Register thr_state = G3_scratch; - ld_ptr(Address(G2_thread, 0, in_bytes(JavaThread::jvmti_thread_state_offset())), - thr_state); + ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state); tst(thr_state); br(zero, false, pt, L); // if (thread->jvmti_thread_state() == NULL) exit; delayed()->nop(); @@ -231,16 +226,14 @@ // Initiate earlyret handling only if it is not already being processed. // If the flag has the earlyret_processing bit set, it means that this code // is called *during* earlyret handling - we don't want to reenter. 
- ld(Address(thr_state, 0, in_bytes(JvmtiThreadState::earlyret_state_offset())), - G4_scratch); + ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch); cmp(G4_scratch, JvmtiThreadState::earlyret_pending); br(Assembler::notEqual, false, pt, L); delayed()->nop(); // Call Interpreter::remove_activation_early_entry() to get the address of the // same-named entrypoint in the generated interpreter code - Address tos_addr(thr_state, 0, in_bytes(JvmtiThreadState::earlyret_tos_offset())); - ld(tos_addr, Otos_l1); + ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1); call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1); // Jump to Interpreter::_remove_activation_early_entry @@ -294,10 +287,9 @@ } else { #endif // dispatch table to use - Address tbl(G3_scratch, (address)table); - + AddressLiteral tbl(table); sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize - load_address(tbl); // compute addr of table + set(tbl, G3_scratch); // compute addr of table ld_ptr(G3_scratch, Lbyte_code, G3_scratch); // get entry addr #ifdef FAST_DISPATCH } @@ -601,26 +593,17 @@ // Reset SP by subtracting more space from Lesp. Label done; - - const Address max_stack (Lmethod, 0, in_bytes(methodOopDesc::max_stack_offset())); - const Address access_flags(Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset())); - verify_oop(Lmethod); - - - assert( G4_scratch != Gframe_size, - "Only you can prevent register aliasing!"); + assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!"); // A native does not need to do this, since its callee does not change SP. - ld(access_flags, Gframe_size); + ld(Lmethod, methodOopDesc::access_flags_offset(), Gframe_size); // Load access flags. 
btst(JVM_ACC_NATIVE, Gframe_size); br(Assembler::notZero, false, Assembler::pt, done); delayed()->nop(); - // // Compute max expression stack+register save area - // - lduh( max_stack, Gframe_size ); + lduh(Lmethod, in_bytes(methodOopDesc::max_stack_offset()), Gframe_size); // Load max stack. if (TaggedStackInterpreter) sll ( Gframe_size, 1, Gframe_size); // max_stack * 2 for TAGS add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size ); @@ -721,8 +704,7 @@ verify_thread(); Label skip_compiled_code; - const Address interp_only (G2_thread, 0, in_bytes(JavaThread::interp_only_mode_offset())); - + const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset()); ld(interp_only, scratch); tst(scratch); br(Assembler::notZero, true, Assembler::pn, skip_compiled_code); @@ -916,8 +898,8 @@ Register Rscratch, Label& ok ) { assert(throw_entry_point != NULL, "entry point must be generated by now"); - Address dest(Rscratch, throw_entry_point); - jump_to(dest); + AddressLiteral dest(throw_entry_point); + jump_to(dest, Rscratch); delayed()->nop(); bind(ok); } @@ -1035,18 +1017,18 @@ Label unlocked, unlock, no_unlock; // get the value of _do_not_unlock_if_synchronized into G1_scratch - const Address do_not_unlock_if_synchronized(G2_thread, 0, - in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); + const Address do_not_unlock_if_synchronized(G2_thread, + JavaThread::do_not_unlock_if_synchronized_offset()); ldbool(do_not_unlock_if_synchronized, G1_scratch); stbool(G0, do_not_unlock_if_synchronized); // reset the flag // check if synchronized method - const Address access_flags(Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset())); + const Address access_flags(Lmethod, methodOopDesc::access_flags_offset()); interp_verify_oop(Otos_i, state, __FILE__, __LINE__); push(state); // save tos - ld(access_flags, G3_scratch); + ld(access_flags, G3_scratch); // Load access flags. 
btst(JVM_ACC_SYNCHRONIZED, G3_scratch); - br( zero, false, pt, unlocked); + br(zero, false, pt, unlocked); delayed()->nop(); // Don't unlock anything if the _do_not_unlock_if_synchronized flag @@ -1236,8 +1218,8 @@ Register obj_reg = Object; Register mark_reg = G4_scratch; Register temp_reg = G1_scratch; - Address lock_addr = Address(lock_reg, 0, BasicObjectLock::lock_offset_in_bytes()); - Address mark_addr = Address(obj_reg, 0, oopDesc::mark_offset_in_bytes()); + Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes()); + Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes()); Label done; Label slow_case; @@ -1315,9 +1297,8 @@ Register obj_reg = G3_scratch; Register mark_reg = G4_scratch; Register displaced_header_reg = G1_scratch; - Address lock_addr = Address(lock_reg, 0, BasicObjectLock::lock_offset_in_bytes()); - Address lockobj_addr = Address(lock_reg, 0, BasicObjectLock::obj_offset_in_bytes()); - Address mark_addr = Address(obj_reg, 0, oopDesc::mark_offset_in_bytes()); + Address lockobj_addr(lock_reg, BasicObjectLock::obj_offset_in_bytes()); + Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes()); Label done; if (UseBiasedLocking) { @@ -1328,7 +1309,8 @@ } // Test first if we are in the fast recursive case - ld_ptr(lock_addr, displaced_header_reg, BasicLock::displaced_header_offset_in_bytes()); + Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes()); + ld_ptr(lock_addr, displaced_header_reg); br_null(displaced_header_reg, true, Assembler::pn, done); delayed()->st_ptr(G0, lockobj_addr); // free entry @@ -1384,7 +1366,7 @@ Label zero_continue; // Test MDO to avoid the call if it is NULL. 
- ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr); + ld_ptr(Lmethod, methodOopDesc::method_data_offset(), ImethodDataPtr); test_method_data_pointer(zero_continue); call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp); set_method_data_pointer_offset(O0); @@ -1413,7 +1395,7 @@ // If the mdp is valid, it will point to a DataLayout header which is // consistent with the bcp. The converse is highly probable also. lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch); - ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), O5); + ld_ptr(Lmethod, methodOopDesc::const_offset(), O5); add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), G3_scratch); add(G3_scratch, O5, G3_scratch); cmp(Lbcp, G3_scratch); @@ -1424,7 +1406,7 @@ // %%% should use call_VM_leaf here? //call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr); save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1); - Address d_save(FP, 0, -sizeof(jdouble) + STACK_BIAS); + Address d_save(FP, -sizeof(jdouble) + STACK_BIAS); stf(FloatRegisterImpl::D, Ftos_d, d_save); mov(temp_reg->after_save(), O2); save_thread(L7_thread_cache); @@ -1456,14 +1438,14 @@ #endif // Test to see if we should create a method data oop - Address profile_limit(Rtmp, (address)&InvocationCounter::InterpreterProfileLimit); + AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit); #ifdef _LP64 delayed()->nop(); - sethi(profile_limit); + sethi(profile_limit, Rtmp); #else - delayed()->sethi(profile_limit); + delayed()->sethi(profile_limit, Rtmp); #endif - ld(profile_limit, Rtmp); + ld(Rtmp, profile_limit.low10(), Rtmp); cmp(invocation_count, Rtmp); br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue); delayed()->nop(); @@ -1521,7 +1503,7 @@ Register bumped_count, bool decrement) { // Locate the counter at a fixed offset from the mdp: - Address counter(ImethodDataPtr, 0, constant); + 
Address counter(ImethodDataPtr, constant); increment_mdp_data_at(counter, bumped_count, decrement); } @@ -1535,7 +1517,7 @@ bool decrement) { // Add the constant to reg to get the offset. add(ImethodDataPtr, reg, scratch2); - Address counter(scratch2, 0, constant); + Address counter(scratch2, constant); increment_mdp_data_at(counter, bumped_count, decrement); } @@ -2201,7 +2183,7 @@ Address InterpreterMacroAssembler::top_most_monitor() { - return Address(FP, 0, top_most_monitor_byte_offset()); + return Address(FP, top_most_monitor_byte_offset()); } @@ -2214,15 +2196,15 @@ void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Register Rtmp2 ) { assert(UseCompiler, "incrementing must be useful"); #ifdef CC_INTERP - Address inv_counter(G5_method, 0, in_bytes(methodOopDesc::invocation_counter_offset() - + InvocationCounter::counter_offset())); - Address be_counter(G5_method, 0, in_bytes(methodOopDesc::backedge_counter_offset() - + InvocationCounter::counter_offset())); + Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() + + InvocationCounter::counter_offset()); + Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() + + InvocationCounter::counter_offset()); #else - Address inv_counter(Lmethod, 0, in_bytes(methodOopDesc::invocation_counter_offset() - + InvocationCounter::counter_offset())); - Address be_counter(Lmethod, 0, in_bytes(methodOopDesc::backedge_counter_offset() - + InvocationCounter::counter_offset())); + Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() + + InvocationCounter::counter_offset()); + Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() + + InvocationCounter::counter_offset()); #endif /* CC_INTERP */ int delta = InvocationCounter::count_increment; @@ -2250,15 +2232,15 @@ void InterpreterMacroAssembler::increment_backedge_counter( Register Rtmp, Register Rtmp2 ) { assert(UseCompiler, "incrementing must be useful"); #ifdef CC_INTERP - Address 
be_counter(G5_method, 0, in_bytes(methodOopDesc::backedge_counter_offset() - + InvocationCounter::counter_offset())); - Address inv_counter(G5_method, 0, in_bytes(methodOopDesc::invocation_counter_offset() - + InvocationCounter::counter_offset())); + Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() + + InvocationCounter::counter_offset()); + Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() + + InvocationCounter::counter_offset()); #else - Address be_counter(Lmethod, 0, in_bytes(methodOopDesc::backedge_counter_offset() - + InvocationCounter::counter_offset())); - Address inv_counter(Lmethod, 0, in_bytes(methodOopDesc::invocation_counter_offset() - + InvocationCounter::counter_offset())); + Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() + + InvocationCounter::counter_offset()); + Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() + + InvocationCounter::counter_offset()); #endif /* CC_INTERP */ int delta = InvocationCounter::count_increment; // Load each counter in a register @@ -2289,7 +2271,7 @@ assert_different_registers(backedge_count, Rtmp, branch_bcp); assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr"); - Address limit(Rtmp, address(&InvocationCounter::InterpreterBackwardBranchLimit)); + AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit); load_contents(limit, Rtmp); cmp(backedge_count, Rtmp); br(Assembler::lessUnsigned, false, Assembler::pt, did_not_overflow); @@ -2435,9 +2417,7 @@ if (JvmtiExport::can_post_interpreter_events()) { Label L; Register temp_reg = O5; - - const Address interp_only (G2_thread, 0, in_bytes(JavaThread::interp_only_mode_offset())); - + const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset()); ld(interp_only, temp_reg); tst(temp_reg); br(zero, false, pt, L); @@ -2489,9 +2469,7 @@ if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) { Label L; Register 
temp_reg = O5; - - const Address interp_only (G2_thread, 0, in_bytes(JavaThread::interp_only_mode_offset())); - + const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset()); ld(interp_only, temp_reg); tst(temp_reg); br(zero, false, pt, L); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/interpreterGenerator_sparc.hpp --- a/src/cpu/sparc/vm/interpreterGenerator_sparc.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/interpreterGenerator_sparc.hpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ address generate_normal_entry(bool synchronized); address generate_native_entry(bool synchronized); address generate_abstract_entry(void); + address generate_method_handle_entry(void); address generate_math_entry(AbstractInterpreter::MethodKind kind); address generate_empty_entry(void); address generate_accessor_entry(void); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/interpreterRT_sparc.cpp --- a/src/cpu/sparc/vm/interpreterRT_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/interpreterRT_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -105,7 +105,7 @@ // the handle for a receiver will never be null bool do_NULL_check = offset() != 0 || is_static(); - Address h_arg = Address(Llocals, 0, Interpreter::local_offset_in_bytes(offset())); + Address h_arg = Address(Llocals, Interpreter::local_offset_in_bytes(offset())); __ ld_ptr(h_arg, Rtmp1); #ifdef ASSERT if (TaggedStackInterpreter) { @@ -120,14 +120,14 @@ } #endif // ASSERT if (!do_NULL_check) { - __ add(h_arg, Rtmp2); + __ add(h_arg.base(), h_arg.disp(), Rtmp2); } else { if (Rtmp1 == Rtmp2) __ tst(Rtmp1); else __ addcc(G0, Rtmp1, Rtmp2); // optimize mov/test pair Label L; __ brx(Assembler::notZero, true, Assembler::pt, L); - __ delayed()->add(h_arg, Rtmp2); + __ delayed()->add(h_arg.base(), h_arg.disp(), Rtmp2); __ bind(L); } __ store_ptr_argument(Rtmp2, jni_arg); // this is often a no-op @@ -140,10 +140,10 @@ iterate(fingerprint); // return result handler - Address result_handler(Lscratch, Interpreter::result_handler(method()->result_type())); - __ sethi(result_handler); + AddressLiteral result_handler(Interpreter::result_handler(method()->result_type())); + __ sethi(result_handler, Lscratch); __ retl(); - __ delayed()->add(result_handler, result_handler.base()); + __ delayed()->add(Lscratch, result_handler.low10(), Lscratch); __ flush(); } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/interpreter_sparc.cpp --- a/src/cpu/sparc/vm/interpreter_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/interpreter_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -235,6 +235,19 @@ } + +// Method handle invoker +// Dispatch a method of the form java.dyn.MethodHandles::invoke(...) 
+address InterpreterGenerator::generate_method_handle_entry(void) { + if (!EnableMethodHandles) { + return generate_abstract_entry(); + } + return generate_abstract_entry(); //6815692// +} + + + + //---------------------------------------------------------------------------------------------------- // Entry points & stack frame layout // @@ -364,6 +377,7 @@ case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break; case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break; case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break; + case Interpreter::method_handle : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break; case Interpreter::java_lang_math_sin : break; case Interpreter::java_lang_math_cos : break; case Interpreter::java_lang_math_tan : break; diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/jniFastGetField_sparc.cpp --- a/src/cpu/sparc/vm/jniFastGetField_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/jniFastGetField_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2004-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2004-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -57,10 +57,10 @@ Label label1, label2; - address cnt_addr = SafepointSynchronize::safepoint_counter_addr(); - Address ca(O3, cnt_addr); - __ sethi (ca); - __ ld (ca, G4); + AddressLiteral cnt_addrlit(SafepointSynchronize::safepoint_counter_addr()); + __ sethi (cnt_addrlit, O3); + Address cnt_addr(O3, cnt_addrlit.low10()); + __ ld (cnt_addr, G4); __ andcc (G4, 1, G0); __ br (Assembler::notZero, false, Assembler::pn, label1); __ delayed()->srl (O2, 2, O4); @@ -77,7 +77,7 @@ default: ShouldNotReachHere(); } - __ ld (ca, O5); + __ ld (cnt_addr, O5); __ cmp (O5, G4); __ br (Assembler::notEqual, false, Assembler::pn, label2); __ delayed()->mov (O7, G1); @@ -136,10 +136,10 @@ Label label1, label2; - address cnt_addr = SafepointSynchronize::safepoint_counter_addr(); - Address ca(G3, cnt_addr); - __ sethi (ca); - __ ld (ca, G4); + AddressLiteral cnt_addrlit(SafepointSynchronize::safepoint_counter_addr()); + __ sethi (cnt_addrlit, G3); + Address cnt_addr(G3, cnt_addrlit.low10()); + __ ld (cnt_addr, G4); __ andcc (G4, 1, G0); __ br (Assembler::notZero, false, Assembler::pn, label1); __ delayed()->srl (O2, 2, O4); @@ -159,7 +159,7 @@ __ ldx (O5, 0, O3); #endif - __ ld (ca, G1); + __ ld (cnt_addr, G1); __ cmp (G1, G4); __ br (Assembler::notEqual, false, Assembler::pn, label2); __ delayed()->mov (O7, G1); @@ -208,10 +208,10 @@ Label label1, label2; - address cnt_addr = SafepointSynchronize::safepoint_counter_addr(); - Address ca(O3, cnt_addr); - __ sethi (ca); - __ ld (ca, G4); + AddressLiteral cnt_addrlit(SafepointSynchronize::safepoint_counter_addr()); + __ sethi (cnt_addrlit, O3); + Address cnt_addr(O3, cnt_addrlit.low10()); + __ ld (cnt_addr, G4); __ andcc (G4, 1, G0); __ br (Assembler::notZero, false, Assembler::pn, label1); __ delayed()->srl (O2, 2, O4); @@ -225,7 +225,7 @@ default: ShouldNotReachHere(); } - __ ld (ca, O5); + __ ld (cnt_addr, O5); __ cmp (O5, G4); __ br (Assembler::notEqual, false, 
Assembler::pn, label2); __ delayed()->mov (O7, G1); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/methodHandles_sparc.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -0,0 +1,70 @@ +/* + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +#include "incls/_precompiled.incl" +#include "incls/_methodHandles_sparc.cpp.incl" + +#define __ _masm-> + +address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm, + address interpreted_entry) { + __ align(wordSize); + address target = __ pc() + sizeof(Data); + while (__ pc() < target) { + __ nop(); + __ align(wordSize); + } + + MethodHandleEntry* me = (MethodHandleEntry*) __ pc(); + me->set_end_address(__ pc()); // set a temporary end_address + me->set_from_interpreted_entry(interpreted_entry); + me->set_type_checking_entry(NULL); + + return (address) me; +} + +MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm, + address start_addr) { + MethodHandleEntry* me = (MethodHandleEntry*) start_addr; + assert(me->end_address() == start_addr, "valid ME"); + + // Fill in the real end_address: + __ align(wordSize); + me->set_end_address(__ pc()); + + return me; +} + + +// Code generation +address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) { + ShouldNotReachHere(); //NYI, 6815692 + return NULL; +} + +// Generate an "entry" field for a method handle. +// This determines how the method handle will respond to calls. +void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) { + ShouldNotReachHere(); //NYI, 6815692 +} diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/nativeInst_sparc.cpp --- a/src/cpu/sparc/vm/nativeInst_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/nativeInst_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -38,8 +38,7 @@ destreg = inv_rd(*(unsigned int *)instaddr); // Generate a the new sequence - Address dest( destreg, (address)x ); - _masm->sethi( dest, true ); + _masm->patchable_sethi(x, destreg); ICache::invalidate_range(instaddr, 7 * BytesPerInstWord); } @@ -227,8 +226,8 @@ CodeBuffer buf(addr_at(0), instruction_size + 1); MacroAssembler* _masm = new MacroAssembler(&buf); // Generate the new sequence - Address(O7, dest); - _masm->jumpl_to(dest, O7); + AddressLiteral(dest); + _masm->jumpl_to(dest, O7, O7); ICache::invalidate_range(addr_at(0), instruction_size ); #endif } @@ -361,10 +360,12 @@ VM_Version::allow_all(); - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); - a->add(I3, low10(0xaaaabbbb), I3); - a->sethi(0xccccdddd, O2, true, RelocationHolder::none); - a->add(O2, low10(0xccccdddd), O2); + AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type); + a->sethi(al1, I3); + a->add(I3, al1.low10(), I3); + AddressLiteral al2(0xccccdddd, relocInfo::external_word_type); + a->sethi(al2, O2); + a->add(O2, al2.low10(), O2); nm = nativeMovConstReg_at( cb.code_begin() ); nm->print(); @@ -468,12 +469,14 @@ VM_Version::allow_all(); - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); + AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type); + a->sethi(al1, I3); a->nop(); - a->add(I3, low10(0xaaaabbbb), I3); - a->sethi(0xccccdddd, O2, true, RelocationHolder::none); + a->add(I3, al1.low10(), I3); + AddressLiteral al2(0xccccdddd, relocInfo::external_word_type); + a->sethi(al2, O2); a->nop(); - a->add(O2, low10(0xccccdddd), O2); + a->add(O2, al2.low10(), O2); nm = nativeMovConstRegPatching_at( cb.code_begin() ); nm->print(); @@ -562,51 +565,53 @@ VM_Version::allow_all(); - a->ldsw( G5, low10(0xffffffff), G4 ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3); + AddressLiteral al1(0xffffffff, relocInfo::external_word_type); + 
AddressLiteral al2(0xaaaabbbb, relocInfo::external_word_type); + a->ldsw( G5, al1.low10(), G4 ); idx++; + a->sethi(al2, I3); a->add(I3, al2.low10(), I3); a->ldsw( G5, I3, G4 ); idx++; - a->ldsb( G5, low10(0xffffffff), G4 ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3); + a->ldsb( G5, al1.low10(), G4 ); idx++; + a->sethi(al2, I3); a->add(I3, al2.low10(), I3); a->ldsb( G5, I3, G4 ); idx++; - a->ldsh( G5, low10(0xffffffff), G4 ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3); + a->ldsh( G5, al1.low10(), G4 ); idx++; + a->sethi(al2, I3); a->add(I3, al2.low10(), I3); a->ldsh( G5, I3, G4 ); idx++; - a->lduw( G5, low10(0xffffffff), G4 ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3); + a->lduw( G5, al1.low10(), G4 ); idx++; + a->sethi(al2, I3); a->add(I3, al2.low10(), I3); a->lduw( G5, I3, G4 ); idx++; - a->ldub( G5, low10(0xffffffff), G4 ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3); + a->ldub( G5, al1.low10(), G4 ); idx++; + a->sethi(al2, I3); a->add(I3, al2.low10(), I3); a->ldub( G5, I3, G4 ); idx++; - a->lduh( G5, low10(0xffffffff), G4 ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3); + a->lduh( G5, al1.low10(), G4 ); idx++; + a->sethi(al2, I3); a->add(I3, al2.low10(), I3); a->lduh( G5, I3, G4 ); idx++; - a->ldx( G5, low10(0xffffffff), G4 ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3); + a->ldx( G5, al1.low10(), G4 ); idx++; + a->sethi(al2, I3); a->add(I3, al2.low10(), I3); a->ldx( G5, I3, G4 ); idx++; - a->ldd( G5, low10(0xffffffff), G4 ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3); + a->ldd( G5, al1.low10(), G4 ); idx++; + a->sethi(al2, I3); a->add(I3, al2.low10(), I3); a->ldd( G5, I3, G4 ); 
idx++; a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3); + a->sethi(al2, I3); a->add(I3, al2.low10(), I3); a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++; - a->stw( G5, G4, low10(0xffffffff) ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3); + a->stw( G5, G4, al1.low10() ); idx++; + a->sethi(al2, I3); a->add(I3, al2.low10(), I3); a->stw( G5, G4, I3 ); idx++; - a->stb( G5, G4, low10(0xffffffff) ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3); + a->stb( G5, G4, al1.low10() ); idx++; + a->sethi(al2, I3); a->add(I3, al2.low10(), I3); a->stb( G5, G4, I3 ); idx++; - a->sth( G5, G4, low10(0xffffffff) ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3); + a->sth( G5, G4, al1.low10() ); idx++; + a->sethi(al2, I3); a->add(I3, al2.low10(), I3); a->sth( G5, G4, I3 ); idx++; - a->stx( G5, G4, low10(0xffffffff) ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3); + a->stx( G5, G4, al1.low10() ); idx++; + a->sethi(al2, I3); a->add(I3, al2.low10(), I3); a->stx( G5, G4, I3 ); idx++; - a->std( G5, G4, low10(0xffffffff) ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3); + a->std( G5, G4, al1.low10() ); idx++; + a->sethi(al2, I3); a->add(I3, al2.low10(), I3); a->std( G5, G4, I3 ); idx++; a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3); + a->sethi(al2, I3); a->add(I3, al2.low10(), I3); a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++; nm = nativeMovRegMem_at( cb.code_begin() ); @@ -705,51 +710,52 @@ VM_Version::allow_all(); - a->ldsw( G5, low10(0xffffffff), G4 ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); 
a->add(I3, low10(0xaaaabbbb), I3); + AddressLiteral al(0xffffffff, relocInfo::external_word_type); + a->ldsw( G5, al.low10(), G4); idx++; + a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); a->ldsw( G5, I3, G4 ); idx++; - a->ldsb( G5, low10(0xffffffff), G4 ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3); + a->ldsb( G5, al.low10(), G4); idx++; + a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); a->ldsb( G5, I3, G4 ); idx++; - a->ldsh( G5, low10(0xffffffff), G4 ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3); + a->ldsh( G5, al.low10(), G4); idx++; + a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); a->ldsh( G5, I3, G4 ); idx++; - a->lduw( G5, low10(0xffffffff), G4 ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3); + a->lduw( G5, al.low10(), G4); idx++; + a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); a->lduw( G5, I3, G4 ); idx++; - a->ldub( G5, low10(0xffffffff), G4 ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3); + a->ldub( G5, al.low10(), G4); idx++; + a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); a->ldub( G5, I3, G4 ); idx++; - a->lduh( G5, low10(0xffffffff), G4 ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3); + a->lduh( G5, al.low10(), G4); idx++; + a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); a->lduh( G5, I3, G4 ); idx++; - a->ldx( G5, low10(0xffffffff), G4 ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3); - a->ldx( G5, I3, G4 ); idx++; - a->ldd( G5, low10(0xffffffff), G4 ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3); - a->ldd( G5, I3, G4 ); idx++; - a->ldf( 
FloatRegisterImpl::D, O2, -1, F14 ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3); - a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++; + a->ldx( G5, al.low10(), G4); idx++; + a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); + a->ldx( G5, I3, G4 ); idx++; + a->ldd( G5, al.low10(), G4); idx++; + a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); + a->ldd( G5, I3, G4 ); idx++; + a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++; + a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); + a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++; - a->stw( G5, G4, low10(0xffffffff) ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3); + a->stw( G5, G4, al.low10()); idx++; + a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); a->stw( G5, G4, I3 ); idx++; - a->stb( G5, G4, low10(0xffffffff) ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3); + a->stb( G5, G4, al.low10()); idx++; + a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); a->stb( G5, G4, I3 ); idx++; - a->sth( G5, G4, low10(0xffffffff) ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3); + a->sth( G5, G4, al.low10()); idx++; + a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); a->sth( G5, G4, I3 ); idx++; - a->stx( G5, G4, low10(0xffffffff) ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3); + a->stx( G5, G4, al.low10()); idx++; + a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); a->stx( G5, G4, I3 ); idx++; - a->std( G5, G4, low10(0xffffffff) ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3); + a->std( G5, G4, al.low10()); idx++; + a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); a->std( G5, G4, I3 ); idx++; a->stf( 
FloatRegisterImpl::S, F18, O2, -1 ); idx++; - a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3); + a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++; nm = nativeMovRegMemPatching_at( cb.code_begin() ); @@ -833,11 +839,12 @@ VM_Version::allow_all(); - a->sethi(0x7fffbbbb, I3, true, RelocationHolder::none); - a->jmpl(I3, low10(0x7fffbbbb), G0, RelocationHolder::none); + AddressLiteral al(0x7fffbbbb, relocInfo::external_word_type); + a->sethi(al, I3); + a->jmpl(I3, al.low10(), G0, RelocationHolder::none); a->delayed()->nop(); - a->sethi(0x7fffbbbb, I3, true, RelocationHolder::none); - a->jmpl(I3, low10(0x7fffbbbb), L3, RelocationHolder::none); + a->sethi(al, I3); + a->jmpl(I3, al.low10(), L3, RelocationHolder::none); a->delayed()->nop(); nj = nativeJump_at( cb.code_begin() ); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/register_definitions_sparc.cpp --- a/src/cpu/sparc/vm/register_definitions_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/register_definitions_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2002-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -142,6 +142,8 @@ REGISTER_DEFINITION(Register, G3_scratch); REGISTER_DEFINITION(Register, G4_scratch); REGISTER_DEFINITION(Register, Gtemp); +REGISTER_DEFINITION(Register, G5_method_type); +REGISTER_DEFINITION(Register, G3_method_handle); REGISTER_DEFINITION(Register, Lentry_args); #ifdef CC_INTERP diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/relocInfo_sparc.cpp --- a/src/cpu/sparc/vm/relocInfo_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/relocInfo_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -99,13 +99,6 @@ break; } ip->set_data64_sethi( ip->addr_at(0), (intptr_t)x ); -#ifdef COMPILER2 - // [RGV] Someone must have missed putting in a reloc entry for the - // add in compiler2. - inst2 = ip->long_at( NativeMovConstReg::add_offset ); - guarantee(Assembler::inv_op(inst2)==Assembler::arith_op, "arith op"); - ip->set_long_at(NativeMovConstReg::add_offset,ip->set_data32_simm13( inst2, (intptr_t)x+o)); -#endif #else guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi"); inst &= ~Assembler::hi22( -1); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/runtime_sparc.cpp --- a/src/cpu/sparc/vm/runtime_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/runtime_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -74,8 +74,8 @@ int start = __ offset(); __ verify_thread(); - __ st_ptr(Oexception, Address(G2_thread, 0, in_bytes(JavaThread::exception_oop_offset()))); - __ st_ptr(Oissuing_pc, Address(G2_thread, 0, in_bytes(JavaThread::exception_pc_offset()))); + __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset()); + __ st_ptr(Oissuing_pc, G2_thread, JavaThread::exception_pc_offset()); // This call does all the hard work. It checks if an exception catch // exists in the method. @@ -120,19 +120,19 @@ // Since this may be the deopt blob we must set O7 to look like we returned // from the original pc that threw the exception - __ ld_ptr(Address(G2_thread, 0, in_bytes(JavaThread::exception_pc_offset())), O7); + __ ld_ptr(G2_thread, JavaThread::exception_pc_offset(), O7); __ sub(O7, frame::pc_return_offset, O7); assert(Assembler::is_simm13(in_bytes(JavaThread::exception_oop_offset())), "exception offset overflows simm13, following ld instruction cannot be in delay slot"); - __ ld_ptr(Address(G2_thread, 0, in_bytes(JavaThread::exception_oop_offset())), Oexception); // O0 + __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception); // O0 #ifdef ASSERT - __ st_ptr(G0, Address(G2_thread, 0, in_bytes(JavaThread::exception_handler_pc_offset()))); - __ st_ptr(G0, Address(G2_thread, 0, in_bytes(JavaThread::exception_pc_offset()))); + __ st_ptr(G0, G2_thread, JavaThread::exception_handler_pc_offset()); + __ st_ptr(G0, G2_thread, JavaThread::exception_pc_offset()); #endif __ JMP(G3_scratch, 0); // Clear the exception oop so GC no longer processes it as a root. 
- __ delayed()->st_ptr(G0, Address(G2_thread, 0, in_bytes(JavaThread::exception_oop_offset()))); + __ delayed()->st_ptr(G0, G2_thread, JavaThread::exception_oop_offset()); // ------------- // make sure all code is generated diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/sharedRuntime_sparc.cpp --- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -625,9 +625,9 @@ __ mov(I7, O1); // VM needs caller's callsite // Must be a leaf call... // can be very far once the blob has been relocated - Address dest(O7, CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)); + AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)); __ relocate(relocInfo::runtime_call_type); - __ jumpl_to(dest, O7); + __ jumpl_to(dest, O7, O7); __ delayed()->mov(G2_thread, L7_thread_cache); __ mov(L7_thread_cache, G2_thread); __ mov(L1, G1); @@ -937,12 +937,12 @@ // Inputs: // G2_thread - TLS // G5_method - Method oop - // O0 - Flag telling us to restore SP from O5 - // O4_args - Pointer to interpreter's args - // O5 - Caller's saved SP, to be restored if needed + // G4 (Gargs) - Pointer to interpreter's args + // O0..O4 - free for scratch + // O5_savedSP - Caller's saved SP, to be restored if needed // O6 - Current SP! 
// O7 - Valid return address - // L0-L7, I0-I7 - Caller's temps (no frame pushed yet) + // L0-L7, I0-I7 - Caller's temps (no frame pushed yet) // Outputs: // G2_thread - TLS @@ -954,7 +954,7 @@ // F0-F7 - more outgoing args - // O4 is about to get loaded up with compiled callee's args + // Gargs is the incoming argument base, and also an outgoing argument. __ sub(Gargs, BytesPerWord, Gargs); #ifdef ASSERT @@ -1152,7 +1152,7 @@ #ifndef _LP64 if (g3_crushed) { // Rats load was wasted, at least it is in cache... - __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3); + __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3); } #endif /* _LP64 */ @@ -1165,7 +1165,7 @@ // we try and find the callee by normal means a safepoint // is possible. So we stash the desired callee in the thread // and the vm will find there should this case occur. - Address callee_target_addr(G2_thread, 0, in_bytes(JavaThread::callee_target_offset())); + Address callee_target_addr(G2_thread, JavaThread::callee_target_offset()); __ st_ptr(G5_method, callee_target_addr); if (StressNonEntrant) { @@ -1218,7 +1218,7 @@ Register R_temp = G1; // another scratch register #endif - Address ic_miss(G3_scratch, SharedRuntime::get_ic_miss_stub()); + AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub()); __ verify_oop(O0); __ verify_oop(G5_method); @@ -1240,7 +1240,7 @@ Label ok, ok2; __ brx(Assembler::equal, false, Assembler::pt, ok); __ delayed()->ld_ptr(G5_method, compiledICHolderOopDesc::holder_method_offset(), G5_method); - __ jump_to(ic_miss); + __ jump_to(ic_miss, G3_scratch); __ delayed()->nop(); __ bind(ok); @@ -1251,7 +1251,7 @@ __ bind(ok2); __ br_null(G3_scratch, false, __ pt, skip_fixup); __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch); - __ jump_to(ic_miss); + __ jump_to(ic_miss, G3_scratch); __ delayed()->nop(); } @@ -1444,8 +1444,8 @@ // without calling into the VM: it's the empty function. 
Just pop this // frame and then jump to forward_exception_entry; O7 will contain the // native caller's return PC. - Address exception_entry(G3_scratch, StubRoutines::forward_exception_entry()); - __ jump_to(exception_entry); + AddressLiteral exception_entry(StubRoutines::forward_exception_entry()); + __ jump_to(exception_entry, G3_scratch); __ delayed()->restore(); // Pop this frame off. __ bind(L); } @@ -1822,14 +1822,14 @@ { Label L; const Register temp_reg = G3_scratch; - Address ic_miss(temp_reg, SharedRuntime::get_ic_miss_stub()); + AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub()); __ verify_oop(O0); __ load_klass(O0, temp_reg); __ cmp(temp_reg, G5_inline_cache_reg); __ brx(Assembler::equal, true, Assembler::pt, L); __ delayed()->nop(); - __ jump_to(ic_miss, 0); + __ jump_to(ic_miss, temp_reg); __ delayed()->nop(); __ align(CodeEntryAlignment); __ bind(L); @@ -2261,21 +2261,19 @@ // Transition from _thread_in_Java to _thread_in_native. __ set(_thread_in_native, G3_scratch); - __ st(G3_scratch, G2_thread, in_bytes(JavaThread::thread_state_offset())); + __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset()); // We flushed the windows ages ago now mark them as flushed // mark windows as flushed __ set(JavaFrameAnchor::flushed, G3_scratch); - Address flags(G2_thread, - 0, - in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset())); + Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset()); #ifdef _LP64 - Address dest(O7, method->native_function()); + AddressLiteral dest(method->native_function()); __ relocate(relocInfo::runtime_call_type); - __ jumpl_to(dest, O7); + __ jumpl_to(dest, O7, O7); #else __ call(method->native_function(), relocInfo::runtime_call_type); #endif @@ -2316,7 +2314,7 @@ // Block, if necessary, before resuming in _thread_in_Java state. // In order for GC to work, don't clear the last_Java_sp until after blocking. 
{ Label no_block; - Address sync_state(G3_scratch, SafepointSynchronize::address_of_state()); + AddressLiteral sync_state(SafepointSynchronize::address_of_state()); // Switch thread to "native transition" state before reading the synchronization state. // This additional state is necessary because reading and testing the synchronization @@ -2326,7 +2324,7 @@ // Thread A is resumed to finish this native method, but doesn't block here since it // didn't see any synchronization is progress, and escapes. __ set(_thread_in_native_trans, G3_scratch); - __ st(G3_scratch, G2_thread, in_bytes(JavaThread::thread_state_offset())); + __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset()); if(os::is_MP()) { if (UseMembar) { // Force this write out before the read below @@ -2343,10 +2341,9 @@ __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized); Label L; - Address suspend_state(G2_thread, 0, in_bytes(JavaThread::suspend_flags_offset())); + Address suspend_state(G2_thread, JavaThread::suspend_flags_offset()); __ br(Assembler::notEqual, false, Assembler::pn, L); - __ delayed()-> - ld(suspend_state, G3_scratch); + __ delayed()->ld(suspend_state, G3_scratch); __ cmp(G3_scratch, 0); __ br(Assembler::equal, false, Assembler::pt, no_block); __ delayed()->nop(); @@ -2372,11 +2369,11 @@ __ set(_thread_in_Java, G3_scratch); - __ st(G3_scratch, G2_thread, in_bytes(JavaThread::thread_state_offset())); + __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset()); Label no_reguard; - __ ld(G2_thread, in_bytes(JavaThread::stack_guard_state_offset()), G3_scratch); + __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch); __ cmp(G3_scratch, JavaThread::stack_guard_yellow_disabled); __ br(Assembler::notEqual, false, Assembler::pt, no_reguard); __ delayed()->nop(); @@ -2684,14 +2681,14 @@ { Label L; const Register temp_reg = G3_scratch; - Address ic_miss(temp_reg, SharedRuntime::get_ic_miss_stub()); + AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub()); __ 
verify_oop(O0); __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg); __ cmp(temp_reg, G5_inline_cache_reg); __ brx(Assembler::equal, true, Assembler::pt, L); __ delayed()->nop(); - __ jump_to(ic_miss, 0); + __ jump_to(ic_miss, temp_reg); __ delayed()->nop(); __ align(CodeEntryAlignment); __ bind(L); @@ -3155,15 +3152,13 @@ // Do this after the caller's return address is on top of stack if (UseStackBanging) { // Get total frame size for interpreted frames - __ ld(Address(O2UnrollBlock, 0, - Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()), O4); + __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4); __ bang_stack_size(O4, O3, G3_scratch); } - __ ld(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()), O4array_size); - __ ld_ptr(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()), G3pcs); - - __ ld_ptr(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()), O3array); + __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size); + __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs); + __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array); // Adjust old interpreter frame to make space for new frame's extra java locals // @@ -3176,7 +3171,7 @@ // for each frame we create and keep up the illusion every where. 
// - __ ld(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()), O7); + __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7); __ mov(SP, O5_savedSP); // remember initial sender's original sp before adjustment __ sub(SP, O7, SP); @@ -3225,9 +3220,9 @@ Register I5exception_tmp = I5; Register G4exception_tmp = G4_scratch; int frame_size_words; - Address saved_Freturn0_addr(FP, 0, -sizeof(double) + STACK_BIAS); + Address saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS); #if !defined(_LP64) && defined(COMPILER2) - Address saved_Greturn1_addr(FP, 0, -sizeof(double) -sizeof(jlong) + STACK_BIAS); + Address saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS); #endif Label cont; @@ -3289,7 +3284,7 @@ // save exception oop in JavaThread and fall through into the // exception_in_tls case since they are handled in same way except // for where the pending exception is kept. - __ st_ptr(Oexception, G2_thread, in_bytes(JavaThread::exception_oop_offset())); + __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset()); // // Vanilla deoptimization with an exception pending in exception_oop @@ -3306,7 +3301,7 @@ { // verify that there is really an exception oop in exception_oop Label has_exception; - __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception); + __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception); __ br_notnull(Oexception, false, Assembler::pt, has_exception); __ delayed()-> nop(); __ stop("no exception in thread"); @@ -3314,7 +3309,7 @@ // verify that there is no pending exception Label no_pending_exception; - Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset())); + Address exception_addr(G2_thread, Thread::pending_exception_offset()); __ ld_ptr(exception_addr, Oexception); __ br_null(Oexception, false, Assembler::pt, no_pending_exception); __ delayed()->nop(); diff -r 5d4dd2f5f6a1 -r 
7a485bc4da16 src/cpu/sparc/vm/sparc.ad --- a/src/cpu/sparc/vm/sparc.ad Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/sparc.ad Thu May 07 10:30:17 2009 -0700 @@ -980,8 +980,8 @@ // This code sequence is relocatable to any address, even on LP64. if ( force_far_call ) { __ relocate(rtype); - Address dest(O7, (address)entry_point); - __ jumpl_to(dest, O7); + AddressLiteral dest(entry_point); + __ jumpl_to(dest, O7, O7); } else #endif @@ -1031,17 +1031,6 @@ void emit_lo(CodeBuffer &cbuf, int val) { } void emit_hi(CodeBuffer &cbuf, int val) { } -void emit_ptr(CodeBuffer &cbuf, intptr_t val, Register reg, bool ForceRelocatable) { - MacroAssembler _masm(&cbuf); - if (ForceRelocatable) { - Address addr(reg, (address)val); - __ sethi(addr, ForceRelocatable); - __ add(addr, reg); - } else { - __ set(val, reg); - } -} - //============================================================================= @@ -1149,8 +1138,8 @@ // If this does safepoint polling, then do it here if( do_polling() && ra_->C->is_method_compilation() ) { - Address polling_page(L0, (address)os::get_polling_page()); - __ sethi(polling_page, false); + AddressLiteral polling_page(os::get_polling_page()); + __ sethi(polling_page, L0); __ relocate(relocInfo::poll_return_type); __ ld_ptr( L0, 0, G0 ); } @@ -1576,8 +1565,8 @@ __ set_oop(NULL, reg_to_register_object(Matcher::inline_cache_reg_encode())); __ set_inst_mark(); - Address a(G3, (address)-1); - __ JUMP(a, 0); + AddressLiteral addrlit(-1); + __ JUMP(addrlit, G3, 0); __ delayed()->nop(); @@ -1662,7 +1651,7 @@ // Emit exception handler code. 
int emit_exception_handler(CodeBuffer& cbuf) { Register temp_reg = G3; - Address exception_blob(temp_reg, OptoRuntime::exception_blob()->instructions_begin()); + AddressLiteral exception_blob(OptoRuntime::exception_blob()->instructions_begin()); MacroAssembler _masm(&cbuf); address base = @@ -1671,7 +1660,7 @@ int offset = __ offset(); - __ JUMP(exception_blob, 0); // sethi;jmp + __ JUMP(exception_blob, temp_reg, 0); // sethi;jmp __ delayed()->nop(); assert(__ offset() - offset <= (int) size_exception_handler(), "overflow"); @@ -1685,7 +1674,7 @@ // Can't use any of the current frame's registers as we may have deopted // at a poll and everything (including G3) can be live. Register temp_reg = L0; - Address deopt_blob(temp_reg, SharedRuntime::deopt_blob()->unpack()); + AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack()); MacroAssembler _masm(&cbuf); address base = @@ -1694,7 +1683,7 @@ int offset = __ offset(); __ save_frame(0); - __ JUMP(deopt_blob, 0); // sethi;jmp + __ JUMP(deopt_blob, temp_reg, 0); // sethi;jmp __ delayed()->restore(); assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow"); @@ -2261,9 +2250,8 @@ address table_base = __ address_table_constant(_index2label); RelocationHolder rspec = internal_word_Relocation::spec(table_base); - // Load table address - Address the_pc(table_reg, table_base, rspec); - __ load_address(the_pc); + // Move table address into a register. + __ set(table_base, table_reg, rspec); // Jump to base address + switch value __ ld_ptr(table_reg, switch_reg, table_reg); @@ -2402,13 +2390,13 @@ // The 64 bit pointer is stored in the generated code stream enc_class SetPtr( immP src, iRegP rd ) %{ Register dest = reg_to_register_object($rd$$reg); + MacroAssembler _masm(&cbuf); // [RGV] This next line should be generated from ADLC if ( _opnds[1]->constant_is_oop() ) { intptr_t val = $src$$constant; - MacroAssembler _masm(&cbuf); __ set_oop_constant((jobject)val, dest); } else { // non-oop pointers, e.g. 
card mark base, heap top - emit_ptr(cbuf, $src$$constant, dest, /*ForceRelocatable=*/ false); + __ set($src$$constant, dest); } %} @@ -2789,46 +2777,6 @@ __ set64( $src$$constant, dest, temp ); %} - enc_class LdImmF(immF src, regF dst, o7RegP tmp) %{ // Load Immediate - address float_address = MacroAssembler(&cbuf).float_constant($src$$constant); - RelocationHolder rspec = internal_word_Relocation::spec(float_address); -#ifdef _LP64 - Register tmp_reg = reg_to_register_object($tmp$$reg); - cbuf.relocate(cbuf.code_end(), rspec, 0); - emit_ptr(cbuf, (intptr_t)float_address, tmp_reg, /*ForceRelocatable=*/ true); - emit3_simm10( cbuf, Assembler::ldst_op, $dst$$reg, Assembler::ldf_op3, $tmp$$reg, 0 ); -#else // _LP64 - uint *code; - int tmp_reg = $tmp$$reg; - - cbuf.relocate(cbuf.code_end(), rspec, 0); - emit2_22( cbuf, Assembler::branch_op, tmp_reg, Assembler::sethi_op2, (intptr_t) float_address ); - - cbuf.relocate(cbuf.code_end(), rspec, 0); - emit3_simm10( cbuf, Assembler::ldst_op, $dst$$reg, Assembler::ldf_op3, tmp_reg, (intptr_t) float_address ); -#endif // _LP64 - %} - - enc_class LdImmD(immD src, regD dst, o7RegP tmp) %{ // Load Immediate - address double_address = MacroAssembler(&cbuf).double_constant($src$$constant); - RelocationHolder rspec = internal_word_Relocation::spec(double_address); -#ifdef _LP64 - Register tmp_reg = reg_to_register_object($tmp$$reg); - cbuf.relocate(cbuf.code_end(), rspec, 0); - emit_ptr(cbuf, (intptr_t)double_address, tmp_reg, /*ForceRelocatable=*/ true); - emit3_simm10( cbuf, Assembler::ldst_op, $dst$$reg, Assembler::lddf_op3, $tmp$$reg, 0 ); -#else // _LP64 - uint *code; - int tmp_reg = $tmp$$reg; - - cbuf.relocate(cbuf.code_end(), rspec, 0); - emit2_22( cbuf, Assembler::branch_op, tmp_reg, Assembler::sethi_op2, (intptr_t) double_address ); - - cbuf.relocate(cbuf.code_end(), rspec, 0); - emit3_simm10( cbuf, Assembler::ldst_op, $dst$$reg, Assembler::lddf_op3, tmp_reg, (intptr_t) double_address ); -#endif // _LP64 - %} - enc_class 
LdReplImmI(immI src, regD dst, o7RegP tmp, int count, int width) %{ // Load a constant replicated "count" times with width "width" int bit_width = $width$$constant * 8; @@ -2840,28 +2788,15 @@ val |= elt_val; } jdouble dval = *(jdouble*)&val; // coerce to double type - address double_address = MacroAssembler(&cbuf).double_constant(dval); + MacroAssembler _masm(&cbuf); + address double_address = __ double_constant(dval); RelocationHolder rspec = internal_word_Relocation::spec(double_address); -#ifdef _LP64 - Register tmp_reg = reg_to_register_object($tmp$$reg); - cbuf.relocate(cbuf.code_end(), rspec, 0); - emit_ptr(cbuf, (intptr_t)double_address, tmp_reg, /*ForceRelocatable=*/ true); - emit3_simm10( cbuf, Assembler::ldst_op, $dst$$reg, Assembler::lddf_op3, $tmp$$reg, 0 ); -#else // _LP64 - uint *code; - int tmp_reg = $tmp$$reg; - - cbuf.relocate(cbuf.code_end(), rspec, 0); - emit2_22( cbuf, Assembler::branch_op, tmp_reg, Assembler::sethi_op2, (intptr_t) double_address ); - - cbuf.relocate(cbuf.code_end(), rspec, 0); - emit3_simm10( cbuf, Assembler::ldst_op, $dst$$reg, Assembler::lddf_op3, tmp_reg, (intptr_t) double_address ); -#endif // _LP64 - %} - - - enc_class ShouldNotEncodeThis ( ) %{ - ShouldNotCallThis(); + AddressLiteral addrlit(double_address, rspec); + + __ sethi(addrlit, $tmp$$Register); + // XXX This is a quick fix for 6833573. 
+ //__ ldf(FloatRegisterImpl::D, $tmp$$Register, addrlit.low10(), $dst$$FloatRegister, rspec); + __ ldf(FloatRegisterImpl::D, $tmp$$Register, addrlit.low10(), as_DoubleFloatRegister($dst$$reg), rspec); %} // Compiler ensures base is doubleword aligned and cnt is count of doublewords @@ -2901,19 +2836,19 @@ int count_offset = java_lang_String:: count_offset_in_bytes(); // load str1 (jchar*) base address into tmp1_reg - __ load_heap_oop(Address(str1_reg, 0, value_offset), tmp1_reg); - __ ld(Address(str1_reg, 0, offset_offset), result_reg); + __ load_heap_oop(str1_reg, value_offset, tmp1_reg); + __ ld(str1_reg, offset_offset, result_reg); __ add(tmp1_reg, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1_reg); - __ ld(Address(str1_reg, 0, count_offset), str1_reg); // hoisted + __ ld(str1_reg, count_offset, str1_reg); // hoisted __ sll(result_reg, exact_log2(sizeof(jchar)), result_reg); - __ load_heap_oop(Address(str2_reg, 0, value_offset), tmp2_reg); // hoisted + __ load_heap_oop(str2_reg, value_offset, tmp2_reg); // hoisted __ add(result_reg, tmp1_reg, tmp1_reg); // load str2 (jchar*) base address into tmp2_reg - // __ ld_ptr(Address(str2_reg, 0, value_offset), tmp2_reg); // hoisted - __ ld(Address(str2_reg, 0, offset_offset), result_reg); + // __ ld_ptr(str2_reg, value_offset, tmp2_reg); // hoisted + __ ld(str2_reg, offset_offset, result_reg); __ add(tmp2_reg, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp2_reg); - __ ld(Address(str2_reg, 0, count_offset), str2_reg); // hoisted + __ ld(str2_reg, count_offset, str2_reg); // hoisted __ sll(result_reg, exact_log2(sizeof(jchar)), result_reg); __ subcc(str1_reg, str2_reg, O7); // hoisted __ add(result_reg, tmp2_reg, tmp2_reg); @@ -2922,8 +2857,8 @@ // difference of the string lengths (stack) // discard string base pointers, after loading up the lengths - // __ ld(Address(str1_reg, 0, count_offset), str1_reg); // hoisted - // __ ld(Address(str2_reg, 0, count_offset), str2_reg); // hoisted + // __ ld(str1_reg, 
count_offset, str1_reg); // hoisted + // __ ld(str2_reg, count_offset, str2_reg); // hoisted // See if the lengths are different, and calculate min in str1_reg. // Stash diff in O7 in case we need it for a tie-breaker. @@ -3020,19 +2955,19 @@ int count_offset = java_lang_String:: count_offset_in_bytes(); // load str1 (jchar*) base address into tmp1_reg - __ load_heap_oop(Address(str1_reg, 0, value_offset), tmp1_reg); - __ ld(Address(str1_reg, 0, offset_offset), result_reg); + __ load_heap_oop(Address(str1_reg, value_offset), tmp1_reg); + __ ld(Address(str1_reg, offset_offset), result_reg); __ add(tmp1_reg, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1_reg); - __ ld(Address(str1_reg, 0, count_offset), str1_reg); // hoisted + __ ld(Address(str1_reg, count_offset), str1_reg); // hoisted __ sll(result_reg, exact_log2(sizeof(jchar)), result_reg); - __ load_heap_oop(Address(str2_reg, 0, value_offset), tmp2_reg); // hoisted + __ load_heap_oop(Address(str2_reg, value_offset), tmp2_reg); // hoisted __ add(result_reg, tmp1_reg, tmp1_reg); // load str2 (jchar*) base address into tmp2_reg - // __ ld_ptr(Address(str2_reg, 0, value_offset), tmp2_reg); // hoisted - __ ld(Address(str2_reg, 0, offset_offset), result_reg); + // __ ld_ptr(Address(str2_reg, value_offset), tmp2_reg); // hoisted + __ ld(Address(str2_reg, offset_offset), result_reg); __ add(tmp2_reg, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp2_reg); - __ ld(Address(str2_reg, 0, count_offset), str2_reg); // hoisted + __ ld(Address(str2_reg, count_offset), str2_reg); // hoisted __ sll(result_reg, exact_log2(sizeof(jchar)), result_reg); __ cmp(str1_reg, str2_reg); // hoisted __ add(result_reg, tmp2_reg, tmp2_reg); @@ -3139,8 +3074,8 @@ __ delayed()->mov(G0, result_reg); // not equal //load the lengths of arrays - __ ld(Address(ary1_reg, 0, length_offset), tmp1_reg); - __ ld(Address(ary2_reg, 0, length_offset), tmp2_reg); + __ ld(Address(ary1_reg, length_offset), tmp1_reg); + __ ld(Address(ary2_reg, length_offset), 
tmp2_reg); // return false if the two arrays are not equal length __ cmp(tmp1_reg, tmp2_reg); @@ -3202,19 +3137,20 @@ enc_class enc_rethrow() %{ cbuf.set_inst_mark(); Register temp_reg = G3; - Address rethrow_stub(temp_reg, OptoRuntime::rethrow_stub()); + AddressLiteral rethrow_stub(OptoRuntime::rethrow_stub()); assert(temp_reg != reg_to_register_object(R_I0_num), "temp must not break oop_reg"); MacroAssembler _masm(&cbuf); #ifdef ASSERT __ save_frame(0); - Address last_rethrow_addr(L1, (address)&last_rethrow); - __ sethi(last_rethrow_addr); + AddressLiteral last_rethrow_addrlit(&last_rethrow); + __ sethi(last_rethrow_addrlit, L1); + Address addr(L1, last_rethrow_addrlit.low10()); __ get_pc(L2); __ inc(L2, 3 * BytesPerInstWord); // skip this & 2 more insns to point at jump_to - __ st_ptr(L2, last_rethrow_addr); + __ st_ptr(L2, addr); __ restore(); #endif - __ JUMP(rethrow_stub, 0); // sethi;jmp + __ JUMP(rethrow_stub, temp_reg, 0); // sethi;jmp __ delayed()->nop(); %} @@ -5493,8 +5429,9 @@ size(4); format %{ "LDSB $mem,$dst\t! byte" %} - opcode(Assembler::ldsb_op3); - ins_encode(simple_form3_mem_reg( mem, dst ) ); + ins_encode %{ + __ ldsb($mem$$Address, $dst$$Register); + %} ins_pipe(iload_mask_mem); %} @@ -5505,8 +5442,9 @@ size(4); format %{ "LDSB $mem,$dst\t! byte -> long" %} - opcode(Assembler::ldsb_op3); - ins_encode(simple_form3_mem_reg( mem, dst ) ); + ins_encode %{ + __ ldsb($mem$$Address, $dst$$Register); + %} ins_pipe(iload_mask_mem); %} @@ -5517,8 +5455,9 @@ size(4); format %{ "LDUB $mem,$dst\t! ubyte" %} - opcode(Assembler::ldub_op3); - ins_encode(simple_form3_mem_reg( mem, dst ) ); + ins_encode %{ + __ ldub($mem$$Address, $dst$$Register); + %} ins_pipe(iload_mask_mem); %} @@ -5529,8 +5468,9 @@ size(4); format %{ "LDUB $mem,$dst\t! 
ubyte -> long" %} - opcode(Assembler::ldub_op3); - ins_encode(simple_form3_mem_reg( mem, dst ) ); + ins_encode %{ + __ ldub($mem$$Address, $dst$$Register); + %} ins_pipe(iload_mask_mem); %} @@ -5541,8 +5481,9 @@ size(4); format %{ "LDSH $mem,$dst\t! short" %} - opcode(Assembler::ldsh_op3); - ins_encode(simple_form3_mem_reg( mem, dst ) ); + ins_encode %{ + __ ldsh($mem$$Address, $dst$$Register); + %} ins_pipe(iload_mask_mem); %} @@ -5553,8 +5494,9 @@ size(4); format %{ "LDSH $mem,$dst\t! short -> long" %} - opcode(Assembler::ldsh_op3); - ins_encode(simple_form3_mem_reg( mem, dst ) ); + ins_encode %{ + __ ldsh($mem$$Address, $dst$$Register); + %} ins_pipe(iload_mask_mem); %} @@ -5565,8 +5507,9 @@ size(4); format %{ "LDUH $mem,$dst\t! ushort/char" %} - opcode(Assembler::lduh_op3); - ins_encode(simple_form3_mem_reg( mem, dst ) ); + ins_encode %{ + __ lduh($mem$$Address, $dst$$Register); + %} ins_pipe(iload_mask_mem); %} @@ -5577,8 +5520,9 @@ size(4); format %{ "LDUH $mem,$dst\t! ushort/char -> long" %} - opcode(Assembler::lduh_op3); - ins_encode(simple_form3_mem_reg( mem, dst ) ); + ins_encode %{ + __ lduh($mem$$Address, $dst$$Register); + %} ins_pipe(iload_mask_mem); %} @@ -5589,8 +5533,9 @@ size(4); format %{ "LDUW $mem,$dst\t! int" %} - opcode(Assembler::lduw_op3); - ins_encode(simple_form3_mem_reg( mem, dst ) ); + ins_encode %{ + __ lduw($mem$$Address, $dst$$Register); + %} ins_pipe(iload_mem); %} @@ -5601,8 +5546,9 @@ size(4); format %{ "LDSW $mem,$dst\t! int -> long" %} - opcode(Assembler::ldsw_op3); - ins_encode(simple_form3_mem_reg( mem, dst ) ); + ins_encode %{ + __ ldsw($mem$$Address, $dst$$Register); + %} ins_pipe(iload_mem); %} @@ -5613,8 +5559,9 @@ size(4); format %{ "LDUW $mem,$dst\t! uint -> long" %} - opcode(Assembler::lduw_op3); - ins_encode(simple_form3_mem_reg( mem, dst ) ); + ins_encode %{ + __ lduw($mem$$Address, $dst$$Register); + %} ins_pipe(iload_mem); %} @@ -5625,8 +5572,9 @@ size(4); format %{ "LDX $mem,$dst\t! 
long" %} - opcode(Assembler::ldx_op3); - ins_encode(simple_form3_mem_reg( mem, dst ) ); + ins_encode %{ + __ ldx($mem$$Address, $dst$$Register); + %} ins_pipe(iload_mem); %} @@ -5721,31 +5669,29 @@ #ifndef _LP64 format %{ "LDUW $mem,$dst\t! ptr" %} - opcode(Assembler::lduw_op3, 0, REGP_OP); + ins_encode %{ + __ lduw($mem$$Address, $dst$$Register); + %} #else format %{ "LDX $mem,$dst\t! ptr" %} - opcode(Assembler::ldx_op3, 0, REGP_OP); + ins_encode %{ + __ ldx($mem$$Address, $dst$$Register); + %} #endif - ins_encode( form3_mem_reg( mem, dst ) ); ins_pipe(iload_mem); %} // Load Compressed Pointer instruct loadN(iRegN dst, memory mem) %{ - match(Set dst (LoadN mem)); - ins_cost(MEMORY_REF_COST); - size(4); - - format %{ "LDUW $mem,$dst\t! compressed ptr" %} - ins_encode %{ - Register index = $mem$$index$$Register; - if (index != G0) { - __ lduw($mem$$base$$Register, index, $dst$$Register); - } else { - __ lduw($mem$$base$$Register, $mem$$disp, $dst$$Register); - } - %} - ins_pipe(iload_mem); + match(Set dst (LoadN mem)); + ins_cost(MEMORY_REF_COST); + size(4); + + format %{ "LDUW $mem,$dst\t! compressed ptr" %} + ins_encode %{ + __ lduw($mem$$Address, $dst$$Register); + %} + ins_pipe(iload_mem); %} // Load Klass Pointer @@ -5756,12 +5702,15 @@ #ifndef _LP64 format %{ "LDUW $mem,$dst\t! klass ptr" %} - opcode(Assembler::lduw_op3, 0, REGP_OP); + ins_encode %{ + __ lduw($mem$$Address, $dst$$Register); + %} #else format %{ "LDX $mem,$dst\t! klass ptr" %} - opcode(Assembler::ldx_op3, 0, REGP_OP); + ins_encode %{ + __ ldx($mem$$Address, $dst$$Register); + %} #endif - ins_encode( form3_mem_reg( mem, dst ) ); ins_pipe(iload_mem); %} @@ -5772,16 +5721,8 @@ size(4); format %{ "LDUW $mem,$dst\t! 
compressed klass ptr" %} - ins_encode %{ - Register base = as_Register($mem$$base); - Register index = as_Register($mem$$index); - Register dst = $dst$$Register; - if (index != G0) { - __ lduw(base, index, dst); - } else { - __ lduw(base, $mem$$disp, dst); - } + __ lduw($mem$$Address, $dst$$Register); %} ins_pipe(iload_mem); %} @@ -5867,8 +5808,8 @@ ins_cost(DEFAULT_COST); format %{ "SET $src,$dst\t!ptr" %} ins_encode %{ - Address polling_page(reg_to_register_object($dst$$reg), (address)os::get_polling_page()); - __ sethi(polling_page, false ); + AddressLiteral polling_page(os::get_polling_page()); + __ sethi(polling_page, reg_to_register_object($dst$$reg)); %} ins_pipe(loadConP_poll); %} @@ -5927,14 +5868,21 @@ effect(KILL tmp); #ifdef _LP64 - size(36); + size(8*4); #else - size(8); + size(2*4); #endif format %{ "SETHI hi(&$src),$tmp\t!get float $src from table\n\t" "LDF [$tmp+lo(&$src)],$dst" %} - ins_encode( LdImmF(src, dst, tmp) ); + ins_encode %{ + address float_address = __ float_constant($src$$constant); + RelocationHolder rspec = internal_word_Relocation::spec(float_address); + AddressLiteral addrlit(float_address, rspec); + + __ sethi(addrlit, $tmp$$Register); + __ ldf(FloatRegisterImpl::S, $tmp$$Register, addrlit.low10(), $dst$$FloatRegister, rspec); + %} ins_pipe(loadConFD); %} @@ -5943,14 +5891,23 @@ effect(KILL tmp); #ifdef _LP64 - size(36); + size(8*4); #else - size(8); + size(2*4); #endif format %{ "SETHI hi(&$src),$tmp\t!get double $src from table\n\t" "LDDF [$tmp+lo(&$src)],$dst" %} - ins_encode( LdImmD(src, dst, tmp) ); + ins_encode %{ + address double_address = __ double_constant($src$$constant); + RelocationHolder rspec = internal_word_Relocation::spec(double_address); + AddressLiteral addrlit(double_address, rspec); + + __ sethi(addrlit, $tmp$$Register); + // XXX This is a quick fix for 6833573. 
+ //__ ldf(FloatRegisterImpl::D, $tmp$$Register, addrlit.low10(), $dst$$FloatRegister, rspec); + __ ldf(FloatRegisterImpl::D, $tmp$$Register, addrlit.low10(), as_DoubleFloatRegister($dst$$reg), rspec); + %} ins_pipe(loadConFD); %} diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/stubGenerator_sparc.cpp --- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -68,16 +68,9 @@ #ifdef PRODUCT #define inc_counter_np(a,b,c) (0) #else - void inc_counter_np_(int& counter, Register t1, Register t2) { - Address counter_addr(t2, (address) &counter); - __ sethi(counter_addr); - __ ld(counter_addr, t1); - __ inc(t1); - __ st(t1, counter_addr); - } #define inc_counter_np(counter, t1, t2) \ BLOCK_COMMENT("inc_counter " #counter); \ - inc_counter_np_(counter, t1, t2); + __ inc_counter(&counter, t1, t2); #endif //---------------------------------------------------------------------------------------------------- @@ -325,9 +318,9 @@ __ verify_thread(); const Register& temp_reg = Gtemp; - Address pending_exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset())); - Address exception_file_offset_addr(G2_thread, 0, in_bytes(Thread::exception_file_offset ())); - Address exception_line_offset_addr(G2_thread, 0, in_bytes(Thread::exception_line_offset ())); + Address pending_exception_addr (G2_thread, Thread::pending_exception_offset()); + Address exception_file_offset_addr(G2_thread, Thread::exception_file_offset ()); + Address exception_line_offset_addr(G2_thread, Thread::exception_line_offset ()); // set pending exception __ verify_oop(Oexception); @@ -340,8 +333,8 @@ // 
complete return to VM assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before"); - Address stub_ret(temp_reg, StubRoutines::_call_stub_return_address); - __ jump_to(stub_ret); + AddressLiteral stub_ret(StubRoutines::_call_stub_return_address); + __ jump_to(stub_ret, temp_reg); __ delayed()->nop(); return start; @@ -366,7 +359,7 @@ const Register& handler_reg = Gtemp; - Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset())); + Address exception_addr(G2_thread, Thread::pending_exception_offset()); #ifdef ASSERT // make sure that this code is only executed if there is a pending exception @@ -456,8 +449,7 @@ int frame_complete = __ offset(); if (restore_saved_exception_pc) { - Address saved_exception_pc(G2_thread, 0, in_bytes(JavaThread::saved_exception_pc_offset())); - __ ld_ptr(saved_exception_pc, I7); + __ ld_ptr(G2_thread, JavaThread::saved_exception_pc_offset(), I7); __ sub(I7, frame::pc_return_offset, I7); } @@ -481,7 +473,7 @@ #ifdef ASSERT Label L; - Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset())); + Address exception_addr(G2_thread, Thread::pending_exception_offset()); Register scratch_reg = Gtemp; __ ld_ptr(exception_addr, scratch_reg); __ br_notnull(scratch_reg, false, Assembler::pt, L); @@ -835,7 +827,7 @@ address start = __ pc(); const int preserve_register_words = (64 * 2); - Address preserve_addr(FP, 0, (-preserve_register_words * wordSize) + STACK_BIAS); + Address preserve_addr(FP, (-preserve_register_words * wordSize) + STACK_BIAS); Register Lthread = L7_thread_cache; int i; @@ -1106,21 +1098,19 @@ __ srl_ptr(addr, CardTableModRefBS::card_shift, addr); __ srl_ptr(count, CardTableModRefBS::card_shift, count); __ sub(count, addr, count); - Address rs(tmp, (address)ct->byte_map_base); - __ load_address(rs); + AddressLiteral rs(ct->byte_map_base); + __ set(rs, tmp); __ BIND(L_loop); - __ stb(G0, rs.base(), addr); + __ stb(G0, tmp, addr); __ subcc(count, 1, 
count); __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop); __ delayed()->add(addr, 1, addr); - - } + } break; case BarrierSet::ModRef: break; - default : + default: ShouldNotReachHere(); - } } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/templateInterpreter_sparc.cpp --- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -87,8 +87,8 @@ } // throw exception assert(Interpreter::throw_exception_entry() != NULL, "generate it first"); - Address thrower(G3_scratch, Interpreter::throw_exception_entry()); - __ jump_to (thrower); + AddressLiteral thrower(Interpreter::throw_exception_entry()); + __ jump_to(thrower, G3_scratch); __ delayed()->nop(); return entry; } @@ -108,6 +108,24 @@ } +// Arguments are: required type in G5_method_type, and +// failing object (or NULL) in G3_method_handle. 
+address TemplateInterpreterGenerator::generate_WrongMethodType_handler() { + address entry = __ pc(); + // expression stack must be empty before entering the VM if an exception + // happened + __ empty_expression_stack(); + // load exception object + __ call_VM(Oexception, + CAST_FROM_FN_PTR(address, + InterpreterRuntime::throw_WrongMethodTypeException), + G5_method_type, // required + G3_method_handle); // actual + __ should_not_reach_here(); + return entry; +} + + address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) { address entry = __ pc(); // expression stack must be empty before entering the VM if an exception happened @@ -132,7 +150,8 @@ } -address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) { +address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, bool unbox) { + assert(!unbox, "NYI");//6815692// address compiled_entry = __ pc(); Label cont; @@ -168,8 +187,8 @@ const Register cache = G3_scratch; const Register size = G1_scratch; __ get_cache_and_index_at_bcp(cache, G1_scratch, 1); - __ ld_ptr(Address(cache, 0, in_bytes(constantPoolCacheOopDesc::base_offset()) + - in_bytes(ConstantPoolCacheEntry::flags_offset())), size); + __ ld_ptr(cache, constantPoolCacheOopDesc::base_offset() + + ConstantPoolCacheEntry::flags_offset(), size); __ and3(size, 0xFF, size); // argument size in words __ sll(size, Interpreter::logStackElementSize(), size); // each argument size in bytes __ add(Lesp, size, Lesp); // pop arguments @@ -183,9 +202,8 @@ address entry = __ pc(); __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache { Label L; - Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset())); - - __ ld_ptr(exception_addr, Gtemp); + Address exception_addr(G2_thread, Thread::pending_exception_offset()); + __ ld_ptr(exception_addr, Gtemp); // Load pending exception. 
__ tst(Gtemp); __ brx(Assembler::equal, false, Assembler::pt, L); __ delayed()->nop(); @@ -264,7 +282,7 @@ // Update standard invocation counters __ increment_invocation_counter(O0, G3_scratch); if (ProfileInterpreter) { // %%% Merge this into methodDataOop - Address interpreter_invocation_counter(Lmethod, 0, in_bytes(methodOopDesc::interpreter_invocation_counter_offset())); + Address interpreter_invocation_counter(Lmethod, methodOopDesc::interpreter_invocation_counter_offset()); __ ld(interpreter_invocation_counter, G3_scratch); __ inc(G3_scratch); __ st(G3_scratch, interpreter_invocation_counter); @@ -272,9 +290,9 @@ if (ProfileInterpreter && profile_method != NULL) { // Test to see if we should create a method data oop - Address profile_limit(G3_scratch, (address)&InvocationCounter::InterpreterProfileLimit); - __ sethi(profile_limit); - __ ld(profile_limit, G3_scratch); + AddressLiteral profile_limit(&InvocationCounter::InterpreterProfileLimit); + __ sethi(profile_limit, G3_scratch); + __ ld(G3_scratch, profile_limit.low10(), G3_scratch); __ cmp(O0, G3_scratch); __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue); __ delayed()->nop(); @@ -283,9 +301,9 @@ __ test_method_data_pointer(*profile_method); } - Address invocation_limit(G3_scratch, (address)&InvocationCounter::InterpreterInvocationLimit); - __ sethi(invocation_limit); - __ ld(invocation_limit, G3_scratch); + AddressLiteral invocation_limit(&InvocationCounter::InterpreterInvocationLimit); + __ sethi(invocation_limit, G3_scratch); + __ ld(G3_scratch, invocation_limit.low10(), G3_scratch); __ cmp(O0, G3_scratch); __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); __ delayed()->nop(); @@ -296,8 +314,7 @@ // ebx - methodOop // void InterpreterGenerator::lock_method(void) { - const Address access_flags (Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset())); - __ ld(access_flags, O0); + __ ld(Lmethod, in_bytes(methodOopDesc::access_flags_offset()), O0); // 
Load access flags. #ifdef ASSERT { Label ok; @@ -341,8 +358,7 @@ Register Rscratch, Register Rscratch2) { const int page_size = os::vm_page_size(); - Address saved_exception_pc(G2_thread, 0, - in_bytes(JavaThread::saved_exception_pc_offset())); + Address saved_exception_pc(G2_thread, JavaThread::saved_exception_pc_offset()); Label after_frame_check; assert_different_registers(Rframe_size, Rscratch, Rscratch2); @@ -354,7 +370,7 @@ __ delayed()->nop(); // get the stack base, and in debug, verify it is non-zero - __ ld_ptr( G2_thread, in_bytes(Thread::stack_base_offset()), Rscratch ); + __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch ); #ifdef ASSERT Label base_not_zero; __ cmp( Rscratch, G0 ); @@ -366,7 +382,7 @@ // get the stack size, and in debug, verify it is non-zero assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" ); - __ ld_ptr( G2_thread, in_bytes(Thread::stack_size_offset()), Rscratch2 ); + __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 ); #ifdef ASSERT Label size_not_zero; __ cmp( Rscratch2, G0 ); @@ -441,13 +457,14 @@ // (gri - 2/25/2000) - const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())); - const Address size_of_locals (G5_method, 0, in_bytes(methodOopDesc::size_of_locals_offset())); - const Address max_stack (G5_method, 0, in_bytes(methodOopDesc::max_stack_offset())); + const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset()); + const Address size_of_locals (G5_method, methodOopDesc::size_of_locals_offset()); + const Address max_stack (G5_method, methodOopDesc::max_stack_offset()); int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong ); const int extra_space = rounded_vm_local_words + // frame local scratch space + //6815692//methodOopDesc::extra_stack_words() + // extra push slots for MH adapters frame::memory_parameter_word_sp_offset + // register save area (native_call ? 
frame::interpreter_frame_extra_outgoing_argument_words : 0); @@ -519,8 +536,8 @@ if (native_call) { __ mov(G0, Lbcp); } else { - __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::const_offset())), Lbcp ); - __ add(Address(Lbcp, 0, in_bytes(constMethodOopDesc::codes_offset())), Lbcp ); + __ ld_ptr(G5_method, methodOopDesc::const_offset(), Lbcp); + __ add(Lbcp, in_bytes(constMethodOopDesc::codes_offset()), Lbcp); } __ mov( G5_method, Lmethod); // set Lmethod __ get_constant_pool_cache( LcpoolCache ); // set LcpoolCache @@ -558,8 +575,8 @@ // do nothing for empty methods (do not even increment invocation counter) if ( UseFastEmptyMethods) { // If we need a safepoint check, generate full interpreter entry. - Address sync_state(G3_scratch, SafepointSynchronize::address_of_state()); - __ load_contents(sync_state, G3_scratch); + AddressLiteral sync_state(SafepointSynchronize::address_of_state()); + __ set(sync_state, G3_scratch); __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized); __ br(Assembler::notEqual, false, Assembler::pn, slow_path); __ delayed()->nop(); @@ -597,7 +614,7 @@ if ( UseFastAccessorMethods && !UseCompressedOops ) { // Check if we need to reach a safepoint and generate full interpreter // frame if so. - Address sync_state(G3_scratch, SafepointSynchronize::address_of_state()); + AddressLiteral sync_state(SafepointSynchronize::address_of_state()); __ load_contents(sync_state, G3_scratch); __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized); __ br(Assembler::notEqual, false, Assembler::pn, slow_path); @@ -612,8 +629,8 @@ // read first instruction word and extract bytecode @ 1 and index @ 2 // get first 4 bytes of the bytecodes (big endian!) 
- __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::const_offset())), G1_scratch); - __ ld(Address(G1_scratch, 0, in_bytes(constMethodOopDesc::codes_offset())), G1_scratch); + __ ld_ptr(G5_method, methodOopDesc::const_offset(), G1_scratch); + __ ld(G1_scratch, constMethodOopDesc::codes_offset(), G1_scratch); // move index @ 2 far left then to the right most two bytes. __ sll(G1_scratch, 2*BitsPerByte, G1_scratch); @@ -621,7 +638,7 @@ ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch); // get constant pool cache - __ ld_ptr(G5_method, in_bytes(methodOopDesc::constants_offset()), G3_scratch); + __ ld_ptr(G5_method, methodOopDesc::constants_offset(), G3_scratch); __ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch); // get specific constant pool cache entry @@ -630,7 +647,7 @@ // Check the constant Pool cache entry to see if it has been resolved. // If not, need the slow path. ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); - __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::indices_offset()), G1_scratch); + __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch); __ srl(G1_scratch, 2*BitsPerByte, G1_scratch); __ and3(G1_scratch, 0xFF, G1_scratch); __ cmp(G1_scratch, Bytecodes::_getfield); @@ -638,8 +655,8 @@ __ delayed()->nop(); // Get the type and return field offset from the constant pool cache - __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()), G1_scratch); - __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()), G3_scratch); + __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch); + __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch); Label xreturn_path; // Need to differentiate between igetfield, agetfield, bgetfield etc. @@ -698,7 +715,7 @@ // make sure registers are different! 
assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2); - const Address Laccess_flags (Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset())); + const Address Laccess_flags(Lmethod, methodOopDesc::access_flags_offset()); __ verify_oop(G5_method); @@ -708,7 +725,7 @@ // make sure method is native & not abstract // rethink these assertions - they can be simplified and shared (gri 2/25/2000) #ifdef ASSERT - __ ld(G5_method, in_bytes(methodOopDesc::access_flags_offset()), Gtmp1); + __ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1); { Label L; __ btst(JVM_ACC_NATIVE, Gtmp1); @@ -735,10 +752,10 @@ // this slot will be set later, we initialize it to null here just in // case we get a GC before the actual value is stored later - __ st_ptr(G0, Address(FP, 0, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS)); + __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS); - const Address do_not_unlock_if_synchronized(G2_thread, 0, - in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); + const Address do_not_unlock_if_synchronized(G2_thread, + JavaThread::do_not_unlock_if_synchronized_offset()); // Since at this point in the method invocation the exception handler // would try to exit the monitor of synchronized methods which hasn't // been entered yet, we set the thread local variable @@ -805,12 +822,13 @@ // get signature handler { Label L; - __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::signature_handler_offset())), G3_scratch); + Address signature_handler(Lmethod, methodOopDesc::signature_handler_offset()); + __ ld_ptr(signature_handler, G3_scratch); __ tst(G3_scratch); __ brx(Assembler::notZero, false, Assembler::pt, L); __ delayed()->nop(); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod); - __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::signature_handler_offset())), G3_scratch); + __ ld_ptr(signature_handler, G3_scratch); __ 
bind(L); } @@ -823,10 +841,9 @@ // Flush the method pointer to the register save area __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS); __ mov(Llocals, O1); + // calculate where the mirror handle body is allocated in the interpreter frame: - - Address mirror(FP, 0, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS); - __ add(mirror, O2); + __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2); // Calculate current frame size __ sub(SP, FP, O3); // Calculate negative of current frame size @@ -863,14 +880,13 @@ __ ld(Laccess_flags, O0); __ btst(JVM_ACC_STATIC, O0); __ br( Assembler::zero, false, Assembler::pt, not_static); - __ delayed()-> - // get native function entry point(O0 is a good temp until the very end) - ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::native_function_offset())), O0); + // get native function entry point(O0 is a good temp until the very end) + __ delayed()->ld_ptr(Lmethod, in_bytes(methodOopDesc::native_function_offset()), O0); // for static methods insert the mirror argument const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); - __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc:: constants_offset())), O1); - __ ld_ptr(Address(O1, 0, constantPoolOopDesc::pool_holder_offset_in_bytes()), O1); + __ ld_ptr(Lmethod, methodOopDesc:: constants_offset(), O1); + __ ld_ptr(O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1); __ ld_ptr(O1, mirror_offset, O1); #ifdef ASSERT if (!PrintSignatureHandlers) // do not dirty the output with this @@ -925,15 +941,13 @@ __ flush_windows(); // mark windows as flushed - Address flags(G2_thread, - 0, - in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset())); + Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset()); __ set(JavaFrameAnchor::flushed, G3_scratch); __ st(G3_scratch, flags); // 
Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready. - Address thread_state(G2_thread, 0, in_bytes(JavaThread::thread_state_offset())); + Address thread_state(G2_thread, JavaThread::thread_state_offset()); #ifdef ASSERT { Label L; __ ld(thread_state, G3_scratch); @@ -963,7 +977,7 @@ // Block, if necessary, before resuming in _thread_in_Java state. // In order for GC to work, don't clear the last_Java_sp until after blocking. { Label no_block; - Address sync_state(G3_scratch, SafepointSynchronize::address_of_state()); + AddressLiteral sync_state(SafepointSynchronize::address_of_state()); // Switch thread to "native transition" state before reading the synchronization state. // This additional state is necessary because reading and testing the synchronization @@ -990,10 +1004,8 @@ __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized); Label L; - Address suspend_state(G2_thread, 0, in_bytes(JavaThread::suspend_flags_offset())); __ br(Assembler::notEqual, false, Assembler::pn, L); - __ delayed()-> - ld(suspend_state, G3_scratch); + __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch); __ cmp(G3_scratch, 0); __ br(Assembler::equal, false, Assembler::pt, no_block); __ delayed()->nop(); @@ -1035,7 +1047,7 @@ __ st(G3_scratch, thread_state); // reset handle block - __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), G3_scratch); + __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch); __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes()); // If we have an oop result store it where it will be safe for any further gc @@ -1064,8 +1076,7 @@ // handle exceptions (exception handling will handle unlocking!) 
{ Label L; - Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset())); - + Address exception_addr(G2_thread, Thread::pending_exception_offset()); __ ld_ptr(exception_addr, Gtemp); __ tst(Gtemp); __ brx(Assembler::equal, false, Assembler::pt, L); @@ -1151,11 +1162,11 @@ // make sure registers are different! assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2); - const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())); - const Address size_of_locals (G5_method, 0, in_bytes(methodOopDesc::size_of_locals_offset())); + const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset()); + const Address size_of_locals (G5_method, methodOopDesc::size_of_locals_offset()); // Seems like G5_method is live at the point this is used. So we could make this look consistent // and use in the asserts. - const Address access_flags (Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset())); + const Address access_flags (Lmethod, methodOopDesc::access_flags_offset()); __ verify_oop(G5_method); @@ -1165,7 +1176,7 @@ // make sure method is not native & not abstract // rethink these assertions - they can be simplified and shared (gri 2/25/2000) #ifdef ASSERT - __ ld(G5_method, in_bytes(methodOopDesc::access_flags_offset()), Gtmp1); + __ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1); { Label L; __ btst(JVM_ACC_NATIVE, Gtmp1); @@ -1220,8 +1231,8 @@ __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop ); __ delayed()->st_ptr( init_value, O2, 0 ); - const Address do_not_unlock_if_synchronized(G2_thread, 0, - in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); + const Address do_not_unlock_if_synchronized(G2_thread, + JavaThread::do_not_unlock_if_synchronized_offset()); // Since at this point in the method invocation the exception handler // would try to exit the monitor of synchronized methods which hasn't // been entered yet, we set the 
thread local variable @@ -1447,6 +1458,7 @@ round_to(callee_extra_locals * Interpreter::stackElementWords(), WordsPerLong); const int max_stack_words = max_stack * Interpreter::stackElementWords(); return (round_to((max_stack_words + //6815692//+ methodOopDesc::extra_stack_words() + rounded_vm_local_words + frame::memory_parameter_word_sp_offset), WordsPerLong) // already rounded @@ -1696,7 +1708,7 @@ // Interpreter::_remove_activation_preserving_args_entry = __ pc(); - Address popframe_condition_addr (G2_thread, 0, in_bytes(JavaThread::popframe_condition_offset())); + Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset()); // Set the popframe_processing bit in popframe_condition indicating that we are // currently handling popframe, so that call_VMs that may happen later do not trigger new // popframe handling cycles. @@ -1738,7 +1750,7 @@ __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2); // Inform deoptimization that it is responsible for restoring these arguments __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1); - Address popframe_condition_addr(G2_thread, 0, in_bytes(JavaThread::popframe_condition_offset())); + Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset()); __ st(Gtmp1, popframe_condition_addr); // Return from the current method @@ -1787,7 +1799,7 @@ __ verify_oop(Oexception); const int return_reg_adjustment = frame::pc_return_offset; - Address issuing_pc_addr(I7, 0, return_reg_adjustment); + Address issuing_pc_addr(I7, return_reg_adjustment); // We are done with this activation frame; find out where to go next. 
// The continuation point will be an exception handler, which expects @@ -1833,8 +1845,8 @@ __ empty_expression_stack(); __ load_earlyret_value(state); - __ ld_ptr(Address(G2_thread, 0, in_bytes(JavaThread::jvmti_thread_state_offset())), G3_scratch); - Address cond_addr(G3_scratch, 0, in_bytes(JvmtiThreadState::earlyret_state_offset())); + __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch); + Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset()); // Clear the earlyret state __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr); @@ -1901,43 +1913,33 @@ // helpers for generate_and_dispatch void TemplateInterpreterGenerator::count_bytecode() { - Address c(G3_scratch, (address)&BytecodeCounter::_counter_value); - __ load_contents(c, G4_scratch); - __ inc(G4_scratch); - __ st(G4_scratch, c); + __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch); } void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { - Address bucket( G3_scratch, (address) &BytecodeHistogram::_counters[t->bytecode()] ); - __ load_contents(bucket, G4_scratch); - __ inc(G4_scratch); - __ st(G4_scratch, bucket); + __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch); } void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { - address index_addr = (address)&BytecodePairHistogram::_index; - Address index(G3_scratch, index_addr); - - address counters_addr = (address)&BytecodePairHistogram::_counters; - Address counters(G3_scratch, counters_addr); + AddressLiteral index (&BytecodePairHistogram::_index); + AddressLiteral counters((address) &BytecodePairHistogram::_counters); // get index, shift out old bytecode, bring in new bytecode, and store it // _index = (_index >> log2_number_of_codes) | // (bytecode << log2_number_of_codes); - - __ load_contents( index, G4_scratch ); + __ load_contents(index, G4_scratch); __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, 
G4_scratch ); __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes, G3_scratch ); __ or3( G3_scratch, G4_scratch, G4_scratch ); - __ store_contents( G4_scratch, index ); + __ store_contents(G4_scratch, index, G3_scratch); // bump bucket contents // _counters[_index] ++; - __ load_address( counters ); // loads into G3_scratch + __ set(counters, G3_scratch); // loads into G3_scratch __ sll( G4_scratch, LogBytesPerWord, G4_scratch ); // Index is word address __ add (G3_scratch, G4_scratch, G3_scratch); // Add in index __ ld (G3_scratch, 0, G4_scratch); @@ -1958,9 +1960,9 @@ void TemplateInterpreterGenerator::stop_interpreter_at() { - Address counter(G3_scratch , (address)&BytecodeCounter::_counter_value); - __ load_contents (counter, G3_scratch ); - Address stop_at(G4_scratch, (address)&StopInterpreterAt); + AddressLiteral counter(&BytecodeCounter::_counter_value); + __ load_contents(counter, G3_scratch); + AddressLiteral stop_at(&StopInterpreterAt); __ load_ptr_contents(stop_at, G4_scratch); __ cmp(G3_scratch, G4_scratch); __ breakpoint_trap(Assembler::equal); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/templateTable_sparc.cpp --- a/src/cpu/sparc/vm/templateTable_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/templateTable_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -131,7 +131,7 @@ Address TemplateTable::at_bcp(int offset) { assert(_desc->uses_bcp(), "inconsistent uses_bcp information"); - return Address( Lbcp, 0, offset); + return Address(Lbcp, offset); } @@ -217,9 +217,9 @@ case 1: p = &one; break; case 2: p = &two; break; } - Address a(G3_scratch, (address)p); - __ sethi(a); - __ ldf(FloatRegisterImpl::S, a, Ftos_f); + AddressLiteral a(p); + __ sethi(a, G3_scratch); + __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f); } @@ -232,9 +232,9 @@ case 0: p = &zero; break; case 1: p = &one; break; } - Address a(G3_scratch, (address)p); - __ sethi(a); - __ ldf(FloatRegisterImpl::D, a, Ftos_d); + AddressLiteral a(p); + __ sethi(a, G3_scratch); + __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d); } @@ -1548,7 +1548,7 @@ // non-JSR normal-branch stuff occurring below. if( is_jsr ) { // compute return address as bci in Otos_i - __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), G3_scratch); + __ ld_ptr(Lmethod, methodOopDesc::const_offset(), G3_scratch); __ sub(Lbcp, G3_scratch, G3_scratch); __ sub(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()) - (is_wide ? 
5 : 3), Otos_i); @@ -1665,7 +1665,7 @@ __ profile_ret(vtos, Otos_i, G4_scratch); - __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), G3_scratch); + __ ld_ptr(Lmethod, methodOopDesc::const_offset(), G3_scratch); __ add(G3_scratch, Otos_i, G3_scratch); __ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp); __ dispatch_next(vtos); @@ -1680,7 +1680,7 @@ __ profile_ret(vtos, Otos_i, G4_scratch); - __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), G3_scratch); + __ ld_ptr(Lmethod, methodOopDesc::const_offset(), G3_scratch); __ add(G3_scratch, Otos_i, G3_scratch); __ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp); __ dispatch_next(vtos); @@ -1968,8 +1968,8 @@ Label resolved; __ get_cache_and_index_at_bcp(Rcache, index, 1); - __ ld_ptr(Address(Rcache, 0, in_bytes(constantPoolCacheOopDesc::base_offset() + - ConstantPoolCacheEntry::indices_offset())), Lbyte_code); + __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + + ConstantPoolCacheEntry::indices_offset(), Lbyte_code); __ srl( Lbyte_code, shift_count, Lbyte_code ); __ and3( Lbyte_code, 0xFF, Lbyte_code ); @@ -2029,11 +2029,11 @@ resolve_cache_and_index(byte_no, Rcache, Rscratch); } - __ ld_ptr(Address(Rcache, 0, method_offset), Rmethod); + __ ld_ptr(Rcache, method_offset, Rmethod); if (Ritable_index != noreg) { - __ ld_ptr(Address(Rcache, 0, index_offset), Ritable_index); + __ ld_ptr(Rcache, index_offset, Ritable_index); } - __ ld_ptr(Address(Rcache, 0, flags_offset), Rflags); + __ ld_ptr(Rcache, flags_offset, Rflags); } // The Rcache register must be set before call @@ -2047,13 +2047,10 @@ ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); - __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset + - ConstantPoolCacheEntry::flags_offset())), Rflags); - __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset + - ConstantPoolCacheEntry::f2_offset())), Roffset); + __ ld_ptr(Rcache, cp_base_offset + 
ConstantPoolCacheEntry::flags_offset(), Rflags); + __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset); if (is_static) { - __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset + - ConstantPoolCacheEntry::f1_offset())), Robj); + __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj); } } @@ -2070,9 +2067,7 @@ // the time to call into the VM. Label Label1; assert_different_registers(Rcache, index, G1_scratch); - Address get_field_access_count_addr(G1_scratch, - (address)JvmtiExport::get_field_access_count_addr(), - relocInfo::none); + AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr()); __ load_contents(get_field_access_count_addr, G1_scratch); __ tst(G1_scratch); __ br(Assembler::zero, false, Assembler::pt, Label1); @@ -2293,7 +2288,7 @@ __ get_cache_and_index_at_bcp(Rcache, index, 1); jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true); - __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())), Roffset); + __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset); __ null_check(Otos_i); __ verify_oop(Otos_i); @@ -2304,7 +2299,7 @@ Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); if (__ membar_has_effect(membar_bits)) { // Get volatile flag - __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())), Rflags); + __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags); __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch); } @@ -2355,7 +2350,7 @@ // Check to see if a field modification watch has been set before we take // the time to call into the VM. 
Label done; - Address get_field_modification_count_addr(G4_scratch, (address)JvmtiExport::get_field_modification_count_addr(), relocInfo::none); + AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr()); __ load_contents(get_field_modification_count_addr, G4_scratch); __ tst(G4_scratch); __ br(Assembler::zero, false, Assembler::pt, done); @@ -2408,9 +2403,7 @@ // the time to call into the VM. Label Label1; assert_different_registers(Rcache, index, G1_scratch); - Address get_field_modification_count_addr(G1_scratch, - (address)JvmtiExport::get_field_modification_count_addr(), - relocInfo::none); + AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr()); __ load_contents(get_field_modification_count_addr, G1_scratch); __ tst(G1_scratch); __ br(Assembler::zero, false, Assembler::pt, Label1); @@ -2433,7 +2426,7 @@ // the type to determine where the object is. Label two_word, valsizeknown; - __ ld_ptr(Address(G1_scratch, 0, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())), Rflags); + __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags); __ mov(Lesp, G4_scratch); __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags); // Make sure we don't need to mask Rflags for tosBits after the above shift @@ -2689,8 +2682,7 @@ Label notVolatile, checkVolatile, exit; if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) { - __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset + - ConstantPoolCacheEntry::flags_offset())), Rflags); + __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags); __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch); __ and3(Rflags, Lscratch, Lscratch); if (__ membar_has_effect(read_bits)) { @@ -2702,8 +2694,7 @@ } } - __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset + - ConstantPoolCacheEntry::f2_offset())), Roffset); + __ ld_ptr(Rcache, cp_base_offset + 
ConstantPoolCacheEntry::f2_offset(), Roffset); pop_and_check_object(Rclass); switch (bytecode()) { @@ -2755,7 +2746,7 @@ // access constant pool cache (is resolved) __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2); - __ ld_ptr(Address(Rcache, 0, in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())), Roffset); + __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset); __ add(Lbcp, 1, Lbcp); // needed to report exception at the correct bcp __ verify_oop(Rreceiver); @@ -2775,7 +2766,7 @@ if (__ membar_has_effect(membar_bits)) { // Get is_volatile value in Rflags and check if membar is needed - __ ld_ptr(Address(Rcache, 0, in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset())), Rflags); + __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags); // Test volatile Label notVolatile; @@ -2853,8 +2844,8 @@ __ verify_oop(O0); // get return address - Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table()); - __ load_address(table); + AddressLiteral table(Interpreter::return_3_addrs_by_index_table()); + __ set(table, Rtemp); __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type // Make sure we don't need to mask Rret for tosBits after the above shift ConstantPoolCacheEntry::verify_tosBits(); @@ -2886,7 +2877,7 @@ __ verify_oop(G5_method); // Load receiver from stack slot - __ lduh(Address(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())), G4_scratch); + __ lduh(G5_method, in_bytes(methodOopDesc::size_of_parameters_offset()), G4_scratch); __ load_receiver(G4_scratch, O0); // receiver NULL check @@ -2895,8 +2886,8 @@ __ profile_final_call(O4); // get return address - Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table()); - __ load_address(table); + AddressLiteral table(Interpreter::return_3_addrs_by_index_table()); + __ 
set(table, Rtemp); __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type // Make sure we don't need to mask Rret for tosBits after the above shift ConstantPoolCacheEntry::verify_tosBits(); @@ -2920,7 +2911,7 @@ __ verify_oop(G5_method); - __ lduh(Address(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())), G4_scratch); + __ lduh(G5_method, in_bytes(methodOopDesc::size_of_parameters_offset()), G4_scratch); __ load_receiver(G4_scratch, O0); // receiver NULL check @@ -2929,8 +2920,8 @@ __ profile_call(O4); // get return address - Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table()); - __ load_address(table); + AddressLiteral table(Interpreter::return_3_addrs_by_index_table()); + __ set(table, Rtemp); __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type // Make sure we don't need to mask Rret for tosBits after the above shift ConstantPoolCacheEntry::verify_tosBits(); @@ -2956,8 +2947,8 @@ __ profile_call(O4); // get return address - Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table()); - __ load_address(table); + AddressLiteral table(Interpreter::return_3_addrs_by_index_table()); + __ set(table, Rtemp); __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type // Make sure we don't need to mask Rret for tosBits after the above shift ConstantPoolCacheEntry::verify_tosBits(); @@ -3021,8 +3012,8 @@ __ mov(Rflags, Rret); // get return address - Address table(Rscratch, (address)Interpreter::return_5_addrs_by_index_table()); - __ load_address(table); + AddressLiteral table(Interpreter::return_5_addrs_by_index_table()); + __ set(table, Rscratch); __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type // Make sure we don't need to mask Rret for tosBits after the above shift ConstantPoolCacheEntry::verify_tosBits(); @@ -3059,7 +3050,7 @@ Label search; Register Rtemp = Rflags; - __ ld(Address(RklassOop, 0, instanceKlass::vtable_length_offset() * 
wordSize), Rtemp); + __ ld(RklassOop, instanceKlass::vtable_length_offset() * wordSize, Rtemp); if (align_object_offset(1) > 1) { __ round_to(Rtemp, align_object_offset(1)); } @@ -3125,6 +3116,24 @@ } +void TemplateTable::invokedynamic(int byte_no) { + transition(vtos, vtos); + + if (!EnableInvokeDynamic) { + // We should not encounter this bytecode if !EnableInvokeDynamic. + // The verifier will stop it. However, if we get past the verifier, + // this will stop the thread in a reasonable way, without crashing the JVM. + __ call_VM(noreg, CAST_FROM_FN_PTR(address, + InterpreterRuntime::throw_IncompatibleClassChangeError)); + // the call_VM checks for exception, so we should never return here. + __ should_not_reach_here(); + return; + } + + __ stop("invokedynamic NYI");//6815692// +} + + //---------------------------------------------------------------------------------------------------- // Allocation @@ -3624,9 +3633,9 @@ transition(vtos, vtos); __ ldub(Lbcp, 1, G3_scratch);// get next bc __ sll(G3_scratch, LogBytesPerWord, G3_scratch); - Address ep(G4_scratch, (address)Interpreter::_wentry_point); - __ load_address(ep); - __ ld_ptr(ep.base(), G3_scratch, G3_scratch); + AddressLiteral ep(Interpreter::_wentry_point); + __ set(ep, G4_scratch); + __ ld_ptr(G4_scratch, G3_scratch, G3_scratch); __ jmp(G3_scratch, G0); __ delayed()->nop(); // Note: the Lbcp increment step is part of the individual wide bytecode implementations diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/sparc/vm/vtableStubs_sparc.cpp --- a/src/cpu/sparc/vm/vtableStubs_sparc.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/sparc/vm/vtableStubs_sparc.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -48,11 +48,7 @@ #ifndef PRODUCT if (CountCompiledCalls) { - Address ctr(G5, SharedRuntime::nof_megamorphic_calls_addr()); - __ sethi(ctr); - __ ld(ctr, G3_scratch); - __ inc(G3_scratch); - __ st(G3_scratch, ctr); + __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), G5, G3_scratch); } #endif /* PRODUCT */ @@ -114,6 +110,9 @@ (int)(s->code_end() - __ pc())); } guarantee(__ pc() <= s->code_end(), "overflowed buffer"); + // shut the door on sizing bugs + int slop = 2*BytesPerInstWord; // 32-bit offset is this much larger than a 13-bit one + assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for sethi;add"); s->set_exception_points(npe_addr, ame_addr); return s; @@ -151,11 +150,7 @@ #ifndef PRODUCT if (CountCompiledCalls) { - Address ctr(L0, SharedRuntime::nof_megamorphic_calls_addr()); - __ sethi(ctr); - __ ld(ctr, L1); - __ inc(L1); - __ st(L1, ctr); + __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), L0, L1); } #endif /* PRODUCT */ @@ -195,8 +190,8 @@ __ delayed()->nop(); __ bind(throw_icce); - Address icce(G3_scratch, StubRoutines::throw_IncompatibleClassChangeError_entry()); - __ jump_to(icce, 0); + AddressLiteral icce(StubRoutines::throw_IncompatibleClassChangeError_entry()); + __ jump_to(icce, G3_scratch); __ delayed()->restore(); masm->flush(); @@ -208,6 +203,9 @@ (int)(s->code_end() - __ pc())); } guarantee(__ pc() <= s->code_end(), "overflowed buffer"); + // shut the door on sizing bugs + int slop = 2*BytesPerInstWord; // 32-bit offset is this much larger than a 13-bit one + assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for sethi;add"); s->set_exception_points(npe_addr, ame_addr); return s; @@ -233,6 +231,50 @@ return (basic + slop); } } + + // In order to tune these parameters, run the JVM with VM options + // +PrintMiscellaneous and +WizardMode to see information about + // actual itable stubs. 
Look for lines like this: + // itable #1 at 0x5551212[116] left over: 8 + // Reduce the constants so that the "left over" number is 8 + // Do not aim at a left-over number of zero, because a very + // large vtable or itable offset (> 4K) will require an extra + // sethi/or pair of instructions. + // + // The JVM98 app. _202_jess has a megamorphic interface call. + // The itable code looks like this: + // Decoding VtableStub itbl[1]@16 + // ld [ %o0 + 4 ], %g3 + // save %sp, -64, %sp + // ld [ %g3 + 0xe8 ], %l2 + // sll %l2, 2, %l2 + // add %l2, 0x134, %l2 + // and %l2, -8, %l2 ! NOT_LP64 only + // add %g3, %l2, %l2 + // add %g3, 4, %g3 + // ld [ %l2 ], %l5 + // brz,pn %l5, throw_icce + // cmp %l5, %g5 + // be %icc, success + // add %l2, 8, %l2 + // loop: + // ld [ %l2 ], %l5 + // brz,pn %l5, throw_icce + // cmp %l5, %g5 + // bne,pn %icc, loop + // add %l2, 8, %l2 + // success: + // ld [ %l2 + -4 ], %l2 + // ld [ %g3 + %l2 ], %l5 + // restore %l5, 0, %g5 + // ld [ %g5 + 0x44 ], %g3 + // jmp %g3 + // nop + // throw_icce: + // sethi %hi(throw_ICCE_entry), %g3 + // ! 
5 more instructions here, LP64_ONLY + // jmp %g3 + %lo(throw_ICCE_entry) + // restore } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/x86/vm/assembler_x86.cpp --- a/src/cpu/x86/vm/assembler_x86.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/x86/vm/assembler_x86.cpp Thu May 07 10:30:17 2009 -0700 @@ -7609,6 +7609,83 @@ } +// registers on entry: +// - rax ('check' register): required MethodType +// - rcx: method handle +// - rdx, rsi, or ?: killable temp +void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg, + Register temp_reg, + Label& wrong_method_type) { + if (UseCompressedOops) unimplemented(); // field accesses must decode + // compare method type against that of the receiver + cmpptr(mtype_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg))); + jcc(Assembler::notEqual, wrong_method_type); +} + + +// A method handle has a "vmslots" field which gives the size of its +// argument list in JVM stack slots. This field is either located directly +// in every method handle, or else is indirectly accessed through the +// method handle's MethodType. This macro hides the distinction. 
+void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg, + Register temp_reg) { + if (UseCompressedOops) unimplemented(); // field accesses must decode + // load mh.type.form.vmslots + if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) { + // hoist vmslots into every mh to avoid dependent load chain + movl(vmslots_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::vmslots_offset_in_bytes, temp_reg))); + } else { + Register temp2_reg = vmslots_reg; + movptr(temp2_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg))); + movptr(temp2_reg, Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg))); + movl(vmslots_reg, Address(temp2_reg, delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, temp_reg))); + } +} + + +// registers on entry: +// - rcx: method handle +// - rdx: killable temp (interpreted only) +// - rax: killable temp (compiled only) +void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg) { + assert(mh_reg == rcx, "caller must put MH object in rcx"); + assert_different_registers(mh_reg, temp_reg); + + if (UseCompressedOops) unimplemented(); // field accesses must decode + + // pick out the interpreted side of the handler + movptr(temp_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::vmentry_offset_in_bytes, temp_reg))); + + // off we go... + jmp(Address(temp_reg, MethodHandleEntry::from_interpreted_entry_offset_in_bytes())); + + // for the various stubs which take control at this point, + // see MethodHandles::generate_method_handle_stub +} + + +Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, + int extra_slot_offset) { + // cf. TemplateTable::prepare_invoke(), if (load_receiver). 
+ int stackElementSize = Interpreter::stackElementSize(); + int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); +#ifdef ASSERT + int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); + assert(offset1 - offset == stackElementSize, "correct arithmetic"); +#endif + Register scale_reg = noreg; + Address::ScaleFactor scale_factor = Address::no_scale; + if (arg_slot.is_constant()) { + offset += arg_slot.as_constant() * stackElementSize; + } else { + scale_reg = arg_slot.as_register(); + scale_factor = Address::times(stackElementSize); + } + offset += wordSize; // return PC is on stack + return Address(rsp, scale_reg, scale_factor, offset); +} + + void MacroAssembler::verify_oop_addr(Address addr, const char* s) { if (!VerifyOops) return; diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/x86/vm/assembler_x86.hpp --- a/src/cpu/x86/vm/assembler_x86.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/x86/vm/assembler_x86.hpp Thu May 07 10:30:17 2009 -0700 @@ -1857,6 +1857,16 @@ Register temp_reg, Label& L_success); + // method handles (JSR 292) + void check_method_handle_type(Register mtype_reg, Register mh_reg, + Register temp_reg, + Label& wrong_method_type); + void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg, + Register temp_reg); + void jump_to_method_handle_entry(Register mh_reg, Register temp_reg); + Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0); + + //---- void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0 diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/x86/vm/cppInterpreter_x86.cpp --- a/src/cpu/x86/vm/cppInterpreter_x86.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/x86/vm/cppInterpreter_x86.cpp Thu May 07 10:30:17 2009 -0700 @@ -513,10 +513,11 @@ // compute full expression stack limit const Address size_of_stack (rbx, methodOopDesc::max_stack_offset()); + const int extra_stack = 0; //6815692//methodOopDesc::extra_stack_words(); __ load_unsigned_short(rdx, 
size_of_stack); // get size of expression stack in words __ negptr(rdx); // so we can subtract in next step // Allocate expression stack - __ lea(rsp, Address(rsp, rdx, Address::times_ptr)); + __ lea(rsp, Address(rsp, rdx, Address::times_ptr, -extra_stack)); __ movptr(STATE(_stack_limit), rsp); } @@ -659,8 +660,9 @@ // Always give one monitor to allow us to start interp if sync method. // Any additional monitors need a check when moving the expression stack const int one_monitor = frame::interpreter_frame_monitor_size() * wordSize; + const int extra_stack = 0; //6815692//methodOopDesc::extra_stack_entries(); __ load_unsigned_short(rax, size_of_stack); // get size of expression stack in words - __ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), one_monitor)); + __ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), extra_stack + one_monitor)); __ lea(rax, Address(rax, rdx, Interpreter::stackElementScale(), overhead_size)); #ifdef ASSERT @@ -2185,6 +2187,7 @@ case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break; case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break; case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break; + case Interpreter::method_handle : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break; case Interpreter::java_lang_math_sin : // fall thru case Interpreter::java_lang_math_cos : // fall thru @@ -2224,7 +2227,8 @@ const int overhead_size = sizeof(BytecodeInterpreter)/wordSize + ( frame::sender_sp_offset - frame::link_offset) + 2; - const int method_stack = (method->max_locals() + method->max_stack()) * + const int extra_stack = 0; //6815692//methodOopDesc::extra_stack_entries(); + const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) * Interpreter::stackElementWords(); return overhead_size + method_stack + 
stub_code; } @@ -2289,7 +2293,8 @@ // Need +1 here because stack_base points to the word just above the first expr stack entry // and stack_limit is supposed to point to the word just below the last expr stack entry. // See generate_compute_interpreter_state. - to_fill->_stack_limit = stack_base - (method->max_stack() + 1); + int extra_stack = 0; //6815692//methodOopDesc::extra_stack_entries(); + to_fill->_stack_limit = stack_base - (method->max_stack() + extra_stack + 1); to_fill->_monitor_base = (BasicObjectLock*) monitor_base; to_fill->_self_link = to_fill; @@ -2335,7 +2340,8 @@ monitor_size); // Now with full size expression stack - int full_frame_size = short_frame_size + method->max_stack() * BytesPerWord; + int extra_stack = 0; //6815692//methodOopDesc::extra_stack_entries(); + int full_frame_size = short_frame_size + (method->max_stack() + extra_stack) * BytesPerWord; // and now with only live portion of the expression stack short_frame_size = short_frame_size + tempcount * BytesPerWord; diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/x86/vm/interp_masm_x86_32.cpp --- a/src/cpu/x86/vm/interp_masm_x86_32.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp Thu May 07 10:30:17 2009 -0700 @@ -189,20 +189,33 @@ } -void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset) { +void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_offset, bool giant_index) { assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); + if (!giant_index) { + load_unsigned_short(reg, Address(rsi, bcp_offset)); + } else { + assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic"); + movl(reg, Address(rsi, bcp_offset)); + assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line"); + notl(reg); // convert to plain index + } +} + + +void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register index, + int 
bcp_offset, bool giant_index) { assert(cache != index, "must use different registers"); - load_unsigned_short(index, Address(rsi, bcp_offset)); + get_cache_index_at_bcp(index, bcp_offset, giant_index); movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize)); assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below"); shlptr(index, 2); // convert from field index to ConstantPoolCacheEntry index } -void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset) { - assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); +void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp, + int bcp_offset, bool giant_index) { assert(cache != tmp, "must use different register"); - load_unsigned_short(tmp, Address(rsi, bcp_offset)); + get_cache_index_at_bcp(tmp, bcp_offset, giant_index); assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below"); // convert from field index to ConstantPoolCacheEntry index // and from word offset to byte offset @@ -555,13 +568,18 @@ } -// Jump to from_interpreted entry of a call unless single stepping is possible -// in this thread in which case we must call the i2i entry -void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) { +void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() { // set sender sp lea(rsi, Address(rsp, wordSize)); // record last_sp movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), rsi); +} + + +// Jump to from_interpreted entry of a call unless single stepping is possible +// in this thread in which case we must call the i2i entry +void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) { + prepare_to_jump_from_interpreted(); if (JvmtiExport::can_post_interpreter_events()) { Label run_compiled_code; @@ -1209,7 +1227,9 @@ } -void InterpreterMacroAssembler::profile_virtual_call(Register 
receiver, Register mdp, Register reg2) { +void InterpreterMacroAssembler::profile_virtual_call(Register receiver, Register mdp, + Register reg2, + bool receiver_can_be_null) { if (ProfileInterpreter) { Label profile_continue; @@ -1219,8 +1239,15 @@ // We are making a call. Increment the count. increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); + Label skip_receiver_profile; + if (receiver_can_be_null) { + testptr(receiver, receiver); + jcc(Assembler::zero, skip_receiver_profile); + } + // Record the receiver type. record_klass_in_profile(receiver, mdp, reg2); + bind(skip_receiver_profile); // The method data pointer needs to be updated to reflect the new target. update_mdp_by_constant(mdp, diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/x86/vm/interp_masm_x86_32.hpp --- a/src/cpu/x86/vm/interp_masm_x86_32.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/x86/vm/interp_masm_x86_32.hpp Thu May 07 10:30:17 2009 -0700 @@ -76,8 +76,9 @@ void get_cpool_and_tags(Register cpool, Register tags) { get_constant_pool(cpool); movptr(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes())); } void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset); - void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset); - void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset); + void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, bool giant_index = false); + void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false); + void get_cache_index_at_bcp(Register index, int bcp_offset, bool giant_index = false); // Expression stack void f2ieee(); // truncate ftos to 32bits @@ -161,6 +162,7 @@ // jump to an invoked target + void prepare_to_jump_from_interpreted(); void jump_from_interpreted(Register method, Register temp); // Returning from interpreted functions @@ -225,7 +227,8 @@ void profile_not_taken_branch(Register mdp); void 
profile_call(Register mdp); void profile_final_call(Register mdp); - void profile_virtual_call(Register receiver, Register mdp, Register scratch2); + void profile_virtual_call(Register receiver, Register mdp, Register scratch2, + bool receiver_can_be_null = false); void profile_ret(Register return_bci, Register mdp); void profile_null_seen(Register mdp); void profile_typecheck(Register mdp, Register klass, Register scratch); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/x86/vm/interp_masm_x86_64.cpp --- a/src/cpu/x86/vm/interp_masm_x86_64.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp Thu May 07 10:30:17 2009 -0700 @@ -551,13 +551,18 @@ MacroAssembler::call_VM_leaf_base(entry_point, 3); } -// Jump to from_interpreted entry of a call unless single stepping is possible -// in this thread in which case we must call the i2i entry -void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) { +void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() { // set sender sp lea(r13, Address(rsp, wordSize)); // record last_sp movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), r13); +} + + +// Jump to from_interpreted entry of a call unless single stepping is possible +// in this thread in which case we must call the i2i entry +void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) { + prepare_to_jump_from_interpreted(); if (JvmtiExport::can_post_interpreter_events()) { Label run_compiled_code; diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/x86/vm/interp_masm_x86_64.hpp --- a/src/cpu/x86/vm/interp_masm_x86_64.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/x86/vm/interp_masm_x86_64.hpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -176,6 +176,7 @@ void dispatch_via (TosState state, address* table); // jump to an invoked target + void prepare_to_jump_from_interpreted(); void jump_from_interpreted(Register method, Register temp); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/x86/vm/interpreterGenerator_x86.hpp --- a/src/cpu/x86/vm/interpreterGenerator_x86.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/x86/vm/interpreterGenerator_x86.hpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,6 +32,7 @@ address generate_normal_entry(bool synchronized); address generate_native_entry(bool synchronized); address generate_abstract_entry(void); + address generate_method_handle_entry(void); address generate_math_entry(AbstractInterpreter::MethodKind kind); address generate_empty_entry(void); address generate_accessor_entry(void); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/x86/vm/interpreter_x86_32.cpp --- a/src/cpu/x86/vm/interpreter_x86_32.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/x86/vm/interpreter_x86_32.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -201,11 +201,12 @@ address entry_point = __ pc(); // abstract method entry - // remove return address. 
Not really needed, since exception handling throws away expression stack - __ pop(rbx); - // adjust stack to what a normal return would do - __ mov(rsp, rsi); + // pop return address, reset last_sp to NULL + __ empty_expression_stack(); + __ restore_bcp(); // rsi must be correct for exception handler (was destroyed) + __ restore_locals(); // make sure locals pointer is correct as well (was destroyed) + // throw exception __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError)); // the call_VM checks for exception, so we should never return here. @@ -214,6 +215,20 @@ return entry_point; } + +// Method handle invoker +// Dispatch a method of the form java.dyn.MethodHandles::invoke(...) +address InterpreterGenerator::generate_method_handle_entry(void) { + if (!EnableMethodHandles) { + return generate_abstract_entry(); + } + + address entry_point = MethodHandles::generate_method_handle_interpreter_entry(_masm); + + return entry_point; +} + + // This method tells the deoptimizer how big an interpreted frame must be: int AbstractInterpreter::size_activation(methodOop method, int tempcount, diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/x86/vm/interpreter_x86_64.cpp --- a/src/cpu/x86/vm/interpreter_x86_64.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/x86/vm/interpreter_x86_64.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -294,6 +294,16 @@ } +// Method handle invoker +// Dispatch a method of the form java.dyn.MethodHandles::invoke(...) 
+address InterpreterGenerator::generate_method_handle_entry(void) { + if (!EnableMethodHandles) { + return generate_abstract_entry(); + } + return generate_abstract_entry(); //6815692// +} + + // Empty method, generate a very fast return. address InterpreterGenerator::generate_empty_entry(void) { diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/x86/vm/methodHandles_x86.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/cpu/x86/vm/methodHandles_x86.cpp Thu May 07 10:30:17 2009 -0700 @@ -0,0 +1,1133 @@ +/* + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include "incls/_precompiled.incl" +#include "incls/_methodHandles_x86.cpp.incl" + +#define __ _masm-> + +address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm, + address interpreted_entry) { + // Just before the actual machine code entry point, allocate space + // for a MethodHandleEntry::Data record, so that we can manage everything + // from one base pointer. 
+ __ align(wordSize); + address target = __ pc() + sizeof(Data); + while (__ pc() < target) { + __ nop(); + __ align(wordSize); + } + + MethodHandleEntry* me = (MethodHandleEntry*) __ pc(); + me->set_end_address(__ pc()); // set a temporary end_address + me->set_from_interpreted_entry(interpreted_entry); + me->set_type_checking_entry(NULL); + + return (address) me; +} + +MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm, + address start_addr) { + MethodHandleEntry* me = (MethodHandleEntry*) start_addr; + assert(me->end_address() == start_addr, "valid ME"); + + // Fill in the real end_address: + __ align(wordSize); + me->set_end_address(__ pc()); + + return me; +} + +#ifdef ASSERT +static void verify_argslot(MacroAssembler* _masm, Register rax_argslot, + const char* error_message) { + // Verify that argslot lies within (rsp, rbp]. + Label L_ok, L_bad; + __ cmpptr(rax_argslot, rbp); + __ jcc(Assembler::above, L_bad); + __ cmpptr(rsp, rax_argslot); + __ jcc(Assembler::below, L_ok); + __ bind(L_bad); + __ stop(error_message); + __ bind(L_ok); +} +#endif + + +// Code generation +address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) { + // rbx: methodOop + // rcx: receiver method handle (must load from sp[MethodTypeForm.vmslots]) + // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted) + // rdx: garbage temp, blown away + + Register rbx_method = rbx; + Register rcx_recv = rcx; + Register rax_mtype = rax; + Register rdx_temp = rdx; + + // emit WrongMethodType path first, to enable jccb back-branch from main path + Label wrong_method_type; + __ bind(wrong_method_type); + __ push(rax_mtype); // required mtype + __ push(rcx_recv); // bad mh (1st stacked argument) + __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry())); + + // here's where control starts out: + __ align(CodeEntryAlignment); + address entry_point = __ pc(); + + // fetch the MethodType from the method handle 
into rax (the 'check' register) + { + Register tem = rbx_method; + for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) { + __ movptr(rax_mtype, Address(tem, *pchase)); + tem = rax_mtype; // in case there is another indirection + } + } + Register rbx_temp = rbx_method; // done with incoming methodOop + + // given the MethodType, find out where the MH argument is buried + __ movptr(rdx_temp, Address(rax_mtype, + __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rbx_temp))); + __ movl(rdx_temp, Address(rdx_temp, + __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, rbx_temp))); + __ movptr(rcx_recv, __ argument_address(rdx_temp)); + + __ check_method_handle_type(rax_mtype, rcx_recv, rdx_temp, wrong_method_type); + __ jump_to_method_handle_entry(rcx_recv, rdx_temp); + + return entry_point; +} + +// Helper to insert argument slots into the stack. +// arg_slots must be a multiple of stack_move_unit() and <= 0 +void MethodHandles::insert_arg_slots(MacroAssembler* _masm, + RegisterOrConstant arg_slots, + int arg_mask, + Register rax_argslot, + Register rbx_temp, Register rdx_temp) { + assert_different_registers(rax_argslot, rbx_temp, rdx_temp, + (!arg_slots.is_register() ? 
rsp : arg_slots.as_register())); + +#ifdef ASSERT + verify_argslot(_masm, rax_argslot, "insertion point must fall within current frame"); + if (arg_slots.is_register()) { + Label L_ok, L_bad; + __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD); + __ jcc(Assembler::greater, L_bad); + __ testl(arg_slots.as_register(), -stack_move_unit() - 1); + __ jcc(Assembler::zero, L_ok); + __ bind(L_bad); + __ stop("assert arg_slots <= 0 and clear low bits"); + __ bind(L_ok); + } else { + assert(arg_slots.as_constant() <= 0, ""); + assert(arg_slots.as_constant() % -stack_move_unit() == 0, ""); + } +#endif //ASSERT + +#ifdef _LP64 + if (arg_slots.is_register()) { + // clean high bits of stack motion register (was loaded as an int) + __ movslq(arg_slots.as_register(), arg_slots.as_register()); + } +#endif + + // Make space on the stack for the inserted argument(s). + // Then pull down everything shallower than rax_argslot. + // The stacked return address gets pulled down with everything else. + // That is, copy [rsp, argslot) downward by -size words. In pseudo-code: + // rsp -= size; + // for (rdx = rsp + size; rdx < argslot; rdx++) + // rdx[-size] = rdx[0] + // argslot -= size; + __ mov(rdx_temp, rsp); // source pointer for copy + __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr)); + { + Label loop; + __ bind(loop); + // pull one word down each time through the loop + __ movptr(rbx_temp, Address(rdx_temp, 0)); + __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp); + __ addptr(rdx_temp, wordSize); + __ cmpptr(rdx_temp, rax_argslot); + __ jcc(Assembler::less, loop); + } + + // Now move the argslot down, to point to the opened-up space. + __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr)); + + if (TaggedStackInterpreter && arg_mask != _INSERT_NO_MASK) { + // The caller has specified a bitmask of tags to put into the opened space. + // This only works when the arg_slots value is an assembly-time constant. 
+ int constant_arg_slots = arg_slots.as_constant() / stack_move_unit(); + int tag_offset = Interpreter::tag_offset_in_bytes() - Interpreter::value_offset_in_bytes(); + for (int slot = 0; slot < constant_arg_slots; slot++) { + BasicType slot_type = ((arg_mask & (1 << slot)) == 0 ? T_OBJECT : T_INT); + int slot_offset = Interpreter::stackElementSize() * slot; + Address tag_addr(rax_argslot, slot_offset + tag_offset); + __ movptr(tag_addr, frame::tag_for_basic_type(slot_type)); + } + // Note that the new argument slots are tagged properly but contain + // garbage at this point. The value portions must be initialized + // by the caller. (Especially references!) + } +} + +// Helper to remove argument slots from the stack. +// arg_slots must be a multiple of stack_move_unit() and >= 0 +void MethodHandles::remove_arg_slots(MacroAssembler* _masm, + RegisterOrConstant arg_slots, + Register rax_argslot, + Register rbx_temp, Register rdx_temp) { + assert_different_registers(rax_argslot, rbx_temp, rdx_temp, + (!arg_slots.is_register() ? rsp : arg_slots.as_register())); + +#ifdef ASSERT + { + // Verify that [argslot..argslot+size) lies within (rsp, rbp). 
+ Label L_ok, L_bad; + __ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr)); + __ cmpptr(rbx_temp, rbp); + __ jcc(Assembler::above, L_bad); + __ cmpptr(rsp, rax_argslot); + __ jcc(Assembler::below, L_ok); + __ bind(L_bad); + __ stop("deleted argument(s) must fall within current frame"); + __ bind(L_ok); + } + if (arg_slots.is_register()) { + Label L_ok, L_bad; + __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD); + __ jcc(Assembler::less, L_bad); + __ testl(arg_slots.as_register(), -stack_move_unit() - 1); + __ jcc(Assembler::zero, L_ok); + __ bind(L_bad); + __ stop("assert arg_slots >= 0 and clear low bits"); + __ bind(L_ok); + } else { + assert(arg_slots.as_constant() >= 0, ""); + assert(arg_slots.as_constant() % -stack_move_unit() == 0, ""); + } +#endif //ASSERT + +#ifdef _LP64 + if (false) { // not needed, since register is positive + // clean high bits of stack motion register (was loaded as an int) + if (arg_slots.is_register()) + __ movslq(arg_slots.as_register(), arg_slots.as_register()); + } +#endif + + // Pull up everything shallower than rax_argslot. + // Then remove the excess space on the stack. + // The stacked return address gets pulled up with everything else. + // That is, copy [rsp, argslot) upward by size words. In pseudo-code: + // for (rdx = argslot-1; rdx >= rsp; --rdx) + // rdx[size] = rdx[0] + // argslot += size; + // rsp += size; + __ lea(rdx_temp, Address(rax_argslot, -wordSize)); // source pointer for copy + { + Label loop; + __ bind(loop); + // pull one word up each time through the loop + __ movptr(rbx_temp, Address(rdx_temp, 0)); + __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp); + __ addptr(rdx_temp, -wordSize); + __ cmpptr(rdx_temp, rsp); + __ jcc(Assembler::greaterEqual, loop); + } + + // Now move the argslot up, to point to the just-copied block. + __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr)); + // And adjust the argslot address to point at the deletion point. 
+ __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr)); +} + +#ifndef PRODUCT +void trace_method_handle_stub(const char* adaptername, + oop mh, + intptr_t* entry_sp, + intptr_t* saved_sp) { + // called as a leaf from native code: do not block the JVM! + printf("MH %s "PTR_FORMAT" "PTR_FORMAT" "INTX_FORMAT"\n", adaptername, mh, entry_sp, entry_sp - saved_sp); +} +#endif //PRODUCT + +// Generate an "entry" field for a method handle. +// This determines how the method handle will respond to calls. +void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) { + // Here is the register state during an interpreted call, + // as set up by generate_method_handle_interpreter_entry(): + // - rbx: garbage temp (was MethodHandle.invoke methodOop, unused) + // - rcx: receiver method handle + // - rax: method handle type (only used by the check_mtype entry point) + // - rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted) + // - rdx: garbage temp, can blow away + + Register rcx_recv = rcx; + Register rax_argslot = rax; + Register rbx_temp = rbx; + Register rdx_temp = rdx; + + guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets"); + + // some handy addresses + Address rbx_method_fie( rbx, methodOopDesc::from_interpreted_offset() ); + + Address rcx_mh_vmtarget( rcx_recv, java_dyn_MethodHandle::vmtarget_offset_in_bytes() ); + Address rcx_dmh_vmindex( rcx_recv, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes() ); + + Address rcx_bmh_vmargslot( rcx_recv, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes() ); + Address rcx_bmh_argument( rcx_recv, sun_dyn_BoundMethodHandle::argument_offset_in_bytes() ); + + Address rcx_amh_vmargslot( rcx_recv, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes() ); + Address rcx_amh_argument( rcx_recv, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes() ); + Address rcx_amh_conversion( rcx_recv, 
sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes() ); + Address vmarg; // __ argument_address(vmargslot) + + int tag_offset = -1; + if (TaggedStackInterpreter) { + tag_offset = Interpreter::tag_offset_in_bytes() - Interpreter::value_offset_in_bytes(); + assert(tag_offset = wordSize, "stack grows as expected"); + } + + if (have_entry(ek)) { + __ nop(); // empty stubs make SG sick + return; + } + + address interp_entry = __ pc(); + if (UseCompressedOops) __ unimplemented("UseCompressedOops"); + +#ifndef PRODUCT + if (TraceMethodHandles) { + __ push(rax); __ push(rbx); __ push(rcx); __ push(rdx); __ push(rsi); __ push(rdi); + __ lea(rax, Address(rsp, wordSize*6)); // entry_sp + // arguments: + __ push(rsi); // saved_sp + __ push(rax); // entry_sp + __ push(rcx); // mh + __ push(rcx); + __ movptr(Address(rsp, 0), (intptr_t)entry_name(ek)); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), 4); + __ pop(rdi); __ pop(rsi); __ pop(rdx); __ pop(rcx); __ pop(rbx); __ pop(rax); + } +#endif //PRODUCT + + switch ((int) ek) { + case _check_mtype: + { + // this stub is special, because it requires a live mtype argument + Register rax_mtype = rax; + + // emit WrongMethodType path first, to enable jccb back-branch + Label wrong_method_type; + __ bind(wrong_method_type); + __ movptr(rdx_temp, ExternalAddress((address) &_entries[_wrong_method_type])); + __ jmp(Address(rdx_temp, MethodHandleEntry::from_interpreted_entry_offset_in_bytes())); + __ hlt(); + + interp_entry = __ pc(); + __ check_method_handle_type(rax_mtype, rcx_recv, rdx_temp, wrong_method_type); + // now rax_mtype is dead; subsequent stubs will use it as a temp + + __ jump_to_method_handle_entry(rcx_recv, rdx_temp); + } + break; + + case _wrong_method_type: + { + // this stub is special, because it requires a live mtype argument + Register rax_mtype = rax; + + interp_entry = __ pc(); + __ push(rax_mtype); // required mtype + __ push(rcx_recv); // random mh (1st stacked argument) + __ 
jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry())); + } + break; + + case _invokestatic_mh: + case _invokespecial_mh: + { + Register rbx_method = rbx_temp; + __ movptr(rbx_method, rcx_mh_vmtarget); // target is a methodOop + __ verify_oop(rbx_method); + // same as TemplateTable::invokestatic or invokespecial, + // minus the CP setup and profiling: + if (ek == _invokespecial_mh) { + // Must load & check the first argument before entering the target method. + __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp); + __ movptr(rcx_recv, __ argument_address(rax_argslot, -1)); + __ null_check(rcx_recv); + __ verify_oop(rcx_recv); + } + __ jmp(rbx_method_fie); + } + break; + + case _invokevirtual_mh: + { + // same as TemplateTable::invokevirtual, + // minus the CP setup and profiling: + + // pick out the vtable index and receiver offset from the MH, + // and then we can discard it: + __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp); + Register rbx_index = rbx_temp; + __ movl(rbx_index, rcx_dmh_vmindex); + // Note: The verifier allows us to ignore rcx_mh_vmtarget. 
+ __ movptr(rcx_recv, __ argument_address(rax_argslot, -1)); + __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes()); + + // get receiver klass + Register rax_klass = rax_argslot; + __ load_klass(rax_klass, rcx_recv); + __ verify_oop(rax_klass); + + // get target methodOop & entry point + const int base = instanceKlass::vtable_start_offset() * wordSize; + assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); + Address vtable_entry_addr(rax_klass, + rbx_index, Address::times_ptr, + base + vtableEntry::method_offset_in_bytes()); + Register rbx_method = rbx_temp; + __ movl(rbx_method, vtable_entry_addr); + + __ verify_oop(rbx_method); + __ jmp(rbx_method_fie); + } + break; + + case _invokeinterface_mh: + { + // same as TemplateTable::invokeinterface, + // minus the CP setup and profiling: + + // pick out the interface and itable index from the MH. + __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp); + Register rdx_intf = rdx_temp; + Register rbx_index = rbx_temp; + __ movptr(rdx_intf, rcx_mh_vmtarget); + __ movl(rbx_index, rcx_dmh_vmindex); + __ movptr(rcx_recv, __ argument_address(rax_argslot, -1)); + __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes()); + + // get receiver klass + Register rax_klass = rax_argslot; + __ load_klass(rax_klass, rcx_recv); + __ verify_oop(rax_klass); + + Register rcx_temp = rcx_recv; + Register rbx_method = rbx_index; + + // get interface klass + Label no_such_interface; + __ verify_oop(rdx_intf); + __ lookup_interface_method(rax_klass, rdx_intf, + // note: next two args must be the same: + rbx_index, rbx_method, + rcx_temp, + no_such_interface); + + __ verify_oop(rbx_method); + __ jmp(rbx_method_fie); + __ hlt(); + + __ bind(no_such_interface); + // Throw an exception. + // For historical reasons, it will be IncompatibleClassChangeError. 
+ __ should_not_reach_here(); // %%% FIXME NYI + } + break; + + case _bound_ref_mh: + case _bound_int_mh: + case _bound_long_mh: + case _bound_ref_direct_mh: + case _bound_int_direct_mh: + case _bound_long_direct_mh: + { + bool direct_to_method = (ek >= _bound_ref_direct_mh); + BasicType arg_type = T_ILLEGAL; + if (ek == _bound_long_mh || ek == _bound_long_direct_mh) { + arg_type = T_LONG; + } else if (ek == _bound_int_mh || ek == _bound_int_direct_mh) { + arg_type = T_INT; + } else { + assert(ek == _bound_ref_mh || ek == _bound_ref_direct_mh, "must be ref"); + arg_type = T_OBJECT; + } + int arg_slots = type2size[arg_type]; + int arg_mask = (arg_type == T_OBJECT ? _INSERT_REF_MASK : + arg_slots == 1 ? _INSERT_INT_MASK : _INSERT_LONG_MASK); + + // make room for the new argument: + __ movl(rax_argslot, rcx_bmh_vmargslot); + __ lea(rax_argslot, __ argument_address(rax_argslot)); + insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, + rax_argslot, rbx_temp, rdx_temp); + + // store bound argument into the new stack slot: + __ movptr(rbx_temp, rcx_bmh_argument); + Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type)); + if (arg_type == T_OBJECT) { + __ movptr(Address(rax_argslot, 0), rbx_temp); + } else { + __ load_sized_value(rbx_temp, prim_value_addr, + type2aelembytes(arg_type), is_signed_subword_type(arg_type)); + __ movptr(Address(rax_argslot, 0), rbx_temp); +#ifndef _LP64 + if (arg_slots == 2) { + __ movl(rbx_temp, prim_value_addr.plus_disp(wordSize)); + __ movl(Address(rax_argslot, Interpreter::stackElementSize()), rbx_temp); + } +#endif //_LP64 + break; + } + + if (direct_to_method) { + Register rbx_method = rbx_temp; + __ movptr(rbx_method, rcx_mh_vmtarget); + __ verify_oop(rbx_method); + __ jmp(rbx_method_fie); + } else { + __ movptr(rcx_recv, rcx_mh_vmtarget); + __ verify_oop(rcx_recv); + __ jump_to_method_handle_entry(rcx_recv, rdx_temp); + } + } + break; + + case _adapter_retype_only: + // immediately jump 
to the next MH layer: + __ movptr(rcx_recv, rcx_mh_vmtarget); + __ verify_oop(rcx_recv); + __ jump_to_method_handle_entry(rcx_recv, rdx_temp); + // This is OK when all parameter types widen. + // It is also OK when a return type narrows. + break; + + case _adapter_check_cast: + { + // temps: + Register rbx_klass = rbx_temp; // interesting AMH data + + // check a reference argument before jumping to the next layer of MH: + __ movl(rax_argslot, rcx_amh_vmargslot); + vmarg = __ argument_address(rax_argslot); + + // What class are we casting to? + __ movptr(rbx_klass, rcx_amh_argument); // this is a Class object! + __ movptr(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes())); + + // get the new MH: + __ movptr(rcx_recv, rcx_mh_vmtarget); + // (now we are done with the old MH) + + Label done; + __ movptr(rdx_temp, vmarg); + __ testl(rdx_temp, rdx_temp); + __ jcc(Assembler::zero, done); // no cast if null + __ load_klass(rdx_temp, rdx_temp); + + // live at this point: + // - rbx_klass: klass required by the target method + // - rdx_temp: argument klass to test + // - rcx_recv: method handle to invoke (after cast succeeds) + __ check_klass_subtype(rdx_temp, rbx_klass, rax_argslot, done); + + // If we get here, the type check failed! + // Call the wrong_method_type stub, passing the failing argument type in rax. 
+ Register rax_mtype = rax_argslot; + __ push(rbx_klass); // missed klass (required type) + __ push(rdx_temp); // bad actual type (1st stacked argument) + __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry())); + + __ bind(done); + __ jump_to_method_handle_entry(rcx_recv, rdx_temp); + } + break; + + case _adapter_prim_to_prim: + case _adapter_ref_to_prim: + // handled completely by optimized cases + __ stop("init_AdapterMethodHandle should not issue this"); + break; + + case _adapter_opt_i2i: // optimized subcase of adapt_prim_to_prim +//case _adapter_opt_f2i: // optimized subcase of adapt_prim_to_prim + case _adapter_opt_l2i: // optimized subcase of adapt_prim_to_prim + case _adapter_opt_unboxi: // optimized subcase of adapt_ref_to_prim + { + // perform an in-place conversion to int or an int subword + __ movl(rax_argslot, rcx_amh_vmargslot); + vmarg = __ argument_address(rax_argslot); + + switch (ek) { + case _adapter_opt_i2i: + __ movl(rdx_temp, vmarg); + break; + case _adapter_opt_l2i: + { + // just delete the extra slot; on a little-endian machine we keep the first + __ lea(rax_argslot, __ argument_address(rax_argslot, 1)); + remove_arg_slots(_masm, -stack_move_unit(), + rax_argslot, rbx_temp, rdx_temp); + vmarg = Address(rax_argslot, -Interpreter::stackElementSize()); + __ movl(rdx_temp, vmarg); + } + break; + case _adapter_opt_unboxi: + { + // Load the value up from the heap. + __ movptr(rdx_temp, vmarg); + int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT); +#ifdef ASSERT + for (int bt = T_BOOLEAN; bt < T_INT; bt++) { + if (is_subword_type(BasicType(bt))) + assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), ""); + } +#endif + __ null_check(rdx_temp, value_offset); + __ movl(rdx_temp, Address(rdx_temp, value_offset)); + // We load this as a word. Because we are little-endian, + // the low bits will be correct, but the high bits may need cleaning. 
+ // The vminfo will guide us to clean those bits. + } + break; + default: + assert(false, ""); + } + goto finish_int_conversion; + } + + finish_int_conversion: + { + Register rbx_vminfo = rbx_temp; + __ movl(rbx_vminfo, rcx_amh_conversion); + assert(CONV_VMINFO_SHIFT == 0, "preshifted"); + + // get the new MH: + __ movptr(rcx_recv, rcx_mh_vmtarget); + // (now we are done with the old MH) + + // original 32-bit vmdata word must be of this form: + // | MBZ:16 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 | + __ xchgl(rcx, rbx_vminfo); // free rcx for shifts + __ shll(rdx_temp /*, rcx*/); + Label zero_extend, done; + __ testl(rcx, CONV_VMINFO_SIGN_FLAG); + __ jcc(Assembler::zero, zero_extend); + + // this path is taken for int->byte, int->short + __ sarl(rdx_temp /*, rcx*/); + __ jmp(done); + + __ bind(zero_extend); + // this is taken for int->char + __ shrl(rdx_temp /*, rcx*/); + + __ bind(done); + __ movptr(vmarg, rdx_temp); + __ xchgl(rcx, rbx_vminfo); // restore rcx_recv + + __ jump_to_method_handle_entry(rcx_recv, rdx_temp); + } + break; + + case _adapter_opt_i2l: // optimized subcase of adapt_prim_to_prim + case _adapter_opt_unboxl: // optimized subcase of adapt_ref_to_prim + { + // perform an in-place int-to-long or ref-to-long conversion + __ movl(rax_argslot, rcx_amh_vmargslot); + + // on a little-endian machine we keep the first slot and add another after + __ lea(rax_argslot, __ argument_address(rax_argslot, 1)); + insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, + rax_argslot, rbx_temp, rdx_temp); + Address vmarg1(rax_argslot, -Interpreter::stackElementSize()); + Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize()); + + switch (ek) { + case _adapter_opt_i2l: + { + __ movl(rdx_temp, vmarg1); + __ sarl(rdx_temp, 31); // __ extend_sign() + __ movl(vmarg2, rdx_temp); // store second word + } + break; + case _adapter_opt_unboxl: + { + // Load the value up from the heap. 
+ __ movptr(rdx_temp, vmarg1); + int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG); + assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), ""); + __ null_check(rdx_temp, value_offset); + __ movl(rbx_temp, Address(rdx_temp, value_offset + 0*BytesPerInt)); + __ movl(rdx_temp, Address(rdx_temp, value_offset + 1*BytesPerInt)); + __ movl(vmarg1, rbx_temp); + __ movl(vmarg2, rdx_temp); + } + break; + default: + assert(false, ""); + } + + __ movptr(rcx_recv, rcx_mh_vmtarget); + __ jump_to_method_handle_entry(rcx_recv, rdx_temp); + } + break; + + case _adapter_opt_f2d: // optimized subcase of adapt_prim_to_prim + case _adapter_opt_d2f: // optimized subcase of adapt_prim_to_prim + { + // perform an in-place floating primitive conversion + __ movl(rax_argslot, rcx_amh_vmargslot); + __ lea(rax_argslot, __ argument_address(rax_argslot, 1)); + if (ek == _adapter_opt_f2d) { + insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, + rax_argslot, rbx_temp, rdx_temp); + } + Address vmarg(rax_argslot, -Interpreter::stackElementSize()); + +#ifdef _LP64 + if (ek == _adapter_opt_f2d) { + __ movflt(xmm0, vmarg); + __ cvtss2sd(xmm0, xmm0); + __ movdbl(vmarg, xmm0); + } else { + __ movdbl(xmm0, vmarg); + __ cvtsd2ss(xmm0, xmm0); + __ movflt(vmarg, xmm0); + } +#else //_LP64 + if (ek == _adapter_opt_f2d) { + __ fld_s(vmarg); // load float to ST0 + __ fstp_s(vmarg); // store single + } else if (!TaggedStackInterpreter) { + __ fld_d(vmarg); // load double to ST0 + __ fstp_s(vmarg); // store single + } else { + Address vmarg_tag = vmarg.plus_disp(tag_offset); + Address vmarg2 = vmarg.plus_disp(Interpreter::stackElementSize()); + // vmarg2_tag does not participate in this code + Register rbx_tag = rbx_temp; + __ movl(rbx_tag, vmarg_tag); // preserve tag + __ movl(rdx_temp, vmarg2); // get second word of double + __ movl(vmarg_tag, rdx_temp); // align with first word + __ fld_d(vmarg); // load double to ST0 + __ movl(vmarg_tag, rbx_tag); 
// restore tag + __ fstp_s(vmarg); // store single + } +#endif //_LP64 + + if (ek == _adapter_opt_d2f) { + remove_arg_slots(_masm, -stack_move_unit(), + rax_argslot, rbx_temp, rdx_temp); + } + + __ movptr(rcx_recv, rcx_mh_vmtarget); + __ jump_to_method_handle_entry(rcx_recv, rdx_temp); + } + break; + + case _adapter_prim_to_ref: + __ unimplemented(entry_name(ek)); // %%% FIXME: NYI + break; + + case _adapter_swap_args: + case _adapter_rot_args: + // handled completely by optimized cases + __ stop("init_AdapterMethodHandle should not issue this"); + break; + + case _adapter_opt_swap_1: + case _adapter_opt_swap_2: + case _adapter_opt_rot_1_up: + case _adapter_opt_rot_1_down: + case _adapter_opt_rot_2_up: + case _adapter_opt_rot_2_down: + { + int rotate = 0, swap_slots = 0; + switch ((int)ek) { + case _adapter_opt_swap_1: swap_slots = 1; break; + case _adapter_opt_swap_2: swap_slots = 2; break; + case _adapter_opt_rot_1_up: swap_slots = 1; rotate++; break; + case _adapter_opt_rot_1_down: swap_slots = 1; rotate--; break; + case _adapter_opt_rot_2_up: swap_slots = 2; rotate++; break; + case _adapter_opt_rot_2_down: swap_slots = 2; rotate--; break; + default: assert(false, ""); + } + + // the real size of the move must be doubled if TaggedStackInterpreter: + int swap_bytes = (int)( swap_slots * Interpreter::stackElementWords() * wordSize ); + + // 'argslot' is the position of the first argument to swap + __ movl(rax_argslot, rcx_amh_vmargslot); + __ lea(rax_argslot, __ argument_address(rax_argslot)); + + // 'vminfo' is the second + Register rbx_destslot = rbx_temp; + __ movl(rbx_destslot, rcx_amh_conversion); + assert(CONV_VMINFO_SHIFT == 0, "preshifted"); + __ andl(rbx_destslot, CONV_VMINFO_MASK); + __ lea(rbx_destslot, __ argument_address(rbx_destslot)); + DEBUG_ONLY(verify_argslot(_masm, rbx_destslot, "swap point must fall within current frame")); + + if (!rotate) { + for (int i = 0; i < swap_bytes; i += wordSize) { + __ movptr(rdx_temp, Address(rax_argslot , i)); + 
__ push(rdx_temp); + __ movptr(rdx_temp, Address(rbx_destslot, i)); + __ movptr(Address(rax_argslot, i), rdx_temp); + __ pop(rdx_temp); + __ movptr(Address(rbx_destslot, i), rdx_temp); + } + } else { + // push the first chunk, which is going to get overwritten + for (int i = swap_bytes; (i -= wordSize) >= 0; ) { + __ movptr(rdx_temp, Address(rax_argslot, i)); + __ push(rdx_temp); + } + + if (rotate > 0) { + // rotate upward + __ subptr(rax_argslot, swap_bytes); +#ifdef ASSERT + { + // Verify that argslot > destslot, by at least swap_bytes. + Label L_ok; + __ cmpptr(rax_argslot, rbx_destslot); + __ jcc(Assembler::aboveEqual, L_ok); + __ stop("source must be above destination (upward rotation)"); + __ bind(L_ok); + } +#endif + // work argslot down to destslot, copying contiguous data upwards + // pseudo-code: + // rax = src_addr - swap_bytes + // rbx = dest_addr + // while (rax >= rbx) *(rax + swap_bytes) = *(rax + 0), rax--; + Label loop; + __ bind(loop); + __ movptr(rdx_temp, Address(rax_argslot, 0)); + __ movptr(Address(rax_argslot, swap_bytes), rdx_temp); + __ addptr(rax_argslot, -wordSize); + __ cmpptr(rax_argslot, rbx_destslot); + __ jcc(Assembler::aboveEqual, loop); + } else { + __ addptr(rax_argslot, swap_bytes); +#ifdef ASSERT + { + // Verify that argslot < destslot, by at least swap_bytes. 
+ Label L_ok; + __ cmpptr(rax_argslot, rbx_destslot); + __ jcc(Assembler::belowEqual, L_ok); + __ stop("source must be below destination (downward rotation)"); + __ bind(L_ok); + } +#endif + // work argslot up to destslot, copying contiguous data downwards + // pseudo-code: + // rax = src_addr + swap_bytes + // rbx = dest_addr + // while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++; + Label loop; + __ bind(loop); + __ movptr(rdx_temp, Address(rax_argslot, 0)); + __ movptr(Address(rax_argslot, -swap_bytes), rdx_temp); + __ addptr(rax_argslot, wordSize); + __ cmpptr(rax_argslot, rbx_destslot); + __ jcc(Assembler::belowEqual, loop); + } + + // pop the original first chunk into the destination slot, now free + for (int i = 0; i < swap_bytes; i += wordSize) { + __ pop(rdx_temp); + __ movptr(Address(rbx_destslot, i), rdx_temp); + } + } + + __ movptr(rcx_recv, rcx_mh_vmtarget); + __ jump_to_method_handle_entry(rcx_recv, rdx_temp); + } + break; + + case _adapter_dup_args: + { + // 'argslot' is the position of the first argument to duplicate + __ movl(rax_argslot, rcx_amh_vmargslot); + __ lea(rax_argslot, __ argument_address(rax_argslot)); + + // 'stack_move' is negative number of words to duplicate + Register rdx_stack_move = rdx_temp; + __ movl(rdx_stack_move, rcx_amh_conversion); + __ sarl(rdx_stack_move, CONV_STACK_MOVE_SHIFT); + + int argslot0_num = 0; + Address argslot0 = __ argument_address(RegisterOrConstant(argslot0_num)); + assert(argslot0.base() == rsp, ""); + int pre_arg_size = argslot0.disp(); + assert(pre_arg_size % wordSize == 0, ""); + assert(pre_arg_size > 0, "must include PC"); + + // remember the old rsp+1 (argslot[0]) + Register rbx_oldarg = rbx_temp; + __ lea(rbx_oldarg, argslot0); + + // move rsp down to make room for dups + __ lea(rsp, Address(rsp, rdx_stack_move, Address::times_ptr)); + + // compute the new rsp+1 (argslot[0]) + Register rdx_newarg = rdx_temp; + __ lea(rdx_newarg, argslot0); + + __ push(rdi); // need a temp + // (preceding 
push must be done after arg addresses are taken!) + + // pull down the pre_arg_size data (PC) + for (int i = -pre_arg_size; i < 0; i += wordSize) { + __ movptr(rdi, Address(rbx_oldarg, i)); + __ movptr(Address(rdx_newarg, i), rdi); + } + + // copy from rax_argslot[0...] down to new_rsp[1...] + // pseudo-code: + // rbx = old_rsp+1 + // rdx = new_rsp+1 + // rax = argslot + // while (rdx < rbx) *rdx++ = *rax++ + Label loop; + __ bind(loop); + __ movptr(rdi, Address(rax_argslot, 0)); + __ movptr(Address(rdx_newarg, 0), rdi); + __ addptr(rax_argslot, wordSize); + __ addptr(rdx_newarg, wordSize); + __ cmpptr(rdx_newarg, rbx_oldarg); + __ jcc(Assembler::less, loop); + + __ pop(rdi); // restore temp + + __ movptr(rcx_recv, rcx_mh_vmtarget); + __ jump_to_method_handle_entry(rcx_recv, rdx_temp); + } + break; + + case _adapter_drop_args: + { + // 'argslot' is the position of the first argument to nuke + __ movl(rax_argslot, rcx_amh_vmargslot); + __ lea(rax_argslot, __ argument_address(rax_argslot)); + + __ push(rdi); // need a temp + // (must do previous push after argslot address is taken) + + // 'stack_move' is number of words to drop + Register rdi_stack_move = rdi; + __ movl(rdi_stack_move, rcx_amh_conversion); + __ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT); + remove_arg_slots(_masm, rdi_stack_move, + rax_argslot, rbx_temp, rdx_temp); + + __ pop(rdi); // restore temp + + __ movptr(rcx_recv, rcx_mh_vmtarget); + __ jump_to_method_handle_entry(rcx_recv, rdx_temp); + } + break; + + case _adapter_collect_args: + __ unimplemented(entry_name(ek)); // %%% FIXME: NYI + break; + + case _adapter_spread_args: + // handled completely by optimized cases + __ stop("init_AdapterMethodHandle should not issue this"); + break; + + case _adapter_opt_spread_0: + case _adapter_opt_spread_1: + case _adapter_opt_spread_more: + { + // spread an array out into a group of arguments + int length_constant = -1; + switch (ek) { + case _adapter_opt_spread_0: length_constant = 0; break; + case 
_adapter_opt_spread_1: length_constant = 1; break; + } + + // find the address of the array argument + __ movl(rax_argslot, rcx_amh_vmargslot); + __ lea(rax_argslot, __ argument_address(rax_argslot)); + + // grab some temps + { __ push(rsi); __ push(rdi); } + // (preceding pushes must be done after argslot address is taken!) +#define UNPUSH_RSI_RDI \ + { __ pop(rdi); __ pop(rsi); } + + // arx_argslot points both to the array and to the first output arg + vmarg = Address(rax_argslot, 0); + + // Get the array value. + Register rsi_array = rsi; + Register rdx_array_klass = rdx_temp; + BasicType elem_type = T_OBJECT; + int length_offset = arrayOopDesc::length_offset_in_bytes(); + int elem0_offset = arrayOopDesc::base_offset_in_bytes(elem_type); + __ movptr(rsi_array, vmarg); + Label skip_array_check; + if (length_constant == 0) { + __ testptr(rsi_array, rsi_array); + __ jcc(Assembler::zero, skip_array_check); + } + __ null_check(rsi_array, oopDesc::klass_offset_in_bytes()); + __ load_klass(rdx_array_klass, rsi_array); + + // Check the array type. + Register rbx_klass = rbx_temp; + __ movptr(rbx_klass, rcx_amh_argument); // this is a Class object! + __ movptr(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes())); + + Label ok_array_klass, bad_array_klass, bad_array_length; + __ check_klass_subtype(rdx_array_klass, rbx_klass, rdi, ok_array_klass); + // If we get here, the type check failed! + __ jmp(bad_array_klass); + __ bind(ok_array_klass); + + // Check length. + if (length_constant >= 0) { + __ cmpl(Address(rsi_array, length_offset), length_constant); + } else { + Register rbx_vminfo = rbx_temp; + __ movl(rbx_vminfo, rcx_amh_conversion); + assert(CONV_VMINFO_SHIFT == 0, "preshifted"); + __ andl(rbx_vminfo, CONV_VMINFO_MASK); + __ cmpl(rbx_vminfo, Address(rsi_array, length_offset)); + } + __ jcc(Assembler::notEqual, bad_array_length); + + Register rdx_argslot_limit = rdx_temp; + + // Array length checks out. Now insert any required stack slots. 
+ if (length_constant == -1) { + // Form a pointer to the end of the affected region. + __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize())); + // 'stack_move' is negative number of words to insert + Register rdi_stack_move = rdi; + __ movl(rdi_stack_move, rcx_amh_conversion); + __ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT); + Register rsi_temp = rsi_array; // spill this + insert_arg_slots(_masm, rdi_stack_move, -1, + rax_argslot, rbx_temp, rsi_temp); + // reload the array (since rsi was killed) + __ movptr(rsi_array, vmarg); + } else if (length_constant > 1) { + int arg_mask = 0; + int new_slots = (length_constant - 1); + for (int i = 0; i < new_slots; i++) { + arg_mask <<= 1; + arg_mask |= _INSERT_REF_MASK; + } + insert_arg_slots(_masm, new_slots * stack_move_unit(), arg_mask, + rax_argslot, rbx_temp, rdx_temp); + } else if (length_constant == 1) { + // no stack resizing required + } else if (length_constant == 0) { + remove_arg_slots(_masm, -stack_move_unit(), + rax_argslot, rbx_temp, rdx_temp); + } + + // Copy from the array to the new slots. + // Note: Stack change code preserves integrity of rax_argslot pointer. + // So even after slot insertions, rax_argslot still points to first argument. + if (length_constant == -1) { + // [rax_argslot, rdx_argslot_limit) is the area we are inserting into. 
+ Register rsi_source = rsi_array; + __ lea(rsi_source, Address(rsi_array, elem0_offset)); + Label loop; + __ bind(loop); + __ movptr(rbx_temp, Address(rsi_source, 0)); + __ movptr(Address(rax_argslot, 0), rbx_temp); + __ addptr(rsi_source, type2aelembytes(elem_type)); + if (TaggedStackInterpreter) { + __ movptr(Address(rax_argslot, tag_offset), + frame::tag_for_basic_type(elem_type)); + } + __ addptr(rax_argslot, Interpreter::stackElementSize()); + __ cmpptr(rax_argslot, rdx_argslot_limit); + __ jcc(Assembler::less, loop); + } else if (length_constant == 0) { + __ bind(skip_array_check); + // nothing to copy + } else { + int elem_offset = elem0_offset; + int slot_offset = 0; + for (int index = 0; index < length_constant; index++) { + __ movptr(rbx_temp, Address(rsi_array, elem_offset)); + __ movptr(Address(rax_argslot, slot_offset), rbx_temp); + elem_offset += type2aelembytes(elem_type); + if (TaggedStackInterpreter) { + __ movptr(Address(rax_argslot, slot_offset + tag_offset), + frame::tag_for_basic_type(elem_type)); + } + slot_offset += Interpreter::stackElementSize(); + } + } + + // Arguments are spread. Move to next method handle. 
+ UNPUSH_RSI_RDI; + __ movptr(rcx_recv, rcx_mh_vmtarget); + __ jump_to_method_handle_entry(rcx_recv, rdx_temp); + + __ bind(bad_array_klass); + UNPUSH_RSI_RDI; + __ stop("bad array klass NYI"); + + __ bind(bad_array_length); + UNPUSH_RSI_RDI; + __ stop("bad array length NYI"); + +#undef UNPUSH_RSI_RDI + } + break; + + case _adapter_flyby: + case _adapter_ricochet: + __ unimplemented(entry_name(ek)); // %%% FIXME: NYI + break; + + default: ShouldNotReachHere(); + } + __ hlt(); + + address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry); + __ unimplemented(entry_name(ek)); // %%% FIXME: NYI + + init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie)); +} diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/x86/vm/stubGenerator_x86_32.cpp --- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp Thu May 07 10:30:17 2009 -0700 @@ -2219,6 +2219,16 @@ // arraycopy stubs used by compilers generate_arraycopy_stubs(); + + // generic method handle stubs + if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) { + for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST; + ek < MethodHandles::_EK_LIMIT; + ek = MethodHandles::EntryKind(1 + (int)ek)) { + StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek)); + MethodHandles::generate_method_handle_stub(_masm, ek); + } + } } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/x86/vm/templateInterpreter_x86_32.cpp --- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Thu May 07 10:30:17 2009 -0700 @@ -92,6 +92,33 @@ return entry; } +// Arguments are: required type at TOS+8, failing object (or NULL) at TOS+4. 
+// pc at TOS (just for debugging) +address TemplateInterpreterGenerator::generate_WrongMethodType_handler() { + address entry = __ pc(); + + __ pop(rbx); // actual failing object is at TOS + __ pop(rax); // required type is at TOS+4 + + __ verify_oop(rbx); + __ verify_oop(rax); + + // Various method handle types use interpreter registers as temps. + __ restore_bcp(); + __ restore_locals(); + + // Expression stack must be empty before entering the VM for an exception. + __ empty_expression_stack(); + __ empty_FPU_stack(); + __ call_VM(noreg, + CAST_FROM_FN_PTR(address, + InterpreterRuntime::throw_WrongMethodTypeException), + // pass required type, failing object (or NULL) + rax, rbx); + return entry; +} + + address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) { assert(!pass_oop || message == NULL, "either oop or message but not both"); address entry = __ pc(); @@ -129,13 +156,22 @@ } -address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) { +address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, bool unbox) { + TosState incoming_state = state; + if (EnableInvokeDynamic) { + if (unbox) { + incoming_state = atos; + } + } else { + assert(!unbox, "old behavior"); + } + Label interpreter_entry; address compiled_entry = __ pc(); #ifdef COMPILER2 // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases - if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) { + if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) { for (int i = 1; i < 8; i++) { __ ffree(i); } @@ -143,7 +179,7 @@ __ empty_FPU_stack(); } #endif - if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) { + if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) { __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled"); } else { __ 
MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled"); @@ -159,12 +195,12 @@ // In SSE mode, interpreter returns FP results in xmm0 but they need // to end up back on the FPU so it can operate on them. - if (state == ftos && UseSSE >= 1) { + if (incoming_state == ftos && UseSSE >= 1) { __ subptr(rsp, wordSize); __ movflt(Address(rsp, 0), xmm0); __ fld_s(Address(rsp, 0)); __ addptr(rsp, wordSize); - } else if (state == dtos && UseSSE >= 2) { + } else if (incoming_state == dtos && UseSSE >= 2) { __ subptr(rsp, 2*wordSize); __ movdbl(Address(rsp, 0), xmm0); __ fld_d(Address(rsp, 0)); @@ -180,13 +216,102 @@ __ restore_bcp(); __ restore_locals(); - __ get_cache_and_index_at_bcp(rbx, rcx, 1); + + Label L_fail; + + if (unbox && state != atos) { + // cast and unbox + BasicType type = as_BasicType(state); + if (type == T_BYTE) type = T_BOOLEAN; // FIXME + KlassHandle boxk = SystemDictionaryHandles::box_klass(type); + __ mov32(rbx, ExternalAddress((address) boxk.raw_value())); + __ testl(rax, rax); + Label L_got_value, L_get_value; + // convert nulls to zeroes (avoid NPEs here) + if (!(type == T_FLOAT || type == T_DOUBLE)) { + // if rax already contains zero bits, forge ahead + __ jcc(Assembler::zero, L_got_value); + } else { + __ jcc(Assembler::notZero, L_get_value); + __ fldz(); + __ jmp(L_got_value); + } + __ bind(L_get_value); + __ cmp32(rbx, Address(rax, oopDesc::klass_offset_in_bytes())); + __ jcc(Assembler::notEqual, L_fail); + int offset = java_lang_boxing_object::value_offset_in_bytes(type); + // Cf. 
TemplateTable::getfield_or_static + switch (type) { + case T_BYTE: // fall through: + case T_BOOLEAN: __ load_signed_byte(rax, Address(rax, offset)); break; + case T_CHAR: __ load_unsigned_short(rax, Address(rax, offset)); break; + case T_SHORT: __ load_signed_short(rax, Address(rax, offset)); break; + case T_INT: __ movl(rax, Address(rax, offset)); break; + case T_FLOAT: __ fld_s(Address(rax, offset)); break; + case T_DOUBLE: __ fld_d(Address(rax, offset)); break; + // Access to java.lang.Double.value does not need to be atomic: + case T_LONG: { __ movl(rdx, Address(rax, offset + 4)); + __ movl(rax, Address(rax, offset + 0)); } break; + default: ShouldNotReachHere(); + } + __ bind(L_got_value); + } + + Label L_got_cache, L_giant_index; + if (EnableInvokeDynamic) { + __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic); + __ jcc(Assembler::equal, L_giant_index); + } + __ get_cache_and_index_at_bcp(rbx, rcx, 1, false); + __ bind(L_got_cache); + if (unbox && state == atos) { + // insert a casting conversion, to keep verifier sane + Label L_ok, L_ok_pops; + __ testl(rax, rax); + __ jcc(Assembler::zero, L_ok); + __ push(rax); // save the object to check + __ push(rbx); // save CP cache reference + __ movl(rdx, Address(rax, oopDesc::klass_offset_in_bytes())); + __ movl(rbx, Address(rbx, rcx, + Address::times_4, constantPoolCacheOopDesc::base_offset() + + ConstantPoolCacheEntry::f1_offset())); + __ movl(rbx, Address(rbx, __ delayed_value(sun_dyn_CallSiteImpl::type_offset_in_bytes, rcx))); + __ movl(rbx, Address(rbx, __ delayed_value(java_dyn_MethodType::rtype_offset_in_bytes, rcx))); + __ movl(rax, Address(rbx, __ delayed_value(java_lang_Class::klass_offset_in_bytes, rcx))); + __ check_klass_subtype(rdx, rax, rbx, L_ok_pops); + __ pop(rcx); // pop and discard CP cache + __ mov(rbx, rax); // target supertype into rbx for L_fail + __ pop(rax); // failed object into rax for L_fail + __ jmp(L_fail); + + __ bind(L_ok_pops); + // restore pushed temp regs: + __ pop(rbx); + __ 
pop(rax); + __ bind(L_ok); + } __ movl(rbx, Address(rbx, rcx, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset())); __ andptr(rbx, 0xFF); __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); __ dispatch_next(state, step); + + // out of the main line of code... + if (EnableInvokeDynamic) { + __ bind(L_giant_index); + __ get_cache_and_index_at_bcp(rbx, rcx, 1, true); + __ jmp(L_got_cache); + + if (unbox) { + __ bind(L_fail); + __ push(rbx); // missed klass (required) + __ push(rax); // bad object (actual) + __ movptr(rdx, ExternalAddress((address) &Interpreter::_throw_WrongMethodType_entry)); + __ call(rdx); + } + } + return entry; } @@ -1370,6 +1495,7 @@ case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break; case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break; case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break; + case Interpreter::method_handle : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break; case Interpreter::java_lang_math_sin : // fall thru case Interpreter::java_lang_math_cos : // fall thru @@ -1400,7 +1526,8 @@ // be sure to change this if you add/subtract anything to/from the overhead area const int overhead_size = -frame::interpreter_frame_initial_sp_offset; - const int method_stack = (method->max_locals() + method->max_stack()) * + const int extra_stack = methodOopDesc::extra_stack_entries(); + const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) * Interpreter::stackElementWords(); return overhead_size + method_stack + stub_code; } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/x86/vm/templateInterpreter_x86_64.cpp --- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Thu May 07 10:30:17 2009 
-0700 @@ -1,5 +1,5 @@ /* - * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -100,6 +100,26 @@ return entry; } +// Arguments are: required type in rarg1, failing object (or NULL) in rarg2 +address TemplateInterpreterGenerator::generate_WrongMethodType_handler() { + address entry = __ pc(); + + __ pop(c_rarg2); // failing object is at TOS + __ pop(c_rarg1); // required type is at TOS+8 + + // expression stack must be empty before entering the VM if an + // exception happened + __ empty_expression_stack(); + + __ call_VM(noreg, + CAST_FROM_FN_PTR(address, + InterpreterRuntime:: + throw_WrongMethodTypeException), + // pass required type, failing object (or NULL) + c_rarg1, c_rarg2); + return entry; +} + address TemplateInterpreterGenerator::generate_exception_handler_common( const char* name, const char* message, bool pass_oop) { assert(!pass_oop || message == NULL, "either oop or message but not both"); @@ -146,7 +166,8 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, - int step) { + int step, bool unbox) { + assert(!unbox, "NYI");//6815692// // amd64 doesn't need to do anything special about compiled returns // to the interpreter so the code that exists on x86 to place a sentinel @@ -1393,12 +1414,14 @@ case Interpreter::empty : entry_point = ((InterpreterGenerator*) this)->generate_empty_entry(); break; case Interpreter::accessor : entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry(); break; case Interpreter::abstract : entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry(); break; - case Interpreter::java_lang_math_sin : break; - case Interpreter::java_lang_math_cos : break; - case Interpreter::java_lang_math_tan : break; - case Interpreter::java_lang_math_abs : break; - case 
Interpreter::java_lang_math_log : break; - case Interpreter::java_lang_math_log10 : break; + case Interpreter::method_handle : entry_point = ((InterpreterGenerator*) this)->generate_method_handle_entry();break; + + case Interpreter::java_lang_math_sin : // fall thru + case Interpreter::java_lang_math_cos : // fall thru + case Interpreter::java_lang_math_tan : // fall thru + case Interpreter::java_lang_math_abs : // fall thru + case Interpreter::java_lang_math_log : // fall thru + case Interpreter::java_lang_math_log10 : // fall thru case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind); break; default : ShouldNotReachHere(); break; } @@ -1422,7 +1445,8 @@ -(frame::interpreter_frame_initial_sp_offset) + entry_size; const int stub_code = frame::entry_frame_after_call_words; - const int method_stack = (method->max_locals() + method->max_stack()) * + const int extra_stack = methodOopDesc::extra_stack_entries(); + const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) * Interpreter::stackElementWords(); return (overhead_size + method_stack + stub_code); } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/x86/vm/templateTable_x86_32.cpp --- a/src/cpu/x86/vm/templateTable_x86_32.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/x86/vm/templateTable_x86_32.cpp Thu May 07 10:30:17 2009 -0700 @@ -206,12 +206,12 @@ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, rsi, bc); #ifndef ASSERT __ jmpb(patch_done); +#else + __ jmp(patch_done); +#endif __ bind(fast_patch); } -#else - __ jmp(patch_done); - __ bind(fast_patch); - } +#ifdef ASSERT Label okay; __ load_unsigned_byte(scratch, at_bcp(0)); __ cmpl(scratch, (int)Bytecodes::java_code(bytecode)); @@ -2105,6 +2105,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) { assert(byte_no == 1 || byte_no == 2, "byte_no out of range"); + bool is_invokedynamic = 
(bytecode() == Bytecodes::_invokedynamic); Register temp = rbx; @@ -2112,16 +2113,19 @@ const int shift_count = (1 + byte_no)*BitsPerByte; Label resolved; - __ get_cache_and_index_at_bcp(Rcache, index, 1); - __ movl(temp, Address(Rcache, - index, - Address::times_ptr, - constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset())); - __ shrl(temp, shift_count); - // have we resolved this bytecode? - __ andptr(temp, 0xFF); - __ cmpl(temp, (int)bytecode()); - __ jcc(Assembler::equal, resolved); + __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic); + if (is_invokedynamic) { + // we are resolved if the f1 field contains a non-null CallSite object + __ cmpptr(Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()), (int32_t) NULL_WORD); + __ jcc(Assembler::notEqual, resolved); + } else { + __ movl(temp, Address(Rcache, index, Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset())); + __ shrl(temp, shift_count); + // have we resolved this bytecode? 
+ __ andl(temp, 0xFF); + __ cmpl(temp, (int)bytecode()); + __ jcc(Assembler::equal, resolved); + } // resolve first time through address entry; @@ -2134,12 +2138,13 @@ case Bytecodes::_invokespecial : // fall through case Bytecodes::_invokestatic : // fall through case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; + case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break; default : ShouldNotReachHere(); break; } __ movl(temp, (int)bytecode()); __ call_VM(noreg, entry, temp); // Update registers with resolved info - __ get_cache_and_index_at_bcp(Rcache, index, 1); + __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic); __ bind(resolved); } @@ -2884,12 +2889,17 @@ } -void TemplateTable::prepare_invoke(Register method, Register index, int byte_no, Bytecodes::Code code) { +void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) { + bool is_invdyn_bootstrap = (byte_no < 0); + if (is_invdyn_bootstrap) byte_no = -byte_no; + // determine flags + Bytecodes::Code code = bytecode(); const bool is_invokeinterface = code == Bytecodes::_invokeinterface; + const bool is_invokedynamic = code == Bytecodes::_invokedynamic; const bool is_invokevirtual = code == Bytecodes::_invokevirtual; const bool is_invokespecial = code == Bytecodes::_invokespecial; - const bool load_receiver = code != Bytecodes::_invokestatic; + const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic); const bool receiver_null_check = is_invokespecial; const bool save_flags = is_invokeinterface || is_invokevirtual; // setup registers & access constant pool cache @@ -2897,6 +2907,8 @@ const Register flags = rdx; assert_different_registers(method, index, recv, flags); + assert(!is_invdyn_bootstrap || is_invokedynamic, "byte_no<0 hack only for invdyn"); + // save 'interpreter return address' __ save_bcp(); @@ -2907,8 
+2919,13 @@ __ movl(recv, flags); __ andl(recv, 0xFF); // recv count is 0 based? - __ movptr(recv, Address(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1))); - __ verify_oop(recv); + Address recv_addr(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1)); + if (is_invokedynamic) { + __ lea(recv, recv_addr); + } else { + __ movptr(recv, recv_addr); + __ verify_oop(recv); + } } // do null check if needed @@ -2926,8 +2943,14 @@ ConstantPoolCacheEntry::verify_tosBits(); // load return address { - ExternalAddress table(is_invokeinterface ? (address)Interpreter::return_5_addrs_by_index_table() : - (address)Interpreter::return_3_addrs_by_index_table()); + address table_addr; + if (is_invdyn_bootstrap) + table_addr = (address)Interpreter::return_5_unbox_addrs_by_index_table(); + else if (is_invokeinterface || is_invokedynamic) + table_addr = (address)Interpreter::return_5_addrs_by_index_table(); + else + table_addr = (address)Interpreter::return_3_addrs_by_index_table(); + ExternalAddress table(table_addr); __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr))); } @@ -2990,7 +3013,7 @@ void TemplateTable::invokevirtual(int byte_no) { transition(vtos, vtos); - prepare_invoke(rbx, noreg, byte_no, bytecode()); + prepare_invoke(rbx, noreg, byte_no); // rbx,: index // rcx: receiver @@ -3002,7 +3025,7 @@ void TemplateTable::invokespecial(int byte_no) { transition(vtos, vtos); - prepare_invoke(rbx, noreg, byte_no, bytecode()); + prepare_invoke(rbx, noreg, byte_no); // do the call __ verify_oop(rbx); __ profile_call(rax); @@ -3012,7 +3035,7 @@ void TemplateTable::invokestatic(int byte_no) { transition(vtos, vtos); - prepare_invoke(rbx, noreg, byte_no, bytecode()); + prepare_invoke(rbx, noreg, byte_no); // do the call __ verify_oop(rbx); __ profile_call(rax); @@ -3028,7 +3051,7 @@ void TemplateTable::invokeinterface(int byte_no) { transition(vtos, vtos); - prepare_invoke(rax, rbx, byte_no, 
bytecode()); + prepare_invoke(rax, rbx, byte_no); // rax,: Interface // rbx,: index @@ -3102,6 +3125,84 @@ __ should_not_reach_here(); } +void TemplateTable::invokedynamic(int byte_no) { + transition(vtos, vtos); + + if (!EnableInvokeDynamic) { + // We should not encounter this bytecode if !EnableInvokeDynamic. + // The verifier will stop it. However, if we get past the verifier, + // this will stop the thread in a reasonable way, without crashing the JVM. + __ call_VM(noreg, CAST_FROM_FN_PTR(address, + InterpreterRuntime::throw_IncompatibleClassChangeError)); + // the call_VM checks for exception, so we should never return here. + __ should_not_reach_here(); + return; + } + + prepare_invoke(rax, rbx, byte_no); + + // rax: CallSite object (f1) + // rbx: unused (f2) + // rcx: receiver address + // rdx: flags (unused) + + if (ProfileInterpreter) { + Label L; + // %%% should make a type profile for any invokedynamic that takes a ref argument + // profile this call + __ profile_call(rsi); + } + + Label handle_unlinked_site; + __ movptr(rcx, Address(rax, __ delayed_value(sun_dyn_CallSiteImpl::target_offset_in_bytes, rcx))); + __ testptr(rcx, rcx); + __ jcc(Assembler::zero, handle_unlinked_site); + + __ prepare_to_jump_from_interpreted(); + __ jump_to_method_handle_entry(rcx, rdx); + + // Initial calls come here... 
+ __ bind(handle_unlinked_site); + __ pop(rcx); // remove return address pushed by prepare_invoke + + // box stacked arguments into an array for the bootstrap method + address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::bootstrap_invokedynamic); + __ restore_bcp(); // rsi must be correct for call_VM + __ call_VM(rax, entry, rax); + __ movl(rdi, rax); // protect bootstrap MH from prepare_invoke + + // recompute return address + __ restore_bcp(); // rsi must be correct for prepare_invoke + prepare_invoke(rax, rbx, -byte_no); // smashes rcx, rdx + // rax: CallSite object (f1) + // rbx: unused (f2) + // rdi: bootstrap MH + // rdx: flags + + // now load up the arglist, which has been neatly boxed + __ get_thread(rcx); + __ movptr(rdx, Address(rcx, JavaThread::vm_result_2_offset())); + __ movptr(Address(rcx, JavaThread::vm_result_2_offset()), NULL_WORD); + __ verify_oop(rdx); + // rdx = arglist + + // save SP now, before we add the bootstrap call to the stack + // We must preserve a fiction that the original arguments are outgoing, + // because the return sequence will reset the stack to this point + // and then pop all those arguments. It seems error-prone to use + // a different argument list size just for bootstrapping. + __ prepare_to_jump_from_interpreted(); + + // Now let's play adapter, pushing the real arguments on the stack. 
+ __ pop(rbx); // return PC + __ push(rdi); // boot MH + __ push(rax); // call site + __ push(rdx); // arglist + __ push(rbx); // return PC, again + __ mov(rcx, rdi); + __ jump_to_method_handle_entry(rcx, rdx); +} + //---------------------------------------------------------------------------------------------------- // Allocation diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/x86/vm/templateTable_x86_32.hpp --- a/src/cpu/x86/vm/templateTable_x86_32.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/x86/vm/templateTable_x86_32.hpp Thu May 07 10:30:17 2009 -0700 @@ -22,8 +22,7 @@ * */ - static void prepare_invoke(Register method, Register index, int byte_no, - Bytecodes::Code code); + static void prepare_invoke(Register method, Register index, int byte_no); static void invokevirtual_helper(Register index, Register recv, Register flags); static void volatile_barrier(Assembler::Membar_mask_bits order_constraint ); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/x86/vm/templateTable_x86_64.cpp --- a/src/cpu/x86/vm/templateTable_x86_64.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/x86/vm/templateTable_x86_64.cpp Thu May 07 10:30:17 2009 -0700 @@ -3058,6 +3058,23 @@ return; } +void TemplateTable::invokedynamic(int byte_no) { + transition(vtos, vtos); + + if (!EnableInvokeDynamic) { + // We should not encounter this bytecode if !EnableInvokeDynamic. + // The verifier will stop it. However, if we get past the verifier, + // this will stop the thread in a reasonable way, without crashing the JVM. + __ call_VM(noreg, CAST_FROM_FN_PTR(address, + InterpreterRuntime::throw_IncompatibleClassChangeError)); + // the call_VM checks for exception, so we should never return here. 
+ __ should_not_reach_here(); + return; + } + + __ stop("invokedynamic NYI");//6815692// +} + //----------------------------------------------------------------------------- // Allocation diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/x86/vm/vtableStubs_x86_32.cpp --- a/src/cpu/x86/vm/vtableStubs_x86_32.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/x86/vm/vtableStubs_x86_32.cpp Thu May 07 10:30:17 2009 -0700 @@ -108,6 +108,9 @@ (int)(s->code_end() - __ pc())); } guarantee(__ pc() <= s->code_end(), "overflowed buffer"); + // shut the door on sizing bugs + int slop = 3; // 32-bit offset is this much larger than an 8-bit one + assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset"); s->set_exception_points(npe_addr, ame_addr); return s; @@ -181,6 +184,9 @@ (int)(s->code_end() - __ pc())); } guarantee(__ pc() <= s->code_end(), "overflowed buffer"); + // shut the door on sizing bugs + int slop = 3; // 32-bit offset is this much larger than an 8-bit one + assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset"); s->set_exception_points(npe_addr, ame_addr); return s; @@ -196,6 +202,41 @@ // Itable stub size return (DebugVtables ? 256 : 66) + (CountCompiledCalls ? 6 : 0); } + // In order to tune these parameters, run the JVM with VM options + // +PrintMiscellaneous and +WizardMode to see information about + // actual itable stubs. Look for lines like this: + // itable #1 at 0x5551212[65] left over: 3 + // Reduce the constants so that the "left over" number is >=3 + // for the common cases. + // Do not aim at a left-over number of zero, because a + // large vtable or itable index (> 16) will require a 32-bit + // immediate displacement instead of an 8-bit one. + // + // The JVM98 app. _202_jess has a megamorphic interface call. 
+ // The itable code looks like this: + // Decoding VtableStub itbl[1]@1 + // mov 0x4(%ecx),%esi + // mov 0xe8(%esi),%edi + // lea 0x130(%esi,%edi,4),%edi + // add $0x7,%edi + // and $0xfffffff8,%edi + // lea 0x4(%esi),%esi + // mov (%edi),%ebx + // cmp %ebx,%eax + // je success + // loop: + // test %ebx,%ebx + // je throw_icce + // add $0x8,%edi + // mov (%edi),%ebx + // cmp %ebx,%eax + // jne loop + // success: + // mov 0x4(%edi),%edi + // mov (%esi,%edi,1),%ebx + // jmp *0x44(%ebx) + // throw_icce: + // jmp throw_ICCE_entry } int VtableStub::pd_code_alignment() { diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/cpu/x86/vm/vtableStubs_x86_64.cpp --- a/src/cpu/x86/vm/vtableStubs_x86_64.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/cpu/x86/vm/vtableStubs_x86_64.cpp Thu May 07 10:30:17 2009 -0700 @@ -106,6 +106,9 @@ (int)(s->code_end() - __ pc())); } guarantee(__ pc() <= s->code_end(), "overflowed buffer"); + // shut the door on sizing bugs + int slop = 3; // 32-bit offset is this much larger than an 8-bit one + assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset"); s->set_exception_points(npe_addr, ame_addr); return s; @@ -191,6 +194,9 @@ (int)(s->code_end() - __ pc())); } guarantee(__ pc() <= s->code_end(), "overflowed buffer"); + // shut the door on sizing bugs + int slop = 3; // 32-bit offset is this much larger than an 8-bit one + assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset"); s->set_exception_points(npe_addr, ame_addr); return s; @@ -206,6 +212,39 @@ return (DebugVtables ? 512 : 72) + (CountCompiledCalls ? 13 : 0) + (UseCompressedOops ? 32 : 0); // 2 leaqs } + // In order to tune these parameters, run the JVM with VM options + // +PrintMiscellaneous and +WizardMode to see information about + // actual itable stubs. Look for lines like this: + // itable #1 at 0x5551212[71] left over: 3 + // Reduce the constants so that the "left over" number is >=3 + // for the common cases. 
+ // Do not aim at a left-over number of zero, because a + // large vtable or itable index (>= 32) will require a 32-bit + // immediate displacement instead of an 8-bit one. + // + // The JVM98 app. _202_jess has a megamorphic interface call. + // The itable code looks like this: + // Decoding VtableStub itbl[1]@12 + // mov 0x8(%rsi),%r10 + // mov 0x198(%r10),%r11d + // lea 0x218(%r10,%r11,8),%r11 + // lea 0x8(%r10),%r10 + // mov (%r11),%rbx + // cmp %rbx,%rax + // je success + // loop: + // test %rbx,%rbx + // je throw_icce + // add $0x10,%r11 + // mov (%r11),%rbx + // cmp %rbx,%rax + // jne loop + // success: + // mov 0x8(%r11),%r11d + // mov (%r10,%r11,1),%rbx + // jmpq *0x60(%rbx) + // throw_icce: + // jmpq throw_ICCE_entry } int VtableStub::pd_code_alignment() { diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/os/windows/vm/os_windows.cpp --- a/src/os/windows/vm/os_windows.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/os/windows/vm/os_windows.cpp Thu May 07 10:30:17 2009 -0700 @@ -2632,6 +2632,8 @@ char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) { + const DWORD prot = exec ? 
PAGE_EXECUTE_READWRITE : PAGE_READWRITE; + if (UseLargePagesIndividualAllocation) { if (TracePageSizes && Verbose) { tty->print_cr("Reserving large pages individually."); @@ -2694,13 +2696,7 @@ p_new = (char *) VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES, - PAGE_READWRITE); - if (p_new != NULL && exec) { - DWORD oldprot; - // Windows doc says to use VirtualProtect to get execute permissions - VirtualProtect(next_alloc_addr, bytes_to_rq, - PAGE_EXECUTE_READWRITE, &oldprot); - } + prot); } if (p_new == NULL) { @@ -2729,12 +2725,7 @@ } else { // normal policy just allocate it all at once DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; - char * res = (char *)VirtualAlloc(NULL, bytes, flag, PAGE_READWRITE); - if (res != NULL && exec) { - DWORD oldprot; - // Windows doc says to use VirtualProtect to get execute permissions - VirtualProtect(res, bytes, PAGE_EXECUTE_READWRITE, &oldprot); - } + char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot); return res; } } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp --- a/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp Thu May 07 10:30:17 2009 -0700 @@ -39,7 +39,7 @@ // ThreadStackSize 320 allows TaggedStackInterpreter and a couple of test cases // to run while keeping the number of threads that can be created high. 
define_pd_global(intx, ThreadStackSize, 320); -define_pd_global(intx, VMThreadStackSize, 256); +define_pd_global(intx, VMThreadStackSize, 512); define_pd_global(intx, SurvivorRatio, 8); define_pd_global(uintx, JVMInvokeMethodSlack, 10*K); #endif // AMD64 diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/tools/MakeDeps/BuildConfig.java --- a/src/share/tools/MakeDeps/BuildConfig.java Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/tools/MakeDeps/BuildConfig.java Thu May 07 10:30:17 2009 -0700 @@ -247,7 +247,7 @@ sysDefines.add("HOTSPOT_BUILD_USER="+System.getProperty("user.name")); sysDefines.add("HOTSPOT_BUILD_TARGET=\\\""+get("Build")+"\\\""); sysDefines.add("_JNI_IMPLEMENTATION_"); - sysDefines.add("HOTSPOT_LIB_ARCH=\\\"i486\\\""); + sysDefines.add("HOTSPOT_LIB_ARCH=\\\"i386\\\""); sysDefines.addAll(defines); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/tools/hsdis/Makefile --- a/src/share/tools/hsdis/Makefile Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/tools/hsdis/Makefile Thu May 07 10:30:17 2009 -0700 @@ -22,61 +22,75 @@ # # -# Single gnu makefile for solaris, linux and windows (windows requires mks or -# cygwin). - -ifeq ($(BINUTILS),) -# Pop all the way out of the workspace to look for binutils. -# ...You probably want to override this setting. -BINUTILS = $(shell cd ../../../../..;pwd)/binutils-2.17-$(LIBARCH) -endif +# Single gnu makefile for solaris, linux and windows (windows requires cygwin and mingw) # Default arch; it is changed below as needed. 
ARCH = i386 OS = $(shell uname) -CPPFLAGS += -I$(BINUTILS)/include -I$(BINUTILS)/bfd -CPPFLAGS += -DHOTSPOT_LIB_ARCH=\"$(LIBARCH)\" -DLIBARCH_$(LIBARCH) -CPPFLAGS += -DHOTSPOT_OS=\"$(OS)\" -DOS_$(OS) - ## OS = SunOS ## ifeq ($(OS),SunOS) -ARCH = $(shell uname -p) +CPU = $(shell uname -p) +ARCH1=$(CPU:i586=i386) +ARCH=$(ARCH1:i686=i386) OS = solaris CC = cc -CCFLAGS += -Kpic -g -CCFLAGS/amd64 += -xarch=amd64 -CCFLAGS/sparcv9 += -xarch=v9 -CCFLAGS += $(CCFLAGS/$(LIBARCH)) +CFLAGS += -KPIC +ifdef LP64 +ifeq ($(ARCH),sparc) +ARCH = sparcv9 +endif +ifeq ($(ARCH),i386) +ARCH = amd64 +endif +endif +CFLAGS/sparcv9 += -xarch=v9 +CFLAGS/amd64 += -m64 +CFLAGS += $(CFLAGS/$(ARCH)) DLDFLAGS += -G +LDFLAGS += -ldl OUTFLAGS += -o $@ LIB_EXT = .so else ## OS = Linux ## ifeq ($(OS),Linux) -CPU = $(shell uname -m) -ifeq ($(CPU),ia64) -ARCH = ia64 +ifneq ($(MINGW),) +LIB_EXT = .dll +CPPFLAGS += -I$(TARGET_DIR)/include +LDFLAGS += -L$(TARGET_DIR)/lib +OS=windows +ifneq ($(findstring x86_64-,$(MINGW)),) +ARCH=amd64 +else +ARCH=i386 +endif +CC = $(MINGW)-gcc +CONFIGURE_ARGS= --host=$(MINGW) --target=$(MINGW) else -ifeq ($(CPU),x86_64) -CCFLAGS += -fPIC -endif # x86_64 -endif # ia64 +CPU = $(shell uname -m) +ARCH1=$(CPU:x86_64=amd64) +ARCH=$(ARCH1:i686=i386) +CFLAGS/i386 += -m32 +CFLAGS/sparc += -m32 +CFLAGS/sparcv9 += -m64 +CFLAGS/amd64 += -m64 +CFLAGS += $(CFLAGS/$(ARCH)) +CFLAGS += -fPIC OS = linux +LIB_EXT = .so CC = gcc -CCFLAGS += -O +endif +CFLAGS += -O DLDFLAGS += -shared +LDFLAGS += -ldl OUTFLAGS += -o $@ -LIB_EXT = .so -CPPFLAGS += -Iinclude -Iinclude/$(OS)_$(ARCH)/ ## OS = Windows ## else # !SunOS, !Linux => Windows -OS = win -CC = cl +OS = windows +CC = gcc #CPPFLAGS += /D"WIN32" /D"_WINDOWS" /D"DEBUG" /D"NDEBUG" -CCFLAGS += /nologo /MD /W3 /WX /O2 /Fo$(@:.dll=.obj) /Gi- -CCFLAGS += -Iinclude -Iinclude/gnu -Iinclude/$(OS)_$(ARCH) -CCFLAGS += /D"HOTSPOT_LIB_ARCH=\"$(LIBARCH)\"" +CFLAGS += /nologo /MD /W3 /WX /O2 /Fo$(@:.dll=.obj) /Gi- +CFLAGS += LIBARCH=\"$(LIBARCH)\"" 
DLDFLAGS += /dll /subsystem:windows /incremental:no \ /export:decode_instruction OUTFLAGS += /link /out:$@ @@ -94,21 +108,34 @@ endif # LIBARCH64/$(ARCH) endif # LP64 -TARGET_DIR = bin/$(OS) +JDKARCH=$(LIBARCH:i386=i586) + +ifeq ($(BINUTILS),) +# Pop all the way out of the workspace to look for binutils. +# ...You probably want to override this setting. +BINUTILSDIR = $(shell cd build/binutils;pwd) +else +BINUTILSDIR = $(shell cd $(BINUTILS);pwd) +endif + +CPPFLAGS += -I$(BINUTILSDIR)/include -I$(BINUTILS)/bfd -I$(TARGET_DIR)/bfd +CPPFLAGS += -DLIBARCH_$(LIBARCH) -DLIBARCH=\"$(LIBARCH)\" -DLIB_EXT=\"$(LIB_EXT)\" + +TARGET_DIR = build/$(OS)-$(JDKARCH) TARGET = $(TARGET_DIR)/hsdis-$(LIBARCH)$(LIB_EXT) SOURCE = hsdis.c -LIBRARIES = $(BINUTILS)/bfd/libbfd.a \ - $(BINUTILS)/opcodes/libopcodes.a \ - $(BINUTILS)/libiberty/libiberty.a +LIBRARIES = $(TARGET_DIR)/bfd/libbfd.a \ + $(TARGET_DIR)/opcodes/libopcodes.a \ + $(TARGET_DIR)/libiberty/libiberty.a -DEMO_TARGET = $(TARGET_DIR)/hsdis-demo-$(LIBARCH) +DEMO_TARGET = $(TARGET_DIR)/hsdis-demo DEMO_SOURCE = hsdis-demo.c .PHONY: all clean demo both -all: $(TARGET) demo +all: $(TARGET) both: all all64 @@ -117,16 +144,17 @@ demo: $(TARGET) $(DEMO_TARGET) -$(LIBRARIES): - @echo "*** Please build binutils first; see ./README: ***" - @sed < ./README '1,/__________/d' | head -20 - @echo "..."; exit 1 +$(LIBRARIES): $(TARGET_DIR) $(TARGET_DIR)/Makefile + if [ ! 
-f $@ ]; then cd $(TARGET_DIR); make all-opcodes; fi + +$(TARGET_DIR)/Makefile: + (cd $(TARGET_DIR); CC=$(CC) CFLAGS="$(CFLAGS)" $(BINUTILSDIR)/configure --disable-nls $(CONFIGURE_ARGS)) $(TARGET): $(SOURCE) $(LIBS) $(LIBRARIES) $(TARGET_DIR) - $(CC) $(OUTFLAGS) $(CPPFLAGS) $(CCFLAGS) $(SOURCE) $(DLDFLAGS) $(LIBRARIES) + $(CC) $(OUTFLAGS) $(CPPFLAGS) $(CFLAGS) $(SOURCE) $(DLDFLAGS) $(LIBRARIES) $(DEMO_TARGET): $(DEMO_SOURCE) $(TARGET) $(TARGET_DIR) - $(CC) $(OUTFLAGS) $(CPPFLAGS) $(CCFLAGS) $(DEMO_SOURCE) $(LDFLAGS) + $(CC) $(OUTFLAGS) -DTARGET_DIR=\"$(TARGET_DIR)\" $(CPPFLAGS) -g $(CFLAGS/$(ARCH)) $(DEMO_SOURCE) $(LDFLAGS) $(TARGET_DIR): [ -d $@ ] || mkdir -p $@ diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/tools/hsdis/README --- a/src/share/tools/hsdis/README Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/tools/hsdis/README Thu May 07 10:30:17 2009 -0700 @@ -32,61 +32,55 @@ * Building -To build this project you need a build of Gnu binutils to link against. -It is known to work with binutils 2.17. - -The makefile looks for this build in $BINUTILS, or (if that is not set), -in .../binutils-2.17-$LIBARCH, where LIBARCH (as in HotSpot) is one of -the jre subdirectory keywords i386, amd64, sparc, sparcv9, etc. - -To build Gnu binutils, first download a copy of the software: - http://directory.fsf.org/project/binutils/ +To build this project you a copy of GNU binutils to build against. It +is known to work with binutils 2.17 and binutils 2.19.1. Download a +copy of the software from http://directory.fsf.org/project/binutils or +one of it's mirrors. Builds targetting windows should use at least +2.19 and currently requires the use of a cross compiler. -Unpack the binutils tarball into an empty directory: - chdir ../../../../.. 
- tar -xzf - < ../binutils-2.17.tar.gz - mv binutils-2.17 binutils-2.17-i386 #or binutils-2.17-sparc - cd binutils-2.17-i386 +The makefile looks for the sources in build/binutils or you can +specify it's location to the makefile using BINTUILS=path. It will +configure binutils and build it first and then build and link the +disasembly adapter. Make all will build the default target for your +platform. If you platform support both 32 and 64 simultaneously then +"make both" will build them both at once. "make all64" will +explicitly build the 64 bit version. By default this will build the +disassembler library only. If you build demo it will build a demo +program that attempts to exercise the library. -From inside that directory, run configure and make: - ( export CFLAGS='-fPIC' - ./configure i386-pc-elf ) - gnumake - -(Leave out or change the argument to configure if not on an i386 system.) +Windows -Next, untar again into another empty directory for the LP64 version: - chdir .. - tar -xzf - < ../binutils-2.17.tar.gz - mv binutils-2.17 binutils-2.17-amd64 #or binutils-2.17-sparcv9 - cd binutils-2.17-amd64 - -From inside that directory, run configure for LP64 and make: - ( export ac_cv_c_bigendian=no CFLAGS='-m64 -fPIC' LDFLAGS=-m64 - ./configure amd64-pc-elf ) - gnumake +In theory this should be buildable on Windows but getting a working +GNU build environment on Windows has proven difficult. MINGW should +be able to do it but at the time of this writing I was unable to get +this working. Instead you can use the mingw cross compiler on linux +to produce the windows binaries. For 32-bit windows you can install +mingw32 using your package manager and it will be added to your path +automatically. For 64-bit you need to download the 64 bit mingw from +http://sourceforge.net/projects/mingw-w64. Grab a copy of the +complete toolchain and unpack it somewhere. Put the bin directory of +the toolchain in your path. 
The mingw installs contain cross compile +versions of gcc that are named with a prefix to indicate what they are +targetting and you must tell the Makefile which one to use. This +should either be i586-mingw32msvc or x86_64-pc-mingw32 depending on +which on you are targetting and there should be a version of gcc in +your path named i586-mingw32msvc-gcc or x86_64-pc-mingw32-gcc. Tell +the makefile what prefix to use to find the mingw tools by using +MINGW=. For example: -The -fPIC option is needed because the generated code will be -linked into the hsdid-$LIBARCH.so binary. If you miss the -option, the JVM will fail to load the disassembler. +make MINGW=i586-mingw32msvc BINTUILS=build/binutils-2.19.1 -You probably want two builds, one for 32 and one for 64 bits. -To build the 64-bit variation of a platforn, add LP64=1 to -the make command line for hsdis. - -So, go back to the hsdis project and build: - chdir .../hsdis - gnumake - gnumake LP64=1 +will build the Win32 cross compiled version of hsdis based on 2.19.1. * Installing -Products are named like bin/$OS/hsdis-$LIBARCH.so. -You can install them on your LD_LIBRARY_PATH, -or inside of your JRE next to $LIBARCH/libjvm.so. +Products are named like build/$OS-$LIBARCH/hsdis-$LIBARCH.so. You can +install them on your LD_LIBRARY_PATH, or inside of your JRE next to +$LIBARCH/libjvm.so. 
Now test: - export LD_LIBRARY_PATH .../hsdis/bin/solaris:$LD_LIBRARY_PATH + + export LD_LIBRARY_PATH .../hsdis/build/$OS-$LIBARCH:$LD_LIBRARY_PATH dargs='-XX:+UnlockDiagnosticVMOptions -XX:+PrintAssembly' dargs=$dargs' -XX:PrintAssemblyOptions=hsdis-print-bytes' java $dargs -Xbatch CompileCommand=print,*String.hashCode HelloWorld diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/tools/hsdis/hsdis-demo.c --- a/src/share/tools/hsdis/hsdis-demo.c Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/tools/hsdis/hsdis-demo.c Thu May 07 10:30:17 2009 -0700 @@ -53,7 +53,7 @@ else if (!strncmp(arg, "-options=", 9)) options = arg+9; else - { printf("Usage: %s [-xml] [name...]\n"); exit(2); } + { printf("Usage: %s [-xml] [name...]\n", av[0]); exit(2); } continue; } greet(arg); @@ -76,26 +76,14 @@ #include "dlfcn.h" -#ifdef HOTSPOT_LIB_ARCH -#define LIBARCH HOTSPOT_LIB_ARCH -#endif -#ifdef HOTSPOT_OS -#define OS HOTSPOT_OS -#endif - #define DECODE_INSTRUCTIONS_NAME "decode_instructions" #define HSDIS_NAME "hsdis" static void* decode_instructions_pv = 0; static const char* hsdis_path[] = { - HSDIS_NAME".so", -#ifdef OS - "bin/"OS"/"HSDIS_NAME".so", -#endif -#ifdef LIBARCH - HSDIS_NAME"-"LIBARCH".so", -#ifdef OS - "bin/"OS"/"HSDIS_NAME"-"LIBARCH".so", -#endif + HSDIS_NAME"-"LIBARCH LIB_EXT, + "./" HSDIS_NAME"-"LIBARCH LIB_EXT, +#ifdef TARGET_DIR + TARGET_DIR"/"HSDIS_NAME"-"LIBARCH LIB_EXT, #endif NULL }; @@ -112,7 +100,7 @@ for (dllib = NULL; dllib == NULL; ) { const char* next_lib = (*next_in_path++); if (next_lib == NULL) - return "cannot find plugin "HSDIS_NAME".so"; + return "cannot find plugin "HSDIS_NAME LIB_EXT; dllib = dlopen(next_lib, RTLD_LAZY); } } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/tools/hsdis/hsdis.c --- a/src/share/tools/hsdis/hsdis.c Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/tools/hsdis/hsdis.c Thu May 07 10:30:17 2009 -0700 @@ -33,6 +33,7 @@ #include #include #include +#include #ifndef bool #define bool int @@ -404,21 +405,21 @@ } static const char* 
native_arch_name() { - const char* res = HOTSPOT_LIB_ARCH; + const char* res = NULL; +#ifdef LIBARCH_i386 + res = "i386"; +#endif #ifdef LIBARCH_amd64 res = "i386:x86-64"; #endif #ifdef LIBARCH_sparc res = "sparc:v8plusb"; #endif -#ifdef LIBARCH_sparc - res = "sparc:v8plusb"; -#endif #ifdef LIBARCH_sparcv9 res = "sparc:v9b"; #endif if (res == NULL) - res = "HOTSPOT_LIB_ARCH is not set in Makefile!"; + res = "architecture not set in Makefile!"; return res; } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/c1/c1_GraphBuilder.cpp --- a/src/share/vm/c1/c1_GraphBuilder.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp Thu May 07 10:30:17 2009 -0700 @@ -1524,6 +1524,11 @@ code = Bytecodes::_invokespecial; } + if (code == Bytecodes::_invokedynamic) { + BAILOUT("invokedynamic NYI"); // FIXME + return; + } + // NEEDS_CLEANUP // I've added the target-is_loaded() test below but I don't really understand // how klass->is_loaded() can be true and yet target->is_loaded() is false. 
@@ -2431,8 +2436,8 @@ case Bytecodes::_invokevirtual : // fall through case Bytecodes::_invokespecial : // fall through case Bytecodes::_invokestatic : // fall through + case Bytecodes::_invokedynamic : // fall through case Bytecodes::_invokeinterface: invoke(code); break; - case Bytecodes::_xxxunusedxxx : ShouldNotReachHere(); break; case Bytecodes::_new : new_instance(s.get_index_big()); break; case Bytecodes::_newarray : new_type_array(); break; case Bytecodes::_anewarray : new_object_array(); break; @@ -2571,6 +2576,7 @@ , Bytecodes::_invokevirtual , Bytecodes::_invokespecial , Bytecodes::_invokestatic + , Bytecodes::_invokedynamic , Bytecodes::_invokeinterface , Bytecodes::_new , Bytecodes::_newarray diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/c1/c1_LinearScan.cpp --- a/src/share/vm/c1/c1_LinearScan.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/c1/c1_LinearScan.cpp Thu May 07 10:30:17 2009 -0700 @@ -2956,9 +2956,11 @@ NOT_PRODUCT(print_intervals("After Register Allocation")); NOT_PRODUCT(print_lir(2, "LIR after register allocation:")); - DEBUG_ONLY(verify()); sort_intervals_after_allocation(); + + DEBUG_ONLY(verify()); + eliminate_spill_moves(); assign_reg_num(); CHECK_BAILOUT(); @@ -3147,6 +3149,16 @@ void LinearScan::verify_no_oops_in_fixed_intervals() { + Interval* fixed_intervals; + Interval* other_intervals; + create_unhandled_lists(&fixed_intervals, &other_intervals, is_precolored_cpu_interval, NULL); + + // to ensure a walking until the last instruction id, add a dummy interval + // with a high operation id + other_intervals = new Interval(any_reg); + other_intervals->add_range(max_jint - 2, max_jint - 1); + IntervalWalker* iw = new IntervalWalker(this, fixed_intervals, other_intervals); + LIR_OpVisitState visitor; for (int i = 0; i < block_count(); i++) { BlockBegin* block = block_at(i); @@ -3159,6 +3171,54 @@ visitor.visit(op); + if (visitor.info_count() > 0) { + iw->walk_before(op->id()); + bool check_live = true; + if (op->code() == 
lir_move) { + LIR_Op1* move = (LIR_Op1*)op; + check_live = (move->patch_code() == lir_patch_none); + } + LIR_OpBranch* branch = op->as_OpBranch(); + if (branch != NULL && branch->stub() != NULL && branch->stub()->is_exception_throw_stub()) { + // Don't bother checking the stub in this case since the + // exception stub will never return to normal control flow. + check_live = false; + } + + // Make sure none of the fixed registers is live across an + // oopmap since we can't handle that correctly. + if (check_live) { + for (Interval* interval = iw->active_first(fixedKind); + interval != Interval::end(); + interval = interval->next()) { + if (interval->current_to() > op->id() + 1) { + // This interval is live out of this op so make sure + // that this interval represents some value that's + // referenced by this op either as an input or output. + bool ok = false; + for_each_visitor_mode(mode) { + int n = visitor.opr_count(mode); + for (int k = 0; k < n; k++) { + LIR_Opr opr = visitor.opr_at(mode, k); + if (opr->is_fixed_cpu()) { + if (interval_at(reg_num(opr)) == interval) { + ok = true; + break; + } + int hi = reg_numHi(opr); + if (hi != -1 && interval_at(hi) == interval) { + ok = true; + break; + } + } + } + } + assert(ok, "fixed intervals should never be live across an oopmap point"); + } + } + } + } + // oop-maps at calls do not contain registers, so check is not needed if (!visitor.has_call()) { diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/ci/bcEscapeAnalyzer.cpp --- a/src/share/vm/ci/bcEscapeAnalyzer.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp Thu May 07 10:30:17 2009 -0700 @@ -833,6 +833,7 @@ case Bytecodes::_invokevirtual: case Bytecodes::_invokespecial: case Bytecodes::_invokestatic: + case Bytecodes::_invokedynamic: case Bytecodes::_invokeinterface: { bool will_link; ciMethod* target = s.get_method(will_link); @@ -848,9 +849,6 @@ } } break; - case Bytecodes::_xxxunusedxxx: - ShouldNotReachHere(); - break; case 
Bytecodes::_new: state.apush(allocated_obj); break; diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/ci/ciMethod.cpp --- a/src/share/vm/ci/ciMethod.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/ci/ciMethod.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -675,6 +675,30 @@ } // ------------------------------------------------------------------ +// invokedynamic support +// +bool ciMethod::is_method_handle_invoke() { + check_is_loaded(); + bool flag = ((flags().as_int() & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS); +#ifdef ASSERT + { + VM_ENTRY_MARK; + bool flag2 = get_methodOop()->is_method_handle_invoke(); + assert(flag == flag2, "consistent"); + } +#endif //ASSERT + return flag; +} + +ciInstance* ciMethod::method_handle_type() { + check_is_loaded(); + VM_ENTRY_MARK; + oop mtype = get_methodOop()->method_handle_type(); + return CURRENT_THREAD_ENV->get_object(mtype)->as_instance(); +} + + +// ------------------------------------------------------------------ // ciMethod::build_method_data // // Generate new methodDataOop objects at compile time. diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/ci/ciMethod.hpp --- a/src/share/vm/ci/ciMethod.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/ci/ciMethod.hpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -207,6 +207,8 @@ bool check_call(int refinfo_index, bool is_static) const; void build_method_data(); // make sure it exists in the VM also int scale_count(int count, float prof_factor = 1.); // make MDO count commensurate with IIC + bool is_method_handle_invoke(); + ciInstance* method_handle_type(); // What kind of ciObject is this? bool is_method() { return true; } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/ci/ciStreams.cpp --- a/src/share/vm/ci/ciStreams.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/ci/ciStreams.cpp Thu May 07 10:30:17 2009 -0700 @@ -301,17 +301,19 @@ // If this is a method invocation bytecode, get the constant pool // index of the invoked method. int ciBytecodeStream::get_method_index() { +#ifdef ASSERT switch (cur_bc()) { case Bytecodes::_invokeinterface: - return Bytes::get_Java_u2(_pc-4); case Bytecodes::_invokevirtual: case Bytecodes::_invokespecial: case Bytecodes::_invokestatic: - return get_index_big(); + case Bytecodes::_invokedynamic: + break; default: ShouldNotReachHere(); - return 0; } +#endif + return get_index_int(); } // ------------------------------------------------------------------ @@ -337,6 +339,9 @@ // for checking linkability when retrieving the associated method. 
ciKlass* ciBytecodeStream::get_declared_method_holder() { bool ignore; + // report as Dynamic for invokedynamic, which is syntactically classless + if (cur_bc() == Bytecodes::_invokedynamic) + return CURRENT_ENV->get_klass_by_name(_holder, ciSymbol::java_dyn_Dynamic(), false); return CURRENT_ENV->get_klass_by_index(_holder, get_method_holder_index(), ignore); } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/ci/ciStreams.hpp --- a/src/share/vm/ci/ciStreams.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/ci/ciStreams.hpp Thu May 07 10:30:17 2009 -0700 @@ -91,9 +91,10 @@ _end = _start + max; } - address cur_bcp() { return _bc_start; } // Returns bcp to current instruction + address cur_bcp() const { return _bc_start; } // Returns bcp to current instruction int next_bci() const { return _pc -_start; } int cur_bci() const { return _bc_start - _start; } + int instruction_size() const { return _pc - _bc_start; } Bytecodes::Code cur_bc() const{ return check_java(_bc); } Bytecodes::Code next_bc() { return Bytecodes::java_code((Bytecodes::Code)* _pc); } @@ -121,34 +122,39 @@ return check_java(_bc); } - bool is_wide() { return ( _pc == _was_wide ); } + bool is_wide() const { return ( _pc == _was_wide ); } // Get a byte index following this bytecode. // If prefixed with a wide bytecode, get a wide index. int get_index() const { + assert_index_size(is_wide() ? 2 : 1); return (_pc == _was_wide) // was widened? ? Bytes::get_Java_u2(_bc_start+2) // yes, return wide index : _bc_start[1]; // no, return narrow index } - // Set a byte index following this bytecode. - // If prefixed with a wide bytecode, get a wide index. - void put_index(int idx) { - if (_pc == _was_wide) // was widened? 
- Bytes::put_Java_u2(_bc_start+2,idx); // yes, set wide index - else - _bc_start[1]=idx; // no, set narrow index + // Get 2-byte index (getfield/putstatic/etc) + int get_index_big() const { + assert_index_size(2); + return Bytes::get_Java_u2(_bc_start+1); } - // Get 2-byte index (getfield/putstatic/etc) - int get_index_big() const { return Bytes::get_Java_u2(_bc_start+1); } + // Get 2-byte index (or 4-byte, for invokedynamic) + int get_index_int() const { + return has_giant_index() ? get_index_giant() : get_index_big(); + } + + // Get 4-byte index, for invokedynamic. + int get_index_giant() const { + assert_index_size(4); + return Bytes::get_native_u4(_bc_start+1); + } + + bool has_giant_index() const { return (cur_bc() == Bytecodes::_invokedynamic); } // Get dimensions byte (multinewarray) int get_dimensions() const { return *(unsigned char*)(_pc-1); } - // Get unsigned index fast - int get_index_fast() const { return Bytes::get_native_u2(_pc-2); } - // Sign-extended index byte/short, no widening int get_byte() const { return (int8_t)(_pc[-1]); } int get_short() const { return (int16_t)Bytes::get_Java_u2(_pc-2); } @@ -225,6 +231,22 @@ ciKlass* get_declared_method_holder(); int get_method_holder_index(); int get_method_signature_index(); + + private: + void assert_index_size(int required_size) const { +#ifdef ASSERT + int isize = instruction_size() - (is_wide() ? 
1 : 0) - 1; + if (isize == 2 && cur_bc() == Bytecodes::_iinc) + isize = 1; + else if (isize <= 2) + ; // no change + else if (has_giant_index()) + isize = 4; + else + isize = 2; + assert(isize = required_size, "wrong index size"); +#endif + } }; diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/classfile/classFileParser.cpp --- a/src/share/vm/classfile/classFileParser.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/classfile/classFileParser.cpp Thu May 07 10:30:17 2009 -0700 @@ -1842,6 +1842,11 @@ _has_vanilla_constructor = true; } + if (EnableMethodHandles && m->is_method_handle_invoke()) { + THROW_MSG_(vmSymbols::java_lang_VirtualMachineError(), + "Method handle invokers must be defined internally to the VM", nullHandle); + } + return m; } @@ -2465,9 +2470,84 @@ } +// Force MethodHandle.vmentry to be an unmanaged pointer. +// There is no way for a classfile to express this, so we must help it. +void ClassFileParser::java_dyn_MethodHandle_fix_pre(constantPoolHandle cp, + typeArrayHandle* fields_ptr, + FieldAllocationCount *fac_ptr, + TRAPS) { + // Add fake fields for java.dyn.MethodHandle instances + // + // This is not particularly nice, but since there is no way to express + // a native wordSize field in Java, we must do it at this level. 
+ + if (!EnableMethodHandles) return; + + int word_sig_index = 0; + const int cp_size = cp->length(); + for (int index = 1; index < cp_size; index++) { + if (cp->tag_at(index).is_utf8() && + cp->symbol_at(index) == vmSymbols::machine_word_signature()) { + word_sig_index = index; + break; + } + } + + if (word_sig_index == 0) + THROW_MSG(vmSymbols::java_lang_VirtualMachineError(), + "missing I or J signature (for vmentry) in java.dyn.MethodHandle"); + + bool found_vmentry = false; + + const int n = (*fields_ptr)()->length(); + for (int i = 0; i < n; i += instanceKlass::next_offset) { + int name_index = (*fields_ptr)->ushort_at(i + instanceKlass::name_index_offset); + int sig_index = (*fields_ptr)->ushort_at(i + instanceKlass::signature_index_offset); + int acc_flags = (*fields_ptr)->ushort_at(i + instanceKlass::access_flags_offset); + symbolOop f_name = cp->symbol_at(name_index); + symbolOop f_sig = cp->symbol_at(sig_index); + if (f_sig == vmSymbols::byte_signature() && + f_name == vmSymbols::vmentry_name() && + (acc_flags & JVM_ACC_STATIC) == 0) { + // Adjust the field type from byte to an unmanaged pointer. 
+ assert(fac_ptr->nonstatic_byte_count > 0, ""); + fac_ptr->nonstatic_byte_count -= 1; + (*fields_ptr)->ushort_at_put(i + instanceKlass::signature_index_offset, + word_sig_index); + if (wordSize == jintSize) { + fac_ptr->nonstatic_word_count += 1; + } else { + fac_ptr->nonstatic_double_count += 1; + } + + FieldAllocationType atype = (FieldAllocationType) (*fields_ptr)->ushort_at(i+4); + assert(atype == NONSTATIC_BYTE, ""); + FieldAllocationType new_atype = NONSTATIC_WORD; + if (wordSize > jintSize) { + if (Universe::field_type_should_be_aligned(T_LONG)) { + atype = NONSTATIC_ALIGNED_DOUBLE; + } else { + atype = NONSTATIC_DOUBLE; + } + } + (*fields_ptr)->ushort_at_put(i+4, new_atype); + + found_vmentry = true; + break; + } + } + + if (!found_vmentry) + THROW_MSG(vmSymbols::java_lang_VirtualMachineError(), + "missing vmentry byte field in java.dyn.MethodHandle"); + +} + + instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name, Handle class_loader, Handle protection_domain, + KlassHandle host_klass, GrowableArray* cp_patches, symbolHandle& parsed_name, TRAPS) { @@ -2500,6 +2580,7 @@ } } + _host_klass = host_klass; _cp_patches = cp_patches; instanceKlassHandle nullHandle; @@ -2808,6 +2889,11 @@ java_lang_Class_fix_pre(&methods, &fac, CHECK_(nullHandle)); } + // adjust the vmentry field declaration in java.dyn.MethodHandle + if (EnableMethodHandles && class_name() == vmSymbols::sun_dyn_MethodHandleImpl() && class_loader.is_null()) { + java_dyn_MethodHandle_fix_pre(cp, &fields, &fac, CHECK_(nullHandle)); + } + // Add a fake "discovered" field if it is not present // for compatibility with earlier jdk's. 
if (class_name() == vmSymbols::java_lang_ref_Reference() @@ -3134,7 +3220,7 @@ this_klass->set_method_ordering(method_ordering()); this_klass->set_initial_method_idnum(methods->length()); this_klass->set_name(cp->klass_name_at(this_class_index)); - if (LinkWellKnownClasses) // I am well known to myself + if (LinkWellKnownClasses || is_anonymous()) // I am well known to myself cp->klass_at_put(this_class_index, this_klass()); // eagerly resolve this_klass->set_protection_domain(protection_domain()); this_klass->set_fields_annotations(fields_annotations()); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/classfile/classFileParser.hpp --- a/src/share/vm/classfile/classFileParser.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/classfile/classFileParser.hpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,7 @@ u2 _major_version; u2 _minor_version; symbolHandle _class_name; + KlassHandle _host_klass; GrowableArray* _cp_patches; // overrides for CP entries bool _has_finalizer; @@ -145,6 +146,11 @@ // Adjust the next_nonstatic_oop_offset to place the fake fields // before any Java fields. void java_lang_Class_fix_post(int* next_nonstatic_oop_offset); + // Adjust the field allocation counts for java.dyn.MethodHandle to add + // a fake address (void*) field. 
+ void java_dyn_MethodHandle_fix_pre(constantPoolHandle cp, + typeArrayHandle* fields_ptr, + FieldAllocationCount *fac_ptr, TRAPS); // Format checker methods void classfile_parse_error(const char* msg, TRAPS); @@ -204,6 +210,10 @@ char* skip_over_field_name(char* name, bool slash_ok, unsigned int length); char* skip_over_field_signature(char* signature, bool void_ok, unsigned int length, TRAPS); + bool is_anonymous() { + assert(AnonymousClasses || _host_klass.is_null(), ""); + return _host_klass.not_null(); + } bool has_cp_patch_at(int index) { assert(AnonymousClasses, ""); assert(index >= 0, "oob"); @@ -249,11 +259,13 @@ Handle protection_domain, symbolHandle& parsed_name, TRAPS) { - return parseClassFile(name, class_loader, protection_domain, NULL, parsed_name, THREAD); + KlassHandle no_host_klass; + return parseClassFile(name, class_loader, protection_domain, no_host_klass, NULL, parsed_name, THREAD); } instanceKlassHandle parseClassFile(symbolHandle name, Handle class_loader, Handle protection_domain, + KlassHandle host_klass, GrowableArray* cp_patches, symbolHandle& parsed_name, TRAPS); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/classfile/classLoader.cpp --- a/src/share/vm/classfile/classLoader.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/classfile/classLoader.cpp Thu May 07 10:30:17 2009 -0700 @@ -1217,31 +1217,34 @@ // valid class file. The class loader will check everything else. 
if (strchr(buffer, '.') == NULL) { _compile_the_world_counter++; - if (_compile_the_world_counter >= CompileTheWorldStartAt && _compile_the_world_counter <= CompileTheWorldStopAt) { - // Construct name without extension - symbolHandle sym = oopFactory::new_symbol_handle(buffer, CHECK); - // Use loader to load and initialize class - klassOop ik = SystemDictionary::resolve_or_null(sym, loader, Handle(), THREAD); - instanceKlassHandle k (THREAD, ik); - if (k.not_null() && !HAS_PENDING_EXCEPTION) { - k->initialize(THREAD); + if (_compile_the_world_counter > CompileTheWorldStopAt) return; + + // Construct name without extension + symbolHandle sym = oopFactory::new_symbol_handle(buffer, CHECK); + // Use loader to load and initialize class + klassOop ik = SystemDictionary::resolve_or_null(sym, loader, Handle(), THREAD); + instanceKlassHandle k (THREAD, ik); + if (k.not_null() && !HAS_PENDING_EXCEPTION) { + k->initialize(THREAD); + } + bool exception_occurred = HAS_PENDING_EXCEPTION; + CLEAR_PENDING_EXCEPTION; + if (CompileTheWorldPreloadClasses && k.not_null()) { + constantPoolKlass::preload_and_initialize_all_classes(k->constants(), THREAD); + if (HAS_PENDING_EXCEPTION) { + // If something went wrong in preloading we just ignore it + CLEAR_PENDING_EXCEPTION; + tty->print_cr("Preloading failed for (%d) %s", _compile_the_world_counter, buffer); } - bool exception_occurred = HAS_PENDING_EXCEPTION; - CLEAR_PENDING_EXCEPTION; + } + + if (_compile_the_world_counter >= CompileTheWorldStartAt) { if (k.is_null() || (exception_occurred && !CompileTheWorldIgnoreInitErrors)) { // If something went wrong (e.g. 
ExceptionInInitializerError) we skip this class tty->print_cr("CompileTheWorld (%d) : Skipping %s", _compile_the_world_counter, buffer); } else { tty->print_cr("CompileTheWorld (%d) : %s", _compile_the_world_counter, buffer); // Preload all classes to get around uncommon traps - if (CompileTheWorldPreloadClasses) { - constantPoolKlass::preload_and_initialize_all_classes(k->constants(), THREAD); - if (HAS_PENDING_EXCEPTION) { - // If something went wrong in preloading we just ignore it - CLEAR_PENDING_EXCEPTION; - tty->print_cr("Preloading failed for (%d) %s", _compile_the_world_counter, buffer); - } - } // Iterate over all methods in class for (int n = 0; n < k->methods()->length(); n++) { methodHandle m (THREAD, methodOop(k->methods()->obj_at(n))); @@ -1253,16 +1256,28 @@ CLEAR_PENDING_EXCEPTION; tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string()); } - if (TieredCompilation) { - // Clobber the first compile and force second tier compilation - m->clear_code(); - CompileBroker::compile_method(m, InvocationEntryBci, - methodHandle(), 0, "CTW", THREAD); - if (HAS_PENDING_EXCEPTION) { - CLEAR_PENDING_EXCEPTION; - tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string()); + if (TieredCompilation) { + // Clobber the first compile and force second tier compilation + nmethod* nm = m->code(); + if (nm != NULL) { + // Throw out the code so that the code cache doesn't fill up + nm->make_not_entrant(); + m->clear_code(); + } + CompileBroker::compile_method(m, InvocationEntryBci, + methodHandle(), 0, "CTW", THREAD); + if (HAS_PENDING_EXCEPTION) { + CLEAR_PENDING_EXCEPTION; + tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string()); + } } } + + nmethod* nm = m->code(); + if (nm != NULL) { + // Throw out the code so that the code cache doesn't fill up + nm->make_not_entrant(); + m->clear_code(); } } } diff 
-r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/classfile/dictionary.cpp --- a/src/share/vm/classfile/dictionary.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/classfile/dictionary.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -549,6 +549,63 @@ } } +SymbolPropertyTable::SymbolPropertyTable(int table_size) + : Hashtable(table_size, sizeof(SymbolPropertyEntry)) +{ +} +SymbolPropertyTable::SymbolPropertyTable(int table_size, HashtableBucket* t, + int number_of_entries) + : Hashtable(table_size, sizeof(SymbolPropertyEntry), t, number_of_entries) +{ +} + + +SymbolPropertyEntry* SymbolPropertyTable::find_entry(int index, unsigned int hash, + symbolHandle sym) { + assert(index == index_for(sym), "incorrect index?"); + for (SymbolPropertyEntry* p = bucket(index); p != NULL; p = p->next()) { + if (p->hash() == hash && p->symbol() == sym()) { + return p; + } + } + return NULL; +} + + +SymbolPropertyEntry* SymbolPropertyTable::add_entry(int index, unsigned int hash, + symbolHandle sym) { + assert_locked_or_safepoint(SystemDictionary_lock); + assert(index == index_for(sym), "incorrect index?"); + assert(find_entry(index, hash, sym) == NULL, "no double entry"); + + SymbolPropertyEntry* p = new_entry(hash, sym()); + Hashtable::add_entry(index, p); + return p; +} + + +void SymbolPropertyTable::oops_do(OopClosure* f) { + for (int index = 0; index < table_size(); index++) { + for (SymbolPropertyEntry* p = bucket(index); p != NULL; p = p->next()) { + f->do_oop((oop*) p->symbol_addr()); + if (p->property_oop() != NULL) { + f->do_oop(p->property_oop_addr()); + } + } + } +} + +void SymbolPropertyTable::methods_do(void f(methodOop)) { + for (int index = 0; index < table_size(); index++) { + for 
(SymbolPropertyEntry* p = bucket(index); p != NULL; p = p->next()) { + oop prop = p->property_oop(); + if (prop != NULL && prop->is_method()) { + f((methodOop)prop); + } + } + } +} + // ---------------------------------------------------------------------------- #ifndef PRODUCT diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/classfile/dictionary.hpp --- a/src/share/vm/classfile/dictionary.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/classfile/dictionary.hpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -217,3 +217,112 @@ tty->print_cr("pd set = #%d", count); } }; + +// Entry in a SymbolPropertyTable, mapping a single symbolOop +// to a managed and an unmanaged pointer. +class SymbolPropertyEntry : public HashtableEntry { + friend class VMStructs; + private: + oop _property_oop; + address _property_data; + + public: + symbolOop symbol() const { return (symbolOop) literal(); } + + oop property_oop() const { return _property_oop; } + void set_property_oop(oop p) { _property_oop = p; } + + address property_data() const { return _property_data; } + void set_property_data(address p) { _property_data = p; } + + SymbolPropertyEntry* next() const { + return (SymbolPropertyEntry*)HashtableEntry::next(); + } + + SymbolPropertyEntry** next_addr() { + return (SymbolPropertyEntry**)HashtableEntry::next_addr(); + } + + oop* symbol_addr() { return literal_addr(); } + oop* property_oop_addr() { return &_property_oop; } + + void print_on(outputStream* st) const { + symbol()->print_value_on(st); + st->print(" -> "); + bool printed = false; + if (property_oop() != NULL) { + property_oop()->print_value_on(st); + printed = true; + } + if (property_data() != NULL) { + if (printed) st->print(" and "); + 
st->print(INTPTR_FORMAT, property_data()); + printed = true; + } + st->print_cr(printed ? "" : "(empty)"); + } +}; + +// A system-internal mapping of symbols to pointers, both managed +// and unmanaged. Used to record the auto-generation of each method +// MethodHandle.invoke(S)T, for all signatures (S)T. +class SymbolPropertyTable : public Hashtable { + friend class VMStructs; +private: + SymbolPropertyEntry* bucket(int i) { + return (SymbolPropertyEntry*) Hashtable::bucket(i); + } + + // The following method is not MT-safe and must be done under lock. + SymbolPropertyEntry** bucket_addr(int i) { + return (SymbolPropertyEntry**) Hashtable::bucket_addr(i); + } + + void add_entry(int index, SymbolPropertyEntry* new_entry) { + ShouldNotReachHere(); + } + void set_entry(int index, SymbolPropertyEntry* new_entry) { + ShouldNotReachHere(); + } + + SymbolPropertyEntry* new_entry(unsigned int hash, symbolOop symbol) { + SymbolPropertyEntry* entry = (SymbolPropertyEntry*) Hashtable::new_entry(hash, symbol); + entry->set_property_oop(NULL); + entry->set_property_data(NULL); + return entry; + } + +public: + SymbolPropertyTable(int table_size); + SymbolPropertyTable(int table_size, HashtableBucket* t, int number_of_entries); + + void free_entry(SymbolPropertyEntry* entry) { + Hashtable::free_entry(entry); + } + + unsigned int compute_hash(symbolHandle sym) { + // Use the regular identity_hash. 
+ return Hashtable::compute_hash(sym); + } + + // need not be locked; no state change + SymbolPropertyEntry* find_entry(int index, unsigned int hash, symbolHandle name); + + // must be done under SystemDictionary_lock + SymbolPropertyEntry* add_entry(int index, unsigned int hash, symbolHandle name); + + // GC support + void oops_do(OopClosure* f); + void methods_do(void f(methodOop)); + + // Sharing support + void dump(SerializeOopClosure* soc); + void restore(SerializeOopClosure* soc); + void reorder_dictionary(); + +#ifndef PRODUCT + void print(); +#endif + void verify(); +}; + diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/classfile/javaClasses.cpp --- a/src/share/vm/classfile/javaClasses.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/classfile/javaClasses.cpp Thu May 07 10:30:17 2009 -0700 @@ -25,13 +25,24 @@ # include "incls/_precompiled.incl" # include "incls/_javaClasses.cpp.incl" +static bool find_field(instanceKlass* ik, + symbolOop name_symbol, symbolOop signature_symbol, + fieldDescriptor* fd, + bool allow_super = false) { + if (allow_super) + return ik->find_field(name_symbol, signature_symbol, fd) != NULL; + else + return ik->find_local_field(name_symbol, signature_symbol, fd); +} + // Helpful routine for computing field offsets at run time rather than hardcoding them static void compute_offset(int &dest_offset, - klassOop klass_oop, symbolOop name_symbol, symbolOop signature_symbol) { + klassOop klass_oop, symbolOop name_symbol, symbolOop signature_symbol, + bool allow_super = false) { fieldDescriptor fd; instanceKlass* ik = instanceKlass::cast(klass_oop); - if (!ik->find_local_field(name_symbol, signature_symbol, &fd)) { + if (!find_field(ik, name_symbol, signature_symbol, &fd, allow_super)) { ResourceMark rm; tty->print_cr("Invalid layout of %s at %s", ik->external_name(), name_symbol->as_C_string()); fatal("Invalid layout of preloaded class"); @@ -42,14 +53,16 @@ // Same as above but for "optional" offsets that might not be present in 
certain JDK versions static void compute_optional_offset(int& dest_offset, - klassOop klass_oop, symbolOop name_symbol, symbolOop signature_symbol) { + klassOop klass_oop, symbolOop name_symbol, symbolOop signature_symbol, + bool allow_super = false) { fieldDescriptor fd; instanceKlass* ik = instanceKlass::cast(klass_oop); - if (ik->find_local_field(name_symbol, signature_symbol, &fd)) { + if (find_field(ik, name_symbol, signature_symbol, &fd, allow_super)) { dest_offset = fd.offset(); } } + Handle java_lang_String::basic_create(int length, bool tenured, TRAPS) { // Create the String object first, so there's a chance that the String // and the char array it points to end up in the same cache line. @@ -2107,13 +2120,359 @@ } +// Support for java_dyn_MethodHandle + +int java_dyn_MethodHandle::_type_offset; +int java_dyn_MethodHandle::_vmtarget_offset; +int java_dyn_MethodHandle::_vmentry_offset; +int java_dyn_MethodHandle::_vmslots_offset; + +int sun_dyn_MemberName::_clazz_offset; +int sun_dyn_MemberName::_name_offset; +int sun_dyn_MemberName::_type_offset; +int sun_dyn_MemberName::_flags_offset; +int sun_dyn_MemberName::_vmtarget_offset; +int sun_dyn_MemberName::_vmindex_offset; + +int sun_dyn_DirectMethodHandle::_vmindex_offset; + +int sun_dyn_BoundMethodHandle::_argument_offset; +int sun_dyn_BoundMethodHandle::_vmargslot_offset; + +int sun_dyn_AdapterMethodHandle::_conversion_offset; + +void java_dyn_MethodHandle::compute_offsets() { + klassOop k = SystemDictionary::MethodHandle_klass(); + if (k != NULL && EnableMethodHandles) { + compute_offset(_type_offset, k, vmSymbols::type_name(), vmSymbols::java_dyn_MethodType_signature(), true); + compute_offset(_vmtarget_offset, k, vmSymbols::vmtarget_name(), vmSymbols::object_signature(), true); + compute_offset(_vmentry_offset, k, vmSymbols::vmentry_name(), vmSymbols::machine_word_signature(), true); + + // Note: MH.vmslots (if it is present) is a hoisted copy of MH.type.form.vmslots. 
+ // It is optional pending experiments to keep or toss. + compute_optional_offset(_vmslots_offset, k, vmSymbols::vmslots_name(), vmSymbols::int_signature(), true); + } +} + +void sun_dyn_MemberName::compute_offsets() { + klassOop k = SystemDictionary::MemberName_klass(); + if (k != NULL && EnableMethodHandles) { + compute_offset(_clazz_offset, k, vmSymbols::clazz_name(), vmSymbols::class_signature()); + compute_offset(_name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature()); + compute_offset(_type_offset, k, vmSymbols::type_name(), vmSymbols::object_signature()); + compute_offset(_flags_offset, k, vmSymbols::flags_name(), vmSymbols::int_signature()); + compute_offset(_vmtarget_offset, k, vmSymbols::vmtarget_name(), vmSymbols::object_signature()); + compute_offset(_vmindex_offset, k, vmSymbols::vmindex_name(), vmSymbols::int_signature()); + } +} + +void sun_dyn_DirectMethodHandle::compute_offsets() { + klassOop k = SystemDictionary::DirectMethodHandle_klass(); + if (k != NULL && EnableMethodHandles) { + compute_offset(_vmindex_offset, k, vmSymbols::vmindex_name(), vmSymbols::int_signature(), true); + } +} + +void sun_dyn_BoundMethodHandle::compute_offsets() { + klassOop k = SystemDictionary::BoundMethodHandle_klass(); + if (k != NULL && EnableMethodHandles) { + compute_offset(_vmargslot_offset, k, vmSymbols::vmargslot_name(), vmSymbols::int_signature(), true); + compute_offset(_argument_offset, k, vmSymbols::argument_name(), vmSymbols::object_signature(), true); + } +} + +void sun_dyn_AdapterMethodHandle::compute_offsets() { + klassOop k = SystemDictionary::AdapterMethodHandle_klass(); + if (k != NULL && EnableMethodHandles) { + compute_offset(_conversion_offset, k, vmSymbols::conversion_name(), vmSymbols::int_signature(), true); + } +} + +oop java_dyn_MethodHandle::type(oop mh) { + return mh->obj_field(_type_offset); +} + +void java_dyn_MethodHandle::set_type(oop mh, oop mtype) { + mh->obj_field_put(_type_offset, mtype); +} + +int 
java_dyn_MethodHandle::vmslots(oop mh) { + int vmslots_offset = _vmslots_offset; + if (vmslots_offset != 0) { +#ifdef ASSERT + int x = mh->int_field(vmslots_offset); + int y = compute_vmslots(mh); + assert(x == y, "correct hoisted value"); +#endif + return mh->int_field(vmslots_offset); + } else { + return compute_vmslots(mh); + } +} + +// if MH.vmslots exists, hoist into it the value of type.form.vmslots +void java_dyn_MethodHandle::init_vmslots(oop mh) { + int vmslots_offset = _vmslots_offset; + if (vmslots_offset != 0) { + mh->int_field_put(vmslots_offset, compute_vmslots(mh)); + } +} + +// fetch type.form.vmslots, which is the number of JVM stack slots +// required to carry the arguments of this MH +int java_dyn_MethodHandle::compute_vmslots(oop mh) { + oop mtype = type(mh); + if (mtype == NULL) return 0; // Java code would get NPE + oop form = java_dyn_MethodType::form(mtype); + if (form == NULL) return 0; // Java code would get NPE + return java_dyn_MethodTypeForm::vmslots(form); +} + +// fetch the low-level entry point for this mh +MethodHandleEntry* java_dyn_MethodHandle::vmentry(oop mh) { + return (MethodHandleEntry*) mh->address_field(_vmentry_offset); +} + +void java_dyn_MethodHandle::set_vmentry(oop mh, MethodHandleEntry* me) { + assert(_vmentry_offset != 0, "must be present"); + + // This is always the final step that initializes a valid method handle: + mh->release_address_field_put(_vmentry_offset, (address) me); + + // There should be enough memory barriers on exit from native methods + // to ensure that the MH is fully initialized to all threads before + // Java code can publish it in global data structures. + // But just in case, we use release_address_field_put. 
+} + +/// MemberName accessors + +oop sun_dyn_MemberName::clazz(oop mname) { + assert(is_instance(mname), "wrong type"); + return mname->obj_field(_clazz_offset); +} + +void sun_dyn_MemberName::set_clazz(oop mname, oop clazz) { + assert(is_instance(mname), "wrong type"); + mname->obj_field_put(_clazz_offset, clazz); +} + +oop sun_dyn_MemberName::name(oop mname) { + assert(is_instance(mname), "wrong type"); + return mname->obj_field(_name_offset); +} + +void sun_dyn_MemberName::set_name(oop mname, oop name) { + assert(is_instance(mname), "wrong type"); + mname->obj_field_put(_name_offset, name); +} + +oop sun_dyn_MemberName::type(oop mname) { + assert(is_instance(mname), "wrong type"); + return mname->obj_field(_type_offset); +} + +void sun_dyn_MemberName::set_type(oop mname, oop type) { + assert(is_instance(mname), "wrong type"); + mname->obj_field_put(_type_offset, type); +} + +int sun_dyn_MemberName::flags(oop mname) { + assert(is_instance(mname), "wrong type"); + return mname->int_field(_flags_offset); +} + +void sun_dyn_MemberName::set_flags(oop mname, int flags) { + assert(is_instance(mname), "wrong type"); + mname->int_field_put(_flags_offset, flags); +} + +oop sun_dyn_MemberName::vmtarget(oop mname) { + assert(is_instance(mname), "wrong type"); + return mname->obj_field(_vmtarget_offset); +} + +void sun_dyn_MemberName::set_vmtarget(oop mname, oop ref) { + assert(is_instance(mname), "wrong type"); + mname->obj_field_put(_vmtarget_offset, ref); +} + +int sun_dyn_MemberName::vmindex(oop mname) { + assert(is_instance(mname), "wrong type"); + return mname->int_field(_vmindex_offset); +} + +void sun_dyn_MemberName::set_vmindex(oop mname, int index) { + assert(is_instance(mname), "wrong type"); + mname->int_field_put(_vmindex_offset, index); +} + +oop java_dyn_MethodHandle::vmtarget(oop mh) { + assert(is_instance(mh), "MH only"); + return mh->obj_field(_vmtarget_offset); +} + +void java_dyn_MethodHandle::set_vmtarget(oop mh, oop ref) { + assert(is_instance(mh), "MH 
only"); + mh->obj_field_put(_vmtarget_offset, ref); +} + +int sun_dyn_DirectMethodHandle::vmindex(oop mh) { + assert(is_instance(mh), "DMH only"); + return mh->int_field(_vmindex_offset); +} + +void sun_dyn_DirectMethodHandle::set_vmindex(oop mh, int index) { + assert(is_instance(mh), "DMH only"); + mh->int_field_put(_vmindex_offset, index); +} + +int sun_dyn_BoundMethodHandle::vmargslot(oop mh) { + assert(is_instance(mh), "BMH only"); + return mh->int_field(_vmargslot_offset); +} + +oop sun_dyn_BoundMethodHandle::argument(oop mh) { + assert(is_instance(mh), "BMH only"); + return mh->obj_field(_argument_offset); +} + +int sun_dyn_AdapterMethodHandle::conversion(oop mh) { + assert(is_instance(mh), "AMH only"); + return mh->int_field(_conversion_offset); +} + +void sun_dyn_AdapterMethodHandle::set_conversion(oop mh, int conv) { + assert(is_instance(mh), "AMH only"); + mh->int_field_put(_conversion_offset, conv); +} + + +// Support for java_dyn_MethodType + +int java_dyn_MethodType::_rtype_offset; +int java_dyn_MethodType::_ptypes_offset; +int java_dyn_MethodType::_form_offset; + +void java_dyn_MethodType::compute_offsets() { + klassOop k = SystemDictionary::MethodType_klass(); + if (k != NULL) { + compute_offset(_rtype_offset, k, vmSymbols::rtype_name(), vmSymbols::class_signature()); + compute_offset(_ptypes_offset, k, vmSymbols::ptypes_name(), vmSymbols::class_array_signature()); + compute_offset(_form_offset, k, vmSymbols::form_name(), vmSymbols::java_dyn_MethodTypeForm_signature()); + } +} + +void java_dyn_MethodType::print_signature(oop mt, outputStream* st) { + st->print("("); + objArrayOop pts = ptypes(mt); + for (int i = 0, limit = pts->length(); i < limit; i++) { + java_lang_Class::print_signature(pts->obj_at(i), st); + } + st->print(")"); + java_lang_Class::print_signature(rtype(mt), st); +} + +symbolOop java_dyn_MethodType::as_signature(oop mt, bool intern_if_not_found, TRAPS) { + ResourceMark rm; + stringStream buffer(128); + print_signature(mt, &buffer); 
+ const char* sigstr = buffer.base(); + int siglen = (int) buffer.size(); + if (!intern_if_not_found) + return SymbolTable::probe(sigstr, siglen); + else + return oopFactory::new_symbol(sigstr, siglen, THREAD); +} + +oop java_dyn_MethodType::rtype(oop mt) { + assert(is_instance(mt), "must be a MethodType"); + return mt->obj_field(_rtype_offset); +} + +objArrayOop java_dyn_MethodType::ptypes(oop mt) { + assert(is_instance(mt), "must be a MethodType"); + return (objArrayOop) mt->obj_field(_ptypes_offset); +} + +oop java_dyn_MethodType::form(oop mt) { + assert(is_instance(mt), "must be a MethodType"); + return mt->obj_field(_form_offset); +} + +oop java_dyn_MethodType::ptype(oop mt, int idx) { + return ptypes(mt)->obj_at(idx); +} + + + +// Support for java_dyn_MethodTypeForm + +int java_dyn_MethodTypeForm::_vmslots_offset; +int java_dyn_MethodTypeForm::_erasedType_offset; + +void java_dyn_MethodTypeForm::compute_offsets() { + klassOop k = SystemDictionary::MethodTypeForm_klass(); + if (k != NULL) { + compute_optional_offset(_vmslots_offset, k, vmSymbols::vmslots_name(), vmSymbols::int_signature(), true); + compute_optional_offset(_erasedType_offset, k, vmSymbols::erasedType_name(), vmSymbols::java_dyn_MethodType_signature(), true); + } +} + +int java_dyn_MethodTypeForm::vmslots(oop mtform) { + assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); + return mtform->int_field(_vmslots_offset); +} + +oop java_dyn_MethodTypeForm::erasedType(oop mtform) { + assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); + return mtform->obj_field(_erasedType_offset); +} + + +// Support for sun_dyn_CallSiteImpl + +int sun_dyn_CallSiteImpl::_type_offset; +int sun_dyn_CallSiteImpl::_target_offset; +int sun_dyn_CallSiteImpl::_vmmethod_offset; + +void sun_dyn_CallSiteImpl::compute_offsets() { + if (!EnableInvokeDynamic) return; + klassOop k = SystemDictionary::CallSiteImpl_klass(); + if (k != NULL) { + 
compute_offset(_type_offset, k, vmSymbols::type_name(), vmSymbols::java_dyn_MethodType_signature(), true); + compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_dyn_MethodHandle_signature(), true); + compute_offset(_vmmethod_offset, k, vmSymbols::vmmethod_name(), vmSymbols::object_signature(), true); + } +} + +oop sun_dyn_CallSiteImpl::type(oop site) { + return site->obj_field(_type_offset); +} + +oop sun_dyn_CallSiteImpl::target(oop site) { + return site->obj_field(_target_offset); +} + +void sun_dyn_CallSiteImpl::set_target(oop site, oop target) { + site->obj_field_put(_target_offset, target); +} + +oop sun_dyn_CallSiteImpl::vmmethod(oop site) { + return site->obj_field(_vmmethod_offset); +} + +void sun_dyn_CallSiteImpl::set_vmmethod(oop site, oop ref) { + site->obj_field_put(_vmmethod_offset, ref); +} + + // Support for java_security_AccessControlContext int java_security_AccessControlContext::_context_offset = 0; int java_security_AccessControlContext::_privilegedContext_offset = 0; int java_security_AccessControlContext::_isPrivileged_offset = 0; - void java_security_AccessControlContext::compute_offsets() { assert(_isPrivileged_offset == 0, "offsets should be initialized only once"); fieldDescriptor fd; @@ -2442,6 +2801,18 @@ java_lang_System::compute_offsets(); java_lang_Thread::compute_offsets(); java_lang_ThreadGroup::compute_offsets(); + if (EnableMethodHandles) { + java_dyn_MethodHandle::compute_offsets(); + sun_dyn_MemberName::compute_offsets(); + sun_dyn_DirectMethodHandle::compute_offsets(); + sun_dyn_BoundMethodHandle::compute_offsets(); + sun_dyn_AdapterMethodHandle::compute_offsets(); + java_dyn_MethodType::compute_offsets(); + java_dyn_MethodTypeForm::compute_offsets(); + } + if (EnableInvokeDynamic) { + sun_dyn_CallSiteImpl::compute_offsets(); + } java_security_AccessControlContext::compute_offsets(); // Initialize reflection classes. 
The layouts of these classes // changed with the new reflection implementation in JDK 1.4, and @@ -2459,6 +2830,9 @@ sun_reflect_UnsafeStaticFieldAccessorImpl::compute_offsets(); } sun_misc_AtomicLongCSImpl::compute_offsets(); + + // generated interpreter code wants to know about the offsets we just computed: + AbstractAssembler::update_delayed_values(); } #ifndef PRODUCT diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/classfile/javaClasses.hpp --- a/src/share/vm/classfile/javaClasses.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/classfile/javaClasses.hpp Thu May 07 10:30:17 2009 -0700 @@ -151,6 +151,12 @@ // Conversion static klassOop as_klassOop(oop java_class); static BasicType as_BasicType(oop java_class, klassOop* reference_klass = NULL); + static BasicType as_BasicType(oop java_class, KlassHandle* reference_klass) { + klassOop refk_oop = NULL; + BasicType result = as_BasicType(java_class, &refk_oop); + (*reference_klass) = KlassHandle(refk_oop); + return result; + } static symbolOop as_signature(oop java_class, bool intern_if_not_found, TRAPS); static void print_signature(oop java_class, outputStream *st); // Testing @@ -778,6 +784,311 @@ }; +// Interface to java.dyn.MethodHandle objects + +class MethodHandleEntry; + +class java_dyn_MethodHandle: AllStatic { + friend class JavaClasses; + + private: + static int _vmentry_offset; // assembly code trampoline for MH + static int _vmtarget_offset; // class-specific target reference + static int _type_offset; // the MethodType of this MH + static int _vmslots_offset; // OPTIONAL hoisted type.form.vmslots + + static void compute_offsets(); + + public: + // Accessors + static oop type(oop mh); + static void set_type(oop mh, oop mtype); + + static oop vmtarget(oop mh); + static void set_vmtarget(oop mh, oop target); + + static MethodHandleEntry* vmentry(oop mh); + static void set_vmentry(oop mh, MethodHandleEntry* data); + + static int vmslots(oop mh); + static void init_vmslots(oop mh); + static int 
compute_vmslots(oop mh); + + // Testers + static bool is_subclass(klassOop klass) { + return Klass::cast(klass)->is_subclass_of(SystemDictionary::MethodHandle_klass()); + } + static bool is_instance(oop obj) { + return obj != NULL && is_subclass(obj->klass()); + } + + // Accessors for code generation: + static int type_offset_in_bytes() { return _type_offset; } + static int vmtarget_offset_in_bytes() { return _vmtarget_offset; } + static int vmentry_offset_in_bytes() { return _vmentry_offset; } + static int vmslots_offset_in_bytes() { return _vmslots_offset; } +}; + +class sun_dyn_DirectMethodHandle: public java_dyn_MethodHandle { + friend class JavaClasses; + + private: + // _vmtarget_offset; // method or class or interface + static int _vmindex_offset; // negative or vtable idx or itable idx + static void compute_offsets(); + + public: + // Accessors + static int vmindex(oop mh); + static void set_vmindex(oop mh, int index); + + // Testers + static bool is_subclass(klassOop klass) { + return Klass::cast(klass)->is_subclass_of(SystemDictionary::DirectMethodHandle_klass()); + } + static bool is_instance(oop obj) { + return obj != NULL && is_subclass(obj->klass()); + } + + // Accessors for code generation: + static int vmindex_offset_in_bytes() { return _vmindex_offset; } +}; + +class sun_dyn_BoundMethodHandle: public java_dyn_MethodHandle { + friend class JavaClasses; + + private: + static int _argument_offset; // argument value bound into this MH + static int _vmargslot_offset; // relevant argument slot (<= vmslots) + static void compute_offsets(); + +public: + static oop argument(oop mh); + static void set_argument(oop mh, oop ref); + + static jint vmargslot(oop mh); + static void set_vmargslot(oop mh, jint slot); + + // Testers + static bool is_subclass(klassOop klass) { + return Klass::cast(klass)->is_subclass_of(SystemDictionary::BoundMethodHandle_klass()); + } + static bool is_instance(oop obj) { + return obj != NULL && is_subclass(obj->klass()); + } + + 
static int argument_offset_in_bytes() { return _argument_offset; } + static int vmargslot_offset_in_bytes() { return _vmargslot_offset; } +}; + +class sun_dyn_AdapterMethodHandle: public sun_dyn_BoundMethodHandle { + friend class JavaClasses; + + private: + static int _conversion_offset; // type of conversion to apply + static void compute_offsets(); + + public: + static int conversion(oop mh); + static void set_conversion(oop mh, int conv); + + // Testers + static bool is_subclass(klassOop klass) { + return Klass::cast(klass)->is_subclass_of(SystemDictionary::AdapterMethodHandle_klass()); + } + static bool is_instance(oop obj) { + return obj != NULL && is_subclass(obj->klass()); + } + + // Relevant integer codes (keep these in synch. with MethodHandleNatives.Constants): + enum { + OP_RETYPE_ONLY = 0x0, // no argument changes; straight retype + OP_CHECK_CAST = 0x1, // ref-to-ref conversion; requires a Class argument + OP_PRIM_TO_PRIM = 0x2, // converts from one primitive to another + OP_REF_TO_PRIM = 0x3, // unboxes a wrapper to produce a primitive + OP_PRIM_TO_REF = 0x4, // boxes a primitive into a wrapper (NYI) + OP_SWAP_ARGS = 0x5, // swap arguments (vminfo is 2nd arg) + OP_ROT_ARGS = 0x6, // rotate arguments (vminfo is displaced arg) + OP_DUP_ARGS = 0x7, // duplicates one or more arguments (at TOS) + OP_DROP_ARGS = 0x8, // remove one or more argument slots + OP_COLLECT_ARGS = 0x9, // combine one or more arguments into a varargs (NYI) + OP_SPREAD_ARGS = 0xA, // expand in place a varargs array (of known size) + OP_FLYBY = 0xB, // operate first on reified argument list (NYI) + OP_RICOCHET = 0xC, // run an adapter chain on the return value (NYI) + CONV_OP_LIMIT = 0xD, // limit of CONV_OP enumeration + + CONV_OP_MASK = 0xF00, // this nybble contains the conversion op field + CONV_VMINFO_MASK = 0x0FF, // LSB is reserved for JVM use + CONV_VMINFO_SHIFT = 0, // position of bits in CONV_VMINFO_MASK + CONV_OP_SHIFT = 8, // position of bits in CONV_OP_MASK + 
CONV_DEST_TYPE_SHIFT = 12, // byte 2 has the adapter BasicType (if needed) + CONV_SRC_TYPE_SHIFT = 16, // byte 2 has the source BasicType (if needed) + CONV_STACK_MOVE_SHIFT = 20, // high 12 bits give signed SP change + CONV_STACK_MOVE_MASK = (1 << (32 - CONV_STACK_MOVE_SHIFT)) - 1 + }; + + static int conversion_offset_in_bytes() { return _conversion_offset; } +}; + + +// Interface to sun.dyn.MemberName objects +// (These are a private interface for Java code to query the class hierarchy.) + +class sun_dyn_MemberName: AllStatic { + friend class JavaClasses; + + private: + // From java.dyn.MemberName: + // private Class clazz; // class in which the method is defined + // private String name; // may be null if not yet materialized + // private Object type; // may be null if not yet materialized + // private int flags; // modifier bits; see reflect.Modifier + // private Object vmtarget; // VM-specific target value + // private int vmindex; // method index within class or interface + static int _clazz_offset; + static int _name_offset; + static int _type_offset; + static int _flags_offset; + static int _vmtarget_offset; + static int _vmindex_offset; + + static void compute_offsets(); + + public: + // Accessors + static oop clazz(oop mname); + static void set_clazz(oop mname, oop clazz); + + static oop type(oop mname); + static void set_type(oop mname, oop type); + + static oop name(oop mname); + static void set_name(oop mname, oop name); + + static int flags(oop mname); + static void set_flags(oop mname, int flags); + + static int modifiers(oop mname) { return (u2) flags(mname); } + static void set_modifiers(oop mname, int mods) + { set_flags(mname, (flags(mname) &~ (u2)-1) | (u2)mods); } + + static oop vmtarget(oop mname); + static void set_vmtarget(oop mname, oop target); + + static int vmindex(oop mname); + static void set_vmindex(oop mname, int index); + + // Testers + static bool is_subclass(klassOop klass) { + return 
Klass::cast(klass)->is_subclass_of(SystemDictionary::MemberName_klass()); + } + static bool is_instance(oop obj) { + return obj != NULL && is_subclass(obj->klass()); + } + + // Relevant integer codes (keep these in synch. with MethodHandleNatives.Constants): + enum { + MN_IS_METHOD = 0x00010000, // method (not constructor) + MN_IS_CONSTRUCTOR = 0x00020000, // constructor + MN_IS_FIELD = 0x00040000, // field + MN_IS_TYPE = 0x00080000, // nested type + MN_SEARCH_SUPERCLASSES = 0x00100000, // for MHN.getMembers + MN_SEARCH_INTERFACES = 0x00200000, // for MHN.getMembers + VM_INDEX_UNINITIALIZED = -99 + }; + + // Accessors for code generation: + static int clazz_offset_in_bytes() { return _clazz_offset; } + static int type_offset_in_bytes() { return _type_offset; } + static int name_offset_in_bytes() { return _name_offset; } + static int flags_offset_in_bytes() { return _flags_offset; } + static int vmtarget_offset_in_bytes() { return _vmtarget_offset; } + static int vmindex_offset_in_bytes() { return _vmindex_offset; } +}; + + +// Interface to java.dyn.MethodType objects + +class java_dyn_MethodType: AllStatic { + friend class JavaClasses; + + private: + static int _rtype_offset; + static int _ptypes_offset; + static int _form_offset; + + static void compute_offsets(); + + public: + // Accessors + static oop rtype(oop mt); + static objArrayOop ptypes(oop mt); + static oop form(oop mt); + + static oop ptype(oop mt, int index); + + static symbolOop as_signature(oop mt, bool intern_if_not_found, TRAPS); + static void print_signature(oop mt, outputStream* st); + + static bool is_instance(oop obj) { + return obj != NULL && obj->klass() == SystemDictionary::MethodType_klass(); + } + + // Accessors for code generation: + static int rtype_offset_in_bytes() { return _rtype_offset; } + static int ptypes_offset_in_bytes() { return _ptypes_offset; } + static int form_offset_in_bytes() { return _form_offset; } +}; + +class java_dyn_MethodTypeForm: AllStatic { + friend class 
JavaClasses; + + private: + static int _vmslots_offset; // number of argument slots needed + static int _erasedType_offset; // erasedType = canonical MethodType + + static void compute_offsets(); + + public: + // Accessors + static int vmslots(oop mtform); + static oop erasedType(oop mtform); + + // Accessors for code generation: + static int vmslots_offset_in_bytes() { return _vmslots_offset; } + static int erasedType_offset_in_bytes() { return _erasedType_offset; } +}; + + +// Interface to sun.dyn.CallSiteImpl objects + +class sun_dyn_CallSiteImpl: AllStatic { + friend class JavaClasses; + +private: + static int _type_offset; + static int _target_offset; + static int _vmmethod_offset; + + static void compute_offsets(); + +public: + // Accessors + static oop type(oop site); + + static oop target(oop site); + static void set_target(oop site, oop target); + + static oop vmmethod(oop site); + static void set_vmmethod(oop site, oop ref); + + // Accessors for code generation: + static int target_offset_in_bytes() { return _target_offset; } + static int type_offset_in_bytes() { return _type_offset; } + static int vmmethod_offset_in_bytes() { return _vmmethod_offset; } +}; + + // Interface to java.security.AccessControlContext objects class java_security_AccessControlContext: AllStatic { diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/classfile/systemDictionary.cpp --- a/src/share/vm/classfile/systemDictionary.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/classfile/systemDictionary.cpp Thu May 07 10:30:17 2009 -0700 @@ -31,6 +31,7 @@ Dictionary* SystemDictionary::_shared_dictionary = NULL; LoaderConstraintTable* SystemDictionary::_loader_constraints = NULL; ResolutionErrorTable* SystemDictionary::_resolution_errors = NULL; +SymbolPropertyTable* SystemDictionary::_invoke_method_table = NULL; int SystemDictionary::_number_of_modifications = 0; @@ -966,6 +967,8 @@ instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name, class_loader, 
protection_domain, + host_klass, + cp_patches, parsed_name, THREAD); @@ -1691,6 +1694,10 @@ // represent classes we're actively loading. placeholders_do(blk); + // Visit extra methods + if (invoke_method_table() != NULL) + invoke_method_table()->oops_do(blk); + // Loader constraints. We must keep the symbolOop used in the name alive. constraints()->always_strong_classes_do(blk); @@ -1726,6 +1733,10 @@ // Adjust dictionary dictionary()->oops_do(f); + // Visit extra methods + if (invoke_method_table() != NULL) + invoke_method_table()->oops_do(f); + // Partially loaded classes placeholders()->oops_do(f); @@ -1798,6 +1809,8 @@ void SystemDictionary::methods_do(void f(methodOop)) { dictionary()->methods_do(f); + if (invoke_method_table() != NULL) + invoke_method_table()->methods_do(f); } // ---------------------------------------------------------------------------- @@ -1830,6 +1843,7 @@ _number_of_modifications = 0; _loader_constraints = new LoaderConstraintTable(_loader_constraint_size); _resolution_errors = new ResolutionErrorTable(_resolution_error_size); + // _invoke_method_table is allocated lazily in find_method_handle_invoke() // Allocate private object used as system class loader lock _system_loader_lock_obj = oopFactory::new_system_objArray(0, CHECK); @@ -1891,6 +1905,9 @@ wk_klass_name_limits[0] = s; } } + + // move the starting value forward to the limit: + start_id = limit_id; } @@ -1924,6 +1941,27 @@ instanceKlass::cast(WK_KLASS(final_reference_klass))->set_reference_type(REF_FINAL); instanceKlass::cast(WK_KLASS(phantom_reference_klass))->set_reference_type(REF_PHANTOM); + WKID meth_group_start = WK_KLASS_ENUM_NAME(MethodHandle_klass); + WKID meth_group_end = WK_KLASS_ENUM_NAME(WrongMethodTypeException_klass); + initialize_wk_klasses_until(meth_group_start, scan, CHECK); + if (EnableMethodHandles) { + initialize_wk_klasses_through(meth_group_start, scan, CHECK); + } + if (_well_known_klasses[meth_group_start] == NULL) { + // Skip the rest of the method 
handle classes, if MethodHandle is not loaded. + scan = WKID(meth_group_end+1); + } + WKID indy_group_start = WK_KLASS_ENUM_NAME(Linkage_klass); + WKID indy_group_end = WK_KLASS_ENUM_NAME(Dynamic_klass); + initialize_wk_klasses_until(indy_group_start, scan, CHECK); + if (EnableInvokeDynamic) { + initialize_wk_klasses_through(indy_group_start, scan, CHECK); + } + if (_well_known_klasses[indy_group_start] == NULL) { + // Skip the rest of the dynamic typing classes, if Linkage is not loaded. + scan = WKID(indy_group_end+1); + } + initialize_wk_klasses_until(WKID_LIMIT, scan, CHECK); _box_klasses[T_BOOLEAN] = WK_KLASS(boolean_klass); @@ -2254,6 +2292,161 @@ } +methodOop SystemDictionary::find_method_handle_invoke(symbolHandle signature, + Handle class_loader, + Handle protection_domain, + TRAPS) { + if (!EnableMethodHandles) return NULL; + assert(class_loader.is_null() && protection_domain.is_null(), + "cannot load specialized versions of MethodHandle.invoke"); + if (invoke_method_table() == NULL) { + // create this side table lazily + _invoke_method_table = new SymbolPropertyTable(_invoke_method_size); + } + unsigned int hash = invoke_method_table()->compute_hash(signature); + int index = invoke_method_table()->hash_to_index(hash); + SymbolPropertyEntry* spe = invoke_method_table()->find_entry(index, hash, signature); + if (spe == NULL || spe->property_oop() == NULL) { + // Must create lots of stuff here, but outside of the SystemDictionary lock. + Handle mt = compute_method_handle_type(signature(), + class_loader, protection_domain, + CHECK_NULL); + KlassHandle mh_klass = SystemDictionaryHandles::MethodHandle_klass(); + methodHandle m = methodOopDesc::make_invoke_method(mh_klass, signature, + mt, CHECK_NULL); + // Now grab the lock. We might have to throw away the new method, + // if a racing thread has managed to install one at the same time. 
+ { + MutexLocker ml(SystemDictionary_lock, Thread::current()); + spe = invoke_method_table()->find_entry(index, hash, signature); + if (spe == NULL) + spe = invoke_method_table()->add_entry(index, hash, signature); + if (spe->property_oop() == NULL) + spe->set_property_oop(m()); + } + } + methodOop m = (methodOop) spe->property_oop(); + assert(m->is_method(), ""); + return m; +} + +// Ask Java code to find or construct a java.dyn.MethodType for the given +// signature, as interpreted relative to the given class loader. +// Because of class loader constraints, all method handle usage must be +// consistent with this loader. +Handle SystemDictionary::compute_method_handle_type(symbolHandle signature, + Handle class_loader, + Handle protection_domain, + TRAPS) { + Handle empty; + int npts = ArgumentCount(signature()).size(); + objArrayHandle pts = oopFactory::new_objArray(SystemDictionary::class_klass(), npts, CHECK_(empty)); + int arg = 0; + Handle rt; // the return type from the signature + for (SignatureStream ss(signature()); !ss.is_done(); ss.next()) { + oop mirror; + if (!ss.is_object()) { + mirror = Universe::java_mirror(ss.type()); + } else { + symbolOop name_oop = ss.as_symbol(CHECK_(empty)); + symbolHandle name(THREAD, name_oop); + klassOop klass = resolve_or_fail(name, + class_loader, protection_domain, + true, CHECK_(empty)); + mirror = Klass::cast(klass)->java_mirror(); + } + if (ss.at_return_type()) + rt = Handle(THREAD, mirror); + else + pts->obj_at_put(arg++, mirror); + } + assert(arg == npts, ""); + + // call MethodType java.dyn.MethodType::makeImpl(Class rt, Class[] pts, false, true) + bool varargs = false, trusted = true; + JavaCallArguments args(Handle(THREAD, rt())); + args.push_oop(pts()); + args.push_int(false); + args.push_int(trusted); + JavaValue result(T_OBJECT); + JavaCalls::call_static(&result, + SystemDictionary::MethodType_klass(), + vmSymbols::makeImpl_name(), vmSymbols::makeImpl_signature(), + &args, CHECK_(empty)); + return 
Handle(THREAD, (oop) result.get_jobject()); +} + + +// Ask Java code to find or construct a java.dyn.CallSite for the given +// name and signature, as interpreted relative to the given class loader. +Handle SystemDictionary::make_dynamic_call_site(KlassHandle caller, + int caller_method_idnum, + int caller_bci, + symbolHandle name, + methodHandle mh_invdyn, + TRAPS) { + Handle empty; + // call sun.dyn.CallSiteImpl::makeSite(caller, name, mtype, cmid, cbci) + oop name_str_oop = StringTable::intern(name(), CHECK_(empty)); // not a handle! + JavaCallArguments args(Handle(THREAD, caller->java_mirror())); + args.push_oop(name_str_oop); + args.push_oop(mh_invdyn->method_handle_type()); + args.push_int(caller_method_idnum); + args.push_int(caller_bci); + JavaValue result(T_OBJECT); + JavaCalls::call_static(&result, + SystemDictionary::CallSiteImpl_klass(), + vmSymbols::makeSite_name(), vmSymbols::makeSite_signature(), + &args, CHECK_(empty)); + oop call_site_oop = (oop) result.get_jobject(); + sun_dyn_CallSiteImpl::set_vmmethod(call_site_oop, mh_invdyn()); + if (TraceMethodHandles) { + tty->print_cr("Linked invokedynamic bci=%d site="INTPTR_FORMAT":", caller_bci, call_site_oop); + call_site_oop->print(); + tty->cr(); + } + return call_site_oop; +} + +Handle SystemDictionary::find_bootstrap_method(KlassHandle caller, + KlassHandle search_bootstrap_klass, + TRAPS) { + Handle empty; + if (!caller->oop_is_instance()) return empty; + + instanceKlassHandle ik(THREAD, caller()); + + if (ik->bootstrap_method() != NULL) { + return Handle(THREAD, ik->bootstrap_method()); + } + + // call java.dyn.Linkage::findBootstrapMethod(caller, sbk) + JavaCallArguments args(Handle(THREAD, ik->java_mirror())); + if (search_bootstrap_klass.is_null()) + args.push_oop(Handle()); + else + args.push_oop(search_bootstrap_klass->java_mirror()); + JavaValue result(T_OBJECT); + JavaCalls::call_static(&result, + SystemDictionary::Linkage_klass(), + vmSymbols::findBootstrapMethod_name(), + 
vmSymbols::findBootstrapMethod_signature(), + &args, CHECK_(empty)); + oop boot_method_oop = (oop) result.get_jobject(); + + if (boot_method_oop != NULL) { + // probably no race conditions, but let's be careful: + if (Atomic::cmpxchg_ptr(boot_method_oop, ik->adr_bootstrap_method(), NULL) == NULL) + ik->set_bootstrap_method(boot_method_oop); + else + boot_method_oop = ik->bootstrap_method(); + } else { + boot_method_oop = ik->bootstrap_method(); + } + + return Handle(THREAD, boot_method_oop); +} + // Since the identity hash code for symbols changes when the symbols are // moved from the regular perm gen (hash in the mark word) to the shared // spaces (hash is the address), the classes loaded into the dictionary diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/classfile/systemDictionary.hpp --- a/src/share/vm/classfile/systemDictionary.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/classfile/systemDictionary.hpp Thu May 07 10:30:17 2009 -0700 @@ -63,6 +63,7 @@ class LoaderConstraintTable; class HashtableBucket; class ResolutionErrorTable; +class SymbolPropertyTable; // Certain classes are preloaded, such as java.lang.Object and java.lang.String. 
// They are all "well-known", in the sense that no class loader is allowed @@ -131,6 +132,22 @@ template(reflect_constant_pool_klass, sun_reflect_ConstantPool, Opt_Only_JDK15) \ template(reflect_unsafe_static_field_accessor_impl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt_Only_JDK15) \ \ + /* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \ + template(MethodHandle_klass, java_dyn_MethodHandle, Opt) \ + template(MemberName_klass, sun_dyn_MemberName, Opt) \ + template(MethodHandleImpl_klass, sun_dyn_MethodHandleImpl, Opt) \ + template(AdapterMethodHandle_klass, sun_dyn_AdapterMethodHandle, Opt) \ + template(BoundMethodHandle_klass, sun_dyn_BoundMethodHandle, Opt) \ + template(DirectMethodHandle_klass, sun_dyn_DirectMethodHandle, Opt) \ + template(MethodType_klass, java_dyn_MethodType, Opt) \ + template(MethodTypeForm_klass, java_dyn_MethodTypeForm, Opt) \ + template(WrongMethodTypeException_klass, java_dyn_WrongMethodTypeException, Opt) \ + template(Linkage_klass, java_dyn_Linkage, Opt) \ + template(CallSite_klass, java_dyn_CallSite, Opt) \ + template(CallSiteImpl_klass, sun_dyn_CallSiteImpl, Opt) \ + template(Dynamic_klass, java_dyn_Dynamic, Opt) \ + /* Note: MethodHandle must be first, and Dynamic last in group */ \ + \ template(vector_klass, java_util_Vector, Pre) \ template(hashtable_klass, java_util_Hashtable, Pre) \ template(stringBuffer_klass, java_lang_StringBuffer, Pre) \ @@ -444,6 +461,32 @@ static char* check_signature_loaders(symbolHandle signature, Handle loader1, Handle loader2, bool is_method, TRAPS); + // JSR 292 + // find the java.dyn.MethodHandles::invoke method for a given signature + static methodOop find_method_handle_invoke(symbolHandle signature, + Handle class_loader, + Handle protection_domain, + TRAPS); + // ask Java to compute the java.dyn.MethodType object for a given signature + static Handle compute_method_handle_type(symbolHandle signature, + Handle class_loader, + Handle protection_domain, + TRAPS); 
+ // ask Java to create a dynamic call site, while linking an invokedynamic op + static Handle make_dynamic_call_site(KlassHandle caller, + int caller_method_idnum, + int caller_bci, + symbolHandle name, + methodHandle mh_invoke, + TRAPS); + + // coordinate with Java about bootstrap methods + static Handle find_bootstrap_method(KlassHandle caller, + // This argument is non-null only when a + // classfile attribute has been found: + KlassHandle search_bootstrap_klass, + TRAPS); + // Utility for printing loader "name" as part of tracing constraints static const char* loader_name(oop loader) { return ((loader) == NULL ? "" : @@ -460,6 +503,7 @@ enum Constants { _loader_constraint_size = 107, // number of entries in constraint table _resolution_error_size = 107, // number of entries in resolution error table + _invoke_method_size = 139, // number of entries in invoke method table _nof_buckets = 1009 // number of buckets in hash table }; @@ -489,6 +533,9 @@ // Resolution errors static ResolutionErrorTable* _resolution_errors; + // Invoke methods (JSR 292) + static SymbolPropertyTable* _invoke_method_table; + public: // for VM_CounterDecay iteration support friend class CounterDecay; @@ -506,6 +553,7 @@ static PlaceholderTable* placeholders() { return _placeholders; } static LoaderConstraintTable* constraints() { return _loader_constraints; } static ResolutionErrorTable* resolution_errors() { return _resolution_errors; } + static SymbolPropertyTable* invoke_method_table() { return _invoke_method_table; } // Basic loading operations static klassOop resolve_instance_class_or_null(symbolHandle class_name, Handle class_loader, Handle protection_domain, TRAPS); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/classfile/verifier.cpp --- a/src/share/vm/classfile/verifier.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/classfile/verifier.cpp Thu May 07 10:30:17 2009 -0700 @@ -1174,6 +1174,7 @@ &this_uninit, return_type, cp, CHECK_VERIFY(this)); no_control_flow = false; 
break; case Bytecodes::_invokeinterface : + case Bytecodes::_invokedynamic : verify_invoke_instructions( &bcs, code_length, ¤t_frame, &this_uninit, return_type, cp, CHECK_VERIFY(this)); @@ -1895,12 +1896,23 @@ Bytecodes::Code opcode = bcs->code(); unsigned int types = (opcode == Bytecodes::_invokeinterface ? 1 << JVM_CONSTANT_InterfaceMethodref + : opcode == Bytecodes::_invokedynamic + ? 1 << JVM_CONSTANT_NameAndType : 1 << JVM_CONSTANT_Methodref); verify_cp_type(index, cp, types, CHECK_VERIFY(this)); // Get method name and signature - symbolHandle method_name(THREAD, cp->name_ref_at(index)); - symbolHandle method_sig(THREAD, cp->signature_ref_at(index)); + symbolHandle method_name; + symbolHandle method_sig; + if (opcode == Bytecodes::_invokedynamic) { + int name_index = cp->name_ref_index_at(index); + int sig_index = cp->signature_ref_index_at(index); + method_name = symbolHandle(THREAD, cp->symbol_at(name_index)); + method_sig = symbolHandle(THREAD, cp->symbol_at(sig_index)); + } else { + method_name = symbolHandle(THREAD, cp->name_ref_at(index)); + method_sig = symbolHandle(THREAD, cp->signature_ref_at(index)); + } if (!SignatureVerifier::is_valid_method_signature(method_sig)) { class_format_error( @@ -1910,8 +1922,17 @@ } // Get referenced class type - VerificationType ref_class_type = cp_ref_index_to_type( - index, cp, CHECK_VERIFY(this)); + VerificationType ref_class_type; + if (opcode == Bytecodes::_invokedynamic) { + if (!EnableInvokeDynamic) { + class_format_error( + "invokedynamic instructions not enabled on this JVM", + _klass->external_name()); + return; + } + } else { + ref_class_type = cp_ref_index_to_type(index, cp, CHECK_VERIFY(this)); + } // For a small signature length, we just allocate 128 bytes instead // of parsing the signature once to find its size. 
@@ -1970,6 +1991,14 @@ } } + if (opcode == Bytecodes::_invokedynamic) { + address bcp = bcs->bcp(); + if (*(bcp+3) != 0 || *(bcp+4) != 0) { + verify_error(bci, "Third and fourth operand bytes of invokedynamic must be zero"); + return; + } + } + if (method_name->byte_at(0) == '<') { // Make sure can only be invoked by invokespecial if (opcode != Bytecodes::_invokespecial || @@ -1994,7 +2023,8 @@ current_frame->pop_stack(sig_types[i], CHECK_VERIFY(this)); } // Check objectref on operand stack - if (opcode != Bytecodes::_invokestatic) { + if (opcode != Bytecodes::_invokestatic && + opcode != Bytecodes::_invokedynamic) { if (method_name() == vmSymbols::object_initializer_name()) { // method verify_invoke_init(bcs, ref_class_type, current_frame, code_length, this_uninit, cp, CHECK_VERIFY(this)); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/classfile/vmSymbols.hpp --- a/src/share/vm/classfile/vmSymbols.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/classfile/vmSymbols.hpp Thu May 07 10:30:17 2009 -0700 @@ -216,7 +216,34 @@ template(sun_reflect_UnsafeStaticFieldAccessorImpl, "sun/reflect/UnsafeStaticFieldAccessorImpl")\ template(base_name, "base") \ \ - /* common method names */ \ + /* Support for JSR 292 & invokedynamic (JDK 1.7 and above) */ \ + template(java_dyn_Dynamic, "java/dyn/Dynamic") \ + template(java_dyn_Linkage, "java/dyn/Linkage") \ + template(java_dyn_CallSite, "java/dyn/CallSite") \ + template(java_dyn_MethodHandle, "java/dyn/MethodHandle") \ + template(java_dyn_MethodType, "java/dyn/MethodType") \ + template(java_dyn_WrongMethodTypeException, "java/dyn/WrongMethodTypeException") \ + template(java_dyn_MethodType_signature, "Ljava/dyn/MethodType;") \ + template(java_dyn_MethodHandle_signature, "Ljava/dyn/MethodHandle;") \ + /* internal classes known only to the JVM: */ \ + template(java_dyn_MethodTypeForm, "java/dyn/MethodTypeForm") \ + template(java_dyn_MethodTypeForm_signature, "Ljava/dyn/MethodTypeForm;") \ + template(sun_dyn_MemberName, 
"sun/dyn/MemberName") \ + template(sun_dyn_MethodHandleImpl, "sun/dyn/MethodHandleImpl") \ + template(sun_dyn_AdapterMethodHandle, "sun/dyn/AdapterMethodHandle") \ + template(sun_dyn_BoundMethodHandle, "sun/dyn/BoundMethodHandle") \ + template(sun_dyn_DirectMethodHandle, "sun/dyn/DirectMethodHandle") \ + template(sun_dyn_CallSiteImpl, "sun/dyn/CallSiteImpl") \ + template(makeImpl_name, "makeImpl") /*MethodType::makeImpl*/ \ + template(makeImpl_signature, "(Ljava/lang/Class;[Ljava/lang/Class;ZZ)Ljava/dyn/MethodType;") \ + template(makeSite_name, "makeSite") /*CallSiteImpl::makeImpl*/ \ + template(makeSite_signature, "(Ljava/lang/Class;Ljava/lang/String;Ljava/dyn/MethodType;II)Ljava/dyn/CallSite;") \ + template(findBootstrapMethod_name, "findBootstrapMethod") \ + template(findBootstrapMethod_signature, "(Ljava/lang/Class;Ljava/lang/Class;)Ljava/dyn/MethodHandle;") \ + NOT_LP64( do_alias(machine_word_signature, int_signature) ) \ + LP64_ONLY( do_alias(machine_word_signature, long_signature) ) \ + \ + /* common method and field names */ \ template(object_initializer_name, "") \ template(class_initializer_name, "") \ template(println_name, "println") \ @@ -289,6 +316,23 @@ template(bitCount_name, "bitCount") \ template(profile_name, "profile") \ template(equals_name, "equals") \ + template(target_name, "target") \ + template(toString_name, "toString") \ + template(values_name, "values") \ + template(receiver_name, "receiver") \ + template(vmmethod_name, "vmmethod") \ + template(vmtarget_name, "vmtarget") \ + template(vmentry_name, "vmentry") \ + template(vmslots_name, "vmslots") \ + template(vmindex_name, "vmindex") \ + template(vmargslot_name, "vmargslot") \ + template(flags_name, "flags") \ + template(argument_name, "argument") \ + template(conversion_name, "conversion") \ + template(rtype_name, "rtype") \ + template(ptypes_name, "ptypes") \ + template(form_name, "form") \ + template(erasedType_name, "erasedType") \ \ /* non-intrinsic name/signature pairs: */ \ 
template(register_method_name, "register") \ @@ -353,6 +397,7 @@ template(void_classloader_signature, "()Ljava/lang/ClassLoader;") \ template(void_object_signature, "()Ljava/lang/Object;") \ template(void_class_signature, "()Ljava/lang/Class;") \ + template(void_string_signature, "()Ljava/lang/String;") \ template(object_array_object_object_signature, "(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;")\ template(exception_void_signature, "(Ljava/lang/Exception;)V") \ template(protectiondomain_signature, "[Ljava/security/ProtectionDomain;") \ diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/code/vtableStubs.cpp --- a/src/share/vm/code/vtableStubs.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/code/vtableStubs.cpp Thu May 07 10:30:17 2009 -0700 @@ -107,13 +107,11 @@ s = create_itable_stub(vtable_index); } enter(is_vtable_stub, vtable_index, s); -#ifndef PRODUCT if (PrintAdapterHandlers) { tty->print_cr("Decoding VtableStub %s[%d]@%d", is_vtable_stub? "vtbl": "itbl", vtable_index, VtableStub::receiver_location()); Disassembler::decode(s->code_begin(), s->code_end()); } -#endif } return s->entry_point(); } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/includeDB_compiler1 --- a/src/share/vm/includeDB_compiler1 Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/includeDB_compiler1 Thu May 07 10:30:17 2009 -0700 @@ -270,6 +270,7 @@ c1_LinearScan.cpp bitMap.inline.hpp c1_LinearScan.cpp c1_CFGPrinter.hpp +c1_LinearScan.cpp c1_CodeStubs.hpp c1_LinearScan.cpp c1_Compilation.hpp c1_LinearScan.cpp c1_FrameMap.hpp c1_LinearScan.cpp c1_IR.hpp diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/includeDB_core --- a/src/share/vm/includeDB_core Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/includeDB_core Thu May 07 10:30:17 2009 -0700 @@ -254,6 +254,7 @@ assembler_.cpp collectedHeap.inline.hpp assembler_.cpp interfaceSupport.hpp assembler_.cpp interpreter.hpp +assembler_.cpp methodHandles.hpp assembler_.cpp objectMonitor.hpp assembler_.cpp os.hpp assembler_.cpp 
resourceArea.hpp @@ -1274,6 +1275,7 @@ cpCacheKlass.cpp collectedHeap.hpp cpCacheKlass.cpp constantPoolOop.hpp cpCacheKlass.cpp cpCacheKlass.hpp +cpCacheKlass.cpp genOopClosures.inline.hpp cpCacheKlass.cpp handles.inline.hpp cpCacheKlass.cpp javaClasses.hpp cpCacheKlass.cpp markSweep.inline.hpp @@ -2202,6 +2204,7 @@ interpreter_.cpp jvmtiExport.hpp interpreter_.cpp jvmtiThreadState.hpp interpreter_.cpp methodDataOop.hpp +interpreter_.cpp methodHandles.hpp interpreter_.cpp methodOop.hpp interpreter_.cpp oop.inline.hpp interpreter_.cpp sharedRuntime.hpp @@ -2596,6 +2599,7 @@ linkResolver.cpp instanceKlass.hpp linkResolver.cpp interpreterRuntime.hpp linkResolver.cpp linkResolver.hpp +linkResolver.cpp methodHandles.hpp linkResolver.cpp nativeLookup.hpp linkResolver.cpp objArrayOop.hpp linkResolver.cpp reflection.hpp @@ -2812,6 +2816,25 @@ methodDataOop.hpp orderAccess.hpp methodDataOop.hpp universe.hpp +methodHandles.hpp frame.inline.hpp +methodHandles.hpp globals.hpp +methodHandles.hpp interfaceSupport.hpp +methodHandles.hpp javaClasses.hpp +methodHandles.hpp vmSymbols.hpp + +methodHandles.cpp allocation.inline.hpp +methodHandles.cpp interpreter.hpp +methodHandles.cpp javaCalls.hpp +methodHandles.cpp methodHandles.hpp +methodHandles.cpp oopFactory.hpp +methodHandles.cpp reflection.hpp +methodHandles.cpp signature.hpp +methodHandles.cpp symbolTable.hpp + +methodHandles_.cpp allocation.inline.hpp +methodHandles_.cpp interpreter.hpp +methodHandles_.cpp methodHandles.hpp + methodKlass.cpp collectedHeap.inline.hpp methodKlass.cpp constMethodKlass.hpp methodKlass.cpp gcLocker.hpp @@ -3061,6 +3084,7 @@ oop.inline.hpp arrayOop.hpp oop.inline.hpp atomic.hpp oop.inline.hpp barrierSet.inline.hpp +oop.inline.hpp bytes_.hpp oop.inline.hpp cardTableModRefBS.hpp oop.inline.hpp collectedHeap.inline.hpp oop.inline.hpp compactingPermGenGen.hpp @@ -3674,6 +3698,7 @@ sharedRuntime.cpp interpreter.hpp sharedRuntime.cpp javaCalls.hpp sharedRuntime.cpp jvmtiExport.hpp +sharedRuntime.cpp 
methodHandles.hpp sharedRuntime.cpp jvmtiRedefineClassesTrace.hpp sharedRuntime.cpp nativeInst_.hpp sharedRuntime.cpp nativeLookup.hpp @@ -3862,6 +3887,7 @@ stubGenerator_.cpp handles.inline.hpp stubGenerator_.cpp instanceOop.hpp stubGenerator_.cpp interpreter.hpp +stubGenerator_.cpp methodHandles.hpp stubGenerator_.cpp methodOop.hpp stubGenerator_.cpp nativeInst_.hpp stubGenerator_.cpp objArrayKlass.hpp @@ -4076,6 +4102,7 @@ templateTable_.cpp interpreterRuntime.hpp templateTable_.cpp interpreter.hpp templateTable_.cpp methodDataOop.hpp +templateTable_.cpp methodHandles.hpp templateTable_.cpp objArrayKlass.hpp templateTable_.cpp oop.inline.hpp templateTable_.cpp sharedRuntime.hpp diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/includeDB_gc_parallel --- a/src/share/vm/includeDB_gc_parallel Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/includeDB_gc_parallel Thu May 07 10:30:17 2009 -0700 @@ -36,6 +36,18 @@ constantPoolKlass.cpp psScavenge.inline.hpp constantPoolKlass.cpp parOopClosures.inline.hpp +constantPoolKlass.cpp cardTableRS.hpp +constantPoolKlass.cpp oop.pcgc.inline.hpp +constantPoolKlass.cpp psPromotionManager.inline.hpp +constantPoolKlass.cpp psScavenge.inline.hpp +constantPoolKlass.cpp parOopClosures.inline.hpp + +cpCacheKlass.cpp cardTableRS.hpp +cpCacheKlass.cpp oop.pcgc.inline.hpp +cpCacheKlass.cpp psPromotionManager.inline.hpp +cpCacheKlass.cpp psScavenge.inline.hpp +cpCacheKlass.cpp parOopClosures.inline.hpp + genCollectedHeap.cpp concurrentMarkSweepThread.hpp genCollectedHeap.cpp vmCMSOperations.hpp diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/includeDB_jvmti --- a/src/share/vm/includeDB_jvmti Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/includeDB_jvmti Thu May 07 10:30:17 2009 -0700 @@ -28,6 +28,7 @@ jvmtiClassFileReconstituter.cpp bytes_.hpp jvmtiClassFileReconstituter.cpp jvmtiClassFileReconstituter.hpp jvmtiClassFileReconstituter.cpp symbolTable.hpp +jvmtiClassFileReconstituter.cpp signature.hpp jvmtiClassFileReconstituter.hpp 
jvmtiEnv.hpp diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/interpreter/abstractInterpreter.hpp --- a/src/share/vm/interpreter/abstractInterpreter.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/interpreter/abstractInterpreter.hpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,6 +61,7 @@ empty, // empty method (code: _return) accessor, // accessor method (code: _aload_0, _getfield, _(a|i)return) abstract, // abstract method (throws an AbstractMethodException) + method_handle, // java.dyn.MethodHandles::invoke java_lang_math_sin, // implementation of java.lang.Math.sin (x) java_lang_math_cos, // implementation of java.lang.Math.cos (x) java_lang_math_tan, // implementation of java.lang.Math.tan (x) @@ -91,8 +92,6 @@ static address _rethrow_exception_entry; // rethrows an activation in previous frame - - friend class AbstractInterpreterGenerator; friend class InterpreterGenerator; friend class InterpreterMacroAssembler; @@ -218,6 +217,73 @@ stackElementSize()) + tag_offset_in_bytes(); } + // access to stacked values according to type: + static oop* oop_addr_in_slot(intptr_t* slot_addr) { + return (oop*) slot_addr; + } + static jint* int_addr_in_slot(intptr_t* slot_addr) { + if ((int) sizeof(jint) < wordSize && !Bytes::is_Java_byte_ordering_different()) + // big-endian LP64 + return (jint*)(slot_addr + 1) - 1; + else + return (jint*) slot_addr; + } + static jlong long_in_slot(intptr_t* slot_addr) { + if (sizeof(intptr_t) >= sizeof(jlong)) { + return *(jlong*) slot_addr; + } else if (!TaggedStackInterpreter) { + return Bytes::get_native_u8((address)slot_addr); + } else { + assert(sizeof(intptr_t) * 2 == sizeof(jlong), "ILP32"); + // assemble the long in memory order (not arithmetic 
order) + union { jlong j; jint i[2]; } u; + u.i[0] = (jint) slot_addr[0*stackElementSize()]; + u.i[1] = (jint) slot_addr[1*stackElementSize()]; + return u.j; + } + } + static void set_long_in_slot(intptr_t* slot_addr, jlong value) { + if (sizeof(intptr_t) >= sizeof(jlong)) { + *(jlong*) slot_addr = value; + } else if (!TaggedStackInterpreter) { + Bytes::put_native_u8((address)slot_addr, value); + } else { + assert(sizeof(intptr_t) * 2 == sizeof(jlong), "ILP32"); + // assemble the long in memory order (not arithmetic order) + union { jlong j; jint i[2]; } u; + u.j = value; + slot_addr[0*stackElementSize()] = (intptr_t) u.i[0]; + slot_addr[1*stackElementSize()] = (intptr_t) u.i[1]; + } + } + static void get_jvalue_in_slot(intptr_t* slot_addr, BasicType type, jvalue* value) { + switch (type) { + case T_BOOLEAN: value->z = *int_addr_in_slot(slot_addr); break; + case T_CHAR: value->c = *int_addr_in_slot(slot_addr); break; + case T_BYTE: value->b = *int_addr_in_slot(slot_addr); break; + case T_SHORT: value->s = *int_addr_in_slot(slot_addr); break; + case T_INT: value->i = *int_addr_in_slot(slot_addr); break; + case T_LONG: value->j = long_in_slot(slot_addr); break; + case T_FLOAT: value->f = *(jfloat*)int_addr_in_slot(slot_addr); break; + case T_DOUBLE: value->d = jdouble_cast(long_in_slot(slot_addr)); break; + case T_OBJECT: value->l = (jobject)*oop_addr_in_slot(slot_addr); break; + default: ShouldNotReachHere(); + } + } + static void set_jvalue_in_slot(intptr_t* slot_addr, BasicType type, jvalue* value) { + switch (type) { + case T_BOOLEAN: *int_addr_in_slot(slot_addr) = (value->z != 0); break; + case T_CHAR: *int_addr_in_slot(slot_addr) = value->c; break; + case T_BYTE: *int_addr_in_slot(slot_addr) = value->b; break; + case T_SHORT: *int_addr_in_slot(slot_addr) = value->s; break; + case T_INT: *int_addr_in_slot(slot_addr) = value->i; break; + case T_LONG: set_long_in_slot(slot_addr, value->j); break; + case T_FLOAT: *(jfloat*)int_addr_in_slot(slot_addr) = value->f; 
break; + case T_DOUBLE: set_long_in_slot(slot_addr, jlong_cast(value->d)); break; + case T_OBJECT: *oop_addr_in_slot(slot_addr) = (oop) value->l; break; + default: ShouldNotReachHere(); + } + } }; //------------------------------------------------------------------------------------------------------------------------ diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/interpreter/bytecode.cpp --- a/src/share/vm/interpreter/bytecode.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/interpreter/bytecode.cpp Thu May 07 10:30:17 2009 -0700 @@ -34,12 +34,6 @@ } -void Bytecode::set_fast_index(int i) { - assert(0 <= i && i < 0x10000, "illegal index value"); - Bytes::put_native_u2(addr_at(1), (jushort)i); -} - - bool Bytecode::check_must_rewrite() const { assert(Bytecodes::can_rewrite(code()), "post-check only"); @@ -118,7 +112,12 @@ int Bytecode_invoke::index() const { - return Bytes::get_Java_u2(bcp() + 1); + // Note: Rewriter::rewrite changes the Java_u2 of an invokedynamic to a native_u4, + // at the same time it allocates per-call-site CP cache entries. + if (has_giant_index()) + return Bytes::get_native_u4(bcp() + 1); + else + return Bytes::get_Java_u2(bcp() + 1); } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/interpreter/bytecode.hpp --- a/src/share/vm/interpreter/bytecode.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/interpreter/bytecode.hpp Thu May 07 10:30:17 2009 -0700 @@ -65,14 +65,6 @@ // The base class for different kinds of bytecode abstractions. // Provides the primitive operations to manipulate code relative // to an objects 'this' pointer. -// -// Note: Even though it seems that the fast_index & set_fast_index -// functions are machine specific, they're not. They only use -// the natural way to store a 16bit index on a given machine, -// independent of the particular byte ordering. 
Since all other -// places in the system that refer to these indices use the -// same method (the natural byte ordering on the platform) -// this will always work and be machine-independent). class Bytecode: public ThisRelativeObj { protected: @@ -83,24 +75,40 @@ // Attributes address bcp() const { return addr_at(0); } address next_bcp() const { return addr_at(0) + Bytecodes::length_at(bcp()); } + int instruction_size() const { return Bytecodes::length_at(bcp()); } Bytecodes::Code code() const { return Bytecodes::code_at(addr_at(0)); } Bytecodes::Code java_code() const { return Bytecodes::java_code(code()); } bool must_rewrite() const { return Bytecodes::can_rewrite(code()) && check_must_rewrite(); } bool is_active_breakpoint() const { return Bytecodes::is_active_breakpoint_at(bcp()); } - int one_byte_index() const { return byte_at(1); } - int two_byte_index() const { return (byte_at(1) << 8) + byte_at(2); } + int one_byte_index() const { assert_index_size(1); return byte_at(1); } + int two_byte_index() const { assert_index_size(2); return (byte_at(1) << 8) + byte_at(2); } + int offset() const { return (two_byte_index() << 16) >> 16; } address destination() const { return bcp() + offset(); } - int fast_index() const { return Bytes::get_native_u2(addr_at(1)); } // Attribute modification void set_code(Bytecodes::Code code); - void set_fast_index(int i); // Creation inline friend Bytecode* Bytecode_at(address bcp); + + private: + void assert_index_size(int required_size) const { +#ifdef ASSERT + int isize = instruction_size() - 1; + if (isize == 2 && code() == Bytecodes::_iinc) + isize = 1; + else if (isize <= 2) + ; // no change + else if (code() == Bytecodes::_invokedynamic) + isize = 4; + else + isize = 2; + assert(isize = required_size, "wrong index size"); +#endif + } }; inline Bytecode* Bytecode_at(address bcp) { @@ -195,6 +203,9 @@ bool is_invokevirtual() const { return adjusted_invoke_code() == Bytecodes::_invokevirtual; } bool is_invokestatic() const { return 
adjusted_invoke_code() == Bytecodes::_invokestatic; } bool is_invokespecial() const { return adjusted_invoke_code() == Bytecodes::_invokespecial; } + bool is_invokedynamic() const { return adjusted_invoke_code() == Bytecodes::_invokedynamic; } + + bool has_giant_index() const { return is_invokedynamic(); } bool is_valid() const { return is_invokeinterface() || is_invokevirtual() || diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/interpreter/bytecodeStream.hpp --- a/src/share/vm/interpreter/bytecodeStream.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/interpreter/bytecodeStream.hpp Thu May 07 10:30:17 2009 -0700 @@ -109,6 +109,7 @@ Bytecodes::Code code() const { return _code; } bool is_wide() const { return _is_wide; } + int instruction_size() const { return (_next_bci - _bci); } bool is_last_bytecode() const { return _next_bci >= _end_bci; } address bcp() const { return method()->code_base() + _bci; } @@ -122,8 +123,29 @@ int dest_w() const { return bci() + (int )Bytes::get_Java_u4(bcp() + 1); } // Unsigned indices, widening - int get_index() const { return (is_wide()) ? Bytes::get_Java_u2(bcp() + 2) : bcp()[1]; } - int get_index_big() const { return (int)Bytes::get_Java_u2(bcp() + 1); } + int get_index() const { assert_index_size(is_wide() ? 2 : 1); + return (is_wide()) ? Bytes::get_Java_u2(bcp() + 2) : bcp()[1]; } + int get_index_big() const { assert_index_size(2); + return (int)Bytes::get_Java_u2(bcp() + 1); } + int get_index_int() const { return has_giant_index() ? 
get_index_giant() : get_index_big(); } + int get_index_giant() const { assert_index_size(4); return Bytes::get_native_u4(bcp() + 1); } + int has_giant_index() const { return (code() == Bytecodes::_invokedynamic); } + + private: + void assert_index_size(int required_size) const { +#ifdef ASSERT + int isize = instruction_size() - (int)_is_wide - 1; + if (isize == 2 && code() == Bytecodes::_iinc) + isize = 1; + else if (isize <= 2) + ; // no change + else if (has_giant_index()) + isize = 4; + else + isize = 2; + assert(isize == required_size, "wrong index size"); +#endif + } }; // In BytecodeStream, non-java bytecodes will be translated into the diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/interpreter/bytecodeTracer.cpp --- a/src/share/vm/interpreter/bytecodeTracer.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/interpreter/bytecodeTracer.cpp Thu May 07 10:30:17 2009 -0700 @@ -48,12 +48,15 @@ int get_index() { return *(address)_next_pc++; } int get_big_index() { int i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; } + int get_giant_index() { int i=Bytes::get_native_u4(_next_pc); _next_pc+=4; return i; } int get_index_special() { return (is_wide()) ?
get_big_index() : get_index(); } methodOop method() { return _current_method; } bool is_wide() { return _is_wide; } + bool check_index(int i, bool in_cp_cache, int& cp_index, outputStream* st = tty); void print_constant(int i, outputStream* st = tty); + void print_field_or_method(int i, outputStream* st = tty); void print_attributes(Bytecodes::Code code, int bci, outputStream* st = tty); void bytecode_epilog(int bci, outputStream* st = tty); @@ -182,7 +185,71 @@ } } +bool BytecodePrinter::check_index(int i, bool in_cp_cache, int& cp_index, outputStream* st) { + constantPoolOop constants = method()->constants(); + int ilimit = constants->length(), climit = 0; + + constantPoolCacheOop cache = NULL; + if (in_cp_cache) { + cache = constants->cache(); + if (cache != NULL) { + //climit = cache->length(); // %%% private! + size_t size = cache->size() * HeapWordSize; + size -= sizeof(constantPoolCacheOopDesc); + size /= sizeof(ConstantPoolCacheEntry); + climit = (int) size; + } + } + + if (in_cp_cache && constantPoolCacheOopDesc::is_secondary_index(i)) { + i = constantPoolCacheOopDesc::decode_secondary_index(i); + st->print(" secondary cache[%d] of", i); + if (i >= 0 && i < climit) { + if (!cache->entry_at(i)->is_secondary_entry()) { + st->print_cr(" not secondary entry?", i); + return false; + } + i = cache->entry_at(i)->main_entry_index(); + goto check_cache_index; + } else { + st->print_cr(" not in cache[*]?", i); + return false; + } + } + + if (cache != NULL) { + i = Bytes::swap_u2(i); + if (WizardMode) st->print(" (swap=%d)", i); + goto check_cache_index; + } + + check_cp_index: + if (i >= 0 && i < ilimit) { + if (WizardMode) st->print(" cp[%d]", i); + cp_index = i; + return true; + } + + st->print_cr(" CP[%d] not in CP", i); + return false; + + check_cache_index: + if (i >= 0 && i < climit) { + if (cache->entry_at(i)->is_secondary_entry()) { + st->print_cr(" secondary entry?"); + return false; + } + i = cache->entry_at(i)->constant_pool_index(); + goto 
check_cp_index; + } + st->print_cr(" not in CP[*]?", i); + return false; +} + void BytecodePrinter::print_constant(int i, outputStream* st) { + int orig_i = i; + if (!check_index(orig_i, false, i, st)) return; + constantPoolOop constants = method()->constants(); constantTag tag = constants->tag_at(i); @@ -203,7 +270,31 @@ st->print_cr(" %s", constants->resolved_klass_at(i)->klass_part()->external_name()); } else if (tag.is_unresolved_klass()) { st->print_cr(" ", i); - } else ShouldNotReachHere(); + } else { + st->print_cr(" bad tag=%d at %d", tag.value(), i); + } +} + +void BytecodePrinter::print_field_or_method(int i, outputStream* st) { + int orig_i = i; + if (!check_index(orig_i, true, i, st)) return; + + constantPoolOop constants = method()->constants(); + constantTag tag = constants->tag_at(i); + + switch (tag.value()) { + case JVM_CONSTANT_InterfaceMethodref: + case JVM_CONSTANT_Methodref: + case JVM_CONSTANT_Fieldref: + break; + default: + st->print_cr(" bad tag=%d at %d", tag.value(), i); + return; + } + + symbolOop name = constants->name_ref_at(orig_i); + symbolOop signature = constants->signature_ref_at(orig_i); + st->print_cr(" %d <%s> <%s> ", i, name->as_C_string(), signature->as_C_string()); } @@ -354,36 +445,28 @@ case Bytecodes::_putstatic: case Bytecodes::_getstatic: case Bytecodes::_putfield: - case Bytecodes::_getfield: { - int i = get_big_index(); - constantPoolOop constants = method()->constants(); - symbolOop field = constants->name_ref_at(i); - st->print_cr(" %d <%s>", i, field->as_C_string()); - } + case Bytecodes::_getfield: + print_field_or_method(get_big_index(), st); break; case Bytecodes::_invokevirtual: case Bytecodes::_invokespecial: case Bytecodes::_invokestatic: - { int i = get_big_index(); - constantPoolOop constants = method()->constants(); - symbolOop name = constants->name_ref_at(i); - symbolOop signature = constants->signature_ref_at(i); - st->print_cr(" %d <%s> <%s> ", i, name->as_C_string(), signature->as_C_string()); - } + 
print_field_or_method(get_big_index(), st); break; case Bytecodes::_invokeinterface: { int i = get_big_index(); int n = get_index(); - get_index(); - constantPoolOop constants = method()->constants(); - symbolOop name = constants->name_ref_at(i); - symbolOop signature = constants->signature_ref_at(i); - st->print_cr(" %d <%s> <%s> %d", i, name->as_C_string(), signature->as_C_string(), n); + get_index(); // ignore zero byte + print_field_or_method(i, st); } break; + case Bytecodes::_invokedynamic: + print_field_or_method(get_giant_index(), st); + break; + case Bytecodes::_new: case Bytecodes::_checkcast: case Bytecodes::_instanceof: diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/interpreter/bytecodes.cpp --- a/src/share/vm/interpreter/bytecodes.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/interpreter/bytecodes.cpp Thu May 07 10:30:17 2009 -0700 @@ -357,7 +357,7 @@ def(_invokespecial , "invokespecial" , "bjj" , NULL , T_ILLEGAL, -1, true); def(_invokestatic , "invokestatic" , "bjj" , NULL , T_ILLEGAL, 0, true); def(_invokeinterface , "invokeinterface" , "bjj__", NULL , T_ILLEGAL, -1, true); - def(_xxxunusedxxx , "xxxunusedxxx" , NULL , NULL , T_VOID , 0, false); + def(_invokedynamic , "invokedynamic" , "bjjjj", NULL , T_ILLEGAL, -1, true ); def(_new , "new" , "bii" , NULL , T_OBJECT , 1, true ); def(_newarray , "newarray" , "bc" , NULL , T_OBJECT , 0, true ); def(_anewarray , "anewarray" , "bii" , NULL , T_OBJECT , 0, true ); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/interpreter/bytecodes.hpp --- a/src/share/vm/interpreter/bytecodes.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/interpreter/bytecodes.hpp Thu May 07 10:30:17 2009 -0700 @@ -218,7 +218,7 @@ _invokespecial = 183, // 0xb7 _invokestatic = 184, // 0xb8 _invokeinterface = 185, // 0xb9 - _xxxunusedxxx = 186, // 0xba + _invokedynamic = 186, // 0xba // if EnableInvokeDynamic _new = 187, // 0xbb _newarray = 188, // 0xbc _anewarray = 189, // 0xbd diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 
src/share/vm/interpreter/cppInterpreter.cpp --- a/src/share/vm/interpreter/cppInterpreter.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/interpreter/cppInterpreter.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -114,6 +114,7 @@ method_entry(empty); method_entry(accessor); method_entry(abstract); + method_entry(method_handle); method_entry(java_lang_math_sin ); method_entry(java_lang_math_cos ); method_entry(java_lang_math_tan ); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/interpreter/interpreter.cpp --- a/src/share/vm/interpreter/interpreter.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/interpreter/interpreter.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -168,10 +168,14 @@ // Abstract method? if (m->is_abstract()) return abstract; + // Invoker for method handles? + if (m->is_method_handle_invoke()) return method_handle; + // Native method? // Note: This test must come _before_ the test for intrinsic // methods. See also comments below. if (m->is_native()) { + assert(!m->is_method_handle_invoke(), "overlapping bits here, watch out"); return m->is_synchronized() ? 
native_synchronized : native; } @@ -249,6 +253,7 @@ case empty : tty->print("empty" ); break; case accessor : tty->print("accessor" ); break; case abstract : tty->print("abstract" ); break; + case method_handle : tty->print("method_handle" ); break; case java_lang_math_sin : tty->print("java_lang_math_sin" ); break; case java_lang_math_cos : tty->print("java_lang_math_cos" ); break; case java_lang_math_tan : tty->print("java_lang_math_tan" ); break; diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/interpreter/interpreterRuntime.cpp --- a/src/share/vm/interpreter/interpreterRuntime.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/interpreter/interpreterRuntime.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -293,6 +293,24 @@ THROW_MSG(vmSymbols::java_lang_ClassCastException(), message); IRT_END +// required can be either a MethodType, or a Class (for a single argument) +// actual (if not null) can be either a MethodHandle, or an arbitrary value (for a single argument) +IRT_ENTRY(void, InterpreterRuntime::throw_WrongMethodTypeException(JavaThread* thread, + oopDesc* required, + oopDesc* actual)) { + ResourceMark rm(thread); + char* message = SharedRuntime::generate_wrong_method_type_message(thread, required, actual); + + if (ProfileTraps) { + note_trap(thread, Deoptimization::Reason_constraint, CHECK); + } + + // create exception + THROW_MSG(vmSymbols::java_dyn_WrongMethodTypeException(), message); +} +IRT_END + + // exception_handler_for_exception(...) returns the continuation address, // the exception oop (via TLS) and sets the bci/bcp for the continuation. @@ -663,6 +681,133 @@ IRT_END +// First time execution: Resolve symbols, create a permanent CallSiteImpl object. 
+IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) { + ResourceMark rm(thread); + + assert(EnableInvokeDynamic, ""); + + const Bytecodes::Code bytecode = Bytecodes::_invokedynamic; + + methodHandle caller_method(thread, method(thread)); + + // first determine if there is a bootstrap method + { + KlassHandle caller_klass(thread, caller_method->method_holder()); + Handle bootm = SystemDictionary::find_bootstrap_method(caller_klass, KlassHandle(), CHECK); + if (bootm.is_null()) { + // If there is no bootstrap method, throw IncompatibleClassChangeError. + // This is a valid generic error type for resolution (JLS 12.3.3). + char buf[200]; + jio_snprintf(buf, sizeof(buf), "Class %s has not declared a bootstrap method for invokedynamic", + (Klass::cast(caller_klass()))->external_name()); + THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf); + } + } + + constantPoolHandle pool(thread, caller_method->constants()); + pool->set_invokedynamic(); // mark header to flag active call sites + + int raw_index = four_byte_index(thread); + assert(constantPoolCacheOopDesc::is_secondary_index(raw_index), "invokedynamic indexes marked specially"); + + // there are two CPC entries that are of interest: + int site_index = constantPoolCacheOopDesc::decode_secondary_index(raw_index); + int main_index = pool->cache()->entry_at(site_index)->main_entry_index(); + // and there is one CP entry, a NameAndType: + int nt_index = pool->map_instruction_operand_to_index(raw_index); + + // first resolve the signature to a MH.invoke methodOop + if (!pool->cache()->entry_at(main_index)->is_resolved(bytecode)) { + JvmtiHideSingleStepping jhss(thread); + CallInfo info; + LinkResolver::resolve_invoke(info, Handle(), pool, + raw_index, bytecode, CHECK); + // The main entry corresponds to a JVM_CONSTANT_NameAndType, and serves + // as a common reference point for all invokedynamic call sites with + // that exact call descriptor. 
We will link it in the CP cache exactly + // as if it were an invokevirtual of MethodHandle.invoke. + pool->cache()->entry_at(main_index)->set_method( + bytecode, + info.resolved_method(), + info.vtable_index()); + assert(pool->cache()->entry_at(main_index)->is_vfinal(), "f2 must be a methodOop"); + } + + // The method (f2 entry) of the main entry is the MH.invoke for the + // invokedynamic target call signature. + intptr_t f2_value = pool->cache()->entry_at(main_index)->f2(); + methodHandle mh_invdyn(THREAD, (methodOop) f2_value); + assert(mh_invdyn.not_null() && mh_invdyn->is_method() && mh_invdyn->is_method_handle_invoke(), + "correct result from LinkResolver::resolve_invokedynamic"); + + symbolHandle call_site_name(THREAD, pool->nt_name_ref_at(nt_index)); + Handle call_site + = SystemDictionary::make_dynamic_call_site(caller_method->method_holder(), + caller_method->method_idnum(), + caller_method->bci_from(bcp(thread)), + call_site_name, + mh_invdyn, + CHECK); + + // In the secondary entry, the f1 field is the call site, and the f2 (index) + // field is some data about the invoke site. + int extra_data = 0; + pool->cache()->entry_at(site_index)->set_dynamic_call(call_site(), extra_data); +} +IRT_END + + +// Called on first time execution, and also whenever the CallSite.target is null. +// FIXME: Do more of this in Java code. 
+IRT_ENTRY(void, InterpreterRuntime::bootstrap_invokedynamic(JavaThread* thread, oopDesc* call_site)) { + methodHandle mh_invdyn(thread, (methodOop) sun_dyn_CallSiteImpl::vmmethod(call_site)); + Handle mh_type(thread, mh_invdyn->method_handle_type()); + objArrayHandle mh_ptypes(thread, java_dyn_MethodType::ptypes(mh_type())); + + // squish the arguments down to a single array + int nargs = mh_ptypes->length(); + objArrayHandle arg_array; + { + objArrayOop aaoop = oopFactory::new_objArray(SystemDictionary::object_klass(), nargs, CHECK); + arg_array = objArrayHandle(thread, aaoop); + } + frame fr = thread->last_frame(); + assert(fr.interpreter_frame_bcp() != NULL, "sanity"); + int tos_offset = 0; + for (int i = nargs; --i >= 0; ) { + intptr_t* slot_addr = fr.interpreter_frame_tos_at(tos_offset++); + oop ptype = mh_ptypes->obj_at(i); + oop arg = NULL; + if (!java_lang_Class::is_primitive(ptype)) { + arg = *(oop*) slot_addr; + } else { + BasicType bt = java_lang_Class::primitive_type(ptype); + assert(frame::interpreter_frame_expression_stack_direction() < 0, "else reconsider this code"); + jvalue value; + Interpreter::get_jvalue_in_slot(slot_addr, bt, &value); + tos_offset += type2size[bt]-1; + arg = java_lang_boxing_object::create(bt, &value, CHECK); + // FIXME: These boxing objects are not canonicalized under + // the Java autoboxing rules. They should be... + // The best approach would be to push the arglist creation into Java. + // The JVM should use a lower-level interface to communicate argument lists. 
+ } + arg_array->obj_at_put(i, arg); + } + + // now find the bootstrap method + oop bootstrap_mh_oop = instanceKlass::cast(fr.interpreter_frame_method()->method_holder())->bootstrap_method(); + assert(bootstrap_mh_oop != NULL, "resolve_invokedynamic ensures a BSM"); + + // return the bootstrap method and argument array via vm_result/_2 + thread->set_vm_result(bootstrap_mh_oop); + thread->set_vm_result_2(arg_array()); +} +IRT_END + + + //------------------------------------------------------------------------------------------------------------------------ // Miscellaneous diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/interpreter/interpreterRuntime.hpp --- a/src/share/vm/interpreter/interpreterRuntime.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/interpreter/interpreterRuntime.hpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -42,8 +42,11 @@ static bool already_resolved(JavaThread *thread) { return cache_entry(thread)->is_resolved(code(thread)); } static int one_byte_index(JavaThread *thread) { return bcp(thread)[1]; } static int two_byte_index(JavaThread *thread) { return Bytes::get_Java_u2(bcp(thread) + 1); } + static int four_byte_index(JavaThread *thread) { return Bytes::get_native_u4(bcp(thread) + 1); } static int number_of_dimensions(JavaThread *thread) { return bcp(thread)[3]; } - static ConstantPoolCacheEntry* cache_entry(JavaThread *thread) { return method(thread)->constants()->cache()->entry_at(Bytes::get_native_u2(bcp(thread) + 1)); } + + static ConstantPoolCacheEntry* cache_entry_at(JavaThread *thread, int i) { return method(thread)->constants()->cache()->entry_at(i); } + static ConstantPoolCacheEntry* cache_entry(JavaThread *thread) { return cache_entry_at(thread, Bytes::get_native_u2(bcp(thread) + 1)); } static void note_trap(JavaThread *thread, int reason, TRAPS); public: @@ -66,6 +69,7 @@ static void throw_StackOverflowError(JavaThread* thread); static void throw_ArrayIndexOutOfBoundsException(JavaThread* thread, char* name, jint index); static void throw_ClassCastException(JavaThread* thread, oopDesc* obj); + static void throw_WrongMethodTypeException(JavaThread* thread, oopDesc* mtype = NULL, oopDesc* mhandle = NULL); static void create_exception(JavaThread* thread, char* name, char* message); static void create_klass_exception(JavaThread* thread, char* name, oopDesc* obj); static address exception_handler_for_exception(JavaThread* thread, oopDesc* exception); @@ -82,7 +86,9 @@ static void new_illegal_monitor_state_exception(JavaThread* thread); // Calls - static void resolve_invoke (JavaThread* thread, Bytecodes::Code bytecode); + static void resolve_invoke (JavaThread* thread, Bytecodes::Code bytecode); + static void resolve_invokedynamic(JavaThread* thread); + static void 
bootstrap_invokedynamic(JavaThread* thread, oopDesc* call_site); // Breakpoints static void _breakpoint(JavaThread* thread, methodOopDesc* method, address bcp); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/interpreter/linkResolver.cpp --- a/src/share/vm/interpreter/linkResolver.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/interpreter/linkResolver.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -151,6 +151,20 @@ result = methodHandle(THREAD, ik->lookup_method_in_all_interfaces(name(), signature())); } +void LinkResolver::lookup_implicit_method(methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS) { + if (EnableMethodHandles && MethodHandles::enabled() && + name == vmSymbolHandles::invoke_name() && klass() == SystemDictionary::MethodHandle_klass()) { + methodOop result_oop = SystemDictionary::find_method_handle_invoke(signature, + Handle(), + Handle(), + CHECK); + if (result_oop != NULL) { + assert(result_oop->is_method_handle_invoke() && result_oop->signature() == signature(), "consistent"); + result = methodHandle(THREAD, result_oop); + } + } +} + void LinkResolver::check_method_accessability(KlassHandle ref_klass, KlassHandle resolved_klass, KlassHandle sel_klass, @@ -240,6 +254,11 @@ lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK); if (resolved_method.is_null()) { + // JSR 292: see if this is an implicitly generated method MethodHandle.invoke(*...) + lookup_implicit_method(resolved_method, resolved_klass, method_name, method_signature, CHECK); + } + + if (resolved_method.is_null()) { // 4. 
method lookup failed ResourceMark rm(THREAD); THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(), @@ -928,6 +947,7 @@ case Bytecodes::_invokestatic : resolve_invokestatic (result, pool, index, CHECK); break; case Bytecodes::_invokespecial : resolve_invokespecial (result, pool, index, CHECK); break; case Bytecodes::_invokevirtual : resolve_invokevirtual (result, recv, pool, index, CHECK); break; + case Bytecodes::_invokedynamic : resolve_invokedynamic (result, pool, index, CHECK); break; case Bytecodes::_invokeinterface: resolve_invokeinterface(result, recv, pool, index, CHECK); break; } return; @@ -989,6 +1009,30 @@ resolve_interface_call(result, recv, recvrKlass, resolved_klass, method_name, method_signature, current_klass, true, true, CHECK); } + +void LinkResolver::resolve_invokedynamic(CallInfo& result, constantPoolHandle pool, int raw_index, TRAPS) { + assert(EnableInvokeDynamic, ""); + + // This guy is reached from InterpreterRuntime::resolve_invokedynamic. + + assert(constantPoolCacheOopDesc::is_secondary_index(raw_index), "must be secondary index"); + int nt_index = pool->map_instruction_operand_to_index(raw_index); + + // At this point, we only need the signature, and can ignore the name. + symbolHandle method_signature(THREAD, pool->nt_signature_ref_at(nt_index)); + symbolHandle method_name = vmSymbolHandles::invoke_name(); + KlassHandle resolved_klass = SystemDictionaryHandles::MethodHandle_klass(); + + // JSR 292: this must be an implicitly generated method MethodHandle.invoke(*...) + // The extra MH receiver will be inserted into the stack on every call. 
+ methodHandle resolved_method; + lookup_implicit_method(resolved_method, resolved_klass, method_name, method_signature, CHECK); + if (resolved_method.is_null()) { + THROW(vmSymbols::java_lang_InternalError()); + } + result.set_virtual(resolved_klass, KlassHandle(), resolved_method, resolved_method, resolved_method->vtable_index(), CHECK); +} + //------------------------------------------------------------------------------------------------------------------------ #ifndef PRODUCT diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/interpreter/linkResolver.hpp --- a/src/share/vm/interpreter/linkResolver.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/interpreter/linkResolver.hpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -103,6 +103,7 @@ static void lookup_method_in_klasses (methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS); static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS); static void lookup_method_in_interfaces (methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS); + static void lookup_implicit_method (methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS); static int vtable_index_of_miranda_method(KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS); @@ -166,6 +167,7 @@ static void resolve_invokespecial (CallInfo& result, constantPoolHandle pool, int index, TRAPS); static void resolve_invokevirtual (CallInfo& result, Handle recv, constantPoolHandle pool, int index, TRAPS); static void resolve_invokeinterface(CallInfo& result, Handle recv, constantPoolHandle pool, 
int index, TRAPS); + static void resolve_invokedynamic (CallInfo& result, constantPoolHandle pool, int index, TRAPS); static void resolve_invoke (CallInfo& result, Handle recv, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS); }; diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/interpreter/rewriter.cpp --- a/src/share/vm/interpreter/rewriter.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/interpreter/rewriter.cpp Thu May 07 10:30:17 2009 -0700 @@ -25,39 +25,50 @@ # include "incls/_precompiled.incl" # include "incls/_rewriter.cpp.incl" - -// Computes an index_map (new_index -> original_index) for contant pool entries +// Computes a CPC map (new_index -> original_index) for constant pool entries // that are referred to by the interpreter at runtime via the constant pool cache. -void Rewriter::compute_index_maps(constantPoolHandle pool, intArray*& index_map, intStack*& inverse_index_map) { - const int length = pool->length(); - index_map = new intArray(length, -1); - // Choose an initial value large enough that we don't get frequent - // calls to grow(). - inverse_index_map = new intStack(length / 2); +// Also computes a CP map (original_index -> new_index). +// Marks entries in CP which require additional processing. 
+void Rewriter::compute_index_maps() { + const int length = _pool->length(); + init_cp_map(length); for (int i = 0; i < length; i++) { - switch (pool->tag_at(i).value()) { + int tag = _pool->tag_at(i).value(); + switch (tag) { + case JVM_CONSTANT_InterfaceMethodref: case JVM_CONSTANT_Fieldref : // fall through case JVM_CONSTANT_Methodref : // fall through - case JVM_CONSTANT_InterfaceMethodref: { - index_map->at_put(i, inverse_index_map->length()); - inverse_index_map->append(i); - } + add_cp_cache_entry(i); + break; } } + + guarantee((int)_cp_cache_map.length()-1 <= (int)((u2)-1), + "all cp cache indexes fit in a u2"); } -// Creates a constant pool cache given an inverse_index_map +int Rewriter::add_extra_cp_cache_entry(int main_entry) { + // Hack: We put it on the map as an encoded value. + // The only place that consumes this is ConstantPoolCacheEntry::set_initial_state + int encoded = constantPoolCacheOopDesc::encode_secondary_index(main_entry); + int plain_secondary_index = _cp_cache_map.append(encoded); + return constantPoolCacheOopDesc::encode_secondary_index(plain_secondary_index); +} + + + +// Creates a constant pool cache given a CPC map // This creates the constant pool cache initially in a state // that is unsafe for concurrent GC processing but sets it to // a safe mode before the constant pool cache is returned. 
-constantPoolCacheHandle Rewriter::new_constant_pool_cache(intArray& inverse_index_map, TRAPS) { - const int length = inverse_index_map.length(); - constantPoolCacheOop cache = oopFactory::new_constantPoolCache(length, - methodOopDesc::IsUnsafeConc, - CHECK_(constantPoolCacheHandle())); - cache->initialize(inverse_index_map); - return constantPoolCacheHandle(THREAD, cache); +void Rewriter::make_constant_pool_cache(TRAPS) { + const int length = _cp_cache_map.length(); + constantPoolCacheOop cache = + oopFactory::new_constantPoolCache(length, methodOopDesc::IsUnsafeConc, CHECK); + cache->initialize(_cp_cache_map); + _pool->set_cache(cache); + cache->set_constant_pool(_pool()); } @@ -101,8 +112,38 @@ } +// Rewrite a classfile-order CP index into a native-order CPC index. +int Rewriter::rewrite_member_reference(address bcp, int offset) { + address p = bcp + offset; + int cp_index = Bytes::get_Java_u2(p); + int cache_index = cp_entry_to_cp_cache(cp_index); + Bytes::put_native_u2(p, cache_index); + return cp_index; +} + + +void Rewriter::rewrite_invokedynamic(address bcp, int offset, int delete_me) { + address p = bcp + offset; + assert(p[-1] == Bytecodes::_invokedynamic, ""); + int cp_index = Bytes::get_Java_u2(p); + int cpc = maybe_add_cp_cache_entry(cp_index); // add lazily + int cpc2 = add_extra_cp_cache_entry(cpc); + + // Replace the trailing four bytes with a CPC index for the dynamic + // call site. Unlike other CPC entries, there is one per bytecode, + // not just one per distinct CP entry. In other words, the + // CPC-to-CP relation is many-to-one for invokedynamic entries. + // This means we must use a larger index size than u2 to address + // all these entries. That is the main reason invokedynamic + // must have a five-byte instruction format. (Of course, other JVM + // implementations can use the bytes for other purposes.) + Bytes::put_native_u4(p, cpc2); + // Note: We use native_u4 format exclusively for 4-byte indexes. 
+} + + // Rewrites a method given the index_map information -methodHandle Rewriter::rewrite_method(methodHandle method, intArray& index_map, TRAPS) { +void Rewriter::scan_method(methodOop method) { int nof_jsrs = 0; bool has_monitor_bytecodes = false; @@ -121,6 +162,7 @@ int bc_length; for (int bci = 0; bci < code_length; bci += bc_length) { address bcp = code_base + bci; + int prefix_length = 0; c = (Bytecodes::Code)(*bcp); // Since we have the code, see if we can get the length @@ -135,6 +177,7 @@ // by 'wide'. We don't currently examine any of the bytecodes // modified by wide, but in case we do in the future... if (c == Bytecodes::_wide) { + prefix_length = 1; c = (Bytecodes::Code)bcp[1]; } } @@ -159,12 +202,13 @@ case Bytecodes::_putfield : // fall through case Bytecodes::_invokevirtual : // fall through case Bytecodes::_invokespecial : // fall through - case Bytecodes::_invokestatic : // fall through - case Bytecodes::_invokeinterface: { - address p = bcp + 1; - Bytes::put_native_u2(p, index_map[Bytes::get_Java_u2(p)]); + case Bytecodes::_invokestatic : + case Bytecodes::_invokeinterface: + rewrite_member_reference(bcp, prefix_length+1); break; - } + case Bytecodes::_invokedynamic: + rewrite_invokedynamic(bcp, prefix_length+1, int(sizeof"@@@@DELETE ME")); + break; case Bytecodes::_jsr : // fall through case Bytecodes::_jsr_w : nof_jsrs++; break; case Bytecodes::_monitorenter : // fall through @@ -182,53 +226,56 @@ // have to be rewritten, so we run the oopMapGenerator on the method if (nof_jsrs > 0) { method->set_has_jsrs(); - ResolveOopMapConflicts romc(method); - methodHandle original_method = method; - method = romc.do_potential_rewrite(CHECK_(methodHandle())); - if (method() != original_method()) { - // Insert invalid bytecode into original methodOop and set - // interpreter entrypoint, so that a executing this method - // will manifest itself in an easy recognizable form. 
- address bcp = original_method->bcp_from(0); - *bcp = (u1)Bytecodes::_shouldnotreachhere; - int kind = Interpreter::method_kind(original_method); - original_method->set_interpreter_kind(kind); - } + // Second pass will revisit this method. + assert(method->has_jsrs(), ""); + } +} - // Update monitor matching info. - if (romc.monitor_safe()) { - method->set_guaranteed_monitor_matching(); - } +// After constant pool is created, revisit methods containing jsrs. +methodHandle Rewriter::rewrite_jsrs(methodHandle method, TRAPS) { + ResolveOopMapConflicts romc(method); + methodHandle original_method = method; + method = romc.do_potential_rewrite(CHECK_(methodHandle())); + if (method() != original_method()) { + // Insert invalid bytecode into original methodOop and set + // interpreter entrypoint, so that a executing this method + // will manifest itself in an easy recognizable form. + address bcp = original_method->bcp_from(0); + *bcp = (u1)Bytecodes::_shouldnotreachhere; + int kind = Interpreter::method_kind(original_method); + original_method->set_interpreter_kind(kind); } - // Setup method entrypoints for compiler and interpreter - method->link_method(method, CHECK_(methodHandle())); + // Update monitor matching info. + if (romc.monitor_safe()) { + method->set_guaranteed_monitor_matching(); + } return method; } void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) { - // gather starting points ResourceMark rm(THREAD); - constantPoolHandle pool (THREAD, klass->constants()); - objArrayHandle methods (THREAD, klass->methods()); - assert(pool->cache() == NULL, "constant pool cache must not be set yet"); + Rewriter rw(klass, CHECK); + // (That's all, folks.) 
+} + +Rewriter::Rewriter(instanceKlassHandle klass, TRAPS) + : _klass(klass), + // gather starting points + _pool( THREAD, klass->constants()), + _methods(THREAD, klass->methods()) +{ + assert(_pool->cache() == NULL, "constant pool cache must not be set yet"); // determine index maps for methodOop rewriting - intArray* index_map = NULL; - intStack* inverse_index_map = NULL; - compute_index_maps(pool, index_map, inverse_index_map); + compute_index_maps(); - // allocate constant pool cache - constantPoolCacheHandle cache = new_constant_pool_cache(*inverse_index_map, CHECK); - pool->set_cache(cache()); - cache->set_constant_pool(pool()); - - if (RegisterFinalizersAtInit && klass->name() == vmSymbols::java_lang_Object()) { - int i = methods->length(); + if (RegisterFinalizersAtInit && _klass->name() == vmSymbols::java_lang_Object()) { + int i = _methods->length(); while (i-- > 0) { - methodOop method = (methodOop)methods->obj_at(i); + methodOop method = (methodOop)_methods->obj_at(i); if (method->intrinsic_id() == vmIntrinsics::_Object_init) { // rewrite the return bytecodes of Object. to register the // object for finalization if needed. @@ -239,13 +286,27 @@ } } - // rewrite methods - { int i = methods->length(); - while (i-- > 0) { - methodHandle m(THREAD, (methodOop)methods->obj_at(i)); - m = rewrite_method(m, *index_map, CHECK); + // rewrite methods, in two passes + int i, len = _methods->length(); + + for (i = len; --i >= 0; ) { + methodOop method = (methodOop)_methods->obj_at(i); + scan_method(method); + } + + // allocate constant pool cache, now that we've seen all the bytecodes + make_constant_pool_cache(CHECK); + + for (i = len; --i >= 0; ) { + methodHandle m(THREAD, (methodOop)_methods->obj_at(i)); + + if (m->has_jsrs()) { + m = rewrite_jsrs(m, CHECK); // Method might have gotten rewritten. - methods->obj_at_put(i, m()); + _methods->obj_at_put(i, m()); } + + // Set up method entry points for compiler and interpreter. 
+ m->link_method(m, CHECK); } } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/interpreter/rewriter.hpp --- a/src/share/vm/interpreter/rewriter.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/interpreter/rewriter.hpp Thu May 07 10:30:17 2009 -0700 @@ -25,13 +25,44 @@ // The Rewriter adds caches to the constant pool and rewrites bytecode indices // pointing into the constant pool for better interpreter performance. -class Rewriter: public AllStatic { +class Rewriter: public StackObj { private: - static void compute_index_maps(constantPoolHandle pool, intArray*& index_map, intStack*& inverse_index_map); - static constantPoolCacheHandle new_constant_pool_cache(intArray& inverse_index_map, TRAPS); - static methodHandle rewrite_method(methodHandle method, intArray& index_map, TRAPS); - static void rewrite_Object_init(methodHandle method, TRAPS); + instanceKlassHandle _klass; + constantPoolHandle _pool; + objArrayHandle _methods; + intArray _cp_map; + intStack _cp_cache_map; + + void init_cp_map(int length) { + _cp_map.initialize(length, -1); + // Choose an initial value large enough that we don't get frequent + // calls to grow(). + _cp_cache_map.initialize(length / 2); + } + int cp_entry_to_cp_cache(int i) { assert(has_cp_cache(i), "oob"); return _cp_map[i]; } + bool has_cp_cache(int i) { return (uint)i < (uint)_cp_map.length() && _cp_map[i] >= 0; } + int maybe_add_cp_cache_entry(int i) { return has_cp_cache(i) ? 
_cp_map[i] : add_cp_cache_entry(i); } + int add_cp_cache_entry(int cp_index) { + assert(_cp_map[cp_index] == -1, "not twice on same cp_index"); + int cache_index = _cp_cache_map.append(cp_index); + _cp_map.at_put(cp_index, cache_index); + assert(cp_entry_to_cp_cache(cp_index) == cache_index, ""); + return cache_index; + } + int add_extra_cp_cache_entry(int main_entry); + + // All the work goes in here: + Rewriter(instanceKlassHandle klass, TRAPS); + + void compute_index_maps(); + void make_constant_pool_cache(TRAPS); + void scan_method(methodOop m); + methodHandle rewrite_jsrs(methodHandle m, TRAPS); + void rewrite_Object_init(methodHandle m, TRAPS); + int rewrite_member_reference(address bcp, int offset); + void rewrite_invokedynamic(address bcp, int offset, int cp_index); public: + // Driver routine: static void rewrite(instanceKlassHandle klass, TRAPS); }; diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/interpreter/templateInterpreter.cpp --- a/src/share/vm/interpreter/templateInterpreter.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/interpreter/templateInterpreter.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -168,6 +168,7 @@ address TemplateInterpreter::_throw_ArrayStoreException_entry = NULL; address TemplateInterpreter::_throw_ArithmeticException_entry = NULL; address TemplateInterpreter::_throw_ClassCastException_entry = NULL; +address TemplateInterpreter::_throw_WrongMethodType_entry = NULL; address TemplateInterpreter::_throw_NullPointerException_entry = NULL; address TemplateInterpreter::_throw_StackOverflowError_entry = NULL; address TemplateInterpreter::_throw_exception_entry = NULL; @@ -177,12 +178,14 @@ #endif // !PRODUCT EntryPoint TemplateInterpreter::_return_entry[TemplateInterpreter::number_of_return_entries]; EntryPoint TemplateInterpreter::_earlyret_entry; +EntryPoint TemplateInterpreter::_return_unbox_entry; EntryPoint TemplateInterpreter::_deopt_entry [TemplateInterpreter::number_of_deopt_entries ]; EntryPoint TemplateInterpreter::_continuation_entry; EntryPoint TemplateInterpreter::_safept_entry; address TemplateInterpreter::_return_3_addrs_by_index[TemplateInterpreter::number_of_return_addrs]; address TemplateInterpreter::_return_5_addrs_by_index[TemplateInterpreter::number_of_return_addrs]; +address TemplateInterpreter::_return_5_unbox_addrs_by_index[TemplateInterpreter::number_of_return_addrs]; DispatchTable TemplateInterpreter::_active_table; DispatchTable TemplateInterpreter::_normal_table; @@ -250,6 +253,22 @@ } } + if (EnableInvokeDynamic) { + CodeletMark cm(_masm, "unboxing return entry points"); + Interpreter::_return_unbox_entry = + EntryPoint( + generate_return_unbox_entry_for(btos, 5), + generate_return_unbox_entry_for(ctos, 5), + generate_return_unbox_entry_for(stos, 5), + generate_return_unbox_entry_for(atos, 5), // cast conversion + generate_return_unbox_entry_for(itos, 5), + generate_return_unbox_entry_for(ltos, 5), + generate_return_unbox_entry_for(ftos, 5), + generate_return_unbox_entry_for(dtos, 5), + Interpreter::_return_entry[5].entry(vtos) // no unboxing 
for void + ); + } + { CodeletMark cm(_masm, "earlyret entry points"); Interpreter::_earlyret_entry = EntryPoint( @@ -297,8 +316,11 @@ for (int j = 0; j < number_of_states; j++) { const TosState states[] = {btos, ctos, stos, itos, ltos, ftos, dtos, atos, vtos}; - Interpreter::_return_3_addrs_by_index[Interpreter::TosState_as_index(states[j])] = Interpreter::return_entry(states[j], 3); - Interpreter::_return_5_addrs_by_index[Interpreter::TosState_as_index(states[j])] = Interpreter::return_entry(states[j], 5); + int index = Interpreter::TosState_as_index(states[j]); + Interpreter::_return_3_addrs_by_index[index] = Interpreter::return_entry(states[j], 3); + Interpreter::_return_5_addrs_by_index[index] = Interpreter::return_entry(states[j], 5); + if (EnableInvokeDynamic) + Interpreter::_return_5_unbox_addrs_by_index[index] = Interpreter::return_unbox_entry(states[j], 5); } { CodeletMark cm(_masm, "continuation entry points"); @@ -341,6 +363,7 @@ Interpreter::_throw_ArrayStoreException_entry = generate_klass_exception_handler("java/lang/ArrayStoreException" ); Interpreter::_throw_ArithmeticException_entry = generate_exception_handler("java/lang/ArithmeticException" , "/ by zero"); Interpreter::_throw_ClassCastException_entry = generate_ClassCastException_handler(); + Interpreter::_throw_WrongMethodType_entry = generate_WrongMethodType_handler(); Interpreter::_throw_NullPointerException_entry = generate_exception_handler("java/lang/NullPointerException" , NULL ); Interpreter::_throw_StackOverflowError_entry = generate_StackOverflowError_handler(); } @@ -358,6 +381,7 @@ method_entry(empty) method_entry(accessor) method_entry(abstract) + method_entry(method_handle) method_entry(java_lang_math_sin ) method_entry(java_lang_math_cos ) method_entry(java_lang_math_tan ) @@ -523,6 +547,18 @@ } +address TemplateInterpreter::return_unbox_entry(TosState state, int length) { + assert(EnableInvokeDynamic, ""); + if (state == vtos) { + // no unboxing to do, actually + return 
return_entry(state, length); + } else { + assert(length == 5, "unboxing entries generated for invokedynamic only"); + return _return_unbox_entry.entry(state); + } +} + + address TemplateInterpreter::deopt_entry(TosState state, int length) { guarantee(0 <= length && length < Interpreter::number_of_deopt_entries, "illegal length"); return _deopt_entry[length].entry(state); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/interpreter/templateInterpreter.hpp --- a/src/share/vm/interpreter/templateInterpreter.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/interpreter/templateInterpreter.hpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -77,14 +77,15 @@ friend class VMStructs; friend class InterpreterMacroAssembler; friend class TemplateInterpreterGenerator; + friend class InterpreterGenerator; friend class TemplateTable; // friend class Interpreter; public: enum MoreConstants { - number_of_return_entries = 9, // number of return entry points - number_of_deopt_entries = 9, // number of deoptimization entry points - number_of_return_addrs = 9 // number of return addresses + number_of_return_entries = number_of_states, // number of return entry points + number_of_deopt_entries = number_of_states, // number of deoptimization entry points + number_of_return_addrs = number_of_states // number of return addresses }; protected: @@ -93,6 +94,7 @@ static address _throw_ArrayStoreException_entry; static address _throw_ArithmeticException_entry; static address _throw_ClassCastException_entry; + static address _throw_WrongMethodType_entry; static address _throw_NullPointerException_entry; static address _throw_exception_entry; @@ -108,12 +110,14 @@ #endif // !PRODUCT static EntryPoint 
_return_entry[number_of_return_entries]; // entry points to return to from a call static EntryPoint _earlyret_entry; // entry point to return early from a call + static EntryPoint _return_unbox_entry; // entry point to unbox a return value from a call static EntryPoint _deopt_entry[number_of_deopt_entries]; // entry points to return to from a deoptimization static EntryPoint _continuation_entry; static EntryPoint _safept_entry; static address _return_3_addrs_by_index[number_of_return_addrs]; // for invokevirtual return entries static address _return_5_addrs_by_index[number_of_return_addrs]; // for invokeinterface return entries + static address _return_5_unbox_addrs_by_index[number_of_return_addrs]; // for invokedynamic bootstrap methods static DispatchTable _active_table; // the active dispatch table (used by the interpreter for dispatch) static DispatchTable _normal_table; // the normal dispatch table (used to set the active table in normal mode) @@ -137,6 +141,7 @@ static address remove_activation_entry() { return _remove_activation_entry; } static address throw_exception_entry() { return _throw_exception_entry; } static address throw_ArithmeticException_entry() { return _throw_ArithmeticException_entry; } + static address throw_WrongMethodType_entry() { return _throw_WrongMethodType_entry; } static address throw_NullPointerException_entry() { return _throw_NullPointerException_entry; } static address throw_StackOverflowError_entry() { return _throw_StackOverflowError_entry; } @@ -154,10 +159,12 @@ // Support for invokes static address* return_3_addrs_by_index_table() { return _return_3_addrs_by_index; } static address* return_5_addrs_by_index_table() { return _return_5_addrs_by_index; } + static address* return_5_unbox_addrs_by_index_table() { return _return_5_unbox_addrs_by_index; } static int TosState_as_index(TosState state); // computes index into return_3_entry_by_index table static address return_entry (TosState state, int length); static address 
deopt_entry (TosState state, int length); + static address return_unbox_entry(TosState state, int length); // Safepoint support static void notice_safepoints(); // stops the thread when reaching a safepoint diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/interpreter/templateInterpreterGenerator.hpp --- a/src/share/vm/interpreter/templateInterpreterGenerator.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/interpreter/templateInterpreterGenerator.hpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -48,9 +48,13 @@ } address generate_exception_handler_common(const char* name, const char* message, bool pass_oop); address generate_ClassCastException_handler(); + address generate_WrongMethodType_handler(); address generate_ArrayIndexOutOfBounds_handler(const char* name); address generate_continuation_for(TosState state); - address generate_return_entry_for(TosState state, int step); + address generate_return_entry_for(TosState state, int step, bool unbox = false); + address generate_return_unbox_entry_for(TosState state, int step) { + return generate_return_entry_for(state, step, true); + } address generate_earlyret_entry_for(TosState state); address generate_deopt_entry_for(TosState state, int step); address generate_safept_entry_for(TosState state, address runtime_entry); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/interpreter/templateTable.cpp --- a/src/share/vm/interpreter/templateTable.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/interpreter/templateTable.cpp Thu May 07 10:30:17 2009 -0700 @@ -442,6 +442,7 @@ def(Bytecodes::_invokespecial , ubcp|disp|clvm|____, vtos, vtos, invokespecial , 1 ); def(Bytecodes::_invokestatic , ubcp|disp|clvm|____, vtos, vtos, invokestatic , 
1 ); def(Bytecodes::_invokeinterface , ubcp|disp|clvm|____, vtos, vtos, invokeinterface , 1 ); + def(Bytecodes::_invokedynamic , ubcp|disp|clvm|____, vtos, vtos, invokedynamic , 1 ); def(Bytecodes::_new , ubcp|____|clvm|____, vtos, atos, _new , _ ); def(Bytecodes::_newarray , ubcp|____|clvm|____, itos, atos, newarray , _ ); def(Bytecodes::_anewarray , ubcp|____|clvm|____, itos, atos, anewarray , _ ); @@ -503,7 +504,6 @@ def(Bytecodes::_fast_invokevfinal , ubcp|disp|clvm|____, vtos, vtos, fast_invokevfinal , 2 ); - def(Bytecodes::_fast_linearswitch , ubcp|disp|____|____, itos, vtos, fast_linearswitch , _ ); def(Bytecodes::_fast_binaryswitch , ubcp|disp|____|____, itos, vtos, fast_binaryswitch , _ ); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/interpreter/templateTable.hpp --- a/src/share/vm/interpreter/templateTable.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/interpreter/templateTable.hpp Thu May 07 10:30:17 2009 -0700 @@ -261,6 +261,7 @@ static void invokespecial(int byte_no); static void invokestatic(int byte_no); static void invokeinterface(int byte_no); + static void invokedynamic(int byte_no); static void fast_invokevfinal(int byte_no); static void getfield_or_static(int byte_no, bool is_static); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/memory/dump.cpp --- a/src/share/vm/memory/dump.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/memory/dump.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -928,6 +928,10 @@ // shared classes at runtime, where constraints were previously created. 
guarantee(SystemDictionary::constraints()->number_of_entries() == 0, "loader constraints are not saved"); + // Revisit and implement this if we prelink method handle call sites: + guarantee(SystemDictionary::invoke_method_table() == NULL || + SystemDictionary::invoke_method_table()->number_of_entries() == 0, + "invoke method table is not saved"); GenCollectedHeap* gch = GenCollectedHeap::heap(); // At this point, many classes have been loaded. diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/oops/constantPoolKlass.cpp --- a/src/share/vm/oops/constantPoolKlass.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/oops/constantPoolKlass.cpp Thu May 07 10:30:17 2009 -0700 @@ -312,6 +312,7 @@ if (cp->flags() != 0) { st->print(" - flags : 0x%x", cp->flags()); if (cp->has_pseudo_string()) st->print(" has_pseudo_string"); + if (cp->has_invokedynamic()) st->print(" has_invokedynamic"); st->cr(); } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/oops/constantPoolOop.cpp --- a/src/share/vm/oops/constantPoolOop.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/oops/constantPoolOop.cpp Thu May 07 10:30:17 2009 -0700 @@ -249,32 +249,41 @@ } -symbolOop constantPoolOopDesc::uncached_name_ref_at(int which) { - jint ref_index = name_and_type_at(uncached_name_and_type_ref_index_at(which)); - int name_index = extract_low_short_from_int(ref_index); +symbolOop constantPoolOopDesc::impl_name_ref_at(int which, bool uncached) { + int name_index = name_ref_index_at(impl_name_and_type_ref_index_at(which, uncached)); return symbol_at(name_index); } -symbolOop constantPoolOopDesc::uncached_signature_ref_at(int which) { - jint ref_index = name_and_type_at(uncached_name_and_type_ref_index_at(which)); - int signature_index = extract_high_short_from_int(ref_index); +symbolOop constantPoolOopDesc::impl_signature_ref_at(int which, bool uncached) { + int signature_index = signature_ref_index_at(impl_name_and_type_ref_index_at(which, uncached)); return symbol_at(signature_index); } -int 
constantPoolOopDesc::uncached_name_and_type_ref_index_at(int which) { - jint ref_index = field_or_method_at(which, true); +int constantPoolOopDesc::impl_name_and_type_ref_index_at(int which, bool uncached) { + jint ref_index = field_or_method_at(which, uncached); return extract_high_short_from_int(ref_index); } -int constantPoolOopDesc::uncached_klass_ref_index_at(int which) { - jint ref_index = field_or_method_at(which, true); +int constantPoolOopDesc::impl_klass_ref_index_at(int which, bool uncached) { + jint ref_index = field_or_method_at(which, uncached); return extract_low_short_from_int(ref_index); } + +int constantPoolOopDesc::map_instruction_operand_to_index(int operand) { + if (constantPoolCacheOopDesc::is_secondary_index(operand)) { + return cache()->main_entry_at(operand)->constant_pool_index(); + } + assert((int)(u2)operand == operand, "clean u2"); + int index = Bytes::swap_u2(operand); + return cache()->entry_at(index)->constant_pool_index(); +} + + void constantPoolOopDesc::verify_constant_pool_resolve(constantPoolHandle this_oop, KlassHandle k, TRAPS) { if (k->oop_is_instance() || k->oop_is_objArray()) { instanceKlassHandle holder (THREAD, this_oop->pool_holder()); @@ -290,26 +299,14 @@ } -int constantPoolOopDesc::klass_ref_index_at(int which) { - jint ref_index = field_or_method_at(which, false); +int constantPoolOopDesc::name_ref_index_at(int which_nt) { + jint ref_index = name_and_type_at(which_nt); return extract_low_short_from_int(ref_index); } -int constantPoolOopDesc::name_and_type_ref_index_at(int which) { - jint ref_index = field_or_method_at(which, false); - return extract_high_short_from_int(ref_index); -} - - -int constantPoolOopDesc::name_ref_index_at(int which) { - jint ref_index = name_and_type_at(which); - return extract_low_short_from_int(ref_index); -} - - -int constantPoolOopDesc::signature_ref_index_at(int which) { - jint ref_index = name_and_type_at(which); +int constantPoolOopDesc::signature_ref_index_at(int which_nt) { + jint 
ref_index = name_and_type_at(which_nt); return extract_high_short_from_int(ref_index); } @@ -353,20 +350,6 @@ } -symbolOop constantPoolOopDesc::name_ref_at(int which) { - jint ref_index = name_and_type_at(name_and_type_ref_index_at(which)); - int name_index = extract_low_short_from_int(ref_index); - return symbol_at(name_index); -} - - -symbolOop constantPoolOopDesc::signature_ref_at(int which) { - jint ref_index = name_and_type_at(name_and_type_ref_index_at(which)); - int signature_index = extract_high_short_from_int(ref_index); - return symbol_at(signature_index); -} - - BasicType constantPoolOopDesc::basic_type_for_signature_at(int which) { return FieldType::basic_type(symbol_at(which)); } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/oops/constantPoolOop.hpp --- a/src/share/vm/oops/constantPoolOop.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/oops/constantPoolOop.hpp Thu May 07 10:30:17 2009 -0700 @@ -53,6 +53,7 @@ void release_tag_at_put(int which, jbyte t) { tags()->release_byte_at_put(which, t); } enum FlagBit { + FB_has_invokedynamic = 1, FB_has_pseudo_string = 2 }; @@ -96,7 +97,9 @@ typeArrayOop tags() const { return _tags; } bool has_pseudo_string() const { return flag_at(FB_has_pseudo_string); } + bool has_invokedynamic() const { return flag_at(FB_has_invokedynamic); } void set_pseudo_string() { set_flag_at(FB_has_pseudo_string); } + void set_invokedynamic() { set_flag_at(FB_has_invokedynamic); } // Klass holding pool klassOop pool_holder() const { return _pool_holder; } @@ -338,24 +341,28 @@ return *int_at_addr(which); } - // The following methods (klass_ref_at, klass_ref_at_noresolve, name_ref_at, - // signature_ref_at, klass_ref_index_at, name_and_type_ref_index_at, - // name_ref_index_at, signature_ref_index_at) all expect constant pool indices + // The following methods (name/signature/klass_ref_at, klass_ref_at_noresolve, + // name_and_type_ref_index_at) all expect constant pool indices // from the bytecodes to be passed in, which are 
actually potentially byte-swapped - // contstant pool cache indices. See field_or_method_at. + // or rewritten constant pool cache indices. They all call map_instruction_operand_to_index. + int map_instruction_operand_to_index(int operand); + + // There are also "uncached" versions which do not map the operand index; see below. // Lookup for entries consisting of (klass_index, name_and_type index) klassOop klass_ref_at(int which, TRAPS); symbolOop klass_ref_at_noresolve(int which); - symbolOop name_ref_at(int which); - symbolOop signature_ref_at(int which); // the type descriptor + symbolOop name_ref_at(int which) { return impl_name_ref_at(which, false); } + symbolOop signature_ref_at(int which) { return impl_signature_ref_at(which, false); } - int klass_ref_index_at(int which); - int name_and_type_ref_index_at(int which); + int klass_ref_index_at(int which) { return impl_klass_ref_index_at(which, false); } + int name_and_type_ref_index_at(int which) { return impl_name_and_type_ref_index_at(which, false); } // Lookup for entries consisting of (name_index, signature_index) - int name_ref_index_at(int which); - int signature_ref_index_at(int which); + int name_ref_index_at(int which_nt); // == low-order jshort of name_and_type_at(which_nt) + int signature_ref_index_at(int which_nt); // == high-order jshort of name_and_type_at(which_nt) + symbolOop nt_name_ref_at(int which_nt) { return symbol_at(name_ref_index_at(which_nt)); } + symbolOop nt_signature_ref_at(int which_nt) { return symbol_at(signature_ref_index_at(which_nt)); } BasicType basic_type_for_signature_at(int which); @@ -397,10 +404,10 @@ // Routines currently used for annotations (only called by jvm.cpp) but which might be used in the // future by other Java code. These take constant pool indices rather than possibly-byte-swapped // constant pool cache indices as do the peer methods above. 
- symbolOop uncached_name_ref_at(int which); - symbolOop uncached_signature_ref_at(int which); - int uncached_klass_ref_index_at(int which); - int uncached_name_and_type_ref_index_at(int which); + symbolOop uncached_name_ref_at(int which) { return impl_name_ref_at(which, true); } + symbolOop uncached_signature_ref_at(int which) { return impl_signature_ref_at(which, true); } + int uncached_klass_ref_index_at(int which) { return impl_klass_ref_index_at(which, true); } + int uncached_name_and_type_ref_index_at(int which) { return impl_name_and_type_ref_index_at(which, true); } // Sharing int pre_resolve_shared_klasses(TRAPS); @@ -413,16 +420,19 @@ private: + symbolOop impl_name_ref_at(int which, bool uncached); + symbolOop impl_signature_ref_at(int which, bool uncached); + int impl_klass_ref_index_at(int which, bool uncached); + int impl_name_and_type_ref_index_at(int which, bool uncached); + // Takes either a constant pool cache index in possibly byte-swapped // byte order (which comes from the bytecodes after rewriting) or, // if "uncached" is true, a vanilla constant pool index jint field_or_method_at(int which, bool uncached) { - int i = -1; - if (uncached || cache() == NULL) { - i = which; - } else { + int i = which; + if (!uncached && cache() != NULL) { // change byte-ordering and go via cache - i = cache()->entry_at(Bytes::swap_u2(which))->constant_pool_index(); + i = map_instruction_operand_to_index(which); } assert(tag_at(i).is_field_or_method(), "Corrupted constant pool"); return *int_at_addr(i); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/oops/cpCacheKlass.cpp --- a/src/share/vm/oops/cpCacheKlass.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/oops/cpCacheKlass.cpp Thu May 07 10:30:17 2009 -0700 @@ -169,11 +169,47 @@ void constantPoolCacheKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) { assert(obj->is_constantPoolCache(), "should be constant pool"); + if (EnableInvokeDynamic) { + constantPoolCacheOop cache = (constantPoolCacheOop)obj; 
+ // during a scavenge, it is safe to inspect my pool, since it is perm + constantPoolOop pool = cache->constant_pool(); + assert(pool->is_constantPool(), "should be constant pool"); + if (pool->has_invokedynamic()) { + for (int i = 0; i < cache->length(); i++) { + ConstantPoolCacheEntry* e = cache->entry_at(i); + oop* p = (oop*)&e->_f1; + if (e->is_secondary_entry()) { + if (PSScavenge::should_scavenge(p)) + pm->claim_or_forward_breadth(p); + assert(!(e->is_vfinal() && PSScavenge::should_scavenge((oop*)&e->_f2)), + "no live oops here"); + } + } + } + } } void constantPoolCacheKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { assert(obj->is_constantPoolCache(), "should be constant pool"); + if (EnableInvokeDynamic) { + constantPoolCacheOop cache = (constantPoolCacheOop)obj; + // during a scavenge, it is safe to inspect my pool, since it is perm + constantPoolOop pool = cache->constant_pool(); + assert(pool->is_constantPool(), "should be constant pool"); + if (pool->has_invokedynamic()) { + for (int i = 0; i < cache->length(); i++) { + ConstantPoolCacheEntry* e = cache->entry_at(i); + oop* p = (oop*)&e->_f1; + if (e->is_secondary_entry()) { + if (PSScavenge::should_scavenge(p)) + pm->claim_or_forward_depth(p); + assert(!(e->is_vfinal() && PSScavenge::should_scavenge((oop*)&e->_f2)), + "no live oops here"); + } + } + } + } } int diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/oops/cpCacheOop.cpp --- a/src/share/vm/oops/cpCacheOop.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/oops/cpCacheOop.cpp Thu May 07 10:30:17 2009 -0700 @@ -29,8 +29,18 @@ // Implememtation of ConstantPoolCacheEntry void ConstantPoolCacheEntry::set_initial_state(int index) { - assert(0 <= index && index < 0x10000, "sanity check"); + if (constantPoolCacheOopDesc::is_secondary_index(index)) { + // Hack: The rewriter is trying to say that this entry itself + // will be a secondary entry. 
+ int main_index = constantPoolCacheOopDesc::decode_secondary_index(index); + assert(0 <= main_index && main_index < 0x10000, "sanity check"); + _indices = (main_index << 16); + assert(main_entry_index() == main_index, ""); + return; + } + assert(0 < index && index < 0x10000, "sanity check"); _indices = index; + assert(constant_pool_index() == index, ""); } @@ -136,6 +146,7 @@ int byte_no = -1; bool needs_vfinal_flag = false; switch (invoke_code) { + case Bytecodes::_invokedynamic: case Bytecodes::_invokevirtual: case Bytecodes::_invokeinterface: { if (method->can_be_statically_bound()) { @@ -211,6 +222,23 @@ } +void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, int extra_data) { + methodOop method = (methodOop) sun_dyn_CallSiteImpl::vmmethod(call_site()); + assert(method->is_method(), "must be initialized properly"); + int param_size = method->size_of_parameters(); + assert(param_size > 1, "method argument size must include MH.this & initial dynamic receiver"); + param_size -= 1; // do not count MH.this; it is not stacked for invokedynamic + if (Atomic::cmpxchg_ptr(call_site(), &_f1, NULL) == NULL) { + // racing threads might be trying to install their own favorites + set_f1(call_site()); + } + set_f2(extra_data); + set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | param_size); + // do not do set_bytecode on a secondary CP cache entry + //set_bytecode_1(Bytecodes::_invokedynamic); +} + + class LocalOopClosure: public OopClosure { private: void (*_f)(oop*); @@ -392,7 +420,11 @@ // print separator if (index == 0) tty->print_cr(" -------------"); // print entry - tty->print_cr("%3d (%08x) [%02x|%02x|%5d]", index, this, bytecode_2(), bytecode_1(), constant_pool_index()); + tty->print_cr("%3d (%08x) ", index, this); + if (is_secondary_entry()) + tty->print_cr("[%5d|secondary]", main_entry_index()); + else + tty->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(), constant_pool_index()); 
tty->print_cr(" [ %08x]", (address)(oop)_f1); tty->print_cr(" [ %08x]", _f2); tty->print_cr(" [ %08x]", _flags); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/oops/cpCacheOop.hpp --- a/src/share/vm/oops/cpCacheOop.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/oops/cpCacheOop.hpp Thu May 07 10:30:17 2009 -0700 @@ -89,6 +89,7 @@ // _f1 = method for all but virtual calls, unused by virtual calls // (note: for interface calls, which are essentially virtual, // contains klassOop for the corresponding interface. +// for invokedynamic, f1 contains the CallSite object for the invocation // _f2 = method/vtable index for virtual calls only, unused by all other // calls. The vf flag indicates this is a method pointer not an // index. @@ -108,6 +109,8 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC { friend class VMStructs; + friend class constantPoolCacheKlass; + private: volatile intx _indices; // constant pool index & rewrite bytecodes volatile oop _f1; // entry specific oop field @@ -175,6 +178,11 @@ int index // Method index into interface ); + void set_dynamic_call( + Handle call_site, // Resolved java.dyn.CallSite (f1) + int extra_data // (f2) + ); + void set_parameter_size(int value) { assert(parameter_size() == 0 || parameter_size() == value, "size must not change"); @@ -216,7 +224,11 @@ } // Accessors - int constant_pool_index() const { return _indices & 0xFFFF; } + bool is_secondary_entry() const { return (_indices & 0xFFFF) == 0; } + int constant_pool_index() const { assert((_indices & 0xFFFF) != 0, "must be main entry"); + return (_indices & 0xFFFF); } + int main_entry_index() const { assert((_indices & 0xFFFF) == 0, "must be secondary entry"); + return ((uintx)_indices >> 16); } Bytecodes::Code bytecode_1() const { return Bytecodes::cast((_indices >> 16) & 0xFF); } Bytecodes::Code bytecode_2() const { return Bytecodes::cast((_indices >> 24) & 0xFF); } volatile oop f1() const { return _f1; } @@ -314,10 +326,30 @@ // Initialization void 
initialize(intArray& inverse_index_map); + // Secondary indexes. + // They must look completely different from normal indexes. + // The main reason is that byte swapping is sometimes done on normal indexes. + // Also, it is helpful for debugging to tell the two apart. + static bool is_secondary_index(int i) { return (i < 0); } + static int decode_secondary_index(int i) { assert(is_secondary_index(i), ""); return ~i; } + static int encode_secondary_index(int i) { assert(!is_secondary_index(i), ""); return ~i; } + // Accessors void set_constant_pool(constantPoolOop pool) { oop_store_without_check((oop*)&_constant_pool, (oop)pool); } constantPoolOop constant_pool() const { return _constant_pool; } ConstantPoolCacheEntry* entry_at(int i) const { assert(0 <= i && i < length(), "index out of bounds"); return base() + i; } + ConstantPoolCacheEntry* main_entry_at(int i) const { + ConstantPoolCacheEntry* e; + if (is_secondary_index(i)) { + // run through an extra level of indirection: + i = decode_secondary_index(i); + e = entry_at(i); + i = e->main_entry_index(); + } + e = entry_at(i); + assert(!e->is_secondary_entry(), "only one level of indirection"); + return e; + } // GC support // If the _length field has not been set, the size of the diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/oops/generateOopMap.cpp --- a/src/share/vm/oops/generateOopMap.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/oops/generateOopMap.cpp Thu May 07 10:30:17 2009 -0700 @@ -1252,8 +1252,9 @@ case Bytecodes::_invokevirtual: case Bytecodes::_invokespecial: case Bytecodes::_invokestatic: + case Bytecodes::_invokedynamic: case Bytecodes::_invokeinterface: - int idx = currentBC->get_index_big(); + int idx = currentBC->get_index_int(); constantPoolOop cp = method()->constants(); int nameAndTypeIdx = cp->name_and_type_ref_index_at(idx); int signatureIdx = cp->signature_ref_index_at(nameAndTypeIdx); @@ -1283,8 +1284,9 @@ case Bytecodes::_invokevirtual: case Bytecodes::_invokespecial: case 
Bytecodes::_invokestatic: + case Bytecodes::_invokedynamic: case Bytecodes::_invokeinterface: - int idx = currentBC->get_index_big(); + int idx = currentBC->get_index_int(); constantPoolOop cp = method()->constants(); int nameAndTypeIdx = cp->name_and_type_ref_index_at(idx); int signatureIdx = cp->signature_ref_index_at(nameAndTypeIdx); @@ -1310,6 +1312,7 @@ case Bytecodes::_invokevirtual: case Bytecodes::_invokespecial: case Bytecodes::_invokestatic: + case Bytecodes::_invokedynamic: case Bytecodes::_invokeinterface: _itr_send = itr; _report_result_for_send = true; @@ -1556,6 +1559,7 @@ case Bytecodes::_invokevirtual: case Bytecodes::_invokespecial: do_method(false, false, itr->get_index_big(), itr->bci()); break; case Bytecodes::_invokestatic: do_method(true, false, itr->get_index_big(), itr->bci()); break; + case Bytecodes::_invokedynamic: do_method(false, true, itr->get_index_int(), itr->bci()); break; case Bytecodes::_invokeinterface: do_method(false, true, itr->get_index_big(), itr->bci()); break; case Bytecodes::_newarray: case Bytecodes::_anewarray: pp_new_ref(vCTS, itr->bci()); break; @@ -1899,7 +1903,7 @@ // Dig up signature for field in constant pool constantPoolOop cp = _method->constants(); int nameAndTypeIdx = cp->name_and_type_ref_index_at(idx); - int signatureIdx = cp->signature_ref_index_at(nameAndTypeIdx); + int signatureIdx = cp->signature_ref_index_at(nameAndTypeIdx); // @@@@@ symbolOop signature = cp->symbol_at(signatureIdx); // Parse method signature diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/oops/instanceKlass.hpp --- a/src/share/vm/oops/instanceKlass.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/oops/instanceKlass.hpp Thu May 07 10:30:17 2009 -0700 @@ -163,6 +163,8 @@ klassOop _implementors[implementors_limit]; // Generic signature, or null if none. symbolOop _generic_signature; + // invokedynamic bootstrap method (a java.dyn.MethodHandle) + oop _bootstrap_method; // Annotations for this class, or null if none. 
typeArrayOop _class_annotations; // Annotation objects (byte arrays) for fields, or null if no annotations. @@ -464,6 +466,10 @@ u2 method_index) { _enclosing_method_class_index = class_index; _enclosing_method_method_index = method_index; } + // JSR 292 support + oop bootstrap_method() const { return _bootstrap_method; } + void set_bootstrap_method(oop mh) { oop_store(&_bootstrap_method, mh); } + // jmethodID support static jmethodID get_jmethod_id(instanceKlassHandle ik_h, size_t idnum, jmethodID new_id, jmethodID* new_jmeths); @@ -744,6 +750,7 @@ oop* adr_inner_classes() const { return (oop*)&this->_inner_classes;} oop* adr_implementors() const { return (oop*)&this->_implementors[0];} oop* adr_generic_signature() const { return (oop*)&this->_generic_signature;} + oop* adr_bootstrap_method() const { return (oop*)&this->_bootstrap_method;} oop* adr_methods_jmethod_ids() const { return (oop*)&this->_methods_jmethod_ids;} oop* adr_methods_cached_itable_indices() const { return (oop*)&this->_methods_cached_itable_indices;} oop* adr_class_annotations() const { return (oop*)&this->_class_annotations;} diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/oops/instanceKlassKlass.cpp --- a/src/share/vm/oops/instanceKlassKlass.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/oops/instanceKlassKlass.cpp Thu May 07 10:30:17 2009 -0700 @@ -84,6 +84,7 @@ MarkSweep::mark_and_push(ik->adr_host_klass()); MarkSweep::mark_and_push(ik->adr_signers()); MarkSweep::mark_and_push(ik->adr_generic_signature()); + MarkSweep::mark_and_push(ik->adr_bootstrap_method()); MarkSweep::mark_and_push(ik->adr_class_annotations()); MarkSweep::mark_and_push(ik->adr_fields_annotations()); MarkSweep::mark_and_push(ik->adr_methods_annotations()); @@ -124,6 +125,7 @@ PSParallelCompact::mark_and_push(cm, ik->adr_host_klass()); PSParallelCompact::mark_and_push(cm, ik->adr_signers()); PSParallelCompact::mark_and_push(cm, ik->adr_generic_signature()); + PSParallelCompact::mark_and_push(cm, 
ik->adr_bootstrap_method()); PSParallelCompact::mark_and_push(cm, ik->adr_class_annotations()); PSParallelCompact::mark_and_push(cm, ik->adr_fields_annotations()); PSParallelCompact::mark_and_push(cm, ik->adr_methods_annotations()); @@ -170,6 +172,7 @@ blk->do_oop(&ik->adr_implementors()[i]); } blk->do_oop(ik->adr_generic_signature()); + blk->do_oop(ik->adr_bootstrap_method()); blk->do_oop(ik->adr_class_annotations()); blk->do_oop(ik->adr_fields_annotations()); blk->do_oop(ik->adr_methods_annotations()); @@ -230,6 +233,8 @@ } adr = ik->adr_generic_signature(); if (mr.contains(adr)) blk->do_oop(adr); + adr = ik->adr_bootstrap_method(); + if (mr.contains(adr)) blk->do_oop(adr); adr = ik->adr_class_annotations(); if (mr.contains(adr)) blk->do_oop(adr); adr = ik->adr_fields_annotations(); @@ -274,6 +279,7 @@ MarkSweep::adjust_pointer(&ik->adr_implementors()[i]); } MarkSweep::adjust_pointer(ik->adr_generic_signature()); + MarkSweep::adjust_pointer(ik->adr_bootstrap_method()); MarkSweep::adjust_pointer(ik->adr_class_annotations()); MarkSweep::adjust_pointer(ik->adr_fields_annotations()); MarkSweep::adjust_pointer(ik->adr_methods_annotations()); @@ -454,6 +460,7 @@ ik->set_breakpoints(NULL); ik->init_previous_versions(); ik->set_generic_signature(NULL); + ik->set_bootstrap_method(NULL); ik->release_set_methods_jmethod_ids(NULL); ik->release_set_methods_cached_itable_indices(NULL); ik->set_class_annotations(NULL); @@ -578,6 +585,11 @@ } // pvw is cleaned up } // rm is cleaned up + if (ik->bootstrap_method() != NULL) { + st->print(BULLET"bootstrap method: "); + ik->bootstrap_method()->print_value_on(st); + st->cr(); + } if (ik->generic_signature() != NULL) { st->print(BULLET"generic signature: "); ik->generic_signature()->print_value_on(st); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/oops/methodDataOop.cpp --- a/src/share/vm/oops/methodDataOop.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/oops/methodDataOop.cpp Thu May 07 10:30:17 2009 -0700 @@ -442,6 +442,8 
@@ case Bytecodes::_invokevirtual: case Bytecodes::_invokeinterface: return VirtualCallData::static_cell_count(); + case Bytecodes::_invokedynamic: + return CounterData::static_cell_count(); case Bytecodes::_ret: return RetData::static_cell_count(); case Bytecodes::_ifeq: @@ -570,6 +572,11 @@ cell_count = VirtualCallData::static_cell_count(); tag = DataLayout::virtual_call_data_tag; break; + case Bytecodes::_invokedynamic: + // %%% should make a type profile for any invokedynamic that takes a ref argument + cell_count = CounterData::static_cell_count(); + tag = DataLayout::counter_data_tag; + break; case Bytecodes::_ret: cell_count = RetData::static_cell_count(); tag = DataLayout::ret_data_tag; diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/oops/methodKlass.cpp --- a/src/share/vm/oops/methodKlass.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/oops/methodKlass.cpp Thu May 07 10:30:17 2009 -0700 @@ -298,7 +298,11 @@ m->code()->print_value_on(st); st->cr(); } - if (m->is_native()) { + if (m->is_method_handle_invoke()) { + st->print_cr(" - invoke method type: " INTPTR_FORMAT, (address) m->method_handle_type()); + // m is classified as native, but it does not have an interesting + // native_function or signature handler + } else if (m->is_native()) { st->print_cr(" - native function: " INTPTR_FORMAT, m->native_function()); st->print_cr(" - signature handler: " INTPTR_FORMAT, m->signature_handler()); } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/oops/methodOop.cpp --- a/src/share/vm/oops/methodOop.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/oops/methodOop.cpp Thu May 07 10:30:17 2009 -0700 @@ -161,7 +161,7 @@ int methodOopDesc::bci_from(address bcp) const { - assert(is_native() && bcp == code_base() || contains(bcp), "bcp doesn't belong to this method"); + assert(is_native() && bcp == code_base() || contains(bcp) || is_error_reported(), "bcp doesn't belong to this method"); return bcp - code_base(); } @@ -304,6 +304,12 @@ } +int 
methodOopDesc::extra_stack_words() { + // not an inline function, to avoid a header dependency on Interpreter + return extra_stack_entries() * Interpreter::stackElementSize(); +} + + void methodOopDesc::compute_size_of_parameters(Thread *thread) { symbolHandle h_signature(thread, signature()); ArgumentSizeComputer asc(h_signature); @@ -564,6 +570,11 @@ bool methodOopDesc::is_not_compilable(int comp_level) const { + if (is_method_handle_invoke()) { + // compilers must recognize this method specially, or not at all + return true; + } + methodDataOop mdo = method_data(); if (mdo != NULL && (uint)mdo->decompile_count() > (uint)PerMethodRecompilationCutoff) { @@ -651,7 +662,7 @@ assert(entry != NULL, "interpreter entry must be non-null"); // Sets both _i2i_entry and _from_interpreted_entry set_interpreter_entry(entry); - if (is_native()) { + if (is_native() && !is_method_handle_invoke()) { set_native_function( SharedRuntime::native_method_throw_unsatisfied_link_error_entry(), !native_bind_event_is_interesting); @@ -783,6 +794,100 @@ return false; } +// Constant pool structure for invoke methods: +enum { + _imcp_invoke_name = 1, // utf8: 'invoke' + _imcp_invoke_signature, // utf8: (variable symbolOop) + _imcp_method_type_value, // string: (variable java/dyn/MethodType, sic) + _imcp_limit +}; + +oop methodOopDesc::method_handle_type() const { + if (!is_method_handle_invoke()) { assert(false, "caller resp."); return NULL; } + oop mt = constants()->resolved_string_at(_imcp_method_type_value); + assert(mt->klass() == SystemDictionary::MethodType_klass(), ""); + return mt; +} + +jint* methodOopDesc::method_type_offsets_chain() { + static jint pchase[] = { -1, -1, -1 }; + if (pchase[0] == -1) { + jint step0 = in_bytes(constants_offset()); + jint step1 = (constantPoolOopDesc::header_size() + _imcp_method_type_value) * HeapWordSize; + // do this in reverse to avoid races: + OrderAccess::release_store(&pchase[1], step1); + OrderAccess::release_store(&pchase[0], step0); + } + 
return pchase; +} + +methodHandle methodOopDesc::make_invoke_method(KlassHandle holder, + symbolHandle signature, + Handle method_type, TRAPS) { + methodHandle empty; + + assert(holder() == SystemDictionary::MethodHandle_klass(), + "must be a JSR 292 magic type"); + + if (TraceMethodHandles) { + tty->print("Creating invoke method for "); + signature->print_value(); + tty->cr(); + } + + constantPoolHandle cp; + { + constantPoolOop cp_oop = oopFactory::new_constantPool(_imcp_limit, IsSafeConc, CHECK_(empty)); + cp = constantPoolHandle(THREAD, cp_oop); + } + cp->symbol_at_put(_imcp_invoke_name, vmSymbols::invoke_name()); + cp->symbol_at_put(_imcp_invoke_signature, signature()); + cp->string_at_put(_imcp_method_type_value, vmSymbols::void_signature()); + cp->set_pool_holder(holder()); + + // set up the fancy stuff: + cp->pseudo_string_at_put(_imcp_method_type_value, method_type()); + methodHandle m; + { + int flags_bits = (JVM_MH_INVOKE_BITS | JVM_ACC_PUBLIC | JVM_ACC_FINAL); + methodOop m_oop = oopFactory::new_method(0, accessFlags_from(flags_bits), + 0, 0, 0, IsSafeConc, CHECK_(empty)); + m = methodHandle(THREAD, m_oop); + } + m->set_constants(cp()); + m->set_name_index(_imcp_invoke_name); + m->set_signature_index(_imcp_invoke_signature); + assert(m->name() == vmSymbols::invoke_name(), ""); + assert(m->signature() == signature(), ""); +#ifdef CC_INTERP + ResultTypeFinder rtf(signature()); + m->set_result_index(rtf.type()); +#endif + m->compute_size_of_parameters(THREAD); + m->set_exception_table(Universe::the_empty_int_array()); + + // Finally, set up its entry points. + assert(m->method_handle_type() == method_type(), ""); + assert(m->can_be_statically_bound(), ""); + m->set_vtable_index(methodOopDesc::nonvirtual_vtable_index); + m->link_method(m, CHECK_(empty)); + +#ifdef ASSERT + // Make sure the pointer chase works. 
+ address p = (address) m(); + for (jint* pchase = method_type_offsets_chain(); (*pchase) != -1; pchase++) { + p = *(address*)(p + (*pchase)); + } + assert((oop)p == method_type(), "pointer chase is correct"); +#endif + + if (TraceMethodHandles) + m->print_on(tty); + + return m; +} + + methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length, u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS) { diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/oops/methodOop.hpp --- a/src/share/vm/oops/methodOop.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/oops/methodOop.hpp Thu May 07 10:30:17 2009 -0700 @@ -320,6 +320,7 @@ enum VtableIndexFlag { // Valid vtable indexes are non-negative (>= 0). // These few negative values are used as sentinels. + highest_unused_vtable_index_value = -5, invalid_vtable_index = -4, // distinct from any valid vtable index garbage_vtable_index = -3, // not yet linked; no vtable layout yet nonvirtual_vtable_index = -2 // there is no need for vtable dispatch @@ -523,6 +524,21 @@ // Reflection support bool is_overridden_in(klassOop k) const; + // JSR 292 support + bool is_method_handle_invoke() const { return access_flags().is_method_handle_invoke(); } + static methodHandle make_invoke_method(KlassHandle holder, + symbolHandle signature, + Handle method_type, + TRAPS); + // these operate only on invoke methods: + oop method_handle_type() const; + static jint* method_type_offsets_chain(); // series of pointer-offsets, terminated by -1 + // presize interpreter frames for extra interpreter stack entries, if needed + // method handles want to be able to push a few extra values (e.g., a bound receiver), and + // invokedynamic sometimes needs to push a bootstrap method, call site, and arglist, + // all without checking for a stack overflow + static int extra_stack_entries() { return (EnableMethodHandles ? (int)MethodHandlePushLimit : 0) + (EnableInvokeDynamic ? 
3 : 0); } + static int extra_stack_words(); // = extra_stack_entries() * Interpreter::stackElementSize() // RedefineClasses() support: bool is_old() const { return access_flags().is_old(); } void set_is_old() { _access_flags.set_is_old(); } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/oops/oop.hpp --- a/src/share/vm/oops/oop.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/oops/oop.hpp Thu May 07 10:30:17 2009 -0700 @@ -263,6 +263,9 @@ jdouble double_field_acquire(int offset) const; void release_double_field_put(int offset, jdouble contents); + address address_field_acquire(int offset) const; + void release_address_field_put(int offset, address contents); + // printing functions for VM debugging void print_on(outputStream* st) const; // First level print void print_value_on(outputStream* st) const; // Second level print. diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/oops/oop.inline.hpp --- a/src/share/vm/oops/oop.inline.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/oops/oop.inline.hpp Thu May 07 10:30:17 2009 -0700 @@ -349,6 +349,9 @@ inline jdouble oopDesc::double_field_acquire(int offset) const { return OrderAccess::load_acquire(double_field_addr(offset)); } inline void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); } +inline address oopDesc::address_field_acquire(int offset) const { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); } +inline void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); } + inline int oopDesc::size_given_klass(Klass* klass) { int lh = klass->layout_helper(); int s = lh >> LogHeapWordSize; // deliver size scaled by wordSize diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/opto/buildOopMap.cpp --- a/src/share/vm/opto/buildOopMap.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/opto/buildOopMap.cpp Thu May 07 10:30:17 
2009 -0700 @@ -363,6 +363,20 @@ */ #endif +#ifdef ASSERT + for( OopMapStream oms1(omap, OopMapValue::derived_oop_value); !oms1.is_done(); oms1.next()) { + OopMapValue omv1 = oms1.current(); + bool found = false; + for( OopMapStream oms2(omap,OopMapValue::oop_value); !oms2.is_done(); oms2.next()) { + if( omv1.content_reg() == oms2.current().reg() ) { + found = true; + break; + } + } + assert( found, "derived with no base in oopmap" ); + } +#endif + return omap; } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/opto/bytecodeInfo.cpp --- a/src/share/vm/opto/bytecodeInfo.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/opto/bytecodeInfo.cpp Thu May 07 10:30:17 2009 -0700 @@ -321,7 +321,7 @@ // stricter than callee_holder->is_initialized() ciBytecodeStream iter(caller_method); iter.force_bci(caller_bci); - int index = iter.get_index_big(); + int index = iter.get_index_int(); if( !caller_method->is_klass_loaded(index, true) ) { return false; } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/opto/callnode.cpp --- a/src/share/vm/opto/callnode.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/opto/callnode.cpp Thu May 07 10:30:17 2009 -0700 @@ -1043,6 +1043,51 @@ //============================================================================= uint AllocateArrayNode::size_of() const { return sizeof(*this); } +Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) { + if (remove_dead_region(phase, can_reshape)) return this; + + const Type* type = phase->type(Ideal_length()); + if (type->isa_int() && type->is_int()->_hi < 0) { + if (can_reshape) { + PhaseIterGVN *igvn = phase->is_IterGVN(); + // Unreachable fall through path (negative array length), + // the allocation can only throw so disconnect it. 
+ Node* proj = proj_out(TypeFunc::Control); + Node* catchproj = NULL; + if (proj != NULL) { + for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) { + Node *cn = proj->fast_out(i); + if (cn->is_Catch()) { + catchproj = cn->as_Multi()->proj_out(CatchProjNode::fall_through_index); + break; + } + } + } + if (catchproj != NULL && catchproj->outcnt() > 0 && + (catchproj->outcnt() > 1 || + catchproj->unique_out()->Opcode() != Op_Halt)) { + assert(catchproj->is_CatchProj(), "must be a CatchProjNode"); + Node* nproj = catchproj->clone(); + igvn->register_new_node_with_optimizer(nproj); + + Node *frame = new (phase->C, 1) ParmNode( phase->C->start(), TypeFunc::FramePtr ); + frame = phase->transform(frame); + // Halt & Catch Fire + Node *halt = new (phase->C, TypeFunc::Parms) HaltNode( nproj, frame ); + phase->C->root()->add_req(halt); + phase->transform(halt); + + igvn->replace_node(catchproj, phase->C->top()); + return this; + } + } else { + // Can't correct it during regular GVN so register for IGVN + phase->C->record_for_igvn(this); + } + } + return NULL; +} + // Retrieve the length from the AllocateArrayNode. Narrow the type with a // CastII, if appropriate. If we are not allowed to create new nodes, and // a CastII is appropriate, return NULL. diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/opto/callnode.hpp --- a/src/share/vm/opto/callnode.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/opto/callnode.hpp Thu May 07 10:30:17 2009 -0700 @@ -762,6 +762,7 @@ } virtual int Opcode() const; virtual uint size_of() const; // Size is bigger + virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); // Dig the length operand out of a array allocation site. Node* Ideal_length() { diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/opto/chaitin.cpp --- a/src/share/vm/opto/chaitin.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/opto/chaitin.cpp Thu May 07 10:30:17 2009 -0700 @@ -1423,17 +1423,33 @@ // pointers derived from NULL! 
These are always along paths that // can't happen at run-time but the optimizer cannot deduce it so // we have to handle it gracefully. + assert(!derived->bottom_type()->isa_narrowoop() || + derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity"); const TypePtr *tj = derived->bottom_type()->isa_ptr(); // If its an OOP with a non-zero offset, then it is derived. - if( tj->_offset == 0 ) { + if( tj == NULL || tj->_offset == 0 ) { derived_base_map[derived->_idx] = derived; return derived; } // Derived is NULL+offset? Base is NULL! if( derived->is_Con() ) { - Node *base = new (C, 1) ConPNode( TypePtr::NULL_PTR ); - uint no_lidx = 0; // an unmatched constant in debug info has no LRG - _names.extend(base->_idx, no_lidx); + Node *base = _matcher.mach_null(); + assert(base != NULL, "sanity"); + if (base->in(0) == NULL) { + // Initialize it once and make it shared: + // set control to _root and place it into Start block + // (where top() node is placed). + base->init_req(0, _cfg._root); + Block *startb = _cfg._bbs[C->top()->_idx]; + startb->_nodes.insert(startb->find_node(C->top()), base ); + _cfg._bbs.map( base->_idx, startb ); + assert (n2lidx(base) == 0, "should not have LRG yet"); + } + if (n2lidx(base) == 0) { + new_lrg(base, maxlrg++); + } + assert(base->in(0) == _cfg._root && + _cfg._bbs[base->_idx] == _cfg._bbs[C->top()->_idx], "base NULL should be shared"); derived_base_map[derived->_idx] = base; return base; } @@ -1460,9 +1476,13 @@ } // Now we see we need a base-Phi here to merge the bases - base = new (C, derived->req()) PhiNode( derived->in(0), base->bottom_type() ); - for( i = 1; i < derived->req(); i++ ) + const Type *t = base->bottom_type(); + base = new (C, derived->req()) PhiNode( derived->in(0), t ); + for( i = 1; i < derived->req(); i++ ) { base->init_req(i, find_base_for_derived(derived_base_map, derived->in(i), maxlrg)); + t = t->meet(base->in(i)->bottom_type()); + } + base->as_Phi()->set_type(t); // Search the current block for an 
existing base-Phi Block *b = _cfg._bbs[derived->_idx]; @@ -1560,6 +1580,8 @@ // This works because we are still in SSA during this call. Node *derived = lrgs(neighbor)._def; const TypePtr *tj = derived->bottom_type()->isa_ptr(); + assert(!derived->bottom_type()->isa_narrowoop() || + derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity"); // If its an OOP with a non-zero offset, then it is derived. if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) { Node *base = find_base_for_derived( derived_base_map, derived, maxlrg ); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/opto/doCall.cpp --- a/src/share/vm/opto/doCall.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/opto/doCall.cpp Thu May 07 10:30:17 2009 -0700 @@ -248,6 +248,14 @@ holder_klass); return true; } + if (dest_method->is_method_handle_invoke() + && holder_klass->name() == ciSymbol::java_dyn_Dynamic()) { + // FIXME: NYI + uncommon_trap(Deoptimization::Reason_unhandled, + Deoptimization::Action_none, + holder_klass); + return true; + } assert(dest_method->will_link(method()->holder(), klass, bc()), "dest_method: typeflow responsibility"); return false; @@ -748,6 +756,7 @@ case Bytecodes::_invokevirtual: increment_counter(SharedRuntime::nof_inlined_calls_addr()); break; case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break; case Bytecodes::_invokestatic: + case Bytecodes::_invokedynamic: case Bytecodes::_invokespecial: increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break; default: fatal("unexpected call bytecode"); } @@ -756,6 +765,7 @@ case Bytecodes::_invokevirtual: increment_counter(SharedRuntime::nof_normal_calls_addr()); break; case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break; case Bytecodes::_invokestatic: + case Bytecodes::_invokedynamic: case Bytecodes::_invokespecial: increment_counter(SharedRuntime::nof_static_calls_addr()); break; default: 
fatal("unexpected call bytecode"); } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/opto/graphKit.cpp --- a/src/share/vm/opto/graphKit.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/opto/graphKit.cpp Thu May 07 10:30:17 2009 -0700 @@ -947,6 +947,7 @@ case Bytecodes::_invokevirtual: case Bytecodes::_invokespecial: case Bytecodes::_invokestatic: + case Bytecodes::_invokedynamic: case Bytecodes::_invokeinterface: { bool is_static = (depth == 0); @@ -2979,6 +2980,7 @@ // See comments on new_instance for the meaning of the other arguments. Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) Node* length, // number of array elements + int nargs, // number of arguments to push back for uncommon trap bool raw_mem_only, // affect only raw memory Node* *return_size_val) { jint layout_con = Klass::_lh_neutral_value; @@ -2994,6 +2996,7 @@ Node* cmp_lh = _gvn.transform( new(C, 3) CmpINode(layout_val, intcon(layout_con)) ); Node* bol_lh = _gvn.transform( new(C, 2) BoolNode(cmp_lh, BoolTest::eq) ); { BuildCutout unless(this, bol_lh, PROB_MAX); + _sp += nargs; uncommon_trap(Deoptimization::Reason_class_check, Deoptimization::Action_maybe_recompile); } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/opto/graphKit.hpp --- a/src/share/vm/opto/graphKit.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/opto/graphKit.hpp Thu May 07 10:30:17 2009 -0700 @@ -699,7 +699,7 @@ Node* slow_test = NULL, bool raw_mem_only = false, Node* *return_size_val = NULL); - Node* new_array(Node* klass_node, Node* count_val, + Node* new_array(Node* klass_node, Node* count_val, int nargs, bool raw_mem_only = false, Node* *return_size_val = NULL); // Handy for making control flow diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/opto/library_call.cpp --- a/src/share/vm/opto/library_call.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/opto/library_call.cpp Thu May 07 10:30:17 2009 -0700 @@ -3055,9 +3055,7 @@ // Normal case: The array type has been cached in the 
java.lang.Class. // The following call works fine even if the array type is polymorphic. // It could be a dynamic mix of int[], boolean[], Object[], etc. - _sp += nargs; // set original stack for use by uncommon_trap - Node* obj = new_array(klass_node, count_val); - _sp -= nargs; + Node* obj = new_array(klass_node, count_val, nargs); result_reg->init_req(_normal_path, control()); result_val->init_req(_normal_path, obj); result_io ->init_req(_normal_path, i_o()); @@ -3179,9 +3177,7 @@ Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) ); Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length); - _sp += nargs; // set original stack for use by uncommon_trap - Node* newcopy = new_array(klass_node, length); - _sp -= nargs; + Node* newcopy = new_array(klass_node, length, nargs); // Generate a direct call to the right arraycopy function(s). // We know the copy is disjoint but we might not know if the @@ -3903,10 +3899,8 @@ set_control(array_ctl); Node* obj_length = load_array_length(obj); Node* obj_size = NULL; - _sp += nargs; // set original stack for use by uncommon_trap - Node* alloc_obj = new_array(obj_klass, obj_length, + Node* alloc_obj = new_array(obj_klass, obj_length, nargs, raw_mem_only, &obj_size); - _sp -= nargs; assert(obj_size != NULL, ""); Node* raw_obj = alloc_obj->in(1); assert(raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), ""); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/opto/macro.cpp --- a/src/share/vm/opto/macro.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/opto/macro.cpp Thu May 07 10:30:17 2009 -0700 @@ -806,8 +806,7 @@ } } else if (use->is_AddP()) { // raw memory addresses used only by the initialization - _igvn.hash_delete(use); - _igvn.subsume_node(use, C->top()); + _igvn.replace_node(use, C->top()); } else { assert(false, "only Initialize or AddP expected"); } @@ -1291,8 +1290,7 @@ if (_fallthroughcatchproj != NULL) { ctrl = _fallthroughcatchproj->clone(); transform_later(ctrl); - 
_igvn.hash_delete(_fallthroughcatchproj); - _igvn.subsume_node(_fallthroughcatchproj, result_region); + _igvn.replace_node(_fallthroughcatchproj, result_region); } else { ctrl = top(); } @@ -1303,8 +1301,7 @@ } else { slow_result = _resproj->clone(); transform_later(slow_result); - _igvn.hash_delete(_resproj); - _igvn.subsume_node(_resproj, result_phi_rawoop); + _igvn.replace_node(_resproj, result_phi_rawoop); } // Plug slow-path into result merge point @@ -1613,18 +1610,15 @@ assert(membar != NULL && membar->Opcode() == Op_MemBarAcquire, ""); Node* ctrlproj = membar->proj_out(TypeFunc::Control); Node* memproj = membar->proj_out(TypeFunc::Memory); - _igvn.hash_delete(ctrlproj); - _igvn.subsume_node(ctrlproj, fallthroughproj); - _igvn.hash_delete(memproj); - _igvn.subsume_node(memproj, memproj_fallthrough); + _igvn.replace_node(ctrlproj, fallthroughproj); + _igvn.replace_node(memproj, memproj_fallthrough); // Delete FastLock node also if this Lock node is unique user // (a loop peeling may clone a Lock node). 
Node* flock = alock->as_Lock()->fastlock_node(); if (flock->outcnt() == 1) { assert(flock->unique_out() == alock, "sanity"); - _igvn.hash_delete(flock); - _igvn.subsume_node(flock, top()); + _igvn.replace_node(flock, top()); } } @@ -1634,20 +1628,16 @@ MemBarNode* membar = ctrl->in(0)->as_MemBar(); assert(membar->Opcode() == Op_MemBarRelease && mem->is_Proj() && membar == mem->in(0), ""); - _igvn.hash_delete(fallthroughproj); - _igvn.subsume_node(fallthroughproj, ctrl); - _igvn.hash_delete(memproj_fallthrough); - _igvn.subsume_node(memproj_fallthrough, mem); + _igvn.replace_node(fallthroughproj, ctrl); + _igvn.replace_node(memproj_fallthrough, mem); fallthroughproj = ctrl; memproj_fallthrough = mem; ctrl = membar->in(TypeFunc::Control); mem = membar->in(TypeFunc::Memory); } - _igvn.hash_delete(fallthroughproj); - _igvn.subsume_node(fallthroughproj, ctrl); - _igvn.hash_delete(memproj_fallthrough); - _igvn.subsume_node(memproj_fallthrough, mem); + _igvn.replace_node(fallthroughproj, ctrl); + _igvn.replace_node(memproj_fallthrough, mem); return true; } @@ -1879,13 +1869,12 @@ region->init_req(1, slow_ctrl); // region inputs are now complete transform_later(region); - _igvn.subsume_node(_fallthroughproj, region); + _igvn.replace_node(_fallthroughproj, region); Node *memproj = transform_later( new(C, 1) ProjNode(call, TypeFunc::Memory) ); mem_phi->init_req(1, memproj ); transform_later(mem_phi); - _igvn.hash_delete(_memproj_fallthrough); - _igvn.subsume_node(_memproj_fallthrough, mem_phi); + _igvn.replace_node(_memproj_fallthrough, mem_phi); } //------------------------------expand_unlock_node---------------------- @@ -1943,14 +1932,13 @@ region->init_req(1, slow_ctrl); // region inputs are now complete transform_later(region); - _igvn.subsume_node(_fallthroughproj, region); + _igvn.replace_node(_fallthroughproj, region); Node *memproj = transform_later( new(C, 1) ProjNode(call, TypeFunc::Memory) ); mem_phi->init_req(1, memproj ); mem_phi->init_req(2, mem); 
transform_later(mem_phi); - _igvn.hash_delete(_memproj_fallthrough); - _igvn.subsume_node(_memproj_fallthrough, mem_phi); + _igvn.replace_node(_memproj_fallthrough, mem_phi); } //------------------------------expand_macro_nodes---------------------- @@ -1969,9 +1957,7 @@ if (n->is_AbstractLock()) { success = eliminate_locking_node(n->as_AbstractLock()); } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) { - _igvn.add_users_to_worklist(n); - _igvn.hash_delete(n); - _igvn.subsume_node(n, n->in(1)); + _igvn.replace_node(n, n->in(1)); success = true; } assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count"); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/opto/matcher.cpp --- a/src/share/vm/opto/matcher.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/opto/matcher.cpp Thu May 07 10:30:17 2009 -0700 @@ -275,6 +275,12 @@ C->print_method("Before Matching"); + // Create new ideal node ConP #NULL even if it does exist in old space + // to avoid false sharing if the corresponding mach node is not used. + // The corresponding mach node is only used in rare cases for derived + // pointers. + Node* new_ideal_null = ConNode::make(C, TypePtr::NULL_PTR); + // Swap out to old-space; emptying new-space Arena *old = C->node_arena()->move_contents(C->old_arena()); @@ -316,7 +322,16 @@ } } + // Generate new mach node for ConP #NULL + assert(new_ideal_null != NULL, "sanity"); + _mach_null = match_tree(new_ideal_null); + // Don't set control, it will confuse GCM since there are no uses. + // The control will be set when this node is used first time + // in find_base_for_derived(). + assert(_mach_null != NULL, ""); + C->set_root(xroot->is_Root() ? 
xroot->as_Root() : NULL); + #ifdef ASSERT verify_new_nodes_only(xroot); #endif diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/opto/matcher.hpp --- a/src/share/vm/opto/matcher.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/opto/matcher.hpp Thu May 07 10:30:17 2009 -0700 @@ -109,6 +109,9 @@ Node* _mem_node; // Ideal memory node consumed by mach node #endif + // Mach node for ConP #NULL + MachNode* _mach_null; + public: int LabelRootDepth; static const int base2reg[]; // Map Types to machine register types @@ -122,6 +125,8 @@ static RegMask mreg2regmask[]; static RegMask STACK_ONLY_mask; + MachNode* mach_null() const { return _mach_null; } + bool is_shared( Node *n ) { return _shared.test(n->_idx) != 0; } void set_shared( Node *n ) { _shared.set(n->_idx); } bool is_visited( Node *n ) { return _visited.test(n->_idx) != 0; } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/opto/memnode.cpp --- a/src/share/vm/opto/memnode.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/opto/memnode.cpp Thu May 07 10:30:17 2009 -0700 @@ -218,6 +218,26 @@ // Don't bother trying to transform a dead node if( ctl && ctl->is_top() ) return NodeSentinel; + PhaseIterGVN *igvn = phase->is_IterGVN(); + // Wait if control on the worklist. + if (ctl && can_reshape && igvn != NULL) { + Node* bol = NULL; + Node* cmp = NULL; + if (ctl->in(0)->is_If()) { + assert(ctl->is_IfTrue() || ctl->is_IfFalse(), "sanity"); + bol = ctl->in(0)->in(1); + if (bol->is_Bool()) + cmp = ctl->in(0)->in(1)->in(1); + } + if (igvn->_worklist.member(ctl) || + (bol != NULL && igvn->_worklist.member(bol)) || + (cmp != NULL && igvn->_worklist.member(cmp)) ) { + // This control path may be dead. + // Delay this memory node transformation until the control is processed. 
+ phase->is_IterGVN()->_worklist.push(this); + return NodeSentinel; // caller will return NULL + } + } // Ignore if memory is dead, or self-loop Node *mem = in(MemNode::Memory); if( phase->type( mem ) == Type::TOP ) return NodeSentinel; // caller will return NULL @@ -227,14 +247,22 @@ const Type *t_adr = phase->type( address ); if( t_adr == Type::TOP ) return NodeSentinel; // caller will return NULL - PhaseIterGVN *igvn = phase->is_IterGVN(); - if( can_reshape && igvn != NULL && igvn->_worklist.member(address) ) { + if( can_reshape && igvn != NULL && + (igvn->_worklist.member(address) || phase->type(address) != adr_type()) ) { // The address's base and type may change when the address is processed. // Delay this mem node transformation until the address is processed. phase->is_IterGVN()->_worklist.push(this); return NodeSentinel; // caller will return NULL } +#ifdef ASSERT + Node* base = NULL; + if (address->is_AddP()) + base = address->in(AddPNode::Base); + assert(base == NULL || t_adr->isa_rawptr() || + !phase->type(base)->higher_equal(TypePtr::NULL_PTR), "NULL+offs not RAW address?"); +#endif + // Avoid independent memory operations Node* old_mem = mem; @@ -1307,22 +1335,20 @@ set_req(MemNode::Control,ctrl); } - // Check for useless control edge in some common special cases - if (in(MemNode::Control) != NULL) { - intptr_t ignore = 0; - Node* base = AddPNode::Ideal_base_and_offset(address, phase, ignore); - if (base != NULL + intptr_t ignore = 0; + Node* base = AddPNode::Ideal_base_and_offset(address, phase, ignore); + if (base != NULL + && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) { + // Check for useless control edge in some common special cases + if (in(MemNode::Control) != NULL && phase->type(base)->higher_equal(TypePtr::NOTNULL) - && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw && all_controls_dominate(base, phase->C->start())) { // A method-invariant, non-null address (constant or 
'this' argument). set_req(MemNode::Control, NULL); } - } - - if (EliminateAutoBox && can_reshape && in(Address)->is_AddP()) { - Node* base = in(Address)->in(AddPNode::Base); - if (base != NULL) { + + if (EliminateAutoBox && can_reshape) { + assert(!phase->type(base)->higher_equal(TypePtr::NULL_PTR), "the autobox pointer should be non-null"); Compile::AliasType* atp = phase->C->alias_type(adr_type()); if (is_autobox_object(atp)) { Node* result = eliminate_autobox(phase); @@ -1455,10 +1481,11 @@ jt = _type; } - if (EliminateAutoBox) { + if (EliminateAutoBox && adr->is_AddP()) { // The pointers in the autobox arrays are always non-null - Node* base = in(Address)->in(AddPNode::Base); - if (base != NULL) { + Node* base = adr->in(AddPNode::Base); + if (base != NULL && + !phase->type(base)->higher_equal(TypePtr::NULL_PTR)) { Compile::AliasType* atp = phase->C->alias_type(base->adr_type()); if (is_autobox_cache(atp)) { return jt->join(TypePtr::NOTNULL)->is_ptr(); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/opto/output.cpp --- a/src/share/vm/opto/output.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/opto/output.cpp Thu May 07 10:30:17 2009 -0700 @@ -2256,7 +2256,8 @@ // bother scheduling them. Node *last = bb->_nodes[_bb_end]; if( last->is_Catch() || - (last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) { + // Exclude unreachable path case when Halt node is in a separate block. + (_bb_end > 1 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) { // There must be a prior call. Skip it. 
while( !bb->_nodes[--_bb_end]->is_Call() ) { assert( bb->_nodes[_bb_end]->is_Proj(), "skipping projections after expected call" ); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/opto/parse.hpp --- a/src/share/vm/opto/parse.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/opto/parse.hpp Thu May 07 10:30:17 2009 -0700 @@ -476,7 +476,7 @@ void do_newarray(BasicType elemtype); void do_anewarray(); void do_multianewarray(); - Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions); + Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs); // implementation of jsr/ret void do_jsr(); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/opto/parse1.cpp --- a/src/share/vm/opto/parse1.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/opto/parse1.cpp Thu May 07 10:30:17 2009 -0700 @@ -828,6 +828,7 @@ break; case Bytecodes::_invokestatic: + case Bytecodes::_invokedynamic: case Bytecodes::_invokespecial: case Bytecodes::_invokevirtual: case Bytecodes::_invokeinterface: diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/opto/parse2.cpp --- a/src/share/vm/opto/parse2.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/opto/parse2.cpp Thu May 07 10:30:17 2009 -0700 @@ -2156,6 +2156,7 @@ break; case Bytecodes::_invokestatic: + case Bytecodes::_invokedynamic: case Bytecodes::_invokespecial: case Bytecodes::_invokevirtual: case Bytecodes::_invokeinterface: diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/opto/parse3.cpp --- a/src/share/vm/opto/parse3.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/opto/parse3.cpp Thu May 07 10:30:17 2009 -0700 @@ -335,7 +335,7 @@ const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass); Node* count_val = pop(); - Node* obj = new_array(makecon(array_klass_type), count_val); + Node* obj = new_array(makecon(array_klass_type), count_val, 1); push(obj); } @@ -345,17 +345,17 @@ Node* count_val = pop(); const TypeKlassPtr* array_klass = 
TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type)); - Node* obj = new_array(makecon(array_klass), count_val); + Node* obj = new_array(makecon(array_klass), count_val, 1); // Push resultant oop onto stack push(obj); } // Expand simple expressions like new int[3][5] and new Object[2][nonConLen]. // Also handle the degenerate 1-dimensional case of anewarray. -Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions) { +Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs) { Node* length = lengths[0]; assert(length != NULL, ""); - Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length); + Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length, nargs); if (ndimensions > 1) { jint length_con = find_int_con(length, -1); guarantee(length_con >= 0, "non-constant multianewarray"); @@ -364,7 +364,7 @@ const Type* elemtype = _gvn.type(array)->is_aryptr()->elem(); const intptr_t header = arrayOopDesc::base_offset_in_bytes(T_OBJECT); for (jint i = 0; i < length_con; i++) { - Node* elem = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1); + Node* elem = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs); intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop); Node* eaddr = basic_plus_adr(array, offset); store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT); @@ -419,7 +419,7 @@ // Can use multianewarray instead of [a]newarray if only one dimension, // or if all non-final dimensions are small constants. 
if (expand_count == 1 || (1 <= expand_count && expand_count <= expand_limit)) { - Node* obj = expand_multianewarray(array_klass, &length[0], ndimensions); + Node* obj = expand_multianewarray(array_klass, &length[0], ndimensions, ndimensions); push(obj); return; } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/opto/parseHelper.cpp --- a/src/share/vm/opto/parseHelper.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/opto/parseHelper.cpp Thu May 07 10:30:17 2009 -0700 @@ -414,6 +414,7 @@ profile_receiver_type(receiver); break; case Bytecodes::_invokestatic: + case Bytecodes::_invokedynamic: case Bytecodes::_invokespecial: break; default: fatal("unexpected call bytecode"); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/prims/jvm.cpp --- a/src/share/vm/prims/jvm.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/prims/jvm.cpp Thu May 07 10:30:17 2009 -0700 @@ -2222,6 +2222,9 @@ case JVM_CONSTANT_InterfaceMethodref: case JVM_CONSTANT_Methodref: return cp->uncached_name_ref_at(cp_index)->as_utf8(); + case JVM_CONSTANT_NameAndType: + // for invokedynamic + return cp->nt_name_ref_at(cp_index)->as_utf8(); default: fatal("JVM_GetCPMethodNameUTF: illegal constant"); } @@ -2239,6 +2242,9 @@ case JVM_CONSTANT_InterfaceMethodref: case JVM_CONSTANT_Methodref: return cp->uncached_signature_ref_at(cp_index)->as_utf8(); + case JVM_CONSTANT_NameAndType: + // for invokedynamic + return cp->nt_signature_ref_at(cp_index)->as_utf8(); default: fatal("JVM_GetCPMethodSignatureUTF: illegal constant"); } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/prims/jvmtiClassFileReconstituter.cpp --- a/src/share/vm/prims/jvmtiClassFileReconstituter.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/prims/jvmtiClassFileReconstituter.cpp Thu May 07 10:30:17 2009 -0700 @@ -659,15 +659,21 @@ case Bytecodes::_invokevirtual : // fall through case Bytecodes::_invokespecial : // fall through case Bytecodes::_invokestatic : // fall through + case Bytecodes::_invokedynamic : // fall through 
case Bytecodes::_invokeinterface : assert(len == 3 || (code == Bytecodes::_invokeinterface && len ==5), "sanity check"); + int cpci = Bytes::get_native_u2(bcp+1); + bool is_invokedynamic = (EnableInvokeDynamic && code == Bytecodes::_invokedynamic); + if (is_invokedynamic) + cpci = Bytes::get_native_u4(bcp+1); // cache cannot be pre-fetched since some classes won't have it yet ConstantPoolCacheEntry* entry = - mh->constants()->cache()->entry_at(Bytes::get_native_u2(bcp+1)); + mh->constants()->cache()->main_entry_at(cpci); int i = entry->constant_pool_index(); assert(i < mh->constants()->length(), "sanity check"); Bytes::put_Java_u2((address)(p+1), (u2)i); // java byte ordering + if (is_invokedynamic) *(p+3) = *(p+4) = 0; break; } } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/prims/methodComparator.cpp --- a/src/share/vm/prims/methodComparator.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/prims/methodComparator.cpp Thu May 07 10:30:17 2009 -0700 @@ -148,8 +148,8 @@ case Bytecodes::_invokespecial : // fall through case Bytecodes::_invokestatic : // fall through case Bytecodes::_invokeinterface : { - u2 cpci_old = _s_old->get_index_big(); - u2 cpci_new = _s_new->get_index_big(); + u2 cpci_old = _s_old->get_index_int(); + u2 cpci_new = _s_new->get_index_int(); // Check if the names of classes, field/method names and signatures at these indexes // are the same. Indices which are really into constantpool cache (rather than constant // pool itself) are accepted by the constantpool query routines below. diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/prims/methodHandles.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/prims/methodHandles.cpp Thu May 07 10:30:17 2009 -0700 @@ -0,0 +1,2383 @@ +/* + * Copyright 2008-2009 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +/* + * JSR 292 reference implementation: method handles + */ + +#include "incls/_precompiled.incl" +#include "incls/_methodHandles.cpp.incl" + +bool MethodHandles::_enabled = false; // set true after successful native linkage + +MethodHandleEntry* MethodHandles::_entries[MethodHandles::_EK_LIMIT] = {NULL}; +const char* MethodHandles::_entry_names[_EK_LIMIT+1] = { + "check_mtype", + "wrong_method_type", // what happens when there is a type mismatch + "invokestatic", // how a MH emulates invokestatic + "invokespecial", // ditto for the other invokes... + "invokevirtual", + "invokeinterface", + "bound_ref", // these are for BMH... + "bound_int", + "bound_long", + "bound_ref_direct", // (direct versions have a direct methodOop) + "bound_int_direct", + "bound_long_direct", + + // starting at _adapter_mh_first: + "adapter_retype_only", // these are for AMH... 
+ "adapter_check_cast", + "adapter_prim_to_prim", + "adapter_ref_to_prim", + "adapter_prim_to_ref", + "adapter_swap_args", + "adapter_rot_args", + "adapter_dup_args", + "adapter_drop_args", + "adapter_collect_args", + "adapter_spread_args", + "adapter_flyby", + "adapter_ricochet", + + // optimized adapter types: + "adapter_swap_args/1", + "adapter_swap_args/2", + "adapter_rot_args/1,up", + "adapter_rot_args/1,down", + "adapter_rot_args/2,up", + "adapter_rot_args/2,down", + "adapter_prim_to_prim/i2i", + "adapter_prim_to_prim/l2i", + "adapter_prim_to_prim/d2f", + "adapter_prim_to_prim/i2l", + "adapter_prim_to_prim/f2d", + "adapter_ref_to_prim/unboxi", + "adapter_ref_to_prim/unboxl", + "adapter_spread_args/0", + "adapter_spread_args/1", + "adapter_spread_args/more", + + NULL +}; + +#ifdef ASSERT +bool MethodHandles::spot_check_entry_names() { + assert(!strcmp(entry_name(_invokestatic_mh), "invokestatic"), ""); + assert(!strcmp(entry_name(_bound_ref_mh), "bound_ref"), ""); + assert(!strcmp(entry_name(_adapter_retype_only), "adapter_retype_only"), ""); + assert(!strcmp(entry_name(_adapter_ricochet), "adapter_ricochet"), ""); + assert(!strcmp(entry_name(_adapter_opt_unboxi), "adapter_ref_to_prim/unboxi"), ""); + return true; +} +#endif + +void MethodHandles::set_enabled(bool z) { + if (_enabled != z) { + guarantee(z && EnableMethodHandles, "can only enable once, and only if -XX:+EnableMethodHandles"); + _enabled = z; + } +} + +// Note: A method which does not have a TRAPS argument cannot block in the GC +// or throw exceptions. Such methods are used in this file to do something quick +// and local, like parse a data structure. For speed, such methods work on plain +// oops, not handles. Trapping methods uniformly operate on handles. 
+ +methodOop MethodHandles::decode_vmtarget(oop vmtarget, int vmindex, oop mtype, + klassOop& receiver_limit_result, int& decode_flags_result) { + if (vmtarget == NULL) return NULL; + assert(methodOopDesc::nonvirtual_vtable_index < 0, "encoding"); + if (vmindex < 0) { + // this DMH performs no dispatch; it is directly bound to a methodOop + // A MemberName may either be directly bound to a methodOop, + // or it may use the klass/index form; both forms mean the same thing. + methodOop m = decode_methodOop(methodOop(vmtarget), decode_flags_result); + if ((decode_flags_result & _dmf_has_receiver) != 0 + && java_dyn_MethodType::is_instance(mtype)) { + // Extract receiver type restriction from mtype.ptypes[0]. + objArrayOop ptypes = java_dyn_MethodType::ptypes(mtype); + oop ptype0 = (ptypes == NULL || ptypes->length() < 1) ? oop(NULL) : ptypes->obj_at(0); + if (java_lang_Class::is_instance(ptype0)) + receiver_limit_result = java_lang_Class::as_klassOop(ptype0); + } + if (vmindex == methodOopDesc::nonvirtual_vtable_index) { + // this DMH can be an "invokespecial" version + decode_flags_result &= ~_dmf_does_dispatch; + } else { + assert(vmindex == methodOopDesc::invalid_vtable_index, "random vmindex?"); + } + return m; + } else { + decode_flags_result |= MethodHandles::_dmf_does_dispatch; + assert(vmtarget->is_klass(), "must be class or interface"); + receiver_limit_result = (klassOop)vmtarget; + Klass* tk = Klass::cast((klassOop)vmtarget); + if (tk->is_interface()) { + // an itable linkage is + decode_flags_result |= MethodHandles::_dmf_from_interface; + return klassItable::method_for_itable_index((klassOop)vmtarget, vmindex); + } else { + if (!tk->oop_is_instance()) + tk = instanceKlass::cast(SystemDictionary::object_klass()); + return ((instanceKlass*)tk)->method_at_vtable(vmindex); + } + } +} + +// MemberName and DirectMethodHandle have the same linkage to the JVM internals. +// (MemberName is the non-operational name used for queries and setup.) 
+ +methodOop MethodHandles::decode_DirectMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) { + oop vmtarget = sun_dyn_DirectMethodHandle::vmtarget(mh); + int vmindex = sun_dyn_DirectMethodHandle::vmindex(mh); + oop mtype = sun_dyn_DirectMethodHandle::type(mh); + return decode_vmtarget(vmtarget, vmindex, mtype, receiver_limit_result, decode_flags_result); +} + +methodOop MethodHandles::decode_BoundMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) { + assert(mh->klass() == SystemDictionary::BoundMethodHandle_klass(), ""); + for (oop bmh = mh;;) { + // Bound MHs can be stacked to bind several arguments. + oop target = java_dyn_MethodHandle::vmtarget(bmh); + if (target == NULL) return NULL; + decode_flags_result |= MethodHandles::_dmf_binds_argument; + klassOop tk = target->klass(); + if (tk == SystemDictionary::BoundMethodHandle_klass()) { + bmh = target; + continue; + } else { + if (java_dyn_MethodHandle::is_subclass(tk)) { + //assert(tk == SystemDictionary::DirectMethodHandle_klass(), "end of BMH chain must be DMH"); + return decode_MethodHandle(target, receiver_limit_result, decode_flags_result); + } else { + // Optimized case: binding a receiver to a non-dispatched DMH + // short-circuits directly to the methodOop. + assert(target->is_method(), "must be a simple method"); + methodOop m = (methodOop) target; + DEBUG_ONLY(int argslot = sun_dyn_BoundMethodHandle::vmargslot(bmh)); + assert(argslot == m->size_of_parameters() - 1, "must be initial argument (receiver)"); + decode_flags_result |= MethodHandles::_dmf_binds_method; + return m; + } + } + } +} + +methodOop MethodHandles::decode_AdapterMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) { + assert(mh->klass() == SystemDictionary::AdapterMethodHandle_klass(), ""); + for (oop amh = mh;;) { + // Adapter MHs can be stacked to convert several arguments. 
+ int conv_op = adapter_conversion_op(sun_dyn_AdapterMethodHandle::conversion(amh)); + decode_flags_result |= (_dmf_adapter_lsb << conv_op) & _DMF_ADAPTER_MASK; + oop target = java_dyn_MethodHandle::vmtarget(amh); + if (target == NULL) return NULL; + klassOop tk = target->klass(); + if (tk == SystemDictionary::AdapterMethodHandle_klass()) { + amh = target; + continue; + } else { + // must be a BMH (which will bind some more arguments) or a DMH (for the final call) + return MethodHandles::decode_MethodHandle(target, receiver_limit_result, decode_flags_result); + } + } +} + +methodOop MethodHandles::decode_MethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) { + if (mh == NULL) return NULL; + klassOop mhk = mh->klass(); + assert(java_dyn_MethodHandle::is_subclass(mhk), "must be a MethodHandle"); + if (mhk == SystemDictionary::DirectMethodHandle_klass()) { + return decode_DirectMethodHandle(mh, receiver_limit_result, decode_flags_result); + } else if (mhk == SystemDictionary::BoundMethodHandle_klass()) { + return decode_BoundMethodHandle(mh, receiver_limit_result, decode_flags_result); + } else if (mhk == SystemDictionary::AdapterMethodHandle_klass()) { + return decode_AdapterMethodHandle(mh, receiver_limit_result, decode_flags_result); + } else { + assert(false, "cannot parse this MH"); + return NULL; // random MH? + } +} + +methodOop MethodHandles::decode_methodOop(methodOop m, int& decode_flags_result) { + assert(m->is_method(), ""); + if (m->is_static()) { + // check that signature begins '(L' or '([' (not '(I', '()', etc.) 
+ symbolOop sig = m->signature(); + BasicType recv_bt = char2type(sig->byte_at(1)); + // Note: recv_bt might be T_ILLEGAL if byte_at(2) is ')' + assert(sig->byte_at(0) == '(', "must be method sig"); + if (recv_bt == T_OBJECT || recv_bt == T_ARRAY) + decode_flags_result |= _dmf_has_receiver; + } else { + // non-static method + decode_flags_result |= _dmf_has_receiver; + if (!m->can_be_statically_bound() && !m->is_initializer()) { + decode_flags_result |= _dmf_does_dispatch; + if (Klass::cast(m->method_holder())->is_interface()) + decode_flags_result |= _dmf_from_interface; + } + } + return m; +} + + +// A trusted party is handing us a cookie to determine a method. +// Let's boil it down to the method oop they really want. +methodOop MethodHandles::decode_method(oop x, klassOop& receiver_limit_result, int& decode_flags_result) { + decode_flags_result = 0; + receiver_limit_result = NULL; + klassOop xk = x->klass(); + if (xk == Universe::methodKlassObj()) { + return decode_methodOop((methodOop) x, decode_flags_result); + } else if (xk == SystemDictionary::MemberName_klass()) { + // Note: This only works if the MemberName has already been resolved. 
+ return decode_MemberName(x, receiver_limit_result, decode_flags_result); + } else if (java_dyn_MethodHandle::is_subclass(xk)) { + return decode_MethodHandle(x, receiver_limit_result, decode_flags_result); + } else if (xk == SystemDictionary::reflect_method_klass()) { + oop clazz = java_lang_reflect_Method::clazz(x); + int slot = java_lang_reflect_Method::slot(x); + klassOop k = java_lang_Class::as_klassOop(clazz); + if (k != NULL && Klass::cast(k)->oop_is_instance()) + return decode_methodOop(instanceKlass::cast(k)->method_with_idnum(slot), + decode_flags_result); + } else if (xk == SystemDictionary::reflect_constructor_klass()) { + oop clazz = java_lang_reflect_Constructor::clazz(x); + int slot = java_lang_reflect_Constructor::slot(x); + klassOop k = java_lang_Class::as_klassOop(clazz); + if (k != NULL && Klass::cast(k)->oop_is_instance()) + return decode_methodOop(instanceKlass::cast(k)->method_with_idnum(slot), + decode_flags_result); + } else { + // unrecognized object + assert(!x->is_method(), "already checked"); + assert(!sun_dyn_MemberName::is_instance(x), "already checked"); + } + return NULL; +} + + +int MethodHandles::decode_MethodHandle_stack_pushes(oop mh) { + if (mh->klass() == SystemDictionary::DirectMethodHandle_klass()) + return 0; // no push/pop + int this_vmslots = java_dyn_MethodHandle::vmslots(mh); + int last_vmslots = 0; + oop last_mh = mh; + for (;;) { + oop target = java_dyn_MethodHandle::vmtarget(last_mh); + if (target->klass() == SystemDictionary::DirectMethodHandle_klass()) { + last_vmslots = java_dyn_MethodHandle::vmslots(target); + break; + } else if (!java_dyn_MethodHandle::is_instance(target)) { + // might be klass or method + assert(target->is_method(), "must get here with a direct ref to method"); + last_vmslots = methodOop(target)->size_of_parameters(); + break; + } + last_mh = target; + } + // If I am called with fewer VM slots than my ultimate callee, + // it must be that I push the additionally needed slots. 
+ // Likewise if am called with more VM slots, I will pop them. + return (last_vmslots - this_vmslots); +} + + +// MemberName support + +// import sun_dyn_MemberName.* +enum { + IS_METHOD = sun_dyn_MemberName::MN_IS_METHOD, + IS_CONSTRUCTOR = sun_dyn_MemberName::MN_IS_CONSTRUCTOR, + IS_FIELD = sun_dyn_MemberName::MN_IS_FIELD, + IS_TYPE = sun_dyn_MemberName::MN_IS_TYPE, + SEARCH_SUPERCLASSES = sun_dyn_MemberName::MN_SEARCH_SUPERCLASSES, + SEARCH_INTERFACES = sun_dyn_MemberName::MN_SEARCH_INTERFACES, + ALL_KINDS = IS_METHOD | IS_CONSTRUCTOR | IS_FIELD | IS_TYPE, + VM_INDEX_UNINITIALIZED = sun_dyn_MemberName::VM_INDEX_UNINITIALIZED +}; + +void MethodHandles::init_MemberName(oop mname_oop, oop target_oop) { + if (target_oop->klass() == SystemDictionary::reflect_field_klass()) { + oop clazz = java_lang_reflect_Field::clazz(target_oop); // fd.field_holder() + int slot = java_lang_reflect_Field::slot(target_oop); // fd.index() + int mods = java_lang_reflect_Field::modifiers(target_oop); + klassOop k = java_lang_Class::as_klassOop(clazz); + int offset = instanceKlass::cast(k)->offset_from_fields(slot); + init_MemberName(mname_oop, k, accessFlags_from(mods), offset); + } else { + int decode_flags = 0; klassOop receiver_limit = NULL; + methodOop m = MethodHandles::decode_method(target_oop, + receiver_limit, decode_flags); + bool do_dispatch = ((decode_flags & MethodHandles::_dmf_does_dispatch) != 0); + init_MemberName(mname_oop, m, do_dispatch); + } +} + +void MethodHandles::init_MemberName(oop mname_oop, methodOop m, bool do_dispatch) { + int flags = ((m->is_initializer() ? 
IS_CONSTRUCTOR : IS_METHOD) + | (jushort)( m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS )); + oop vmtarget = m; + int vmindex = methodOopDesc::invalid_vtable_index; // implies no info yet + if (!do_dispatch || (flags & IS_CONSTRUCTOR) || m->can_be_statically_bound()) + vmindex = methodOopDesc::nonvirtual_vtable_index; // implies never any dispatch + assert(vmindex != VM_INDEX_UNINITIALIZED, "Java sentinel value"); + sun_dyn_MemberName::set_vmtarget(mname_oop, vmtarget); + sun_dyn_MemberName::set_vmindex(mname_oop, vmindex); + sun_dyn_MemberName::set_flags(mname_oop, flags); +} + +void MethodHandles::init_MemberName(oop mname_oop, klassOop field_holder, AccessFlags mods, int offset) { + int flags = (IS_FIELD | (jushort)( mods.as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS )); + oop vmtarget = field_holder; + int vmindex = offset; // implies no info yet + assert(vmindex != VM_INDEX_UNINITIALIZED, "bad alias on vmindex"); + sun_dyn_MemberName::set_vmtarget(mname_oop, vmtarget); + sun_dyn_MemberName::set_vmindex(mname_oop, vmindex); + sun_dyn_MemberName::set_flags(mname_oop, flags); +} + + +methodOop MethodHandles::decode_MemberName(oop mname, klassOop& receiver_limit_result, int& decode_flags_result) { + int flags = sun_dyn_MemberName::flags(mname); + if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) == 0) return NULL; // not invocable + oop vmtarget = sun_dyn_MemberName::vmtarget(mname); + int vmindex = sun_dyn_MemberName::vmindex(mname); + if (vmindex == VM_INDEX_UNINITIALIZED) return NULL; // not resolved + return decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit_result, decode_flags_result); +} + +// An unresolved member name is a mere symbolic reference. +// Resolving it plants a vmtarget/vmindex in it, +// which refers dirctly to JVM internals. 
+void MethodHandles::resolve_MemberName(Handle mname, TRAPS) { + assert(sun_dyn_MemberName::is_instance(mname()), ""); +#ifdef ASSERT + // If this assert throws, renegotiate the sentinel value used by the Java code, + // so that it is distinct from any valid vtable index value, and any special + // values defined in methodOopDesc::VtableIndexFlag. + // The point of the slop is to give the Java code and the JVM some room + // to independently specify sentinel values. + const int sentinel_slop = 10; + const int sentinel_limit = methodOopDesc::highest_unused_vtable_index_value - sentinel_slop; + assert(VM_INDEX_UNINITIALIZED < sentinel_limit, "Java sentinel != JVM sentinels"); +#endif + if (sun_dyn_MemberName::vmindex(mname()) != VM_INDEX_UNINITIALIZED) + return; // already resolved + oop defc_oop = sun_dyn_MemberName::clazz(mname()); + oop name_str = sun_dyn_MemberName::name(mname()); + oop type_str = sun_dyn_MemberName::type(mname()); + int flags = sun_dyn_MemberName::flags(mname()); + + if (defc_oop == NULL || name_str == NULL || type_str == NULL) { + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "nothing to resolve"); + } + klassOop defc_klassOop = java_lang_Class::as_klassOop(defc_oop); + defc_oop = NULL; // safety + if (defc_klassOop == NULL) return; // a primitive; no resolution possible + if (!Klass::cast(defc_klassOop)->oop_is_instance()) { + if (!Klass::cast(defc_klassOop)->oop_is_array()) return; + defc_klassOop = SystemDictionary::object_klass(); + } + instanceKlassHandle defc(THREAD, defc_klassOop); + defc_klassOop = NULL; // safety + if (defc.is_null()) { + THROW_MSG(vmSymbols::java_lang_InternalError(), "primitive class"); + } + defc->link_class(CHECK); + + // convert the external string name to an internal symbol + symbolHandle name(THREAD, java_lang_String::as_symbol_or_null(name_str)); + if (name.is_null()) return; // no such name + name_str = NULL; // safety + + // convert the external string or reflective type to an internal signature 
+ bool force_signature = (name() == vmSymbols::invoke_name()); + symbolHandle type; { + symbolOop type_sym = NULL; + if (java_dyn_MethodType::is_instance(type_str)) { + type_sym = java_dyn_MethodType::as_signature(type_str, force_signature, CHECK); + } else if (java_lang_Class::is_instance(type_str)) { + type_sym = java_lang_Class::as_signature(type_str, force_signature, CHECK); + } else if (java_lang_String::is_instance(type_str)) { + if (force_signature) { + type = java_lang_String::as_symbol(type_str, CHECK); + } else { + type_sym = java_lang_String::as_symbol_or_null(type_str); + } + } else { + THROW_MSG(vmSymbols::java_lang_InternalError(), "unrecognized type"); + } + if (type_sym != NULL) + type = symbolHandle(THREAD, type_sym); + } + if (type.is_null()) return; // no such signature exists in the VM + type_str = NULL; // safety + + // Time to do the lookup. + switch (flags & ALL_KINDS) { + case IS_METHOD: + { + CallInfo result; + { + EXCEPTION_MARK; + if ((flags & JVM_ACC_STATIC) != 0) { + LinkResolver::resolve_static_call(result, + defc, name, type, KlassHandle(), false, false, THREAD); + } else if (defc->is_interface()) { + LinkResolver::resolve_interface_call(result, Handle(), defc, + defc, name, type, KlassHandle(), false, false, THREAD); + } else { + LinkResolver::resolve_virtual_call(result, Handle(), defc, + defc, name, type, KlassHandle(), false, false, THREAD); + } + if (HAS_PENDING_EXCEPTION) { + CLEAR_PENDING_EXCEPTION; + return; + } + } + methodHandle m = result.resolved_method(); + oop vmtarget = NULL; + int vmindex = methodOopDesc::nonvirtual_vtable_index; + if (defc->is_interface()) { + vmindex = klassItable::compute_itable_index(m()); + assert(vmindex >= 0, ""); + } else if (result.has_vtable_index()) { + vmindex = result.vtable_index(); + assert(vmindex >= 0, ""); + } + assert(vmindex != VM_INDEX_UNINITIALIZED, ""); + if (vmindex < 0) { + assert(result.is_statically_bound(), ""); + vmtarget = m(); + } else { + vmtarget = 
result.resolved_klass()->as_klassOop(); + } + int mods = (m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS); + sun_dyn_MemberName::set_vmtarget(mname(), vmtarget); + sun_dyn_MemberName::set_vmindex(mname(), vmindex); + sun_dyn_MemberName::set_modifiers(mname(), mods); + DEBUG_ONLY(int junk; klassOop junk2); + assert(decode_MemberName(mname(), junk2, junk) == result.resolved_method()(), + "properly stored for later decoding"); + return; + } + case IS_CONSTRUCTOR: + { + CallInfo result; + { + EXCEPTION_MARK; + if (name() == vmSymbols::object_initializer_name()) { + LinkResolver::resolve_special_call(result, + defc, name, type, KlassHandle(), false, THREAD); + } else { + break; // will throw after end of switch + } + if (HAS_PENDING_EXCEPTION) { + CLEAR_PENDING_EXCEPTION; + return; + } + } + assert(result.is_statically_bound(), ""); + methodHandle m = result.resolved_method(); + oop vmtarget = m(); + int vmindex = methodOopDesc::nonvirtual_vtable_index; + int mods = (m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS); + sun_dyn_MemberName::set_vmtarget(mname(), vmtarget); + sun_dyn_MemberName::set_vmindex(mname(), vmindex); + sun_dyn_MemberName::set_modifiers(mname(), mods); + DEBUG_ONLY(int junk; klassOop junk2); + assert(decode_MemberName(mname(), junk2, junk) == result.resolved_method()(), + "properly stored for later decoding"); + return; + } + case IS_FIELD: + { + // This is taken from LinkResolver::resolve_field, sans access checks. 
+ fieldDescriptor fd; // find_field initializes fd if found + KlassHandle sel_klass(THREAD, instanceKlass::cast(defc())->find_field(name(), type(), &fd)); + // check if field exists; i.e., if a klass containing the field def has been selected + if (sel_klass.is_null()) return; + oop vmtarget = sel_klass->as_klassOop(); + int vmindex = fd.offset(); + int mods = (fd.access_flags().as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS); + if (vmindex == VM_INDEX_UNINITIALIZED) break; // should not happen + sun_dyn_MemberName::set_vmtarget(mname(), vmtarget); + sun_dyn_MemberName::set_vmindex(mname(), vmindex); + sun_dyn_MemberName::set_modifiers(mname(), mods); + return; + } + } + THROW_MSG(vmSymbols::java_lang_InternalError(), "unrecognized MemberName format"); +} + +// Conversely, a member name which is only initialized from JVM internals +// may have null defc, name, and type fields. +// Resolving it plants a vmtarget/vmindex in it, +// which refers directly to JVM internals. +void MethodHandles::expand_MemberName(Handle mname, int suppress, TRAPS) { + assert(sun_dyn_MemberName::is_instance(mname()), ""); + oop vmtarget = sun_dyn_MemberName::vmtarget(mname()); + int vmindex = sun_dyn_MemberName::vmindex(mname()); + if (vmtarget == NULL || vmindex == VM_INDEX_UNINITIALIZED) { + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "nothing to expand"); + } + + bool have_defc = (sun_dyn_MemberName::clazz(mname()) != NULL); + bool have_name = (sun_dyn_MemberName::name(mname()) != NULL); + bool have_type = (sun_dyn_MemberName::type(mname()) != NULL); + int flags = sun_dyn_MemberName::flags(mname()); + + if (suppress != 0) { + if (suppress & _suppress_defc) have_defc = true; + if (suppress & _suppress_name) have_name = true; + if (suppress & _suppress_type) have_type = true; + } + + if (have_defc && have_name && have_type) return; // nothing needed + + switch (flags & ALL_KINDS) { + case IS_METHOD: + case IS_CONSTRUCTOR: + { + klassOop receiver_limit = NULL; + int 
decode_flags = 0; + methodHandle m(THREAD, decode_vmtarget(vmtarget, vmindex, NULL, + receiver_limit, decode_flags)); + if (m.is_null()) break; + if (!have_defc) { + klassOop defc = m->method_holder(); + if (receiver_limit != NULL && receiver_limit != defc + && Klass::cast(receiver_limit)->is_subtype_of(defc)) + defc = receiver_limit; + sun_dyn_MemberName::set_clazz(mname(), Klass::cast(defc)->java_mirror()); + } + if (!have_name) { + //not java_lang_String::create_from_symbol; let's intern member names + Handle name = StringTable::intern(m->name(), CHECK); + sun_dyn_MemberName::set_name(mname(), name()); + } + if (!have_type) { + Handle type = java_lang_String::create_from_symbol(m->signature(), CHECK); + sun_dyn_MemberName::set_type(mname(), type()); + } + return; + } + case IS_FIELD: + { + // This is taken from LinkResolver::resolve_field, sans access checks. + if (!vmtarget->is_klass()) break; + if (!Klass::cast((klassOop) vmtarget)->oop_is_instance()) break; + instanceKlassHandle defc(THREAD, (klassOop) vmtarget); + bool is_static = ((flags & JVM_ACC_STATIC) != 0); + fieldDescriptor fd; // find_field initializes fd if found + if (!defc->find_field_from_offset(vmindex, is_static, &fd)) + break; // cannot expand + if (!have_defc) { + sun_dyn_MemberName::set_clazz(mname(), defc->java_mirror()); + } + if (!have_name) { + //not java_lang_String::create_from_symbol; let's intern member names + Handle name = StringTable::intern(fd.name(), CHECK); + sun_dyn_MemberName::set_name(mname(), name()); + } + if (!have_type) { + Handle type = java_lang_String::create_from_symbol(fd.signature(), CHECK); + sun_dyn_MemberName::set_type(mname(), type()); + } + return; + } + } + THROW_MSG(vmSymbols::java_lang_InternalError(), "unrecognized MemberName format"); +} + +int MethodHandles::find_MemberNames(klassOop k, + symbolOop name, symbolOop sig, + int mflags, klassOop caller, + int skip, objArrayOop results) { + DEBUG_ONLY(No_Safepoint_Verifier nsv); + // this code contains no 
safepoints! + + // %%% take caller into account! + + if (k == NULL || !Klass::cast(k)->oop_is_instance()) return -1; + + int rfill = 0, rlimit = results->length(), rskip = skip; + // overflow measurement: + int overflow = 0, overflow_limit = MAX2(1000, rlimit); + + int match_flags = mflags; + bool search_superc = ((match_flags & SEARCH_SUPERCLASSES) != 0); + bool search_intfc = ((match_flags & SEARCH_INTERFACES) != 0); + bool local_only = !(search_superc | search_intfc); + bool classes_only = false; + + if (name != NULL) { + if (name->utf8_length() == 0) return 0; // a match is not possible + } + if (sig != NULL) { + if (sig->utf8_length() == 0) return 0; // a match is not possible + if (sig->byte_at(0) == '(') + match_flags &= ~(IS_FIELD | IS_TYPE); + else + match_flags &= ~(IS_CONSTRUCTOR | IS_METHOD); + } + + if ((match_flags & IS_TYPE) != 0) { + // NYI, and Core Reflection works quite well for this query + } + + if ((match_flags & IS_FIELD) != 0) { + for (FieldStream st(k, local_only, !search_intfc); !st.eos(); st.next()) { + if (name != NULL && st.name() != name) + continue; + if (sig != NULL && st.signature() != sig) + continue; + // passed the filters + if (rskip > 0) { + --rskip; + } else if (rfill < rlimit) { + oop result = results->obj_at(rfill++); + if (!sun_dyn_MemberName::is_instance(result)) + return -99; // caller bug! 
+ MethodHandles::init_MemberName(result, st.klass()->as_klassOop(), st.access_flags(), st.offset()); + } else if (++overflow >= overflow_limit) { + match_flags = 0; break; // got tired of looking at overflow + } + } + } + + if ((match_flags & (IS_METHOD | IS_CONSTRUCTOR)) != 0) { + // watch out for these guys: + symbolOop init_name = vmSymbols::object_initializer_name(); + symbolOop clinit_name = vmSymbols::class_initializer_name(); + if (name == clinit_name) clinit_name = NULL; // hack for exposing + bool negate_name_test = false; + // fix name so that it captures the intention of IS_CONSTRUCTOR + if (!(match_flags & IS_METHOD)) { + // constructors only + if (name == NULL) { + name = init_name; + } else if (name != init_name) { + return 0; // no constructors of this method name + } + } else if (!(match_flags & IS_CONSTRUCTOR)) { + // methods only + if (name == NULL) { + name = init_name; + negate_name_test = true; // if we see the name, we *omit* the entry + } else if (name == init_name) { + return 0; // no methods of this constructor name + } + } else { + // caller will accept either sort; no need to adjust name + } + for (MethodStream st(k, local_only, !search_intfc); !st.eos(); st.next()) { + methodOop m = st.method(); + symbolOop m_name = m->name(); + if (m_name == clinit_name) + continue; + if (name != NULL && ((m_name != name) ^ negate_name_test)) + continue; + if (sig != NULL && m->signature() != sig) + continue; + // passed the filters + if (rskip > 0) { + --rskip; + } else if (rfill < rlimit) { + oop result = results->obj_at(rfill++); + if (!sun_dyn_MemberName::is_instance(result)) + return -99; // caller bug! + MethodHandles::init_MemberName(result, m, true); + } else if (++overflow >= overflow_limit) { + match_flags = 0; break; // got tired of looking at overflow + } + } + } + + // return number of elements we at leasted wanted to initialize + return rfill + overflow; +} + + + + +// Decode the vmtarget field of a method handle. 
+// Sanitize out methodOops, klassOops, and any other non-Java data. +// This is for debugging and reflection. +oop MethodHandles::encode_target(Handle mh, int format, TRAPS) { + assert(java_dyn_MethodHandle::is_instance(mh()), "must be a MH"); + if (format == ETF_HANDLE_OR_METHOD_NAME) { + oop target = java_dyn_MethodHandle::vmtarget(mh()); + if (target == NULL) { + return NULL; // unformed MH + } + klassOop tklass = target->klass(); + if (Klass::cast(tklass)->is_subclass_of(SystemDictionary::object_klass())) { + return target; // target is another MH (or something else?) + } + } + if (format == ETF_DIRECT_HANDLE) { + oop target = mh(); + for (;;) { + if (target->klass() == SystemDictionary::DirectMethodHandle_klass()) { + return target; + } + if (!java_dyn_MethodHandle::is_instance(target)){ + return NULL; // unformed MH + } + target = java_dyn_MethodHandle::vmtarget(target); + } + } + // cases of metadata in MH.vmtarget: + // - AMH can have methodOop for static invoke with bound receiver + // - DMH can have methodOop for static invoke (on variable receiver) + // - DMH can have klassOop for dispatched (non-static) invoke + klassOop receiver_limit = NULL; + int decode_flags = 0; + methodOop m = decode_MethodHandle(mh(), receiver_limit, decode_flags); + if (m == NULL) return NULL; + switch (format) { + case ETF_REFLECT_METHOD: + // same as jni_ToReflectedMethod: + if (m->is_initializer()) { + return Reflection::new_constructor(m, THREAD); + } else { + return Reflection::new_method(m, UseNewReflection, false, THREAD); + } + + case ETF_HANDLE_OR_METHOD_NAME: // method, not handle + case ETF_METHOD_NAME: + { + if (SystemDictionary::MemberName_klass() == NULL) break; + instanceKlassHandle mname_klass(THREAD, SystemDictionary::MemberName_klass()); + mname_klass->initialize(CHECK_NULL); + Handle mname = mname_klass->allocate_instance_handle(CHECK_NULL); + sun_dyn_MemberName::set_vmindex(mname(), VM_INDEX_UNINITIALIZED); + bool do_dispatch = ((decode_flags & 
MethodHandles::_dmf_does_dispatch) != 0); + init_MemberName(mname(), m, do_dispatch); + expand_MemberName(mname, 0, CHECK_NULL); + return mname(); + } + } + + // Unknown format code. + char msg[50]; + jio_snprintf(msg, sizeof(msg), "unknown getTarget format=%d", format); + THROW_MSG_NULL(vmSymbols::java_lang_IllegalArgumentException(), msg); +} + +bool MethodHandles::class_cast_needed(klassOop src, klassOop dst) { + if (src == dst || dst == SystemDictionary::object_klass()) + return false; // quickest checks + Klass* srck = Klass::cast(src); + Klass* dstk = Klass::cast(dst); + if (dstk->is_interface()) { + // interface receivers can safely be viewed as untyped, + // because interface calls always include a dynamic check + //dstk = Klass::cast(SystemDictionary::object_klass()); + return false; + } + if (srck->is_interface()) { + // interface arguments must be viewed as untyped + //srck = Klass::cast(SystemDictionary::object_klass()); + return true; + } + return !srck->is_subclass_of(dstk->as_klassOop()); +} + +static oop object_java_mirror() { + return Klass::cast(SystemDictionary::object_klass())->java_mirror(); +} + +bool MethodHandles::same_basic_type_for_arguments(BasicType src, + BasicType dst, + bool for_return) { + // return values can always be forgotten: + if (for_return && dst == T_VOID) return true; + assert(src != T_VOID && dst != T_VOID, "should not be here"); + if (src == dst) return true; + if (type2size[src] != type2size[dst]) return false; + // allow reinterpretation casts for integral widening + if (is_subword_type(src)) { // subwords can fit in int or other subwords + if (dst == T_INT) // any subword fits in an int + return true; + if (src == T_BOOLEAN) // boolean fits in any subword + return is_subword_type(dst); + if (src == T_BYTE && dst == T_SHORT) + return true; // remaining case: byte fits in short + } + // allow float/fixed reinterpretation casts + if (src == T_FLOAT) return dst == T_INT; + if (src == T_INT) return dst == T_FLOAT; + if (src 
== T_DOUBLE) return dst == T_LONG; + if (src == T_LONG) return dst == T_DOUBLE; + return false; +} + +const char* MethodHandles::check_method_receiver(methodOop m, + klassOop passed_recv_type) { + assert(!m->is_static(), "caller resp."); + if (passed_recv_type == NULL) + return "receiver type is primitive"; + if (class_cast_needed(passed_recv_type, m->method_holder())) { + Klass* formal = Klass::cast(m->method_holder()); + return SharedRuntime::generate_class_cast_message("receiver type", + formal->external_name()); + } + return NULL; // checks passed +} + +// Verify that m's signature can be called type-safely by a method handle +// of the given method type 'mtype'. +// It takes a TRAPS argument because it must perform symbol lookups. +void MethodHandles::verify_method_signature(methodHandle m, + Handle mtype, + int first_ptype_pos, + KlassHandle insert_ptype, + TRAPS) { + objArrayHandle ptypes(THREAD, java_dyn_MethodType::ptypes(mtype())); + int pnum = first_ptype_pos; + int pmax = ptypes->length(); + int mnum = 0; // method argument + const char* err = NULL; + for (SignatureStream ss(m->signature()); !ss.is_done(); ss.next()) { + oop ptype_oop = NULL; + if (ss.at_return_type()) { + if (pnum != pmax) + { err = "too many arguments"; break; } + ptype_oop = java_dyn_MethodType::rtype(mtype()); + } else { + if (pnum >= pmax) + { err = "not enough arguments"; break; } + if (pnum >= 0) + ptype_oop = ptypes->obj_at(pnum); + else if (insert_ptype.is_null()) + ptype_oop = NULL; + else + ptype_oop = insert_ptype->java_mirror(); + pnum += 1; + mnum += 1; + } + klassOop mklass = NULL; + BasicType mtype = ss.type(); + if (mtype == T_ARRAY) mtype = T_OBJECT; // fold all refs to T_OBJECT + if (mtype == T_OBJECT) { + if (ptype_oop == NULL) { + // null matches any reference + continue; + } + // If we fail to resolve types at this point, we will throw an error. 
+ symbolOop name_oop = ss.as_symbol(CHECK); + symbolHandle name(THREAD, name_oop); + instanceKlass* mk = instanceKlass::cast(m->method_holder()); + Handle loader(THREAD, mk->class_loader()); + Handle domain(THREAD, mk->protection_domain()); + mklass = SystemDictionary::resolve_or_fail(name, loader, domain, + true, CHECK); + } + if (ptype_oop == NULL) { + // null does not match any non-reference; use Object to report the error + ptype_oop = object_java_mirror(); + } + klassOop pklass = NULL; + BasicType ptype = java_lang_Class::as_BasicType(ptype_oop, &pklass); + if (!ss.at_return_type()) { + err = check_argument_type_change(ptype, pklass, mtype, mklass, mnum); + } else { + err = check_return_type_change(mtype, mklass, ptype, pklass); // note reversal! + } + if (err != NULL) break; + } + + if (err != NULL) { + THROW_MSG(vmSymbols::java_lang_InternalError(), err); + } +} + +// Main routine for verifying the MethodHandle.type of a proposed +// direct or bound-direct method handle. +void MethodHandles::verify_method_type(methodHandle m, + Handle mtype, + bool has_bound_recv, + KlassHandle bound_recv_type, + TRAPS) { + bool m_needs_receiver = !m->is_static(); + + const char* err = NULL; + + int first_ptype_pos = m_needs_receiver ? 1 : 0; + if (has_bound_recv && err == NULL) { + first_ptype_pos -= 1; + if (m_needs_receiver && bound_recv_type.is_null()) + { err = "bound receiver is not an object"; goto die; } + } + + if (m_needs_receiver && err == NULL) { + objArrayOop ptypes = java_dyn_MethodType::ptypes(mtype()); + if (ptypes->length() < first_ptype_pos) + { err = "receiver argument is missing"; goto die; } + if (first_ptype_pos == -1) + err = check_method_receiver(m(), bound_recv_type->as_klassOop()); + else + err = check_method_receiver(m(), java_lang_Class::as_klassOop(ptypes->obj_at(0))); + if (err != NULL) goto die; + } + + // Check the other arguments for mistypes. 
+ verify_method_signature(m, mtype, first_ptype_pos, bound_recv_type, CHECK); + return; + + die: + THROW_MSG(vmSymbols::java_lang_InternalError(), err); +} + +void MethodHandles::verify_vmslots(Handle mh, TRAPS) { + // Verify vmslots. + int check_slots = argument_slot_count(java_dyn_MethodHandle::type(mh())); + if (java_dyn_MethodHandle::vmslots(mh()) != check_slots) { + THROW_MSG(vmSymbols::java_lang_InternalError(), "bad vmslots in BMH"); + } +} + +void MethodHandles::verify_vmargslot(Handle mh, int argnum, int argslot, TRAPS) { + // Verify that argslot points at the given argnum. + int check_slot = argument_slot(java_dyn_MethodHandle::type(mh()), argnum); + if (argslot != check_slot || argslot < 0) { + const char* fmt = "for argnum of %d, vmargslot is %d, should be %d"; + size_t msglen = strlen(fmt) + 3*11 + 1; + char* msg = NEW_RESOURCE_ARRAY(char, msglen); + jio_snprintf(msg, msglen, fmt, argnum, argslot, check_slot); + THROW_MSG(vmSymbols::java_lang_InternalError(), msg); + } +} + +// Verify the correspondence between two method types. +// Apart from the advertised changes, caller method type X must +// be able to invoke the callee method Y type with no violations +// of type integrity. +// Return NULL if all is well, else a short error message. 
+const char* MethodHandles::check_method_type_change(oop src_mtype, int src_beg, int src_end, + int insert_argnum, oop insert_type, + int change_argnum, oop change_type, + int delete_argnum, + oop dst_mtype, int dst_beg, int dst_end) { + objArrayOop src_ptypes = java_dyn_MethodType::ptypes(src_mtype); + objArrayOop dst_ptypes = java_dyn_MethodType::ptypes(dst_mtype); + + int src_max = src_ptypes->length(); + int dst_max = dst_ptypes->length(); + + if (src_end == -1) src_end = src_max; + if (dst_end == -1) dst_end = dst_max; + + assert(0 <= src_beg && src_beg <= src_end && src_end <= src_max, "oob"); + assert(0 <= dst_beg && dst_beg <= dst_end && dst_end <= dst_max, "oob"); + + // pending actions; set to -1 when done: + int ins_idx = insert_argnum, chg_idx = change_argnum, del_idx = delete_argnum; + + const char* err = NULL; + + // Walk along each array of parameter types, including a virtual + // NULL end marker at the end of each. + for (int src_idx = src_beg, dst_idx = dst_beg; + (src_idx <= src_end && dst_idx <= dst_end); + src_idx++, dst_idx++) { + oop src_type = (src_idx == src_end) ? oop(NULL) : src_ptypes->obj_at(src_idx); + oop dst_type = (dst_idx == dst_end) ? oop(NULL) : dst_ptypes->obj_at(dst_idx); + bool fix_null_src_type = false; + + // Perform requested edits. 
+ if (ins_idx == src_idx) { + // note that the inserted guy is never affected by a change or deletion + ins_idx = -1; + src_type = insert_type; + fix_null_src_type = true; + --src_idx; // back up to process src type on next loop + src_idx = src_end; + } else { + // note that the changed guy can be immediately deleted + if (chg_idx == src_idx) { + chg_idx = -1; + assert(src_idx < src_end, "oob"); + src_type = change_type; + fix_null_src_type = true; + } + if (del_idx == src_idx) { + del_idx = -1; + assert(src_idx < src_end, "oob"); + --dst_idx; + continue; // rerun loop after skipping this position + } + } + + if (src_type == NULL && fix_null_src_type) + // explicit null in this case matches any dest reference + src_type = (java_lang_Class::is_primitive(dst_type) ? object_java_mirror() : dst_type); + + // Compare the two argument types. + if (src_type != dst_type) { + if (src_type == NULL) return "not enough arguments"; + if (dst_type == NULL) return "too many arguments"; + err = check_argument_type_change(src_type, dst_type, dst_idx); + if (err != NULL) return err; + } + } + + // Now compare return types also. + oop src_rtype = java_dyn_MethodType::rtype(src_mtype); + oop dst_rtype = java_dyn_MethodType::rtype(dst_mtype); + if (src_rtype != dst_rtype) { + err = check_return_type_change(dst_rtype, src_rtype); // note reversal! + if (err != NULL) return err; + } + + assert(err == NULL, ""); + return NULL; // all is well +} + + +const char* MethodHandles::check_argument_type_change(BasicType src_type, + klassOop src_klass, + BasicType dst_type, + klassOop dst_klass, + int argnum) { + const char* err = NULL; + + // just in case: + if (src_type == T_ARRAY) src_type = T_OBJECT; + if (dst_type == T_ARRAY) dst_type = T_OBJECT; + + // Produce some nice messages if VerifyMethodHandles is turned on: + if (!same_basic_type_for_arguments(src_type, dst_type, (argnum < 0))) { + if (src_type == T_OBJECT) { + err = ((argnum >= 0) + ? 
"type mismatch: passing a %s for method argument #%d, which expects primitive %s" + : "type mismatch: returning a %s, but caller expects primitive %s"); + } else if (dst_type == T_OBJECT) { + err = ((argnum < 0) + ? "type mismatch: passing a primitive %s for method argument #%d, which expects %s" + : "type mismatch: returning a primitive %s, but caller expects %s"); + } else { + err = ((argnum < 0) + ? "type mismatch: passing a %s for method argument #%d, which expects %s" + : "type mismatch: returning a %s, but caller expects %s"); + } + } else if (src_type == T_OBJECT && class_cast_needed(src_klass, dst_klass)) { + if (!class_cast_needed(dst_klass, src_klass)) { + err = ((argnum < 0) + ? "cast required: passing a %s for method argument #%d, which expects %s" + : "cast required: returning a %s, but caller expects %s"); + } else { + err = ((argnum < 0) + ? "reference mismatch: passing a %s for method argument #%d, which expects %s" + : "reference mismatch: returning a %s, but caller expects %s"); + } + } else { + // passed the obstacle course + return NULL; + } + + // format, format, format + const char* src_name = type2name(src_type); + const char* dst_name = type2name(dst_type); + if (src_type == T_OBJECT) src_name = Klass::cast(src_klass)->external_name(); + if (dst_type == T_OBJECT) dst_name = Klass::cast(dst_klass)->external_name(); + if (src_name == NULL) src_name = "unknown type"; + if (dst_name == NULL) dst_name = "unknown type"; + + size_t msglen = strlen(err) + strlen(src_name) + strlen(dst_name) + (argnum < 10 ? 
1 : 11); + char* msg = NEW_RESOURCE_ARRAY(char, msglen + 1); + if (argnum >= 0) { + assert(strstr(err, "%d") != NULL, ""); + jio_snprintf(msg, msglen, err, src_name, argnum, dst_name); + } else { + assert(strstr(err, "%d") == NULL, ""); + jio_snprintf(msg, msglen, err, src_name, dst_name); + } + return msg; +} + +// Compute the depth within the stack of the given argument, i.e., +// the combined size of arguments to the right of the given argument. +// For the last argument (ptypes.length-1) this will be zero. +// For the first argument (0) this will be the size of all +// arguments but that one. For the special number -1, this +// will be the size of all arguments, including the first. +// If the argument is neither -1 nor a valid argument index, +// then return a negative number. Otherwise, the result +// is in the range [0..vmslots] inclusive. +int MethodHandles::argument_slot(oop method_type, int arg) { + objArrayOop ptypes = java_dyn_MethodType::ptypes(method_type); + int argslot = 0; + int len = ptypes->length(); + if (arg < -1 || arg >= len) return -99; + for (int i = len-1; i > arg; i--) { + BasicType bt = java_lang_Class::as_BasicType(ptypes->obj_at(i)); + argslot += type2size[bt]; + } + assert(argument_slot_to_argnum(method_type, argslot) == arg, "inverse works"); + return argslot; +} + +// Given a slot number, return the argument number. 
+int MethodHandles::argument_slot_to_argnum(oop method_type, int query_argslot) { + objArrayOop ptypes = java_dyn_MethodType::ptypes(method_type); + int argslot = 0; + int len = ptypes->length(); + for (int i = len-1; i >= 0; i--) { + if (query_argslot == argslot) return i; + BasicType bt = java_lang_Class::as_BasicType(ptypes->obj_at(i)); + argslot += type2size[bt]; + } + // return pseudo-arg deepest in stack: + if (query_argslot == argslot) return -1; + return -99; // oob slot, or splitting a double-slot arg +} + +methodHandle MethodHandles::dispatch_decoded_method(methodHandle m, + KlassHandle receiver_limit, + int decode_flags, + KlassHandle receiver_klass, + TRAPS) { + assert((decode_flags & ~_DMF_DIRECT_MASK) == 0, "must be direct method reference"); + assert((decode_flags & _dmf_has_receiver) != 0, "must have a receiver or first reference argument"); + + if (!m->is_static() && + (receiver_klass.is_null() || !receiver_klass->is_subtype_of(m->method_holder()))) + // given type does not match class of method, or receiver is null! + // caller should have checked this, but let's be extra careful... + return methodHandle(); + + if (receiver_limit.not_null() && + (receiver_klass.not_null() && !receiver_klass->is_subtype_of(receiver_limit()))) + // given type is not limited to the receiver type + // note that a null receiver can match any reference value, for a static method + return methodHandle(); + + if (!(decode_flags & MethodHandles::_dmf_does_dispatch)) { + // pre-dispatched or static method (null receiver is OK for static) + return m; + + } else if (receiver_klass.is_null()) { + // null receiver value; cannot dispatch + return methodHandle(); + + } else if (!(decode_flags & MethodHandles::_dmf_from_interface)) { + // perform virtual dispatch + int vtable_index = m->vtable_index(); + guarantee(vtable_index >= 0, "valid vtable index"); + + // receiver_klass might be an arrayKlassOop but all vtables start at + // the same place. 
The cast is to avoid virtual call and assertion. + // See also LinkResolver::runtime_resolve_virtual_method. + instanceKlass* inst = (instanceKlass*)Klass::cast(receiver_klass()); + DEBUG_ONLY(inst->verify_vtable_index(vtable_index)); + methodOop m_oop = inst->method_at_vtable(vtable_index); + return methodHandle(THREAD, m_oop); + + } else { + // perform interface dispatch + int itable_index = klassItable::compute_itable_index(m()); + guarantee(itable_index >= 0, "valid itable index"); + instanceKlass* inst = instanceKlass::cast(receiver_klass()); + methodOop m_oop = inst->method_at_itable(m->method_holder(), itable_index, THREAD); + return methodHandle(THREAD, m_oop); + } +} + +void MethodHandles::verify_DirectMethodHandle(Handle mh, methodHandle m, TRAPS) { + // Verify type. + Handle mtype(THREAD, java_dyn_MethodHandle::type(mh())); + verify_method_type(m, mtype, false, KlassHandle(), CHECK); + + // Verify vmslots. + if (java_dyn_MethodHandle::vmslots(mh()) != m->size_of_parameters()) { + THROW_MSG(vmSymbols::java_lang_InternalError(), "bad vmslots in DMH"); + } +} + +void MethodHandles::init_DirectMethodHandle(Handle mh, methodHandle m, bool do_dispatch, TRAPS) { + // Check arguments. + if (mh.is_null() || m.is_null() || + (!do_dispatch && m->is_abstract())) { + THROW(vmSymbols::java_lang_InternalError()); + } + + java_dyn_MethodHandle::init_vmslots(mh()); + + if (VerifyMethodHandles) { + // The privileged code which invokes this routine should not make + // a mistake about types, but it's better to verify. + verify_DirectMethodHandle(mh, m, CHECK); + } + + // Finally, after safety checks are done, link to the target method. + // We will follow the same path as the latter part of + // InterpreterRuntime::resolve_invoke(), which first finds the method + // and then decides how to populate the constant pool cache entry + // that links the interpreter calls to the method. We need the same + // bits, and will use the same calling sequence code. 
+ + int vmindex = methodOopDesc::garbage_vtable_index; + oop vmtarget = NULL; + + instanceKlass::cast(m->method_holder())->link_class(CHECK); + + MethodHandleEntry* me = NULL; + if (do_dispatch && Klass::cast(m->method_holder())->is_interface()) { + // We are simulating an invokeinterface instruction. + // (We might also be simulating an invokevirtual on a miranda method, + // but it is safe to treat it as an invokeinterface.) + assert(!m->can_be_statically_bound(), "no final methods on interfaces"); + vmindex = klassItable::compute_itable_index(m()); + assert(vmindex >= 0, "(>=0) == do_dispatch"); + // Set up same bits as ConstantPoolCacheEntry::set_interface_call(). + vmtarget = m->method_holder(); // the interface + me = MethodHandles::entry(MethodHandles::_invokeinterface_mh); + } else if (!do_dispatch || m->can_be_statically_bound()) { + // We are simulating an invokestatic or invokespecial instruction. + // Set up the method pointer, just like ConstantPoolCacheEntry::set_method(). + vmtarget = m(); + // this does not help dispatch, but it will make it possible to parse this MH: + vmindex = methodOopDesc::nonvirtual_vtable_index; + assert(vmindex < 0, "(>=0) == do_dispatch"); + if (!m->is_static()) { + me = MethodHandles::entry(MethodHandles::_invokespecial_mh); + } else { + me = MethodHandles::entry(MethodHandles::_invokestatic_mh); + // Part of the semantics of a static call is an initialization barrier. + // For a DMH, it is done now, when the handle is created. + Klass* k = Klass::cast(m->method_holder()); + if (k->should_be_initialized()) { + k->initialize(CHECK); + } + } + } else { + // We are simulating an invokevirtual instruction. + // Set up the vtable index, just like ConstantPoolCacheEntry::set_method(). + // The key logic is LinkResolver::runtime_resolve_virtual_method. 
+ vmindex = m->vtable_index(); + vmtarget = m->method_holder(); + me = MethodHandles::entry(MethodHandles::_invokevirtual_mh); + } + + if (me == NULL) { THROW(vmSymbols::java_lang_InternalError()); } + + sun_dyn_DirectMethodHandle::set_vmtarget(mh(), vmtarget); + sun_dyn_DirectMethodHandle::set_vmindex(mh(), vmindex); + DEBUG_ONLY(int flags; klassOop rlimit); + assert(MethodHandles::decode_method(mh(), rlimit, flags) == m(), + "properly stored for later decoding"); + DEBUG_ONLY(bool actual_do_dispatch = ((flags & _dmf_does_dispatch) != 0)); + assert(!(actual_do_dispatch && !do_dispatch), + "do not perform dispatch if !do_dispatch specified"); + assert(actual_do_dispatch == (vmindex >= 0), "proper later decoding of do_dispatch"); + assert(decode_MethodHandle_stack_pushes(mh()) == 0, "DMH does not move stack"); + + // Done! + java_dyn_MethodHandle::set_vmentry(mh(), me); +} + +void MethodHandles::verify_BoundMethodHandle_with_receiver(Handle mh, + methodHandle m, + TRAPS) { + // Verify type. + oop receiver = sun_dyn_BoundMethodHandle::argument(mh()); + Handle mtype(THREAD, java_dyn_MethodHandle::type(mh())); + KlassHandle bound_recv_type; + if (receiver != NULL) bound_recv_type = KlassHandle(THREAD, receiver->klass()); + verify_method_type(m, mtype, true, bound_recv_type, CHECK); + + int receiver_pos = m->size_of_parameters() - 1; + + // Verify MH.vmargslot, which should point at the bound receiver. + verify_vmargslot(mh, -1, sun_dyn_BoundMethodHandle::vmargslot(mh()), CHECK); + //verify_vmslots(mh, CHECK); + + // Verify vmslots. + if (java_dyn_MethodHandle::vmslots(mh()) != receiver_pos) { + THROW_MSG(vmSymbols::java_lang_InternalError(), "bad vmslots in BMH (receiver)"); + } +} + +// Initialize a BMH with a receiver bound directly to a methodOop. +void MethodHandles::init_BoundMethodHandle_with_receiver(Handle mh, + methodHandle original_m, + KlassHandle receiver_limit, + int decode_flags, + TRAPS) { + // Check arguments. 
+ if (mh.is_null() || original_m.is_null()) { + THROW(vmSymbols::java_lang_InternalError()); + } + + KlassHandle receiver_klass; + { + oop receiver_oop = sun_dyn_BoundMethodHandle::argument(mh()); + if (receiver_oop != NULL) + receiver_klass = KlassHandle(THREAD, receiver_oop->klass()); + } + methodHandle m = dispatch_decoded_method(original_m, + receiver_limit, decode_flags, + receiver_klass, + CHECK); + if (m.is_null()) { THROW(vmSymbols::java_lang_InternalError()); } + if (m->is_abstract()) { THROW(vmSymbols::java_lang_AbstractMethodError()); } + + java_dyn_MethodHandle::init_vmslots(mh()); + + if (VerifyMethodHandles) { + verify_BoundMethodHandle_with_receiver(mh, m, CHECK); + } + + sun_dyn_BoundMethodHandle::set_vmtarget(mh(), m()); + + DEBUG_ONLY(int junk; klassOop junk2); + assert(MethodHandles::decode_method(mh(), junk2, junk) == m(), "properly stored for later decoding"); + assert(decode_MethodHandle_stack_pushes(mh()) == 1, "BMH pushes one stack slot"); + + // Done! + java_dyn_MethodHandle::set_vmentry(mh(), MethodHandles::entry(MethodHandles::_bound_ref_direct_mh)); +} + +void MethodHandles::verify_BoundMethodHandle(Handle mh, Handle target, int argnum, + bool direct_to_method, TRAPS) { + Handle ptype_handle(THREAD, + java_dyn_MethodType::ptype(java_dyn_MethodHandle::type(target()), argnum)); + KlassHandle ptype_klass; + BasicType ptype = java_lang_Class::as_BasicType(ptype_handle(), &ptype_klass); + int slots_pushed = type2size[ptype]; + + oop argument = sun_dyn_BoundMethodHandle::argument(mh()); + + const char* err = NULL; + + switch (ptype) { + case T_OBJECT: + if (argument != NULL) + // we must implicitly convert from the arg type to the outgoing ptype + err = check_argument_type_change(T_OBJECT, argument->klass(), ptype, ptype_klass(), argnum); + break; + + case T_ARRAY: case T_VOID: + assert(false, "array, void do not appear here"); + default: + if (ptype != T_INT && !is_subword_type(ptype)) { + err = "unexpected parameter type"; + break; + } + // 
check subrange of Integer.value, if necessary + if (argument == NULL || argument->klass() != SystemDictionary::int_klass()) { + err = "bound integer argument must be of type java.lang.Integer"; + break; + } + if (ptype != T_INT) { + int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT); + jint value = argument->int_field(value_offset); + int vminfo = adapter_subword_vminfo(ptype); + jint subword = truncate_subword_from_vminfo(value, vminfo); + if (value != subword) { + err = "bound subword value does not fit into the subword type"; + break; + } + } + break; + case T_FLOAT: + case T_DOUBLE: + case T_LONG: + { + // we must implicitly convert from the unboxed arg type to the outgoing ptype + BasicType argbox = java_lang_boxing_object::basic_type(argument); + if (argbox != ptype) { + err = check_argument_type_change(T_OBJECT, (argument == NULL + ? SystemDictionary::object_klass() + : argument->klass()), + ptype, ptype_klass(), argnum); + assert(err != NULL, "this must be an error"); + } + break; + } + } + + if (err == NULL) { + DEBUG_ONLY(int this_pushes = decode_MethodHandle_stack_pushes(mh())); + if (direct_to_method) { + assert(this_pushes == slots_pushed, "BMH pushes one or two stack slots"); + assert(slots_pushed <= MethodHandlePushLimit, ""); + } else { + int prev_pushes = decode_MethodHandle_stack_pushes(target()); + assert(this_pushes == slots_pushed + prev_pushes, "BMH stack motion must be correct"); + // do not blow the stack; use a Java-based adapter if this limit is exceeded + if (slots_pushed + prev_pushes > MethodHandlePushLimit) + err = "too many bound parameters"; + } + } + + if (err == NULL) { + // Verify the rest of the method type. 
+ err = check_method_type_insertion(java_dyn_MethodHandle::type(mh()), + argnum, ptype_handle(), + java_dyn_MethodHandle::type(target())); + } + + if (err != NULL) { + THROW_MSG(vmSymbols::java_lang_InternalError(), err); + } +} + +void MethodHandles::init_BoundMethodHandle(Handle mh, Handle target, int argnum, TRAPS) { + // Check arguments. + if (mh.is_null() || target.is_null() || !java_dyn_MethodHandle::is_instance(target())) { + THROW(vmSymbols::java_lang_InternalError()); + } + + java_dyn_MethodHandle::init_vmslots(mh()); + + if (VerifyMethodHandles) { + int insert_after = argnum - 1; + verify_vmargslot(mh, insert_after, sun_dyn_BoundMethodHandle::vmargslot(mh()), CHECK); + verify_vmslots(mh, CHECK); + } + + // If (a) the target is a direct non-dispatched method handle, + // or (b) the target is a dispatched direct method handle and we + // are binding the receiver, cut out the middle-man. + // Do this by decoding the DMH and using its methodOop directly as vmtarget. + bool direct_to_method = false; + if (OptimizeMethodHandles && + target->klass() == SystemDictionary::DirectMethodHandle_klass() && + (argnum == 0 || sun_dyn_DirectMethodHandle::vmindex(target()) < 0)) { + int decode_flags = 0; klassOop receiver_limit_oop = NULL; + methodHandle m(THREAD, decode_method(target(), receiver_limit_oop, decode_flags)); + if (m.is_null()) { THROW_MSG(vmSymbols::java_lang_InternalError(), "DMH failed to decode"); } + DEBUG_ONLY(int m_vmslots = m->size_of_parameters() - 1); // pos. of 1st arg. + assert(sun_dyn_BoundMethodHandle::vmslots(mh()) == m_vmslots, "type w/ m sig"); + if (argnum == 0 && (decode_flags & _dmf_has_receiver) != 0) { + KlassHandle receiver_limit(THREAD, receiver_limit_oop); + init_BoundMethodHandle_with_receiver(mh, m, + receiver_limit, decode_flags, + CHECK); + return; + } + + // Even if it is not a bound receiver, we still might be able + // to bind another argument and still invoke the methodOop directly. 
+ if (!(decode_flags & _dmf_does_dispatch)) { + direct_to_method = true; + sun_dyn_BoundMethodHandle::set_vmtarget(mh(), m()); + } + } + if (!direct_to_method) + sun_dyn_BoundMethodHandle::set_vmtarget(mh(), target()); + + if (VerifyMethodHandles) { + verify_BoundMethodHandle(mh, target, argnum, direct_to_method, CHECK); + } + + // Next question: Is this a ref, int, or long bound value? + oop ptype_oop = java_dyn_MethodType::ptype(java_dyn_MethodHandle::type(target()), argnum); + BasicType ptype = java_lang_Class::as_BasicType(ptype_oop); + int slots_pushed = type2size[ptype]; + + MethodHandleEntry* me = NULL; + if (ptype == T_OBJECT) { + if (direct_to_method) me = MethodHandles::entry(_bound_ref_direct_mh); + else me = MethodHandles::entry(_bound_ref_mh); + } else if (slots_pushed == 2) { + if (direct_to_method) me = MethodHandles::entry(_bound_long_direct_mh); + else me = MethodHandles::entry(_bound_long_mh); + } else if (slots_pushed == 1) { + if (direct_to_method) me = MethodHandles::entry(_bound_int_direct_mh); + else me = MethodHandles::entry(_bound_int_mh); + } else { + assert(false, ""); + } + + // Done! 
+ java_dyn_MethodHandle::set_vmentry(mh(), me); +} + +static void throw_InternalError_for_bad_conversion(int conversion, const char* err, TRAPS) { + char msg[200]; + jio_snprintf(msg, sizeof(msg), "bad adapter (conversion=0x%08x): %s", conversion, err); + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), msg); +} + +void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) { + jint conversion = sun_dyn_AdapterMethodHandle::conversion(mh()); + int argslot = sun_dyn_AdapterMethodHandle::vmargslot(mh()); + + verify_vmargslot(mh, argnum, argslot, CHECK); + verify_vmslots(mh, CHECK); + + jint conv_op = adapter_conversion_op(conversion); + if (!conv_op_valid(conv_op)) { + throw_InternalError_for_bad_conversion(conversion, "unknown conversion op", THREAD); + return; + } + EntryKind ek = adapter_entry_kind(conv_op); + + int stack_move = adapter_conversion_stack_move(conversion); + BasicType src = adapter_conversion_src_type(conversion); + BasicType dest = adapter_conversion_dest_type(conversion); + int vminfo = adapter_conversion_vminfo(conversion); // should be zero + + Handle argument(THREAD, sun_dyn_AdapterMethodHandle::argument(mh())); + Handle target(THREAD, sun_dyn_AdapterMethodHandle::vmtarget(mh())); + Handle src_mtype(THREAD, java_dyn_MethodHandle::type(mh())); + Handle dst_mtype(THREAD, java_dyn_MethodHandle::type(target())); + + const char* err = NULL; + + if (err == NULL) { + // Check that the correct argument is supplied, but only if it is required. 
+ switch (ek) { + case _adapter_check_cast: // target type of cast + case _adapter_ref_to_prim: // wrapper type from which to unbox + case _adapter_prim_to_ref: // wrapper type to box into + case _adapter_collect_args: // array type to collect into + case _adapter_spread_args: // array type to spread from + if (!java_lang_Class::is_instance(argument()) + || java_lang_Class::is_primitive(argument())) + { err = "adapter requires argument of type java.lang.Class"; break; } + if (ek == _adapter_collect_args || + ek == _adapter_spread_args) { + // Make sure it is a suitable collection type. (Array, for now.) + Klass* ak = Klass::cast(java_lang_Class::as_klassOop(argument())); + if (!ak->oop_is_objArray()) { + { err = "adapter requires argument of type java.lang.Class"; break; } + } + } + break; + case _adapter_flyby: + case _adapter_ricochet: + if (!java_dyn_MethodHandle::is_instance(argument())) + { err = "MethodHandle adapter argument required"; break; } + break; + default: + if (argument.not_null()) + { err = "adapter has spurious argument"; break; } + break; + } + } + + if (err == NULL) { + // Check that the src/dest types are supplied if needed. 
+ switch (ek) { + case _adapter_prim_to_prim: + if (!is_java_primitive(src) || !is_java_primitive(dest) || src == dest) { + err = "adapter requires primitive src/dest conversion subfields"; break; + } + if ( (src == T_FLOAT || src == T_DOUBLE) && !(dest == T_FLOAT || dest == T_DOUBLE) || + !(src == T_FLOAT || src == T_DOUBLE) && (dest == T_FLOAT || dest == T_DOUBLE)) { + err = "adapter cannot convert beween floating and fixed-point"; break; + } + break; + case _adapter_ref_to_prim: + if (src != T_OBJECT || !is_java_primitive(dest) + || argument() != Klass::cast(SystemDictionary::box_klass(dest))->java_mirror()) { + err = "adapter requires primitive dest conversion subfield"; break; + } + break; + case _adapter_prim_to_ref: + if (!is_java_primitive(src) || dest != T_OBJECT + || argument() != Klass::cast(SystemDictionary::box_klass(src))->java_mirror()) { + err = "adapter requires primitive src conversion subfield"; break; + } + break; + case _adapter_swap_args: + case _adapter_rot_args: + { + if (!src || src != dest) { + err = "adapter requires src/dest conversion subfields for swap"; break; + } + int swap_size = type2size[src]; + oop src_mtype = sun_dyn_AdapterMethodHandle::type(target()); + oop dest_mtype = sun_dyn_AdapterMethodHandle::type(mh()); + int slot_limit = sun_dyn_AdapterMethodHandle::vmslots(src_mtype); + int src_slot = argslot; + int dest_slot = vminfo; + bool rotate_up = (src_slot > dest_slot); // upward rotation + int src_arg = argnum; + int dest_arg = argument_slot_to_argnum(dest_mtype, dest_slot); + verify_vmargslot(mh, dest_arg, dest_slot, CHECK); + if (!(dest_slot >= src_slot + swap_size) && + !(src_slot >= dest_slot + swap_size)) { + err = "source, destination slots must be distinct"; + } else if (ek == _adapter_swap_args && !(src_slot > dest_slot)) { + err = "source of swap must be deeper in stack"; + } else if (ek == _adapter_swap_args) { + err = check_argument_type_change(java_dyn_MethodType::ptype(src_mtype, dest_arg), + 
java_dyn_MethodType::ptype(dest_mtype, src_arg), + dest_arg); + } else if (ek == _adapter_rot_args) { + if (rotate_up) { + assert((src_slot > dest_slot) && (src_arg < dest_arg), ""); + // rotate up: [dest_slot..src_slot-ss] --> [dest_slot+ss..src_slot] + // that is: [src_arg+1..dest_arg] --> [src_arg..dest_arg-1] + for (int i = src_arg+1; i <= dest_arg && err == NULL; i++) { + err = check_argument_type_change(java_dyn_MethodType::ptype(src_mtype, i), + java_dyn_MethodType::ptype(dest_mtype, i-1), + i); + } + } else { // rotate down + assert((src_slot < dest_slot) && (src_arg > dest_arg), ""); + // rotate down: [src_slot+ss..dest_slot] --> [src_slot..dest_slot-ss] + // that is: [dest_arg..src_arg-1] --> [dst_arg+1..src_arg] + for (int i = dest_arg; i <= src_arg-1 && err == NULL; i++) { + err = check_argument_type_change(java_dyn_MethodType::ptype(src_mtype, i), + java_dyn_MethodType::ptype(dest_mtype, i+1), + i); + } + } + } + if (err == NULL) + err = check_argument_type_change(java_dyn_MethodType::ptype(src_mtype, src_arg), + java_dyn_MethodType::ptype(dest_mtype, dest_arg), + src_arg); + } + break; + case _adapter_collect_args: + case _adapter_spread_args: + { + BasicType coll_type = (ek == _adapter_collect_args) ? dest : src; + BasicType elem_type = (ek == _adapter_collect_args) ? src : dest; + if (coll_type != T_OBJECT || elem_type != T_OBJECT) { + err = "adapter requires src/dest subfields"; break; + // later: + // - consider making coll be a primitive array + // - consider making coll be a heterogeneous collection + } + } + break; + default: + if (src != 0 || dest != 0) { + err = "adapter has spurious src/dest conversion subfields"; break; + } + break; + } + } + + if (err == NULL) { + // Check the stack_move subfield. + // It must always report the net change in stack size, positive or negative. 
+ int slots_pushed = stack_move / stack_move_unit(); + switch (ek) { + case _adapter_prim_to_prim: + case _adapter_ref_to_prim: + case _adapter_prim_to_ref: + if (slots_pushed != type2size[dest] - type2size[src]) { + err = "wrong stack motion for primitive conversion"; + } + break; + case _adapter_dup_args: + if (slots_pushed <= 0) { + err = "adapter requires conversion subfield slots_pushed > 0"; + } + break; + case _adapter_drop_args: + if (slots_pushed >= 0) { + err = "adapter requires conversion subfield slots_pushed < 0"; + } + break; + case _adapter_collect_args: + if (slots_pushed > 1) { + err = "adapter requires conversion subfield slots_pushed <= 1"; + } + break; + case _adapter_spread_args: + if (slots_pushed < -1) { + err = "adapter requires conversion subfield slots_pushed >= -1"; + } + break; + default: + if (stack_move != 0) { + err = "adapter has spurious stack_move conversion subfield"; + } + break; + } + if (err == NULL && stack_move != slots_pushed * stack_move_unit()) { + err = "stack_move conversion subfield must be multiple of stack_move_unit"; + } + } + + if (err == NULL) { + // Make sure this adapter does not push too deeply. 
+ int slots_pushed = stack_move / stack_move_unit(); + int this_vmslots = java_dyn_MethodHandle::vmslots(mh()); + int prev_vmslots = java_dyn_MethodHandle::vmslots(target()); + if (slots_pushed != (this_vmslots - prev_vmslots)) { + err = "stack_move inconsistent with previous and current MethodType vmslots"; + } else if (slots_pushed > 0) { + // verify stack_move against MethodHandlePushLimit + int prev_pushes = decode_MethodHandle_stack_pushes(target()); + // do not blow the stack; use a Java-based adapter if this limit is exceeded + if (slots_pushed + prev_pushes > MethodHandlePushLimit) { + err = "adapter pushes too many parameters"; + } + } + + // While we're at it, check that the stack motion decoder works: + DEBUG_ONLY(int prev_pushes = decode_MethodHandle_stack_pushes(target())); + DEBUG_ONLY(int this_pushes = decode_MethodHandle_stack_pushes(mh())); + assert(this_pushes == slots_pushed + prev_pushes, "AMH stack motion must be correct"); + } + + if (err == NULL && vminfo != 0) { + switch (ek) { + case _adapter_swap_args: + case _adapter_rot_args: + break; // OK + default: + err = "vminfo subfield is reserved to the JVM"; + } + } + + // Do additional ad hoc checks. 
+ if (err == NULL) { + switch (ek) { + case _adapter_retype_only: + err = check_method_type_passthrough(src_mtype(), dst_mtype()); + break; + + case _adapter_check_cast: + { + // The actual value being checked must be a reference: + err = check_argument_type_change(java_dyn_MethodType::ptype(src_mtype(), argnum), + object_java_mirror(), argnum); + if (err != NULL) break; + + // The output of the cast must fit with the destination argument: + Handle cast_class = argument; + err = check_method_type_conversion(src_mtype(), + argnum, cast_class(), + dst_mtype()); + } + break; + + // %%% TO DO: continue in remaining cases to verify src/dst_mtype if VerifyMethodHandles + } + } + + if (err != NULL) { + throw_InternalError_for_bad_conversion(conversion, err, THREAD); + return; + } + +} + +void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnum, TRAPS) { + oop argument = sun_dyn_AdapterMethodHandle::argument(mh()); + int argslot = sun_dyn_AdapterMethodHandle::vmargslot(mh()); + jint conversion = sun_dyn_AdapterMethodHandle::conversion(mh()); + jint conv_op = adapter_conversion_op(conversion); + + // adjust the adapter code to the internal EntryKind enumeration: + EntryKind ek_orig = adapter_entry_kind(conv_op); + EntryKind ek_opt = ek_orig; // may be optimized + + // Finalize the vmtarget field (Java initialized it to null). 
+ if (!java_dyn_MethodHandle::is_instance(target())) { + throw_InternalError_for_bad_conversion(conversion, "bad target", THREAD); + return; + } + sun_dyn_AdapterMethodHandle::set_vmtarget(mh(), target()); + + if (VerifyMethodHandles) { + verify_AdapterMethodHandle(mh, argnum, CHECK); + } + + int stack_move = adapter_conversion_stack_move(conversion); + BasicType src = adapter_conversion_src_type(conversion); + BasicType dest = adapter_conversion_dest_type(conversion); + int vminfo = adapter_conversion_vminfo(conversion); // should be zero + + const char* err = NULL; + + // Now it's time to finish the case analysis and pick a MethodHandleEntry. + switch (ek_orig) { + case _adapter_retype_only: + case _adapter_check_cast: + case _adapter_dup_args: + case _adapter_drop_args: + // these work fine via general case code + break; + + case _adapter_prim_to_prim: + { + // Non-subword cases are {int,float,long,double} -> {int,float,long,double}. + // And, the {float,double} -> {int,long} cases must be handled by Java. + switch (type2size[src] *4+ type2size[dest]) { + case 1 *4+ 1: + assert(src == T_INT || is_subword_type(src), "source is not float"); + // Subword-related cases are int -> {boolean,byte,char,short}. 
+ ek_opt = _adapter_opt_i2i; + vminfo = adapter_subword_vminfo(dest); + break; + case 2 *4+ 1: + if (src == T_LONG && (dest == T_INT || is_subword_type(dest))) { + ek_opt = _adapter_opt_l2i; + vminfo = adapter_subword_vminfo(dest); + } else if (src == T_DOUBLE && dest == T_FLOAT) { + ek_opt = _adapter_opt_d2f; + } else { + assert(false, ""); + } + break; + case 1 *4+ 2: + if (src == T_INT && dest == T_LONG) { + ek_opt = _adapter_opt_i2l; + } else if (src == T_FLOAT && dest == T_DOUBLE) { + ek_opt = _adapter_opt_f2d; + } else { + assert(false, ""); + } + break; + default: + assert(false, ""); + break; + } + } + break; + + case _adapter_ref_to_prim: + { + switch (type2size[dest]) { + case 1: + ek_opt = _adapter_opt_unboxi; + vminfo = adapter_subword_vminfo(dest); + break; + case 2: + ek_opt = _adapter_opt_unboxl; + break; + default: + assert(false, ""); + break; + } + } + break; + + case _adapter_prim_to_ref: + goto throw_not_impl; // allocates, hence could block + + case _adapter_swap_args: + case _adapter_rot_args: + { + int swap_slots = type2size[src]; + oop mtype = sun_dyn_AdapterMethodHandle::type(mh()); + int slot_limit = sun_dyn_AdapterMethodHandle::vmslots(mtype); + int src_slot = argslot; + int dest_slot = vminfo; + int rotate = (ek_orig == _adapter_swap_args) ? 0 : (src_slot > dest_slot) ? 1 : -1; + switch (swap_slots) { + case 1: + ek_opt = (!rotate ? _adapter_opt_swap_1 : + rotate > 0 ? _adapter_opt_rot_1_up : _adapter_opt_rot_1_down); + break; + case 2: + ek_opt = (!rotate ? _adapter_opt_swap_2 : + rotate > 0 ? 
_adapter_opt_rot_2_up : _adapter_opt_rot_2_down); + break; + default: + assert(false, ""); + break; + } + } + break; + + case _adapter_collect_args: + goto throw_not_impl; // allocates, hence could block + + case _adapter_spread_args: + { + // vminfo will be the required length of the array + int slots_pushed = stack_move / stack_move_unit(); + int array_size = slots_pushed + 1; + assert(array_size >= 0, ""); + vminfo = array_size; + switch (array_size) { + case 0: ek_opt = _adapter_opt_spread_0; break; + case 1: ek_opt = _adapter_opt_spread_1; break; + default: ek_opt = _adapter_opt_spread_more; break; + } + if ((vminfo & CONV_VMINFO_MASK) != vminfo) + goto throw_not_impl; // overflow + } + break; + + case _adapter_flyby: + case _adapter_ricochet: + goto throw_not_impl; // runs Java code, hence could block + + default: + // should have failed much earlier; must be a missing case here + assert(false, "incomplete switch"); + // and fall through: + + throw_not_impl: + // FIXME: these adapters are NYI + err = "adapter not yet implemented in the JVM"; + break; + } + + if (err != NULL) { + throw_InternalError_for_bad_conversion(conversion, err, THREAD); + return; + } + + // Rebuild the conversion value; maybe parts of it were changed. + jint new_conversion = adapter_conversion(conv_op, src, dest, stack_move, vminfo); + + // Finalize the conversion field. (Note that it is final to Java code.) + sun_dyn_AdapterMethodHandle::set_conversion(mh(), new_conversion); + + // Done! + java_dyn_MethodHandle::set_vmentry(mh(), entry(ek_opt)); + + // There should be enough memory barriers on exit from native methods + // to ensure that the MH is fully initialized to all threads before + // Java code can publish it in global data structures. +} + +// +// Here are the native methods on sun.dyn.MethodHandleImpl. +// They are the private interface between this JVM and the HotSpot-specific +// Java code that implements JSR 292 method handles. 
+// +// Note: We use a JVM_ENTRY macro to define each of these, for this is the way +// that intrinsic (non-JNI) native methods are defined in HotSpot. +// + +// direct method handles for invokestatic or invokespecial +// void init(DirectMethodHandle self, MemberName ref, boolean doDispatch, Class caller); +JVM_ENTRY(void, MHI_init_DMH(JNIEnv *env, jobject igcls, jobject mh_jh, + jobject target_jh, jboolean do_dispatch, jobject caller_jh)) { + ResourceMark rm; // for error messages + + // This is the guy we are initializing: + if (mh_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); } + Handle mh(THREAD, JNIHandles::resolve_non_null(mh_jh)); + + // Early returns out of this method leave the DMH in an unfinished state. + assert(java_dyn_MethodHandle::vmentry(mh()) == NULL, "must be safely null"); + + // which method are we really talking about? + if (target_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); } + oop target_oop = JNIHandles::resolve_non_null(target_jh); + if (sun_dyn_MemberName::is_instance(target_oop) && + sun_dyn_MemberName::vmindex(target_oop) == VM_INDEX_UNINITIALIZED) { + Handle mname(THREAD, target_oop); + MethodHandles::resolve_MemberName(mname, CHECK); + target_oop = mname(); // in case of GC + } + + int decode_flags = 0; klassOop receiver_limit = NULL; + methodHandle m(THREAD, + MethodHandles::decode_method(target_oop, + receiver_limit, decode_flags)); + if (m.is_null()) { THROW_MSG(vmSymbols::java_lang_InternalError(), "no such method"); } + + // The trusted Java code that calls this method should already have performed + // access checks on behalf of the given caller. But, we can verify this. + if (VerifyMethodHandles && caller_jh != NULL) { + KlassHandle caller(THREAD, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(caller_jh))); + // If this were a bytecode, the first access check would be against + // the "reference class" mentioned in the CONSTANT_Methodref. 
+ // For that class, we use the defining class of m, + // or a more specific receiver limit if available. + klassOop reference_klass = m->method_holder(); // OK approximation + if (receiver_limit != NULL && receiver_limit != reference_klass) { + if (!Klass::cast(receiver_limit)->is_subtype_of(reference_klass)) + THROW_MSG(vmSymbols::java_lang_InternalError(), "receiver limit out of bounds"); // Java code bug + reference_klass = receiver_limit; + } + // Emulate LinkResolver::check_klass_accessability. + if (!Reflection::verify_class_access(caller->as_klassOop(), + reference_klass, + true)) { + THROW_MSG(vmSymbols::java_lang_InternalError(), Klass::cast(m->method_holder())->external_name()); + } + // If there were a bytecode, the next step would be to lookup the method + // in the reference class, then then check the method's access bits. + // Emulate LinkResolver::check_method_accessability. + klassOop resolved_klass = m->method_holder(); + if (!Reflection::verify_field_access(caller->as_klassOop(), + resolved_klass, reference_klass, + m->access_flags(), + true)) { + // %%% following cutout belongs in Reflection::verify_field_access? + bool same_pm = Reflection::is_same_package_member(caller->as_klassOop(), + reference_klass, THREAD); + if (!same_pm) { + THROW_MSG(vmSymbols::java_lang_InternalError(), m->name_and_sig_as_C_string()); + } + } + } + + MethodHandles::init_DirectMethodHandle(mh, m, (do_dispatch != JNI_FALSE), CHECK); +} +JVM_END + +// bound method handles +JVM_ENTRY(void, MHI_init_BMH(JNIEnv *env, jobject igcls, jobject mh_jh, + jobject target_jh, int argnum)) { + ResourceMark rm; // for error messages + + // This is the guy we are initializing: + if (mh_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); } + Handle mh(THREAD, JNIHandles::resolve_non_null(mh_jh)); + + // Early returns out of this method leave the BMH in an unfinished state. 
+ assert(java_dyn_MethodHandle::vmentry(mh()) == NULL, "must be safely null"); + + if (target_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); } + Handle target(THREAD, JNIHandles::resolve_non_null(target_jh)); + + if (!java_dyn_MethodHandle::is_instance(target())) { + // Target object is a reflective method. (%%% Do we need this alternate path?) + Untested("init_BMH of non-MH"); + if (argnum != 0) { THROW(vmSymbols::java_lang_InternalError()); } + int decode_flags = 0; klassOop receiver_limit_oop = NULL; + methodHandle m(THREAD, + MethodHandles::decode_method(target(), + receiver_limit_oop, + decode_flags)); + KlassHandle receiver_limit(THREAD, receiver_limit_oop); + MethodHandles::init_BoundMethodHandle_with_receiver(mh, m, + receiver_limit, + decode_flags, + CHECK); + return; + } + + // Build a BMH on top of a DMH or another BMH: + MethodHandles::init_BoundMethodHandle(mh, target, argnum, CHECK); +} +JVM_END + +// adapter method handles +JVM_ENTRY(void, MHI_init_AMH(JNIEnv *env, jobject igcls, jobject mh_jh, + jobject target_jh, int argnum)) { + // This is the guy we are initializing: + if (mh_jh == NULL || target_jh == NULL) { + THROW(vmSymbols::java_lang_InternalError()); + } + Handle mh(THREAD, JNIHandles::resolve_non_null(mh_jh)); + Handle target(THREAD, JNIHandles::resolve_non_null(target_jh)); + + // Early returns out of this method leave the AMH in an unfinished state. + assert(java_dyn_MethodHandle::vmentry(mh()) == NULL, "must be safely null"); + + MethodHandles::init_AdapterMethodHandle(mh, target, argnum, CHECK); +} +JVM_END + +// method type forms +JVM_ENTRY(void, MHI_init_MT(JNIEnv *env, jobject igcls, jobject erased_jh)) { + if (erased_jh == NULL) return; + if (TraceMethodHandles) { + tty->print("creating MethodType form "); + if (WizardMode || Verbose) { // Warning: this calls Java code on the MH! 
+ // call Object.toString() + symbolOop name = vmSymbols::toString_name(), sig = vmSymbols::void_string_signature(); + JavaCallArguments args(Handle(THREAD, JNIHandles::resolve_non_null(erased_jh))); + JavaValue result(T_OBJECT); + JavaCalls::call_virtual(&result, SystemDictionary::object_klass(), name, sig, + &args, CHECK); + Handle str(THREAD, (oop)result.get_jobject()); + java_lang_String::print(str, tty); + } + tty->cr(); + } +} +JVM_END + +// debugging and reflection +JVM_ENTRY(jobject, MHI_getTarget(JNIEnv *env, jobject igcls, jobject mh_jh, jint format)) { + Handle mh(THREAD, JNIHandles::resolve(mh_jh)); + if (!java_dyn_MethodHandle::is_instance(mh())) { + THROW_NULL(vmSymbols::java_lang_IllegalArgumentException()); + } + oop target = MethodHandles::encode_target(mh, format, CHECK_NULL); + return JNIHandles::make_local(THREAD, target); +} +JVM_END + +JVM_ENTRY(jint, MHI_getConstant(JNIEnv *env, jobject igcls, jint which)) { + switch (which) { + case MethodHandles::GC_JVM_PUSH_LIMIT: + guarantee(MethodHandlePushLimit >= 2 && MethodHandlePushLimit <= 0xFF, + "MethodHandlePushLimit parameter must be in valid range"); + return MethodHandlePushLimit; + case MethodHandles::GC_JVM_STACK_MOVE_LIMIT: + // return number of words per slot, signed according to stack direction + return MethodHandles::stack_move_unit(); + } + return 0; +} +JVM_END + +#ifndef PRODUCT +#define EACH_NAMED_CON(template) \ + template(MethodHandles,GC_JVM_PUSH_LIMIT) \ + template(MethodHandles,GC_JVM_STACK_MOVE_LIMIT) \ + template(MethodHandles,ETF_HANDLE_OR_METHOD_NAME) \ + template(MethodHandles,ETF_DIRECT_HANDLE) \ + template(MethodHandles,ETF_METHOD_NAME) \ + template(MethodHandles,ETF_REFLECT_METHOD) \ + template(sun_dyn_MemberName,MN_IS_METHOD) \ + template(sun_dyn_MemberName,MN_IS_CONSTRUCTOR) \ + template(sun_dyn_MemberName,MN_IS_FIELD) \ + template(sun_dyn_MemberName,MN_IS_TYPE) \ + template(sun_dyn_MemberName,MN_SEARCH_SUPERCLASSES) \ + 
template(sun_dyn_MemberName,MN_SEARCH_INTERFACES) \ + template(sun_dyn_MemberName,VM_INDEX_UNINITIALIZED) \ + template(sun_dyn_AdapterMethodHandle,OP_RETYPE_ONLY) \ + template(sun_dyn_AdapterMethodHandle,OP_CHECK_CAST) \ + template(sun_dyn_AdapterMethodHandle,OP_PRIM_TO_PRIM) \ + template(sun_dyn_AdapterMethodHandle,OP_REF_TO_PRIM) \ + template(sun_dyn_AdapterMethodHandle,OP_PRIM_TO_REF) \ + template(sun_dyn_AdapterMethodHandle,OP_SWAP_ARGS) \ + template(sun_dyn_AdapterMethodHandle,OP_ROT_ARGS) \ + template(sun_dyn_AdapterMethodHandle,OP_DUP_ARGS) \ + template(sun_dyn_AdapterMethodHandle,OP_DROP_ARGS) \ + template(sun_dyn_AdapterMethodHandle,OP_COLLECT_ARGS) \ + template(sun_dyn_AdapterMethodHandle,OP_SPREAD_ARGS) \ + template(sun_dyn_AdapterMethodHandle,OP_FLYBY) \ + template(sun_dyn_AdapterMethodHandle,OP_RICOCHET) \ + template(sun_dyn_AdapterMethodHandle,CONV_OP_LIMIT) \ + template(sun_dyn_AdapterMethodHandle,CONV_OP_MASK) \ + template(sun_dyn_AdapterMethodHandle,CONV_VMINFO_MASK) \ + template(sun_dyn_AdapterMethodHandle,CONV_VMINFO_SHIFT) \ + template(sun_dyn_AdapterMethodHandle,CONV_OP_SHIFT) \ + template(sun_dyn_AdapterMethodHandle,CONV_DEST_TYPE_SHIFT) \ + template(sun_dyn_AdapterMethodHandle,CONV_SRC_TYPE_SHIFT) \ + template(sun_dyn_AdapterMethodHandle,CONV_STACK_MOVE_SHIFT) \ + template(sun_dyn_AdapterMethodHandle,CONV_STACK_MOVE_MASK) \ + /*end*/ + +#define ONE_PLUS(scope,value) 1+ +static const int con_value_count = EACH_NAMED_CON(ONE_PLUS) 0; +#define VALUE_COMMA(scope,value) scope::value, +static const int con_values[con_value_count+1] = { EACH_NAMED_CON(VALUE_COMMA) 0 }; +#define STRING_NULL(scope,value) #value "\0" +static const char con_names[] = { EACH_NAMED_CON(STRING_NULL) }; + +#undef ONE_PLUS +#undef VALUE_COMMA +#undef STRING_NULL +#undef EACH_NAMED_CON +#endif + +JVM_ENTRY(jint, MHI_getNamedCon(JNIEnv *env, jobject igcls, jint which, jobjectArray box_jh)) { +#ifndef PRODUCT + if (which >= 0 && which < con_value_count) { + int con = 
con_values[which]; + objArrayOop box = (objArrayOop) JNIHandles::resolve(box_jh); + if (box != NULL && box->klass() == Universe::objectArrayKlassObj() && box->length() > 0) { + const char* str = &con_names[0]; + for (int i = 0; i < which; i++) + str += strlen(str) + 1; // skip name and null + oop name = java_lang_String::create_oop_from_str(str, CHECK_0); + box->obj_at_put(0, name); + } + return con; + } +#endif + return 0; +} +JVM_END + +// void init(MemberName self, AccessibleObject ref) +JVM_ENTRY(void, MHI_init_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jobject target_jh)) { + if (mname_jh == NULL || target_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); } + Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh)); + oop target_oop = JNIHandles::resolve_non_null(target_jh); + MethodHandles::init_MemberName(mname(), target_oop); +} +JVM_END + +// void expand(MemberName self) +JVM_ENTRY(void, MHI_expand_Mem(JNIEnv *env, jobject igcls, jobject mname_jh)) { + if (mname_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); } + Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh)); + MethodHandles::expand_MemberName(mname, 0, CHECK); +} +JVM_END + +// void resolve(MemberName self, Class caller) +JVM_ENTRY(void, MHI_resolve_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jclass caller_jh)) { + if (mname_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); } + Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh)); + // %%% take caller into account! 
+ MethodHandles::resolve_MemberName(mname, CHECK); +} +JVM_END + +// static native int getMembers(Class defc, String matchName, String matchSig, +// int matchFlags, Class caller, int skip, MemberName[] results); +JVM_ENTRY(jint, MHI_getMembers(JNIEnv *env, jobject igcls, + jclass clazz_jh, jstring name_jh, jstring sig_jh, + int mflags, jclass caller_jh, jint skip, jobjectArray results_jh)) { + if (clazz_jh == NULL || results_jh == NULL) return -1; + klassOop k_oop = java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(clazz_jh)); + + objArrayOop results = (objArrayOop) JNIHandles::resolve(results_jh); + if (results == NULL || !results->is_objArray()) return -1; + + symbolOop name = NULL, sig = NULL; + if (name_jh != NULL) { + name = java_lang_String::as_symbol_or_null(JNIHandles::resolve_non_null(name_jh)); + if (name == NULL) return 0; // a match is not possible + } + if (sig_jh != NULL) { + sig = java_lang_String::as_symbol_or_null(JNIHandles::resolve_non_null(sig_jh)); + if (sig == NULL) return 0; // a match is not possible + } + + klassOop caller = NULL; + if (caller_jh != NULL) { + oop caller_oop = JNIHandles::resolve_non_null(caller_jh); + if (!java_lang_Class::is_instance(caller_oop)) return -1; + caller = java_lang_Class::as_klassOop(caller_oop); + } + + if (name != NULL && sig != NULL && results != NULL) { + // try a direct resolve + // %%% TO DO + } + + int res = MethodHandles::find_MemberNames(k_oop, name, sig, mflags, + caller, skip, results); + // TO DO: expand at least some of the MemberNames, to avoid massive callbacks + return res; +} +JVM_END + + +JVM_ENTRY(void, MH_linkCallSite(JNIEnv *env, jobject igcls, jobject site_jh, jobject target_jh)) { + // No special action required, yet. 
+ oop site_oop = JNIHandles::resolve(site_jh); + if (site_oop == NULL || site_oop->klass() != SystemDictionary::CallSiteImpl_klass()) + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "call site"); + sun_dyn_CallSiteImpl::set_target(site_oop, JNIHandles::resolve(target_jh)); +} +JVM_END + + +/// JVM_RegisterMethodHandleMethods + +#define ADR "J" + +#define LANG "Ljava/lang/" +#define JDYN "Ljava/dyn/" +#define IDYN "Lsun/dyn/" + +#define OBJ LANG"Object;" +#define CLS LANG"Class;" +#define STRG LANG"String;" +#define MT JDYN"MethodType;" +#define MH JDYN"MethodHandle;" +#define MHI IDYN"MethodHandleImpl;" +#define MEM IDYN"MemberName;" +#define AMH IDYN"AdapterMethodHandle;" +#define BMH IDYN"BoundMethodHandle;" +#define DMH IDYN"DirectMethodHandle;" +#define CSTI IDYN"CallSiteImpl;" + +#define CC (char*) /*cast a literal from (const char*)*/ +#define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f) + +// These are the native methods on sun.dyn.MethodHandleNatives. +static JNINativeMethod methods[] = { + // void init(MemberName self, AccessibleObject ref) + {CC"init", CC"("AMH""MH"I)V", FN_PTR(MHI_init_AMH)}, + {CC"init", CC"("BMH""OBJ"I)V", FN_PTR(MHI_init_BMH)}, + {CC"init", CC"("DMH""OBJ"Z"CLS")V", FN_PTR(MHI_init_DMH)}, + {CC"init", CC"("MT")V", FN_PTR(MHI_init_MT)}, + {CC"init", CC"("MEM""OBJ")V", FN_PTR(MHI_init_Mem)}, + {CC"expand", CC"("MEM")V", FN_PTR(MHI_expand_Mem)}, + {CC"resolve", CC"("MEM""CLS")V", FN_PTR(MHI_resolve_Mem)}, + {CC"getTarget", CC"("MH"I)"OBJ, FN_PTR(MHI_getTarget)}, + {CC"getConstant", CC"(I)I", FN_PTR(MHI_getConstant)}, + // static native int getNamedCon(int which, Object[] name) + {CC"getNamedCon", CC"(I["OBJ")I", FN_PTR(MHI_getNamedCon)}, + // static native int getMembers(Class defc, String matchName, String matchSig, + // int matchFlags, Class caller, int skip, MemberName[] results); + {CC"getMembers", CC"("CLS""STRG""STRG"I"CLS"I["MEM")I", FN_PTR(MHI_getMembers)} +}; + +// More entry points specifically for EnableInvokeDynamic. 
+static JNINativeMethod methods2[] = { + {CC"linkCallSite", CC"("CSTI MH")V", FN_PTR(MH_linkCallSite)} +}; + + +// This one function is exported, used by NativeLookup. + +JVM_ENTRY(void, JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass MHN_class)) { + assert(MethodHandles::spot_check_entry_names(), "entry enum is OK"); + + // note: this explicit warning-producing stuff will be replaced by auto-detection of the JSR 292 classes + + if (!EnableMethodHandles) { + warning("JSR 292 method handles are disabled in this JVM. Use -XX:+EnableMethodHandles to enable."); + return; // bind nothing + } + + { + ThreadToNativeFromVM ttnfv(thread); + + int status = env->RegisterNatives(MHN_class, methods, sizeof(methods)/sizeof(JNINativeMethod)); + if (env->ExceptionOccurred()) { + MethodHandles::set_enabled(false); + warning("JSR 292 method handle code is mismatched to this JVM. Disabling support."); + env->ExceptionClear(); + } else { + MethodHandles::set_enabled(true); + } + } + + if (!EnableInvokeDynamic) { + warning("JSR 292 invokedynamic is disabled in this JVM. Use -XX:+EnableInvokeDynamic to enable."); + return; // bind nothing + } + + { + ThreadToNativeFromVM ttnfv(thread); + + int status = env->RegisterNatives(MHN_class, methods2, sizeof(methods2)/sizeof(JNINativeMethod)); + if (env->ExceptionOccurred()) { + MethodHandles::set_enabled(false); + warning("JSR 292 method handle code is mismatched to this JVM. Disabling support."); + env->ExceptionClear(); + } else { + MethodHandles::set_enabled(true); + } + } +} +JVM_END diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/prims/methodHandles.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/prims/methodHandles.hpp Thu May 07 10:30:17 2009 -0700 @@ -0,0 +1,435 @@ +/* + * Copyright 2008-2009 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +class MacroAssembler; +class Label; +class MethodHandleEntry; + +class MethodHandles: AllStatic { + // JVM support for MethodHandle, MethodType, and related types + // in java.dyn and java.dyn.hotspot. + // See also javaClasses for layouts java_dyn_Method{Handle,Type,Type::Form}. + public: + enum EntryKind { + _check_mtype, // how a caller calls a MH + _wrong_method_type, // what happens when there is a type mismatch + _invokestatic_mh, // how a MH emulates invokestatic + _invokespecial_mh, // ditto for the other invokes... + _invokevirtual_mh, + _invokeinterface_mh, + _bound_ref_mh, // reference argument is bound + _bound_int_mh, // int argument is bound (via an Integer or Float) + _bound_long_mh, // long argument is bound (via a Long or Double) + _bound_ref_direct_mh, // same as above, with direct linkage to methodOop + _bound_int_direct_mh, + _bound_long_direct_mh, + + _adapter_mh_first, // adapter sequence goes here... 
+ _adapter_retype_only = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY, + _adapter_check_cast = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_CHECK_CAST, + _adapter_prim_to_prim = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM, + _adapter_ref_to_prim = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM, + _adapter_prim_to_ref = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_PRIM_TO_REF, + _adapter_swap_args = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS, + _adapter_rot_args = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_ROT_ARGS, + _adapter_dup_args = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_DUP_ARGS, + _adapter_drop_args = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_DROP_ARGS, + _adapter_collect_args = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_COLLECT_ARGS, + _adapter_spread_args = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS, + _adapter_flyby = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_FLYBY, + _adapter_ricochet = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_RICOCHET, + _adapter_mh_last = _adapter_mh_first + sun_dyn_AdapterMethodHandle::CONV_OP_LIMIT - 1, + + // Optimized adapter types + + // argument list reordering + _adapter_opt_swap_1, + _adapter_opt_swap_2, + _adapter_opt_rot_1_up, + _adapter_opt_rot_1_down, + _adapter_opt_rot_2_up, + _adapter_opt_rot_2_down, + // primitive single to single: + _adapter_opt_i2i, // i2c, i2z, i2b, i2s + // primitive double to single: + _adapter_opt_l2i, + _adapter_opt_d2f, + // primitive single to double: + _adapter_opt_i2l, + _adapter_opt_f2d, + // conversion between floating point and integer type is handled by Java + + // reference to primitive: + _adapter_opt_unboxi, + _adapter_opt_unboxl, + + // spreading (array length cases 0, 1, >=2) + _adapter_opt_spread_0, + _adapter_opt_spread_1, + _adapter_opt_spread_more, + + _EK_LIMIT, + _EK_FIRST = 0 + }; + + public: + static bool 
enabled() { return _enabled; } + static void set_enabled(bool z); + + private: + enum { // import sun_dyn_AdapterMethodHandle::CONV_OP_* + CONV_OP_LIMIT = sun_dyn_AdapterMethodHandle::CONV_OP_LIMIT, + CONV_OP_MASK = sun_dyn_AdapterMethodHandle::CONV_OP_MASK, + CONV_VMINFO_MASK = sun_dyn_AdapterMethodHandle::CONV_VMINFO_MASK, + CONV_VMINFO_SHIFT = sun_dyn_AdapterMethodHandle::CONV_VMINFO_SHIFT, + CONV_OP_SHIFT = sun_dyn_AdapterMethodHandle::CONV_OP_SHIFT, + CONV_DEST_TYPE_SHIFT = sun_dyn_AdapterMethodHandle::CONV_DEST_TYPE_SHIFT, + CONV_SRC_TYPE_SHIFT = sun_dyn_AdapterMethodHandle::CONV_SRC_TYPE_SHIFT, + CONV_STACK_MOVE_SHIFT = sun_dyn_AdapterMethodHandle::CONV_STACK_MOVE_SHIFT, + CONV_STACK_MOVE_MASK = sun_dyn_AdapterMethodHandle::CONV_STACK_MOVE_MASK + }; + + static bool _enabled; + static MethodHandleEntry* _entries[_EK_LIMIT]; + static const char* _entry_names[_EK_LIMIT+1]; + static bool ek_valid(EntryKind ek) { return (uint)ek < (uint)_EK_LIMIT; } + static bool conv_op_valid(int op) { return (uint)op < (uint)CONV_OP_LIMIT; } + + public: + static bool have_entry(EntryKind ek) { return ek_valid(ek) && _entries[ek] != NULL; } + static MethodHandleEntry* entry(EntryKind ek) { assert(ek_valid(ek), "initialized"); + return _entries[ek]; } + static const char* entry_name(EntryKind ek) { assert(ek_valid(ek), "oob"); + return _entry_names[ek]; } + static EntryKind adapter_entry_kind(int op) { assert(conv_op_valid(op), "oob"); + return EntryKind(_adapter_mh_first + op); } + + static void init_entry(EntryKind ek, MethodHandleEntry* me) { + assert(ek_valid(ek), "oob"); + assert(_entries[ek] == NULL, "no double initialization"); + _entries[ek] = me; + } + + static jint adapter_conversion(int conv_op, BasicType src, BasicType dest, + int stack_move = 0, int vminfo = 0) { + assert(conv_op_valid(conv_op), "oob"); + jint conv = ((conv_op << CONV_OP_SHIFT) + | (src << CONV_SRC_TYPE_SHIFT) + | (dest << CONV_DEST_TYPE_SHIFT) + | (stack_move << CONV_STACK_MOVE_SHIFT) + | (vminfo << 
CONV_VMINFO_SHIFT) + ); + assert(adapter_conversion_op(conv) == conv_op, "decode conv_op"); + assert(adapter_conversion_src_type(conv) == src, "decode src"); + assert(adapter_conversion_dest_type(conv) == dest, "decode dest"); + assert(adapter_conversion_stack_move(conv) == stack_move, "decode stack_move"); + assert(adapter_conversion_vminfo(conv) == vminfo, "decode vminfo"); + return conv; + } + static int adapter_conversion_op(jint conv) { + return ((conv >> CONV_OP_SHIFT) & 0xF); + } + static BasicType adapter_conversion_src_type(jint conv) { + return (BasicType)((conv >> CONV_SRC_TYPE_SHIFT) & 0xF); + } + static BasicType adapter_conversion_dest_type(jint conv) { + return (BasicType)((conv >> CONV_DEST_TYPE_SHIFT) & 0xF); + } + static int adapter_conversion_stack_move(jint conv) { + return (conv >> CONV_STACK_MOVE_SHIFT); + } + static int adapter_conversion_vminfo(jint conv) { + return (conv >> CONV_VMINFO_SHIFT) & CONV_VMINFO_MASK; + } + + // Offset in words that the interpreter stack pointer moves when an argument is pushed. + // The stack_move value must always be a multiple of this. 
+ static int stack_move_unit() { + return frame::interpreter_frame_expression_stack_direction() * Interpreter::stackElementWords(); + } + + enum { CONV_VMINFO_SIGN_FLAG = 0x80 }; + static int adapter_subword_vminfo(BasicType dest) { + if (dest == T_BOOLEAN) return (BitsPerInt - 1); + if (dest == T_CHAR) return (BitsPerInt - 16); + if (dest == T_BYTE) return (BitsPerInt - 8) | CONV_VMINFO_SIGN_FLAG; + if (dest == T_SHORT) return (BitsPerInt - 16) | CONV_VMINFO_SIGN_FLAG; + return 0; // case T_INT + } + // Here is the transformation the i2i adapter must perform: + static int truncate_subword_from_vminfo(jint value, int vminfo) { + jint tem = value << vminfo; + if ((vminfo & CONV_VMINFO_SIGN_FLAG) != 0) { + return (jint)tem >> vminfo; + } else { + return (juint)tem >> vminfo; + } + } + + static inline address from_compiled_entry(EntryKind ek); + static inline address from_interpreted_entry(EntryKind ek); + + // helpers for decode_method. + static methodOop decode_methodOop(methodOop m, int& decode_flags_result); + static methodOop decode_vmtarget(oop vmtarget, int vmindex, oop mtype, klassOop& receiver_limit_result, int& decode_flags_result); + static methodOop decode_MemberName(oop mname, klassOop& receiver_limit_result, int& decode_flags_result); + static methodOop decode_MethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result); + static methodOop decode_DirectMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result); + static methodOop decode_BoundMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result); + static methodOop decode_AdapterMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result); + + // Find out how many stack slots an mh pushes or pops. + // The result is *not* reported as a multiple of stack_move_unit(); + // It is a signed net number of pushes (a difference in vmslots). + // To compare with a stack_move value, first multiply by stack_move_unit(). 
+ static int decode_MethodHandle_stack_pushes(oop mh); + + public: + // working with member names + static void resolve_MemberName(Handle mname, TRAPS); // compute vmtarget/vmindex from name/type + static void expand_MemberName(Handle mname, int suppress, TRAPS); // expand defc/name/type if missing + static void init_MemberName(oop mname_oop, oop target); // compute vmtarget/vmindex from target + static void init_MemberName(oop mname_oop, methodOop m, bool do_dispatch); + static void init_MemberName(oop mname_oop, klassOop field_holder, AccessFlags mods, int offset); + static int find_MemberNames(klassOop k, symbolOop name, symbolOop sig, + int mflags, klassOop caller, + int skip, objArrayOop results); + // bit values for suppress argument to expand_MemberName: + enum { _suppress_defc = 1, _suppress_name = 2, _suppress_type = 4 }; + + // called from InterpreterGenerator and StubGenerator + static address generate_method_handle_interpreter_entry(MacroAssembler* _masm); + static void generate_method_handle_stub(MacroAssembler* _masm, EntryKind ek); + + // argument list parsing + static int argument_slot(oop method_type, int arg); + static int argument_slot_count(oop method_type) { return argument_slot(method_type, -1); } + static int argument_slot_to_argnum(oop method_type, int argslot); + + // Runtime support + enum { // bit-encoded flags from decode_method or decode_vmref + _dmf_has_receiver = 0x01, // target method has leading reference argument + _dmf_does_dispatch = 0x02, // method handle performs virtual or interface dispatch + _dmf_from_interface = 0x04, // peforms interface dispatch + _DMF_DIRECT_MASK = (_dmf_from_interface*2 - _dmf_has_receiver), + _dmf_binds_method = 0x08, + _dmf_binds_argument = 0x10, + _DMF_BOUND_MASK = (_dmf_binds_argument*2 - _dmf_binds_method), + _dmf_adapter_lsb = 0x20, + _DMF_ADAPTER_MASK = (_dmf_adapter_lsb << CONV_OP_LIMIT) - _dmf_adapter_lsb + }; + static methodOop decode_method(oop x, klassOop& receiver_limit_result, int& 
decode_flags_result); + enum { + // format of query to getConstant: + GC_JVM_PUSH_LIMIT = 0, + GC_JVM_STACK_MOVE_LIMIT = 1, + + // format of result from getTarget / encode_target: + ETF_HANDLE_OR_METHOD_NAME = 0, // all available data (immediate MH or method) + ETF_DIRECT_HANDLE = 1, // ultimate method handle (will be a DMH, may be self) + ETF_METHOD_NAME = 2, // ultimate method as MemberName + ETF_REFLECT_METHOD = 3 // ultimate method as java.lang.reflect object (sans refClass) + }; + static int get_named_constant(int which, Handle name_box, TRAPS); + static oop encode_target(Handle mh, int format, TRAPS); // report vmtarget (to Java code) + static bool class_cast_needed(klassOop src, klassOop dst); + + private: + // These checkers operate on a pair of whole MethodTypes: + static const char* check_method_type_change(oop src_mtype, int src_beg, int src_end, + int insert_argnum, oop insert_type, + int change_argnum, oop change_type, + int delete_argnum, + oop dst_mtype, int dst_beg, int dst_end); + static const char* check_method_type_insertion(oop src_mtype, + int insert_argnum, oop insert_type, + oop dst_mtype) { + oop no_ref = NULL; + return check_method_type_change(src_mtype, 0, -1, + insert_argnum, insert_type, + -1, no_ref, -1, dst_mtype, 0, -1); + } + static const char* check_method_type_conversion(oop src_mtype, + int change_argnum, oop change_type, + oop dst_mtype) { + oop no_ref = NULL; + return check_method_type_change(src_mtype, 0, -1, -1, no_ref, + change_argnum, change_type, + -1, dst_mtype, 0, -1); + } + static const char* check_method_type_passthrough(oop src_mtype, oop dst_mtype) { + oop no_ref = NULL; + return check_method_type_change(src_mtype, 0, -1, + -1, no_ref, -1, no_ref, -1, + dst_mtype, 0, -1); + } + + // These checkers operate on pairs of argument or return types: + static const char* check_argument_type_change(BasicType src_type, klassOop src_klass, + BasicType dst_type, klassOop dst_klass, + int argnum); + + static const char* 
check_argument_type_change(oop src_type, oop dst_type, + int argnum) { + klassOop src_klass = NULL, dst_klass = NULL; + BasicType src_bt = java_lang_Class::as_BasicType(src_type, &src_klass); + BasicType dst_bt = java_lang_Class::as_BasicType(dst_type, &dst_klass); + return check_argument_type_change(src_bt, src_klass, + dst_bt, dst_klass, argnum); + } + + static const char* check_return_type_change(oop src_type, oop dst_type) { + return check_argument_type_change(src_type, dst_type, -1); + } + + static const char* check_return_type_change(BasicType src_type, klassOop src_klass, + BasicType dst_type, klassOop dst_klass) { + return check_argument_type_change(src_type, src_klass, dst_type, dst_klass, -1); + } + + static const char* check_method_receiver(methodOop m, klassOop passed_recv_type); + + // These verifiers can block, and will throw an error if the checking fails: + static void verify_vmslots(Handle mh, TRAPS); + static void verify_vmargslot(Handle mh, int argnum, int argslot, TRAPS); + + static void verify_method_type(methodHandle m, Handle mtype, + bool has_bound_oop, + KlassHandle bound_oop_type, + TRAPS); + + static void verify_method_signature(methodHandle m, Handle mtype, + int first_ptype_pos, + KlassHandle insert_ptype, TRAPS); + + static void verify_DirectMethodHandle(Handle mh, methodHandle m, TRAPS); + static void verify_BoundMethodHandle(Handle mh, Handle target, int argnum, + bool direct_to_method, TRAPS); + static void verify_BoundMethodHandle_with_receiver(Handle mh, methodHandle m, TRAPS); + static void verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS); + + public: + + // Fill in the fields of a DirectMethodHandle mh. (MH.type must be pre-filled.) + static void init_DirectMethodHandle(Handle mh, methodHandle method, bool do_dispatch, TRAPS); + + // Fill in the fields of a BoundMethodHandle mh. (MH.type, BMH.argument must be pre-filled.) 
+ static void init_BoundMethodHandle(Handle mh, Handle target, int argnum, TRAPS); + static void init_BoundMethodHandle_with_receiver(Handle mh, + methodHandle original_m, + KlassHandle receiver_limit, + int decode_flags, + TRAPS); + + // Fill in the fields of an AdapterMethodHandle mh. (MH.type must be pre-filled.) + static void init_AdapterMethodHandle(Handle mh, Handle target, int argnum, TRAPS); + +#ifdef ASSERT + static bool spot_check_entry_names(); +#endif + + private: + static methodHandle dispatch_decoded_method(methodHandle m, + KlassHandle receiver_limit, + int decode_flags, + KlassHandle receiver_klass, + TRAPS); + + static bool same_basic_type_for_arguments(BasicType src, BasicType dst, + bool for_return = false); + static bool same_basic_type_for_returns(BasicType src, BasicType dst) { + return same_basic_type_for_arguments(src, dst, true); + } + + enum { // arg_mask values + _INSERT_NO_MASK = -1, + _INSERT_REF_MASK = 0, + _INSERT_INT_MASK = 1, + _INSERT_LONG_MASK = 3 + }; + static void insert_arg_slots(MacroAssembler* _masm, + RegisterOrConstant arg_slots, + int arg_mask, + Register rax_argslot, + Register rbx_temp, Register rdx_temp); + + static void remove_arg_slots(MacroAssembler* _masm, + RegisterOrConstant arg_slots, + Register rax_argslot, + Register rbx_temp, Register rdx_temp); +}; + + +// Access methods for the "entry" field of a java.dyn.MethodHandle. +// The field is primarily a jump target for compiled calls. +// However, we squirrel away some nice pointers for other uses, +// just before the jump target. 
+// Aspects of a method handle entry: +// - from_compiled_entry - stub used when compiled code calls the MH +// - from_interpreted_entry - stub used when the interpreter calls the MH +// - type_checking_entry - stub for runtime casting between MHForm siblings (NYI) +class MethodHandleEntry { + public: + class Data { + friend class MethodHandleEntry; + size_t _total_size; // size including Data and code stub + MethodHandleEntry* _type_checking_entry; + address _from_interpreted_entry; + MethodHandleEntry* method_entry() { return (MethodHandleEntry*)(this + 1); } + }; + + Data* data() { return (Data*)this - 1; } + + address start_address() { return (address) data(); } + address end_address() { return start_address() + data()->_total_size; } + + address from_compiled_entry() { return (address) this; } + + address from_interpreted_entry() { return data()->_from_interpreted_entry; } + void set_from_interpreted_entry(address e) { data()->_from_interpreted_entry = e; } + + MethodHandleEntry* type_checking_entry() { return data()->_type_checking_entry; } + void set_type_checking_entry(MethodHandleEntry* e) { data()->_type_checking_entry = e; } + + void set_end_address(address end_addr) { + size_t total_size = end_addr - start_address(); + assert(total_size > 0 && total_size < 0x1000, "reasonable end address"); + data()->_total_size = total_size; + } + + // Compiler support: + static int from_interpreted_entry_offset_in_bytes() { + return (int)( offset_of(Data, _from_interpreted_entry) - sizeof(Data) ); + } + static int type_checking_entry_offset_in_bytes() { + return (int)( offset_of(Data, _from_interpreted_entry) - sizeof(Data) ); + } + + static address start_compiled_entry(MacroAssembler* _masm, + address interpreted_entry = NULL); + static MethodHandleEntry* finish_compiled_entry(MacroAssembler* masm, address start_addr); +}; + +address MethodHandles::from_compiled_entry(EntryKind ek) { return entry(ek)->from_compiled_entry(); } +address 
MethodHandles::from_interpreted_entry(EntryKind ek) { return entry(ek)->from_interpreted_entry(); } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/prims/nativeLookup.cpp --- a/src/share/vm/prims/nativeLookup.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/prims/nativeLookup.cpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -78,6 +78,7 @@ extern "C" { void JNICALL JVM_RegisterUnsafeMethods(JNIEnv *env, jclass unsafecls); + void JNICALL JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass unsafecls); void JNICALL JVM_RegisterPerfMethods(JNIEnv *env, jclass perfclass); } @@ -97,6 +98,9 @@ if (strstr(jni_name, "Java_sun_misc_Unsafe_registerNatives") != NULL) { return CAST_FROM_FN_PTR(address, JVM_RegisterUnsafeMethods); } + if (strstr(jni_name, "Java_sun_dyn_MethodHandleNatives_registerNatives") != NULL) { + return CAST_FROM_FN_PTR(address, JVM_RegisterMethodHandleMethods); + } if (strstr(jni_name, "Java_sun_misc_Perf_registerNatives") != NULL) { return CAST_FROM_FN_PTR(address, JVM_RegisterPerfMethods); } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/runtime/arguments.cpp --- a/src/share/vm/runtime/arguments.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/runtime/arguments.cpp Thu May 07 10:30:17 2009 -0700 @@ -2627,6 +2627,19 @@ } #endif // PRODUCT + if (EnableInvokeDynamic && !EnableMethodHandles) { + if (!FLAG_IS_DEFAULT(EnableMethodHandles)) { + warning("forcing EnableMethodHandles true to allow EnableInvokeDynamic"); + } + EnableMethodHandles = true; + } + if (EnableMethodHandles && !AnonymousClasses) { + if (!FLAG_IS_DEFAULT(AnonymousClasses)) { + warning("forcing AnonymousClasses true to enable EnableMethodHandles"); + } + AnonymousClasses = true; + } + if 
(PrintGCDetails) { // Turn on -verbose:gc options as well PrintGC = true; diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/runtime/globals.hpp --- a/src/share/vm/runtime/globals.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/runtime/globals.hpp Thu May 07 10:30:17 2009 -0700 @@ -3301,6 +3301,27 @@ product(bool, AnonymousClasses, false, \ "support sun.misc.Unsafe.defineAnonymousClass") \ \ + product(bool, EnableMethodHandles, false, \ + "support method handles (true by default under JSR 292)") \ + \ + diagnostic(intx, MethodHandlePushLimit, 3, \ + "number of additional stack slots a method handle may push") \ + \ + develop(bool, TraceMethodHandles, false, \ + "trace internal method handle operations") \ + \ + diagnostic(bool, VerifyMethodHandles, trueInDebug, \ + "perform extra checks when constructing method handles") \ + \ + diagnostic(bool, OptimizeMethodHandles, true, \ + "when constructing method handles, try to improve them") \ + \ + product(bool, EnableInvokeDynamic, false, \ + "recognize the invokedynamic instruction") \ + \ + develop(bool, TraceInvokeDynamic, false, \ + "trace internal invoke dynamic operations") \ + \ product(bool, TaggedStackInterpreter, false, \ "Insert tags in interpreter execution stack for oopmap generaion")\ \ diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/runtime/javaCalls.cpp --- a/src/share/vm/runtime/javaCalls.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/runtime/javaCalls.cpp Thu May 07 10:30:17 2009 -0700 @@ -37,11 +37,6 @@ guarantee(!thread->is_Compiler_thread(), "cannot make java calls from the compiler"); _result = result; - // Make sure that that the value of the higest_lock is at least the same as the current stackpointer, - // since, the Java code is highly likely to use locks. - // Use '(address)this' to guarantee that highest_lock address is conservative and inside our thread - thread->update_highest_lock((address)this); - // Allocate handle block for Java code. 
This must be done before we change thread_state to _thread_in_Java_or_stub, // since it can potentially block. JNIHandleBlock* new_handles = JNIHandleBlock::allocate_block(thread); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/runtime/sharedRuntime.cpp --- a/src/share/vm/runtime/sharedRuntime.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/runtime/sharedRuntime.cpp Thu May 07 10:30:17 2009 -0700 @@ -1471,9 +1471,73 @@ return generate_class_cast_message(objName, targetKlass->external_name()); } +char* SharedRuntime::generate_wrong_method_type_message(JavaThread* thread, + oopDesc* required, + oopDesc* actual) { + assert(EnableMethodHandles, ""); + oop singleKlass = wrong_method_type_is_for_single_argument(thread, required); + if (singleKlass != NULL) { + const char* objName = "argument or return value"; + if (actual != NULL) { + // be flexible about the junk passed in: + klassOop ak = (actual->is_klass() + ? (klassOop)actual + : actual->klass()); + objName = Klass::cast(ak)->external_name(); + } + Klass* targetKlass = Klass::cast(required->is_klass() + ? 
(klassOop)required + : java_lang_Class::as_klassOop(required)); + return generate_class_cast_message(objName, targetKlass->external_name()); + } else { + // %%% need to get the MethodType string, without messing around too much + // Get a signature from the invoke instruction + const char* mhName = "method handle"; + const char* targetType = "the required signature"; + vframeStream vfst(thread, true); + if (!vfst.at_end()) { + Bytecode_invoke* call = Bytecode_invoke_at(vfst.method(), vfst.bci()); + methodHandle target; + { + EXCEPTION_MARK; + target = call->static_target(THREAD); + if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; } + } + if (target.not_null() + && target->is_method_handle_invoke() + && required == target->method_handle_type()) { + targetType = target->signature()->as_C_string(); + } + } + klassOop kignore; int fignore; + methodOop actual_method = MethodHandles::decode_method(actual, + kignore, fignore); + if (actual_method != NULL) { + if (actual_method->name() == vmSymbols::invoke_name()) + mhName = "$"; + else + mhName = actual_method->signature()->as_C_string(); + if (mhName[0] == '$') + mhName = actual_method->signature()->as_C_string(); + } + return generate_class_cast_message(mhName, targetType, + " cannot be called as "); + } +} + +oop SharedRuntime::wrong_method_type_is_for_single_argument(JavaThread* thr, + oopDesc* required) { + if (required == NULL) return NULL; + if (required->klass() == SystemDictionary::class_klass()) + return required; + if (required->is_klass()) + return Klass::cast(klassOop(required))->java_mirror(); + return NULL; +} + + char* SharedRuntime::generate_class_cast_message( - const char* objName, const char* targetKlassName) { - const char* desc = " cannot be cast to "; + const char* objName, const char* targetKlassName, const char* desc) { size_t msglen = strlen(objName) + strlen(desc) + strlen(targetKlassName) + 1; char* message = NEW_RESOURCE_ARRAY(char, msglen); diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 
src/share/vm/runtime/sharedRuntime.hpp --- a/src/share/vm/runtime/sharedRuntime.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/runtime/sharedRuntime.hpp Thu May 07 10:30:17 2009 -0700 @@ -212,10 +212,32 @@ static char* generate_class_cast_message(JavaThread* thr, const char* name); /** + * Fill in the message for a WrongMethodTypeException + * + * @param thr the current thread + * @param mtype (optional) expected method type (or argument class) + * @param mhandle (optional) actual method handle (or argument) + * @return the dynamically allocated exception message + * + * BCP for the frame on top of the stack must refer to an + * 'invokevirtual' op for a method handle, or an 'invokedyamic' op. + * The caller (or one of its callers) must use a ResourceMark + * in order to correctly free the result. + */ + static char* generate_wrong_method_type_message(JavaThread* thr, + oopDesc* mtype = NULL, + oopDesc* mhandle = NULL); + + /** Return non-null if the mtype is a klass or Class, not a MethodType. */ + static oop wrong_method_type_is_for_single_argument(JavaThread* thr, + oopDesc* mtype); + + /** * Fill in the "X cannot be cast to a Y" message for ClassCastException * * @param name the name of the class of the object attempted to be cast * @param klass the name of the target klass attempt + * @param gripe the specific kind of problem being reported * @return the dynamically allocated exception message (must be freed * by the caller using a resource mark) * @@ -224,7 +246,8 @@ * The caller (or one of it's callers) must use a ResourceMark * in order to correctly free the result. */ - static char* generate_class_cast_message(const char* name, const char* klass); + static char* generate_class_cast_message(const char* name, const char* klass, + const char* gripe = " cannot be cast to "); // Resolves a call site- may patch in the destination of the call into the // compiled code. 
diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/runtime/synchronizer.cpp --- a/src/share/vm/runtime/synchronizer.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/runtime/synchronizer.cpp Thu May 07 10:30:17 2009 -0700 @@ -1117,10 +1117,10 @@ // Optimization: if the mark->locker stack address is associated // with this thread we could simply set m->_owner = Self and - // m->OwnerIsThread = 1. Note that a thread can inflate an object + // m->OwnerIsThread = 1. Note that a thread can inflate an object // that it has stack-locked -- as might happen in wait() -- directly // with CAS. That is, we can avoid the xchg-NULL .... ST idiom. - m->set_owner (mark->locker()); + m->set_owner(mark->locker()); m->set_object(object); // TODO-FIXME: assert BasicLock->dhw != 0. @@ -1214,10 +1214,9 @@ BiasedLocking::revoke_at_safepoint(obj); } assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); - } - - THREAD->update_highest_lock((address)lock); - slow_enter (obj, lock, THREAD) ; + } + + slow_enter (obj, lock, THREAD) ; } void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) { diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/runtime/thread.cpp --- a/src/share/vm/runtime/thread.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/runtime/thread.cpp Thu May 07 10:30:17 2009 -0700 @@ -128,7 +128,6 @@ debug_only(_allow_allocation_count = 0;) NOT_PRODUCT(_allow_safepoint_count = 0;) CHECK_UNHANDLED_OOPS_ONLY(_gc_locked_out_count = 0;) - _highest_lock = NULL; _jvmti_env_iteration_count = 0; _vm_operation_started_count = 0; _vm_operation_completed_count = 0; @@ -790,19 +789,6 @@ } #endif -bool Thread::lock_is_in_stack(address adr) const { - assert(Thread::current() == this, "lock_is_in_stack can only be called from current thread"); - // High limit: highest_lock is set during thread execution - // Low limit: address of the local variable dummy, rounded to 4K boundary. 
- // (The rounding helps finding threads in unsafe mode, even if the particular stack - // frame has been popped already. Correct as long as stacks are at least 4K long and aligned.) - address end = os::current_stack_pointer(); - if (_highest_lock >= adr && adr >= end) return true; - - return false; -} - - bool Thread::is_in_stack(address adr) const { assert(Thread::current() == this, "is_in_stack can only be called from current thread"); address end = os::current_stack_pointer(); @@ -818,8 +804,7 @@ // should be revisited, and they should be removed if possible. bool Thread::is_lock_owned(address adr) const { - if (lock_is_in_stack(adr) ) return true; - return false; + return (_stack_base >= adr && adr >= (_stack_base - _stack_size)); } bool Thread::set_as_starting_thread() { @@ -1664,7 +1649,7 @@ } bool JavaThread::is_lock_owned(address adr) const { - if (lock_is_in_stack(adr)) return true; + if (Thread::is_lock_owned(adr)) return true; for (MonitorChunk* chunk = monitor_chunks(); chunk != NULL; chunk = chunk->next()) { if (chunk->contains(adr)) return true; @@ -2443,7 +2428,7 @@ if (thread_oop != NULL && java_lang_Thread::is_daemon(thread_oop)) st->print("daemon "); Thread::print_on(st); // print guess for valid stack memory region (assume 4K pages); helps lock debugging - st->print_cr("[" INTPTR_FORMAT ".." INTPTR_FORMAT "]", (intptr_t)last_Java_sp() & ~right_n_bits(12), highest_lock()); + st->print_cr("[" INTPTR_FORMAT "]", (intptr_t)last_Java_sp() & ~right_n_bits(12)); if (thread_oop != NULL && JDK_Version::is_gte_jdk15x_version()) { st->print_cr(" java.lang.Thread.State: %s", java_lang_Thread::thread_status_name(thread_oop)); } @@ -3733,25 +3718,13 @@ // heavyweight monitors, then the owner is the stack address of the // Lock Word in the owning Java thread's stack. // - // We can't use Thread::is_lock_owned() or Thread::lock_is_in_stack() because - // those routines rely on the "current" stack pointer. 
That would be our - // stack pointer which is not relevant to the question. Instead we use the - // highest lock ever entered by the thread and find the thread that is - // higher than and closest to our target stack address. - // - address least_diff = 0; - bool least_diff_initialized = false; JavaThread* the_owner = NULL; { MutexLockerEx ml(doLock ? Threads_lock : NULL); ALL_JAVA_THREADS(q) { - address addr = q->highest_lock(); - if (addr == NULL || addr < owner) continue; // thread has entered no monitors or is too low - address diff = (address)(addr - owner); - if (!least_diff_initialized || diff < least_diff) { - least_diff_initialized = true; - least_diff = diff; + if (q->is_lock_owned(owner)) { the_owner = q; + break; } } } diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/runtime/thread.hpp --- a/src/share/vm/runtime/thread.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/runtime/thread.hpp Thu May 07 10:30:17 2009 -0700 @@ -200,14 +200,6 @@ friend class ThreadLocalStorage; friend class GC_locker; - // In order for all threads to be able to use fast locking, we need to know the highest stack - // address of where a lock is on the stack (stacks normally grow towards lower addresses). This - // variable is initially set to NULL, indicating no locks are used by the thread. During the thread's - // execution, it will be set whenever locking can happen, i.e., when we call out to Java code or use - // an ObjectLocker. The value is never decreased, hence, it will over the lifetime of a thread - // approximate the real stackbase. 
- address _highest_lock; // Highest stack address where a JavaLock exist - ThreadLocalAllocBuffer _tlab; // Thread-local eden int _vm_operation_started_count; // VM_Operation support @@ -400,18 +392,14 @@ // Sweeper support void nmethods_do(); - // Fast-locking support - address highest_lock() const { return _highest_lock; } - void update_highest_lock(address base) { if (base > _highest_lock) _highest_lock = base; } - // Tells if adr belong to this thread. This is used // for checking if a lock is owned by the running thread. - // Warning: the method can only be used on the running thread - // Fast lock support uses these methods - virtual bool lock_is_in_stack(address adr) const; + + // Used by fast lock support virtual bool is_lock_owned(address adr) const; // Check if address is in the stack of the thread (not just for locks). + // Warning: the method can only be used on the running thread bool is_in_stack(address adr) const; // Sets this thread as starting thread. Returns failure if thread diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/runtime/vmStructs.cpp --- a/src/share/vm/runtime/vmStructs.cpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/runtime/vmStructs.cpp Thu May 07 10:30:17 2009 -0700 @@ -656,7 +656,6 @@ \ volatile_nonstatic_field(Thread, _suspend_flags, uint32_t) \ nonstatic_field(Thread, _active_handles, JNIHandleBlock*) \ - nonstatic_field(Thread, _highest_lock, address) \ nonstatic_field(Thread, _tlab, ThreadLocalAllocBuffer) \ nonstatic_field(Thread, _current_pending_monitor, ObjectMonitor*) \ nonstatic_field(Thread, _current_pending_monitor_is_from_java, bool) \ diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/utilities/accessFlags.hpp --- a/src/share/vm/utilities/accessFlags.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/utilities/accessFlags.hpp Thu May 07 10:30:17 2009 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -47,6 +47,8 @@ JVM_ACC_IS_OLD = 0x00010000, // RedefineClasses() has replaced this method JVM_ACC_IS_OBSOLETE = 0x00020000, // RedefineClasses() has made method obsolete JVM_ACC_IS_PREFIXED_NATIVE = 0x00040000, // JVMTI has prefixed this native method + JVM_MH_INVOKE_BITS // = 0x10001100 // MethodHandle.invoke quasi-native + = (JVM_ACC_NATIVE | JVM_ACC_SYNTHETIC | JVM_ACC_MONITOR_MATCH), // klassOop flags JVM_ACC_HAS_MIRANDA_METHODS = 0x10000000, // True if this class has miranda methods in it's vtable @@ -72,6 +74,7 @@ // flags accepted by set_field_flags() JVM_ACC_FIELD_FLAGS = 0x00008000 | JVM_ACC_WRITTEN_FLAGS + }; @@ -114,6 +117,15 @@ bool is_obsolete () const { return (_flags & JVM_ACC_IS_OBSOLETE ) != 0; } bool is_prefixed_native () const { return (_flags & JVM_ACC_IS_PREFIXED_NATIVE ) != 0; } + // JSR 292: A method of the form MethodHandle.invoke(A...)R method is + // neither bytecoded nor a JNI native, but rather a fast call through + // a lightweight method handle object. Because it is not bytecoded, + // it has the native bit set, but the monitor-match bit is also set + // to distinguish it from a JNI native (which never has the match bit set). + // The synthetic bit is also present, because such a method is never + // explicitly defined in Java code. 
+ bool is_method_handle_invoke () const { return (_flags & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS; } + // klassOop flags bool has_miranda_methods () const { return (_flags & JVM_ACC_HAS_MIRANDA_METHODS ) != 0; } bool has_vanilla_constructor () const { return (_flags & JVM_ACC_HAS_VANILLA_CONSTRUCTOR) != 0; } @@ -199,6 +211,14 @@ jshort as_short() { return (jshort)_flags; } jint as_int() { return _flags; } + inline friend AccessFlags accessFlags_from(jint flags); + // Printing/debugging void print_on(outputStream* st) const PRODUCT_RETURN; }; + +inline AccessFlags accessFlags_from(jint flags) { + AccessFlags af; + af._flags = flags; + return af; +} diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/utilities/exceptions.hpp --- a/src/share/vm/utilities/exceptions.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/utilities/exceptions.hpp Thu May 07 10:30:17 2009 -0700 @@ -237,6 +237,9 @@ #define THROW_ARG_0(name, signature, arg) THROW_ARG_(name, signature, arg, 0) #define THROW_MSG_CAUSE_0(name, message, cause) THROW_MSG_CAUSE_(name, message, cause, 0) +#define THROW_NULL(name) THROW_(name, NULL) +#define THROW_MSG_NULL(name, message) THROW_MSG_(name, message, NULL) + // The CATCH macro checks that no exception has been thrown by a function; it is used at // call sites about which is statically known that the callee cannot throw an exception // even though it is declared with TRAPS. 
diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 src/share/vm/utilities/globalDefinitions.hpp --- a/src/share/vm/utilities/globalDefinitions.hpp Fri Apr 17 15:50:12 2009 +0100 +++ b/src/share/vm/utilities/globalDefinitions.hpp Thu May 07 10:30:17 2009 -0700 @@ -408,6 +408,15 @@ return T_BOOLEAN <= t && t <= T_LONG; } +inline bool is_subword_type(BasicType t) { + // these guys are processed exactly like T_INT in calling sequences: + return (t == T_BOOLEAN || t == T_CHAR || t == T_BYTE || t == T_SHORT); +} + +inline bool is_signed_subword_type(BasicType t) { + return (t == T_BYTE || t == T_SHORT); +} + // Convert a char from a classfile signature to a BasicType inline BasicType char2type(char c) { switch( c ) { @@ -563,8 +572,8 @@ enum TosState { // describes the tos cache contents btos = 0, // byte, bool tos cached - ctos = 1, // short, char tos cached - stos = 2, // short, char tos cached + ctos = 1, // char tos cached + stos = 2, // short tos cached itos = 3, // int tos cached ltos = 4, // long tos cached ftos = 5, // float tos cached @@ -579,7 +588,7 @@ inline TosState as_TosState(BasicType type) { switch (type) { case T_BYTE : return btos; - case T_BOOLEAN: return btos; + case T_BOOLEAN: return btos; // FIXME: Add ztos case T_CHAR : return ctos; case T_SHORT : return stos; case T_INT : return itos; @@ -593,6 +602,22 @@ return ilgl; } +inline BasicType as_BasicType(TosState state) { + switch (state) { + //case ztos: return T_BOOLEAN;//FIXME + case btos : return T_BYTE; + case ctos : return T_CHAR; + case stos : return T_SHORT; + case itos : return T_INT; + case ltos : return T_LONG; + case ftos : return T_FLOAT; + case dtos : return T_DOUBLE; + case atos : return T_OBJECT; + case vtos : return T_VOID; + } + return T_ILLEGAL; +} + // Helper function to convert BasicType info into TosState // Note: Cannot define here as it uses global constant at the time being. 
diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 test/compiler/6539464/Test.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/6539464/Test.java Thu May 07 10:30:17 2009 -0700 @@ -0,0 +1,44 @@ +/* + * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ + +/** + * @test + * @bug 6539464 + * @summary Math.log() produces inconsistent results between successive runs. 
+ * + * @run main/othervm -Xcomp -XX:CompileOnly=Test.main Test + */ + +public class Test { + static double log_value = 17197; + static double log_result = Math.log(log_value); + + public static void main(String[] args) throws Exception { + for (int i = 0; i < 1000000; i++) { + double log_result2 = Math.log(log_value); + if (log_result2 != log_result) { + throw new InternalError("Math.log produces inconsistent results: " + log_result2 + " != " + log_result); + } + } + } +} diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 test/compiler/6589834/Test_ia32.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/6589834/Test_ia32.java Thu May 07 10:30:17 2009 -0700 @@ -0,0 +1,126 @@ +/* + * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ */ + +/** + * @test + * @bug 6589834 + * @summary deoptimization problem with -XX:+DeoptimizeALot + * + * @run main/othervm -server Test_ia32 + */ + +/*************************************************************************************** +NOTE: The bug shows up (with several "Bug!" message) even without the + flag -XX:+DeoptimizeALot. In a debug build, you may want to try + the flags -XX:+VerifyStack and -XX:+DeoptimizeALot to get more information. +****************************************************************************************/ +import java.lang.reflect.Constructor; + +public class Test_ia32 { + + public static int NUM_THREADS = 100; + + public static int CLONE_LENGTH = 1000; + + public static void main(String[] args) throws InterruptedException, ClassNotFoundException { + + Reflector[] threads = new Reflector[NUM_THREADS]; + for (int i = 0; i < threads.length; i++) { + threads[i] = new Reflector(); + threads[i].start(); + } + + System.out.println("Give Reflector.run() some time to compile..."); + Thread.sleep(5000); + + System.out.println("Load RMISecurityException causing run() deoptimization"); + ClassLoader.getSystemClassLoader().loadClass("java.rmi.RMISecurityException"); + + for (Reflector thread : threads) + thread.requestStop(); + + for (Reflector thread : threads) + try { + thread.join(); + } catch (InterruptedException e) { + System.out.println(e); + } + + } + +} + +class Reflector extends Thread { + + volatile boolean _doSpin = true; + + Test_ia32[] _tests; + + Reflector() { + _tests = new Test_ia32[Test_ia32.CLONE_LENGTH]; + for (int i = 0; i < _tests.length; i++) { + _tests[i] = new Test_ia32(); + } + } + + static int g(int i1, int i2, Test_ia32[] arr, int i3, int i4) { + + if (!(i1==1 && i2==2 && i3==3 && i4==4)) { + System.out.println("Bug!"); + } + + return arr.length; + } + + static int f(Test_ia32[] arr) { + return g(1, 2, arr.clone(), 3, 4); + } + + @Override + public void run() { + Constructor[] ctrs = null; + Class klass = 
Test_ia32.class; + try { + ctrs = klass.getConstructors(); + } catch (SecurityException e) { + System.out.println(e); + } + + try { + while (_doSpin) { + if (f(_tests) < 0) + System.out.println("return value usage"); + } + } catch (NullPointerException e) { + e.printStackTrace(); + } + + System.out.println(this + " - stopped."); + } + + public void requestStop() { + System.out.println(this + " - stop requested."); + _doSpin = false; + } + +} diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 test/compiler/6636138/Test1.java --- a/test/compiler/6636138/Test1.java Fri Apr 17 15:50:12 2009 +0100 +++ b/test/compiler/6636138/Test1.java Thu May 07 10:30:17 2009 -0700 @@ -26,7 +26,7 @@ * @bug 6636138 * @summary SuperWord::co_locate_pack(Node_List* p) generates memory graph that leads to memory order violation. * - * @run main/othervm -server -Xbatch -XX:CompileOnly=Test1.init -XX:+UseSuperword Test1 + * @run main/othervm -server -Xbatch -XX:CompileOnly=Test1.init Test1 */ class Test1 { diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 test/compiler/6636138/Test2.java --- a/test/compiler/6636138/Test2.java Fri Apr 17 15:50:12 2009 +0100 +++ b/test/compiler/6636138/Test2.java Thu May 07 10:30:17 2009 -0700 @@ -26,7 +26,7 @@ * @bug 6636138 * @summary SuperWord::co_locate_pack(Node_List* p) generates memory graph that leads to memory order violation. * - * @run main/othervm -server -Xbatch -XX:CompileOnly=Test2.shift -XX:+UseSuperword Test2 + * @run main/othervm -server -Xbatch -XX:CompileOnly=Test2.shift Test2 */ class Test2 { diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 test/compiler/6711117/Test.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/6711117/Test.java Thu May 07 10:30:17 2009 -0700 @@ -0,0 +1,849 @@ +/* + * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +/* + * @test + * @bug 6711117 + * @summary Assertion in 64bit server vm (flat != TypePtr::BOTTOM,"cannot alias-analyze an untyped ptr") + * @run main/othervm -Xcomp -XX:+IgnoreUnrecognizedVMOptions -XX:+AggressiveOpts -XX:+UseCompressedOops Test + */ + +final class Test_Class_0 { + final static char var_1 = 'E'; + short var_2 = 16213; + final static String var_3 = "jiiibmmsk"; + + + public Test_Class_0() + { + var_2 ^= 'M'; + final String var_18 = var_3; + var_2--; + var_2 |= (byte)('D' / (byte)var_2) - ((byte)1.6680514E38F << + ((byte)'O') & 7320241275829036032L); + func_2(((!false & false | false ? true : false) ? true : true | !true) ? var_2 : 834513107); + var_2 >>>= var_1; + "smiosoebk".codePointCount(true ^ (false ^ ! !false) ? (byte)- ((byte)430513598) : + ((byte)'_'), ~ (true ? (byte)']' : (byte)-2.8272547997066827E307)); + var_2 -= true ? var_1 : var_1; + var_2 ^= var_1; + var_2 &= (var_2 |= ~ ((byte)(var_2 *= var_2))); + long var_19 = 0L; + short var_20 = var_2 += 'P'; + while (var_19 < 1) + { + var_2 ^= true ? 
(byte)- +1.2219539475209E308 : (byte)1.2748408476894178E308; + var_19++; + var_2 = (byte)((1489358000 == (var_20 | 7816908224315289600L) ? var_1 : var_1) ^ var_19); + var_20--; + } + var_20 -= 'f'; + var_20 <<= (((new Test_Class_0[(byte)var_20])[(byte)var_2]).var_2 *= false ? 'g' : 'x'); + } + + + + + static float func_0() + { + ((new Test_Class_0[(byte)7.774490796987995E307])[(byte)'v']).var_2 <<= false ^ !false ? (short)'v' : "".codePointCount(594464985, 579036736); + ((new Test_Class_0[(byte)(((new Test_Class_0[(byte)1361657519])[(byte)2.3703713E38F]).var_2-- - (short)3.5589388134844986E307)])[((true ? !true : false) ^ (!false ? true : !true) ? !false : false) ? (byte)7.047289E37F : (byte)- ((byte)2.6620062118475144E307)]).var_2 *= 3273943364390983680L; + --((new Test_Class_0[false ? (byte)(short)1.4965069E36F : (byte)286322022])[(byte)- ((byte)2.742619E38F)]).var_2; + long var_4; + { + double var_5; + } + var_4 = (byte)1.3509231E38F; + ((new Test_Class_0[(byte)'_'])[('g' | 1427123046096105472L) < var_1 >> (byte)(int)(byte)7697616672011068416L ? (byte)var_1 : (byte)1251856579]).var_2--; + switch (--((new Test_Class_0[(byte)5.0656327E37F])[(byte)'e']).var_2 != ++((new Test_Class_0[(byte)(int)1.3728667270920175E308])[(byte)+ + -1.6338179407381788E308]).var_2 | !var_3.equalsIgnoreCase("iiwwwln") ? (false ? (byte)1.8291216E38F : (byte)4.778575546584698E307) : (byte)1048254181) + { + case 99: + + } + { + byte var_6 = 13; + } + var_4 = --((new Test_Class_0[!var_3.endsWith("qaoioore") ^ false ? (byte)2.827362738392923E307 : (byte)~4890175967151316992L])[(byte)(short)var_1]).var_2; + ++((new Test_Class_0[(byte)(1.0075552E38F + (short)2083553541)])[(byte)(short)(byte)(short)1.6872205E38F]).var_2; + return ((new Test_Class_0[(byte)var_1])[(byte)+ +5760973323384750080L]).var_2 - (false ? (byte)'i' : (var_4 = (short)1.2458781351126844E308) + 2.131006E38F); + } + + public static long func_1(String arg_0, Object arg_1, final long arg_2) + { + arg_0 = false ? 
arg_0 : "fgbrpgsq"; + ((new Test_Class_0[(byte)- ((byte)']')])[false ? (byte)757239006 : (byte)1866002020]).var_2 ^= (short)(true ? (byte)(((new Test_Class_0[(byte)1416194866])[(byte)1.2309887362692395E308]).var_2 >>= (int)~ ~ ~arg_2) : (byte)5804970709284726784L); + final long var_7 = (long)(- + ((long)+ - + - -2.5396583E38F) - - +1.8770165E38F % 2472404173160781824L < --((new Test_Class_0[(byte)5.569360482341752E307])[(byte)(double)(byte)8131142397821553664L]).var_2 ^ true ? (false ? (byte)- -1.163275451591927E308 : (byte)var_1) : (false ? (byte)1843746036 : (byte)1.0209668642291047E308)); + arg_0 = (arg_0 = arg_0.substring(699480935)); + switch (((new Test_Class_0[(byte)(5415649243316856832L >> 861936806)])[true | true & !false ? (byte)(short)- -7.785169683394908E307 : (byte)+ ((byte)arg_2)]).var_2++) + { + case 42: + + case 102: + + } + arg_1 = (true || false ? false : true) ? (arg_0 = (arg_0 = "jbfaru")) : arg_0; + arg_1 = new byte[(byte)2.669957E38F]; + boolean var_8 = ! ((false ? (short)1.4259420861834744E308 : (short)7.352115508157158E307) != 1.7635658130722812E308); + arg_1 = new Object[(byte)- ((byte)(short)1.8950693E38F)]; + arg_0 = arg_0; + return (byte)1.4762239057269886E308 & 4923938844759802880L; + } + + double[][] func_2(final int arg_0) + { + var_2 >>>= (var_2 >>= var_2++); + float var_9 = 0F; + var_2 %= var_2; + do + { + ++var_2; + var_9++; + var_2++; + } while (true && (var_9 < 1 && false)); + double var_10 = 0; + final int var_11 = 11903395; + do + { + --var_2; + var_10++; + ++var_2; + } while ((false & true || false) && (var_10 < 2 && ~ ((byte)'[') == (byte)(1.1943192E38F % ('c' << var_1) % (byte)((var_2 |= var_2) + 591679039 / ~5932100696448264192L)))); + String var_12 = "jkwnk"; + var_12 = var_3; + var_12 = (var_12 = (var_12 = var_3)); + var_12 = "qrhdwx"; + var_12 = var_12; + short var_13 = (true && true) ^ true | ! (!true || 1646418779 <= (byte)var_1) ? var_2 : var_2; + return new double[(byte)var_1][true || false ^ !true ^ true ? 
(byte)arg_0 : (byte)var_10]; + } + + private final int func_3() + { + long var_14 = 's' * (~ ~6656240461354863616L * 3151744928387344384L) << ~ (((var_2 >>>= 6600935261424147456L) % 1798503219359364096L | - ~3832249967647077376L / - ((byte)~1529201870915276800L)) / var_2); + { + var_14 |= !false | (byte)1078230528 >= (byte)1.3972878565417081E308 | (true | !true & !true & !false) ? var_1 : '_'; + } + long var_15 = 7589204885152164864L; + var_2 ^= (var_1 < (byte)'r' ? 475314139 : 'Z') <= 1943074698 ? 'h' : var_1; + return 'V' * (false ? (byte)5.498204E37F : (byte)1.0137001669765466E308); + } + + protected static boolean func_4(boolean arg_0, byte arg_1, boolean arg_2) + { + arg_1++; + arg_1 &= (((((new Test_Class_0[arg_1][arg_1][arg_1])[arg_1])[arg_1])[arg_1]).var_2 |= arg_2 ? (short)~3038084056596854784L : (short)+ (arg_1 = arg_1)); + arg_0 |= true; + arg_1 %= (arg_1 |= ((new Test_Class_0[arg_1])[arg_1]).var_2--); + if (false) + { + arg_0 |= arg_2; + } + else + { + ++(((new Test_Class_0[arg_1][arg_1][arg_1])[arg_1 += var_1])[(!arg_2 | (arg_0 &= false)) ^ (arg_0 | arg_0) ? 
arg_1 : (arg_1 <<= 3192041751921364992L)][arg_1 /= arg_1]).var_2; + } + arg_1 &= +(new byte[arg_1])[arg_1]; + arg_1 <<= 3632133838014908416L; + byte[] var_16 = (new byte[arg_1][arg_1--])[arg_1]; + long var_17; + arg_1 ^= ~ arg_1--; + arg_0 ^= (arg_2 ^= 1186877294 >= ((new Test_Class_0[arg_1][arg_1])[arg_1][arg_1]).var_2) & arg_2; + return var_3.startsWith(var_3); + } + + public String toString() + { + String result = "[\n"; + result += "Test_Class_0.var_2 = "; result += Test.Printer.print(var_2); + result += "\n"; + result += "Test_Class_0.var_1 = "; result += Test.Printer.print(var_1); + result += "\n"; + result += "Test_Class_0.var_3 = "; result += Test.Printer.print(var_3); + result += ""; + result += "\n]"; + return result; + } +} + + +class Test_Class_1 { + static int var_21 = 670918363; + final float var_22 = 8.650798E37F; + static int var_23 = 1774228457; + final int var_24 = 1282736974; + final byte var_25 = !false & false | true ? (byte)7.677121016144275E307 : (byte)'r'; + static long var_26 = 2939310115459338240L; + final long var_27 = var_25 - 7555453173456381952L; + double var_28; + static String var_29; + + + public Test_Class_1() + { + var_29 = Test_Class_0.var_3; + ((false ? false || ! !true : ! (! !true & !true)) ? 
new Test_Class_0() : new Test_Class_0()).var_2++; + var_23 -= 2.963694E38F; + } + + + + + public String toString() + { + String result = "[\n"; + result += "Test_Class_1.var_21 = "; result += Test.Printer.print(var_21); + result += "\n"; + result += "Test_Class_1.var_23 = "; result += Test.Printer.print(var_23); + result += "\n"; + result += "Test_Class_1.var_24 = "; result += Test.Printer.print(var_24); + result += "\n"; + result += "Test_Class_1.var_26 = "; result += Test.Printer.print(var_26); + result += "\n"; + result += "Test_Class_1.var_27 = "; result += Test.Printer.print(var_27); + result += "\n"; + result += "Test_Class_1.var_28 = "; result += Test.Printer.print(var_28); + result += "\n"; + result += "Test_Class_1.var_22 = "; result += Test.Printer.print(var_22); + result += "\n"; + result += "Test_Class_1.var_25 = "; result += Test.Printer.print(var_25); + result += "\n"; + result += "Test_Class_1.var_29 = "; result += Test.Printer.print(var_29); + result += ""; + result += "\n]"; + return result; + } +} + + +class Test_Class_2 { + double var_30; + static byte var_31; + static char var_32; + float var_33; + double var_34 = !false & (true ? true : ! !true && false) ? 'q' - 4789231433793305600L - (var_33 = -1.0677024E38F) : 2.65473560313378E307; + final double var_35 = ~Test_Class_1.var_26 == 5.145660681364723E307 | false ? 1.4134775E38F : 1.77223030708671E308; + final int var_36 = Test_Class_1.var_23 |= Test_Class_1.var_21++; + + + public Test_Class_2() + { + Test_Class_0.var_3.replace(Test_Class_0.var_1, 'Q'); + var_32 = (var_32 = (var_32 = '_')); + Test_Class_1.var_26 |= Test_Class_0.var_1; + Test_Class_1.var_29 = (Test_Class_1.var_29 = Test_Class_0.var_3); + var_32 = Test_Class_0.var_1; + var_33 = ((new Test_Class_0[(byte)851412948463452160L])[var_31 = new Test_Class_1().var_25]).var_2; + var_33 = ! (((!false | false) & (false || !true) ? false : ! !false) | false) ? 
new Test_Class_1().var_25 : (var_31 = new Test_Class_1().var_25); + float var_38 = 0F; + var_34 /= 5336005797857974272L; + for ("ccnyq".endsWith((new String[(byte)Test_Class_1.var_26])[var_31 = (var_31 = (var_31 = (byte)4.7927775E37F))]); var_38 < 2; var_32 = '^' <= Test_Class_0.var_1 ^ true ? (var_32 = Test_Class_0.var_1) : (var_32 = 'V')) + { + var_32 = true ? 'a' : (var_32 = Test_Class_0.var_1); + var_38++; + var_33 = new Test_Class_1().var_24; + var_32 = ! (true || true ? !false : (short)3.2844383E37F < 2.1400662E38F) ? (char)1.2691096999143248E308 : (! !false ^ true ? 's' : 'q'); + } + var_32 = 'B'; + { + var_32 = Test_Class_0.var_1; + } + var_32 = Test_Class_0.var_1; + Test_Class_1.var_29 = "ov"; + Test_Class_1.var_29 = "smtolghw"; + } + + + + + + protected final static String func_0(final long[][] arg_0, byte arg_1, char arg_2) + { + arg_1 <<= (((new Test_Class_2[arg_1])[arg_1]).var_34 > new Test_Class_0().var_2 | true ? new Test_Class_0() : (new Test_Class_0[arg_1][arg_1])[new Test_Class_1().var_25][new Test_Class_1().var_25]).var_2; + Test_Class_1.var_26 >>>= (!true | !true | (new boolean[arg_1])[arg_1] || true ? (new Test_Class_1[arg_1])[arg_1] : new Test_Class_1()).var_27; + float var_37 = 0F; + arg_2 >>= ((new Test_Class_1[arg_1][arg_1])[arg_1][arg_1]).var_25; + do + { + ((new Test_Class_2[arg_1 /= 2055714081])[arg_1]).var_34 = 'l'; + var_37++; + Test_Class_1.var_29 = Test_Class_0.var_3; + } while ((false ? 
false : false) && var_37 < 7); + Test_Class_1.var_29 = Test_Class_0.var_3 + ""; + ((new Test_Class_2[new Test_Class_1().var_25][new Test_Class_1().var_25])[new Test_Class_1().var_25][arg_1 |= new Test_Class_0().var_2]).var_34 += Test_Class_0.var_1; + return "esb"; + } + + public String toString() + { + String result = "[\n"; + result += "Test_Class_2.var_32 = "; result += Test.Printer.print(var_32); + result += "\n"; + result += "Test_Class_2.var_36 = "; result += Test.Printer.print(var_36); + result += "\n"; + result += "Test_Class_2.var_30 = "; result += Test.Printer.print(var_30); + result += "\n"; + result += "Test_Class_2.var_34 = "; result += Test.Printer.print(var_34); + result += "\n"; + result += "Test_Class_2.var_35 = "; result += Test.Printer.print(var_35); + result += "\n"; + result += "Test_Class_2.var_33 = "; result += Test.Printer.print(var_33); + result += "\n"; + result += "Test_Class_2.var_31 = "; result += Test.Printer.print(var_31); + result += ""; + result += "\n]"; + return result; + } +} + + +final class Test_Class_3 extends Test_Class_2 { + byte var_39 = 23; + static boolean var_40 = false; + + + public Test_Class_3() + { + if (true) + { + Test_Class_1.var_21 |= new Test_Class_1().var_27; + } + else + { + final float var_46 = 7.9266674E37F; + ++Test_Class_1.var_26; + } + { + Test_Class_1.var_23++; + } + var_30 = ((new Test_Class_1[var_39][var_39])[var_39][var_39]).var_25; + if (var_40 &= (var_40 |= (var_40 |= var_40))) + { + Test_Class_0.var_3.indexOf(Test_Class_1.var_29 = "xfgyblg", 'X' >>> ((Test_Class_1)(new Object[var_39])[((new Test_Class_1[var_39])[var_39]).var_25]).var_27); + } + else + { + var_40 &= var_40 && var_40; + } + ((Test_Class_2)(((new boolean[var_39])[var_39++] ? (var_40 &= var_40) : (var_40 &= false)) ? (new Test_Class_2[var_39][var_39])[var_39][var_39] : (new Object[var_39][var_39])[var_39][var_39])).var_33 = (var_40 ? 
new Test_Class_1() : new Test_Class_1()).var_25; + switch (var_39) + { + case 24: + + } + var_39 += (((var_40 ^= true) ? new Test_Class_0() : new Test_Class_0()).var_2 ^= var_40 & (var_40 | false) ? var_39-- : var_36); + new Test_Class_0().var_2 %= (new Test_Class_0().var_2 += (var_39 ^= Test_Class_1.var_26)); + } + + + + + private static String func_0() + { + --Test_Class_1.var_26; + { + Test_Class_1.var_29 = var_40 ? Test_Class_0.var_3 : "rahqjhqf"; + } + if (var_40 ^= var_40) + { + Test_Class_1.var_26 >>= (Test_Class_2.var_32 = Test_Class_0.var_1) / new Test_Class_0().var_2; + } + else + { + ++Test_Class_1.var_21; + } + ++Test_Class_1.var_26; + int var_41 = 0; + ++Test_Class_1.var_26; + do + { + var_40 = (var_40 = true); + var_41++; + Test_Class_0 var_42 = new Test_Class_0(); + } while (var_41 < 1); + Test_Class_1.var_29 = "f"; + Test_Class_1 var_43; + var_43 = (var_43 = new Test_Class_1()); + Test_Class_2.var_32 = 'V'; + long var_44 = 0L; + Test_Class_1.var_23--; + while (var_40 && (var_44 < 1 && var_40)) + { + Test_Class_1.var_29 = "bsgewkmk"; + var_44++; + Test_Class_1.var_29 = "ktegattny"; + var_40 &= var_40 ^ (var_40 |= (short)4.4487427E37F < 'n') & true; + } + Test_Class_1.var_23 %= (((var_40 |= true & (var_40 &= var_40)) ^ true ? new Test_Class_0() : new Test_Class_0()).var_2 -= 1.6638270827800162E308); + float var_45; + var_32 = (Test_Class_2.var_32 = Test_Class_0.var_1); + return false ? 
"fluk" : "wt"; + } + + public String toString() + { + String result = "[\n"; + result += "Test_Class_3.var_32 = "; result += Test.Printer.print(var_32); + result += "\n"; + result += "Test_Class_3.var_36 = "; result += Test.Printer.print(var_36); + result += "\n"; + result += "Test_Class_3.var_30 = "; result += Test.Printer.print(var_30); + result += "\n"; + result += "Test_Class_3.var_34 = "; result += Test.Printer.print(var_34); + result += "\n"; + result += "Test_Class_3.var_35 = "; result += Test.Printer.print(var_35); + result += "\n"; + result += "Test_Class_3.var_33 = "; result += Test.Printer.print(var_33); + result += "\n"; + result += "Test_Class_3.var_31 = "; result += Test.Printer.print(var_31); + result += "\n"; + result += "Test_Class_3.var_39 = "; result += Test.Printer.print(var_39); + result += "\n"; + result += "Test_Class_3.var_40 = "; result += Test.Printer.print(var_40); + result += ""; + result += "\n]"; + return result; + } +} + + +class Test_Class_4 { + final float var_47 = 1.9043434E38F; + final byte var_48 = 32; + final float var_49 = 2.8176504E38F; + final char var_50 = 'r'; + final String var_51 = "uwgmnjpg"; + static int var_52; + short[] var_53; + Test_Class_1 var_54; + + + public Test_Class_4() + { + final float var_55 = (3.1554042E38F == var_50 ^ (Test_Class_3.var_40 |= true) ? (Test_Class_3.var_40 ^= Test_Class_3.var_40) ^ true : Test_Class_3.var_40) ? 
new Test_Class_0().var_2 : 2.965321E38F; + new Test_Class_0().var_2 = (new Test_Class_0().var_2 >>= +new Test_Class_1().var_25); + ((Test_Class_1.var_29 = (Test_Class_1.var_29 = (Test_Class_1.var_29 = "l"))) + "").equalsIgnoreCase(Test_Class_1.var_29 = "garnio"); + double var_56 = 0; + Test_Class_1.var_29 = var_51; + while (var_56 < 1) + { + ((Test_Class_3)(Test_Class_2)(new Object[var_48])[var_48]).var_33 = ++Test_Class_1.var_26; + var_56++; + Test_Class_1.var_29 = (Test_Class_1.var_29 = "fvyjrih"); + float[] var_57; + } + { + ((new Test_Class_2[var_48])[((new Test_Class_3[var_48][var_48])[var_48][var_48]).var_39]).var_34 *= 2.2119221943262553E307; + Test_Class_2.var_32 = true ? 'q' : 't'; + ((new Test_Class_3[--((Test_Class_3)new Test_Class_2()).var_39])[var_48]).var_33 = new Test_Class_0().var_2; + int var_58 = 'i' >> (var_48 << Test_Class_0.var_1); + } + Test_Class_3.var_40 &= true && var_51.equalsIgnoreCase(var_51) || new Test_Class_0().var_2 < --((new Test_Class_3[var_48])[var_48]).var_39; + ((Test_Class_3)(Test_Class_2)(new Object[var_48][var_48])[var_48][var_48]).var_34 += Test_Class_1.var_26--; + var_54 = new Test_Class_1(); + Test_Class_3.var_40 |= (long)(!true ^ var_47 > ((Test_Class_2)(new Object[var_48])[var_48]).var_34 ? (Test_Class_2.var_31 = (Test_Class_3.var_31 = (Test_Class_3.var_31 = var_48))) : (var_54 = new Test_Class_1()).var_25) <= var_48; + (Test_Class_3.var_40 ? (true ? new Test_Class_0() : new Test_Class_0()) : new Test_Class_0()).var_2 &= var_48; + (Test_Class_3.var_40 ? 
(Test_Class_3)new Test_Class_2() : (new Test_Class_3[var_48][var_48])[var_48][var_48]).var_34 += Test_Class_1.var_21; + Test_Class_3 var_59; + Test_Class_2.var_32 = 'H'; + --Test_Class_1.var_26; + } + + + + + + public String toString() + { + String result = "[\n"; + result += "Test_Class_4.var_50 = "; result += Test.Printer.print(var_50); + result += "\n"; + result += "Test_Class_4.var_52 = "; result += Test.Printer.print(var_52); + result += "\n"; + result += "Test_Class_4.var_53 = "; result += Test.Printer.print(var_53); + result += "\n"; + result += "Test_Class_4.var_47 = "; result += Test.Printer.print(var_47); + result += "\n"; + result += "Test_Class_4.var_49 = "; result += Test.Printer.print(var_49); + result += "\n"; + result += "Test_Class_4.var_48 = "; result += Test.Printer.print(var_48); + result += "\n"; + result += "Test_Class_4.var_51 = "; result += Test.Printer.print(var_51); + result += "\n"; + result += "Test_Class_4.var_54 = "; result += Test.Printer.print(var_54); + result += ""; + result += "\n]"; + return result; + } +} + + +class Test_Class_5 extends Test_Class_4 { + char var_60 = '_'; + final byte var_61 = 101; + + + public Test_Class_5() + { + Test_Class_0.var_3.indexOf(Test_Class_1.var_21, (Test_Class_3.var_40 |= Test_Class_3.var_40) ? new Test_Class_1().var_24 : 'i'); + } + + + + + final char func_0(Test_Class_1 arg_0, final Test_Class_1 arg_1) + { + long var_62 = 0L; + "aoal".toLowerCase(); + for (byte var_63 = arg_0.var_25; var_62 < 1 && "ji".startsWith("dikrs".endsWith("va") ? (Test_Class_1.var_29 = "mvp") : Test_Class_0.var_3, Test_Class_1.var_23); ((Test_Class_2)(new Object[arg_0.var_25])[var_63]).var_34 -= new Test_Class_2().var_36) + { + ((Test_Class_3.var_40 ? false : Test_Class_3.var_40) ? 
(Test_Class_0)(new Object[arg_1.var_25][arg_1.var_25])[arg_1.var_25][var_63] : (Test_Class_0)(new Object[var_48][var_48])[var_63][var_63]).var_2 += true ^ Test_Class_3.var_40 ^ (((new Test_Class_3[var_63][var_63])[var_63][var_61]).var_35 != 2.1423512E38F | ! !false) ? var_49 + ~var_48 : 3.1549515E38F; + var_62++; + (!false & ((Test_Class_3.var_40 |= (Test_Class_3.var_40 ^= true)) & true) ? (Test_Class_2)(new Object[var_63])[var_63] : (new Test_Class_2[var_63][var_61])[var_63][arg_0.var_25]).var_33 = (var_60 *= (var_60 *= ((new Test_Class_3[var_48][var_61])[var_61][var_63]).var_35)); + float var_64; + } + Test_Class_1.var_29 = "xyenjknu"; + Test_Class_3.var_40 ^= (Test_Class_3.var_40 = !false & true) ? Test_Class_3.var_40 : Test_Class_3.var_40; + ((new Test_Class_2[var_48][arg_1.var_25])[arg_0.var_25][var_48]).var_33 = var_61; + Test_Class_1.var_21 |= --(((new Test_Class_3[Test_Class_3.var_31 = arg_0.var_25][var_61])[var_61])[(((new Test_Class_3[var_48][var_61])[var_48])[((Test_Class_3)(new Test_Class_2[var_48][arg_0.var_25])[var_61][var_48]).var_39]).var_39 >>>= var_60]).var_39; + var_51.compareToIgnoreCase("hgcaybk"); + Test_Class_0 var_65 = (Test_Class_1.var_29 = "t").codePointBefore(1602805584) >= (float)((new Test_Class_3[var_48][var_61])[var_48][Test_Class_2.var_31 = arg_1.var_25]).var_39 - 7.256386549028811E307 ? new Test_Class_0() : ((new Test_Class_0[arg_0.var_25][var_48][var_48])[arg_0.var_25])[arg_0.var_25][Test_Class_2.var_31 = arg_1.var_25]; + return 'U'; + } + + protected static Test_Class_1 func_1(final short arg_0, long arg_1) + { + --new Test_Class_0().var_2; + "xb".length(); + if ((Test_Class_3.var_40 ^= (Test_Class_2.var_32 = Test_Class_0.var_1) == 1.2609472E38F) ? 
(Test_Class_3.var_40 = (Test_Class_3.var_40 = Test_Class_3.var_40)) : true) + { + --Test_Class_1.var_26; + } + else + { + "ybbe".substring(209378562, var_52 = (Test_Class_1.var_21 |= (Test_Class_2.var_31 = (byte)'a'))); + } + Test_Class_3.var_40 &= (Test_Class_3.var_40 &= true) && (Test_Class_1.var_29 = (Test_Class_1.var_29 = Test_Class_0.var_3)).endsWith(Test_Class_0.var_3); + (false ? new Test_Class_0() : new Test_Class_0()).var_2 >>= new Test_Class_1().var_25; + return 9.430116214455637E307 <= (true ? (Test_Class_3)new Test_Class_2() : (Test_Class_3)new Test_Class_2()).var_34 ? new Test_Class_1() : new Test_Class_1(); + } + + public String toString() + { + String result = "[\n"; + result += "Test_Class_5.var_50 = "; result += Test.Printer.print(var_50); + result += "\n"; + result += "Test_Class_5.var_60 = "; result += Test.Printer.print(var_60); + result += "\n"; + result += "Test_Class_5.var_52 = "; result += Test.Printer.print(var_52); + result += "\n"; + result += "Test_Class_5.var_53 = "; result += Test.Printer.print(var_53); + result += "\n"; + result += "Test_Class_5.var_47 = "; result += Test.Printer.print(var_47); + result += "\n"; + result += "Test_Class_5.var_49 = "; result += Test.Printer.print(var_49); + result += "\n"; + result += "Test_Class_5.var_48 = "; result += Test.Printer.print(var_48); + result += "\n"; + result += "Test_Class_5.var_61 = "; result += Test.Printer.print(var_61); + result += "\n"; + result += "Test_Class_5.var_51 = "; result += Test.Printer.print(var_51); + result += "\n"; + result += "Test_Class_5.var_54 = "; result += Test.Printer.print(var_54); + result += ""; + result += "\n]"; + return result; + } +} + +public class Test { + Test_Class_4 var_66; + Test_Class_3 var_67; + Test_Class_5 var_68; + Test_Class_2[] var_69; + long var_70 = ++Test_Class_1.var_26 & Test_Class_1.var_21++; + final static double var_71 = 3.566207721984698E307; + static boolean var_72; + final static String var_73 = "nmxx"; + + + private final char 
func_0(Test_Class_3 arg_0, final boolean[] arg_1) + { + ((Test_Class_5)(arg_1[arg_0.var_39++] ? new Test_Class_2[(var_67 = arg_0).var_39] : (new Object[arg_0.var_39])[arg_0.var_39])).var_54 = new Test_Class_1(); + new Test_Class_0(); + (((new Test[arg_0.var_39][arg_0.var_39][arg_0.var_39])[++arg_0.var_39])[arg_0.var_39][arg_0.var_39]).var_66 = (var_68 = (new Test_Class_5[arg_0.var_39][arg_0.var_39])[arg_0.var_39][arg_0.var_39]); + ((new Test[arg_0.var_39])[(arg_0 = (var_67 = (arg_0 = arg_0))).var_39]).var_70 = ((new long[arg_0.var_39][arg_0.var_39])[arg_0.var_39])[arg_0.var_39 = ((var_67 = (arg_0 = arg_0)).var_39 -= new Test_Class_0().var_2)] << ']'; + arg_0 = (new Test_Class_0().var_2 *= ((new Test_Class_2[arg_0.var_39])[arg_0.var_39]).var_34) >= arg_0.var_39 ? (var_67 = arg_0) : (arg_0 = arg_0); + Test_Class_1.var_26--; + Test_Class_4 var_74 = var_66 = (Test_Class_5)(new Test_Class_4[arg_0.var_39])[arg_0.var_39]; + Test_Class_3.var_40 ^= ! (Test_Class_3.var_40 &= (Test_Class_3.var_40 ^= Test_Class_3.var_40) | (Test_Class_3.var_40 &= Test_Class_3.var_40)); + var_72 = (arg_1[(var_67 = arg_0).var_39] | !Test_Class_3.var_40 & !Test_Class_3.var_40 ? 
(Test_Class_1.var_29 = var_73).endsWith((var_66 = var_74).var_51) && (Test_Class_3.var_40 ^= Test_Class_3.var_40) : (Test_Class_3.var_40 ^= Test_Class_3.var_40)) ^ !Test_Class_3.var_40; + Test_Class_3.var_40 &= (Test_Class_3.var_40 &= (Test_Class_3.var_40 = Test_Class_3.var_40) & Test_Class_3.var_40 ^ Test_Class_3.var_40); + arg_0.var_39 -= --var_70; + int var_75; + double var_76; + { + boolean var_77; + var_70 ^= new Test_Class_0().var_2++; + } + Test_Class_1.var_26 /= Test_Class_0.var_3.lastIndexOf(~new Test_Class_1().var_25, Test_Class_1.var_21); + Test_Class_1.var_26 |= Test_Class_1.var_21; + (((new Test_Class_3[arg_0.var_39][arg_0.var_39][var_74.var_48])[arg_0.var_39])[arg_0.var_39][arg_0.var_39]).var_34 %= (var_67 = arg_0).var_39; + Test_Class_1.var_21 &= arg_0.var_39; + var_68 = (var_68 = (Test_Class_5)var_74); + var_72 = false; + return new Test_Class_5().var_60 ^= 'v'; + } + + public static Test_Class_2 func_1(byte[][] arg_0, final int arg_1, Test_Class_1 arg_2, final Test_Class_1 arg_3) + { + ((new Test[arg_3.var_25])[((Test_Class_3)new Test_Class_2()).var_39 *= --Test_Class_1.var_26]).var_67 = (((new Test[arg_2.var_25])[(((new Test[arg_2.var_25][arg_2.var_25])[arg_3.var_25][arg_3.var_25]).var_67 = (new Test_Class_3[arg_2.var_25][arg_2.var_25])[arg_2.var_25][arg_3.var_25]).var_39 %= Test_Class_1.var_26]).var_67 = (((new Test[arg_3.var_25][arg_2.var_25])[arg_3.var_25][arg_2.var_25]).var_67 = (((new Test[arg_3.var_25])[arg_2.var_25]).var_67 = (Test_Class_3)new Test_Class_2()))); + { + --Test_Class_1.var_26; + } + if (!Test_Class_3.var_40) + { + "jfqj".replaceAll("ac", Test_Class_0.var_3); + } + else + { + arg_2 = (((new Test_Class_5[arg_3.var_25][arg_2.var_25])[((new Test_Class_3[arg_2.var_25])[arg_3.var_25]).var_39][((Test_Class_3)(new Test_Class_2[arg_2.var_25])[arg_3.var_25]).var_39]).var_54 = arg_3); + new Test_Class_1(); + } + if (true) + { + Test_Class_0.func_0(); + } + else + { + Test_Class_1.var_23 /= Test_Class_1.var_26; + } + 
Test_Class_1.var_26--; + Test_Class_1.var_23 ^= Test_Class_0.var_1; + return new Test_Class_2(); + } + + public static String execute() + { + try { + Test t = new Test(); + try { t.test(); } + catch(Throwable e) { } + try { return t.toString(); } + catch (Throwable e) { return "Error during result conversion to String"; } + } catch (Throwable e) { return "Error during test execution"; } + } + + public static void main(String[] args) + { + try { + Test t = new Test(); + try { t.test(); } + catch(Throwable e) { } + try { System.out.println(t); } + catch(Throwable e) { } + } catch (Throwable e) { } + } + + private void test() + { + double var_78 = 0; + --Test_Class_1.var_26; + long var_79; + for (var_70 /= 8.089457748637276E307; var_78 < 162 && !true & (true ? Test_Class_3.var_40 : (Test_Class_3.var_40 ^= Test_Class_3.var_40)); Test_Class_1.var_26 -= 1.2513521E38F) + { + short var_80 = 10682; + Test_Class_1.var_21--; + var_78++; + var_72 = (Test_Class_3.var_40 |= (Test_Class_3.var_40 ^= false)); + ++Test_Class_1.var_26; + } + Test_Class_2 var_81; + new Test_Class_4(); + int var_82 = 0; + ++Test_Class_1.var_23; + do + { + --Test_Class_1.var_26; + var_82++; + ++Test_Class_1.var_21; + } while ((Test_Class_3.var_40 ^= false & false) && var_82 < 256); + Test_Class_1.var_23 |= (var_68 = (var_68 = (Test_Class_5)(var_66 = new Test_Class_4()))).var_48 + (Test_Class_1.var_26 >>> new Test_Class_0().var_2); + (true ? 
new Test_Class_5() : (var_68 = (var_68 = new Test_Class_5()))).var_60 *= Test_Class_0.var_1; + } + public String toString() + { + String result = "[\n"; + result += "Test.var_69 = "; result += Printer.print(var_69); + result += "\n"; + result += "Test.var_70 = "; result += Printer.print(var_70); + result += "\n"; + result += "Test.var_71 = "; result += Printer.print(var_71); + result += "\n"; + result += "Test.var_73 = "; result += Printer.print(var_73); + result += "\n"; + result += "Test.var_68 = "; result += Printer.print(var_68); + result += "\n"; + result += "Test.var_66 = "; result += Printer.print(var_66); + result += "\n"; + result += "Test.var_72 = "; result += Printer.print(var_72); + result += "\n"; + result += "Test.var_67 = "; result += Printer.print(var_67); + result += ""; + result += "\n]"; + return result; + } + static class Printer + { + public static String print(boolean arg) { return String.valueOf(arg); } + public static String print(byte arg) { return String.valueOf(arg); } + public static String print(short arg) { return String.valueOf(arg); } + public static String print(char arg) { return String.valueOf((int)arg); } + public static String print(int arg) { return String.valueOf(arg); } + public static String print(long arg) { return String.valueOf(arg); } + public static String print(float arg) { return String.valueOf(arg); } + public static String print(double arg) { return String.valueOf(arg); } + + + public static String print(Object arg) + { + return print_r(new java.util.Stack(), arg); + } + + private static String print_r(java.util.Stack visitedObjects, Object arg) + { + String result = ""; + if (arg == null) + result += "null"; + else + if (arg.getClass().isArray()) + { + for (int i = 0; i < visitedObjects.size(); i++) + if (visitedObjects.elementAt(i) == arg) return ""; + + visitedObjects.push(arg); + + final String delimiter = ", "; + result += "["; + + if (arg instanceof Object[]) + { + Object[] array = (Object[]) arg; + for (int i 
= 0; i < array.length; i++) + { + result += print_r(visitedObjects, array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof boolean[]) + { + boolean[] array = (boolean[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof byte[]) + { + byte[] array = (byte[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof short[]) + { + short[] array = (short[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof char[]) + { + char[] array = (char[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof int[]) + { + int[] array = (int[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof long[]) + { + long[] array = (long[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof float[]) + { + float[] array = (float[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + else + if (arg instanceof double[]) + { + double[] array = (double[]) arg; + for (int i = 0; i < array.length; i++) + { + result += print(array[i]); + if (i < array.length - 1) result += delimiter; + } + } + + result += "]"; + visitedObjects.pop(); + + } else + { + result += arg.toString(); + } + + return result; + } + } +} + diff -r 5d4dd2f5f6a1 -r 7a485bc4da16 test/compiler/6823453/Test.java --- /dev/null Thu Jan 01 00:00:00 1970 
+0000 +++ b/test/compiler/6823453/Test.java Thu May 07 10:30:17 2009 -0700 @@ -0,0 +1,96 @@ +/* + * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +/* + * @test + * @bug 6823453 + * @summary DeoptimizeALot causes fastdebug server jvm to fail with assert(false,"unscheduable graph") + * @run main/othervm -Xcomp -XX:CompileOnly=Test -XX:+DeoptimizeALot Test + */ + +public class Test { + + static long vara_1 = 1L; + + static void testa() { + short var_2 = (byte) 1.0E10; + + for ( Object temp = new byte[(byte)1.0E10]; true ; + var_2 = "1".equals("0") ? ((byte) vara_1) : 1 ) {} + } + + static void testb() { + long var_1 = -1L; + + short var_2 = (byte) 1.0E10; + + for ( Object temp = new byte[(byte)1.0E10]; true ; + var_2 = "1".equals("0") ? ((byte) var_1) : 1 ) {} + } + + static void testc() { + long var_1 = -1L; + if (vara_1 > 0) var_1 = 1L; + + int var_2 = (byte)var_1 - 128; + + for ( Object temp = new byte[var_2]; true ; + var_2 = "1".equals("0") ? 
2 : 1 ) {} + } + + static void testd() { + long var_1 = 0L; + + int var_2 = (byte)var_1 + 1; + for (int i=0; i<2 ; i++) var_2 = var_2 - 1; + + for ( Object temp = new byte[var_2]; true ; + var_2 = "1".equals("0") ? 2 : 1 ) {} + } + + public static void main(String[] args) throws Exception { + int nex = 0; + + try { + testa(); + } + catch (java.lang.NegativeArraySizeException ex) { nex++; } + try { + testb(); + } + catch (java.lang.NegativeArraySizeException ex) { nex++; } + try { + testc(); + } + catch (java.lang.NegativeArraySizeException ex) { nex++; } + try { + testd(); + } + catch (java.lang.NegativeArraySizeException ex) { nex++; } + + if (nex != 4) + System.exit(97); + } +} +