# HG changeset patch
# User jprovino
# Date 1373991608 14400
# Node ID 90d6c221d4e5f200e01740673cea7d162a2fc06a
# Parent  16b10327b00d40e2eb1bec58800c94078ba61fc8
# Parent  a74ec8831c7ba32933dda1d54ff5d2d8ce171433
Merge

diff -r 16b10327b00d -r 90d6c221d4e5 .hgtags
--- a/.hgtags Tue Jul 16 10:55:48 2013 -0400
+++ b/.hgtags Tue Jul 16 12:20:08 2013 -0400
@@ -349,3 +349,13 @@
 573d86d412cd9d3df7912194c1a540be50e9544e jdk8-b93
 b786c04b7be15194febe88dc1f0c9443e737a84b hs25-b35
 3c78a14da19d26d6937af5f98b97e2a21c653b04 hs25-b36
+1beed1f6f9edefe47ba8ed1355fbd3e7606b8288 jdk8-b94
+69689078dff8b21e6df30870464f5d736eebdf72 hs25-b37
+5d65c078cd0ac455aa5e58a09844c7acce54b487 jdk8-b95
+2cc5a9d1ba66dfdff578918b393c727bd9450210 hs25-b38
+e6a4b8c71fa6f225bd989a34de2d0d0a656a8be8 jdk8-b96
+2b9380b0bf0b649f40704735773e8956c2d88ba0 hs25-b39
+d197d377ab2e016d024e8c86cb06a57bd7eae590 jdk8-b97
+c9dd82da51ed34a28f7c6b3245163ee962e94572 hs25-b40
+30b5b75c42ac5174b640fbef8aa87527668e8400 jdk8-b98
+2b9946e10587f74ef75ae8145bea484df4a2738b hs25-b41
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/CLHSDB.java
--- a/agent/src/share/classes/sun/jvm/hotspot/CLHSDB.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/CLHSDB.java Tue Jul 16 12:20:08 2013 -0400
@@ -31,13 +31,19 @@
 import java.util.*;
 
 public class CLHSDB {
+
+    public CLHSDB(JVMDebugger d) {
+        jvmDebugger = d;
+    }
+
     public static void main(String[] args) {
         new CLHSDB(args).run();
     }
 
-    private void run() {
-        // At this point, if pidText != null we are supposed to attach to it.
-        // Else, if execPath != null, it is the path of a jdk/bin/java
+    public void run() {
+        // If jvmDebugger is already set, we have been given a JVMDebugger.
+        // Otherwise, if pidText != null we are supposed to attach to it.
+        // Finally, if execPath != null, it is the path of a jdk/bin/java
         // and coreFilename is the pathname of a core file we are
         // supposed to attach to.
 
@@ -49,7 +55,9 @@
             }
         });
 
-        if (pidText != null) {
+        if (jvmDebugger != null) {
+            attachDebugger(jvmDebugger);
+        } else if (pidText != null) {
             attachDebugger(pidText);
         } else if (execPath != null) {
             attachDebugger(execPath, coreFilename);
@@ -96,6 +104,7 @@
     // Internals only below this point
     //
     private HotSpotAgent agent;
+    private JVMDebugger jvmDebugger;
     private boolean attached;
     // These had to be made data members because they are referenced in inner classes.
     private String pidText;
@@ -120,7 +129,7 @@
         case (1):
             if (args[0].equals("help") || args[0].equals("-help")) {
                 doUsage();
-                System.exit(0);
+                return;
             }
             // If all numbers, it is a PID to attach to
             // Else, it is a pathname to a .../bin/java for a core file.
@@ -142,10 +151,15 @@
         default:
             System.out.println("HSDB Error: Too many options specified");
             doUsage();
-            System.exit(1);
+            return;
         }
     }
 
+    private void attachDebugger(JVMDebugger d) {
+        agent.attach(d);
+        attached = true;
+    }
+
     /** NOTE we are in a different thread here than either the main
         thread or the Swing/AWT event handler thread, so we must be very
         careful when creating or removing widgets */
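The CLHSDB changes above add a `CLHSDB(JVMDebugger)` constructor and make `run()` public, so the command-line debugger can be driven against a debugger that is already attached instead of parsing a PID or core path itself. A minimal sketch of how an embedding tool might use this; `EmbeddedCLHSDB` is a hypothetical helper, and obtaining an attached `JVMDebugger` is outside the sketch:

```java
import sun.jvm.hotspot.CLHSDB;
import sun.jvm.hotspot.debugger.JVMDebugger;

public class EmbeddedCLHSDB {
    // 'd' must already be attached to a process or core file.
    public static void runConsole(JVMDebugger d) {
        new CLHSDB(d).run();   // run() is public after this change
    }
}
```

When the console's quit command runs, control now returns to the caller rather than terminating the whole process.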
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java
--- a/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java Tue Jul 16 12:20:08 2013 -0400
@@ -101,6 +101,9 @@
 import sun.jvm.hotspot.utilities.soql.JSJavaScriptEngine;
 
 public class CommandProcessor {
+
+    volatile boolean quit;
+
     public abstract static class DebuggerInterface {
         public abstract HotSpotAgent getAgent();
         public abstract boolean isAttached();
@@ -1135,7 +1138,7 @@
                     usage();
                 } else {
                     debugger.detach();
-                    System.exit(0);
+                    quit = true;
                 }
             }
         },
@@ -1714,7 +1717,7 @@
     }
 
     protected void quit() {
        debugger.detach();
-       System.exit(0);
+       quit = true;
    }
 
    protected BufferedReader getInputReader() {
       return in;
@@ -1781,7 +1784,7 @@
 
    public void run(boolean prompt) {
       // Process interactive commands.
-      while (true) {
+      while (!quit) {
         if (prompt) printPrompt();
         String ln = null;
         try {
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/HSDB.java
--- a/agent/src/share/classes/sun/jvm/hotspot/HSDB.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/HSDB.java Tue Jul 16 12:20:08 2013 -0400
@@ -59,8 +59,11 @@
     // Internals only below this point
     //
     private HotSpotAgent agent;
+    private JVMDebugger jvmDebugger;
     private JDesktopPane desktop;
     private boolean attached;
+    private boolean argError;
+    private JFrame frame;
     /** List */
     private java.util.List attachMenuItems;
     /** List */
@@ -85,6 +88,11 @@
         System.out.println(" path-to-corefile: Debug this corefile. The default is 'core'");
         System.out.println(" If no arguments are specified, you can select what to do from the GUI.\n");
         HotSpotAgent.showUsage();
+        argError = true;
+    }
+
+    public HSDB(JVMDebugger d) {
+        jvmDebugger = d;
     }
 
     private HSDB(String[] args) {
@@ -95,7 +103,6 @@
         case (1):
             if (args[0].equals("help") || args[0].equals("-help")) {
                 doUsage();
-                System.exit(0);
             }
             // If all numbers, it is a PID to attach to
             // Else, it is a pathname to a .../bin/java for a core file.
@@ -117,24 +124,29 @@
         default:
             System.out.println("HSDB Error: Too many options specified");
             doUsage();
-            System.exit(1);
         }
     }
 
-    private void run() {
-        // At this point, if pidText != null we are supposed to attach to it.
-        // Else, if execPath != null, it is the path of a jdk/bin/java
-        // and coreFilename is the pathname of a core file we are
-        // supposed to attach to.
+    // close this tool without calling System.exit
+    protected void closeUI() {
+        workerThread.shutdown();
+        frame.dispose();
+    }
+
+    public void run() {
+        // Don't start the UI if there were bad arguments.
+        if (argError) {
+            return;
+        }
 
         agent = new HotSpotAgent();
         workerThread = new WorkerThread();
         attachMenuItems = new java.util.ArrayList();
         detachMenuItems = new java.util.ArrayList();
 
-        JFrame frame = new JFrame("HSDB - HotSpot Debugger");
+        frame = new JFrame("HSDB - HotSpot Debugger");
         frame.setSize(800, 600);
-        frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
+        frame.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE);
 
         JMenuBar menuBar = new JMenuBar();
 
@@ -197,7 +209,7 @@
         item = createMenuItem("Exit",
                               new ActionListener() {
                                       public void actionPerformed(ActionEvent e) {
-                                          System.exit(0);
+                                          closeUI();
                                       }
                                   });
         item.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_X, ActionEvent.ALT_MASK));
@@ -406,7 +418,15 @@
             }
         });
 
-        if (pidText != null) {
+        // If jvmDebugger is already set, we have been given a JVMDebugger.
+        // Otherwise, if pidText != null we are supposed to attach to it.
+        // Finally, if execPath != null, it is the path of a jdk/bin/java
+        // and coreFilename is the pathname of a core file we are
+        // supposed to attach to.
+
+        if (jvmDebugger != null) {
+            attach(jvmDebugger);
+        } else if (pidText != null) {
             attach(pidText);
         } else if (execPath != null) {
             attach(execPath, coreFilename);
@@ -1113,6 +1133,12 @@
         });
     }
 
+    // Attach to existing JVMDebugger, which should be already attached to a core/process.
+    private void attach(JVMDebugger d) {
+        attached = true;
+        showThreadsDialog();
+    }
+
    /** NOTE we are in a different thread here than either the main
        thread or the Swing/AWT event handler thread, so we must be very
        careful when creating or removing widgets */
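HSDB gets the same constructor, and both UIs now shut down without `System.exit`: the GUI disposes its frame through `closeUI()` (hence `DISPOSE_ON_CLOSE` instead of `EXIT_ON_CLOSE`), and `CommandProcessor`'s read loop terminates when the volatile `quit` flag is set by the quit/detach path. A sketch of embedding the GUI; `EmbeddedHSDB` is hypothetical and `d` is assumed to be an attached `JVMDebugger`:

```java
import sun.jvm.hotspot.HSDB;
import sun.jvm.hotspot.debugger.JVMDebugger;

public class EmbeddedHSDB {
    public static void showGui(JVMDebugger d) {
        // DISPOSE_ON_CLOSE plus closeUI() return control to the caller
        // when the window closes, instead of terminating the JVM.
        new HSDB(d).run();
    }
}
```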
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java
--- a/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java Tue Jul 16 12:20:08 2013 -0400
@@ -25,6 +25,8 @@
 package sun.jvm.hotspot;
 
 import java.rmi.RemoteException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
 
 import sun.jvm.hotspot.debugger.Debugger;
 import sun.jvm.hotspot.debugger.DebuggerException;
@@ -63,7 +65,6 @@
 
     private String os;
     private String cpu;
-    private String fileSep;
 
     // The system can work in several ways:
     //  - Attaching to local process
@@ -155,6 +156,14 @@
         go();
     }
 
+    /** This uses a JVMDebugger that is already attached to the core or process */
+    public synchronized void attach(JVMDebugger d)
+    throws DebuggerException {
+        debugger = d;
+        isServer = false;
+        go();
+    }
+
     /** This attaches to a "debug server" on a remote machine; this
         remote server has already attached to a process or opened a
         core file and is waiting for RMI calls on the Debugger object to
@@ -303,28 +312,37 @@
         //    server, but not client attaching to server)
         //
 
-        try {
-            os = PlatformInfo.getOS();
-            cpu = PlatformInfo.getCPU();
-        }
-        catch (UnsupportedPlatformException e) {
-            throw new DebuggerException(e);
-        }
-        fileSep = System.getProperty("file.separator");
+        // Handle existing or alternate JVMDebugger:
+        // these will set os, cpu independently of our PlatformInfo implementation.
+        String alternateDebugger = System.getProperty("sa.altDebugger");
+        if (debugger != null) {
+            setupDebuggerExisting();
+
+        } else if (alternateDebugger != null) {
+            setupDebuggerAlternate(alternateDebugger);
 
-        if (os.equals("solaris")) {
-            setupDebuggerSolaris();
-        } else if (os.equals("win32")) {
-            setupDebuggerWin32();
-        } else if (os.equals("linux")) {
-            setupDebuggerLinux();
-        } else if (os.equals("bsd")) {
-            setupDebuggerBsd();
-        } else if (os.equals("darwin")) {
-            setupDebuggerDarwin();
        } else {
-            // Add support for more operating systems here
-            throw new DebuggerException("Operating system " + os + " not yet supported");
+            // Otherwise, os, cpu are those of our current platform:
+            try {
+                os = PlatformInfo.getOS();
+                cpu = PlatformInfo.getCPU();
+            } catch (UnsupportedPlatformException e) {
+                throw new DebuggerException(e);
+            }
+            if (os.equals("solaris")) {
+                setupDebuggerSolaris();
+            } else if (os.equals("win32")) {
+                setupDebuggerWin32();
+            } else if (os.equals("linux")) {
+                setupDebuggerLinux();
+            } else if (os.equals("bsd")) {
+                setupDebuggerBsd();
+            } else if (os.equals("darwin")) {
+                setupDebuggerDarwin();
+            } else {
+                // Add support for more operating systems here
+                throw new DebuggerException("Operating system " + os + " not yet supported");
+            }
        }
 
        if (isServer) {
@@ -423,6 +441,41 @@
     // OS-specific debugger setup/connect routines
     //
 
+    // Use the existing JVMDebugger, as passed to our constructor.
+    // Retrieve os and cpu from that debugger, not the current platform.
+    private void setupDebuggerExisting() {
+
+        os = debugger.getOS();
+        cpu = debugger.getCPU();
+        setupJVMLibNames(os);
+        machDesc = debugger.getMachineDescription();
+    }
+
+    // Given a classname, load an alternate implementation of JVMDebugger.
+    private void setupDebuggerAlternate(String alternateName) {
+
+        try {
+            Class c = Class.forName(alternateName);
+            Constructor cons = c.getConstructor();
+            debugger = (JVMDebugger) cons.newInstance();
+            attachDebugger();
+            setupDebuggerExisting();
+
+        } catch (ClassNotFoundException cnfe) {
+            throw new DebuggerException("Cannot find alternate SA Debugger: '" + alternateName + "'");
+        } catch (NoSuchMethodException nsme) {
+            throw new DebuggerException("Alternate SA Debugger: '" + alternateName + "' has missing constructor.");
+        } catch (InstantiationException ie) {
+            throw new DebuggerException("Alternate SA Debugger: '" + alternateName + "' fails to initialise: ", ie);
+        } catch (IllegalAccessException iae) {
+            throw new DebuggerException("Alternate SA Debugger: '" + alternateName + "' fails to initialise: ", iae);
+        } catch (InvocationTargetException iae) {
+            throw new DebuggerException("Alternate SA Debugger: '" + alternateName + "' fails to initialise: ", iae);
+        }
+
+        System.err.println("Loaded alternate HotSpot SA Debugger: " + alternateName);
+    }
+
     //
     // Solaris
     //
@@ -466,6 +519,11 @@
         debugger = new RemoteDebuggerClient(remote);
         machDesc = ((RemoteDebuggerClient) debugger).getMachineDescription();
         os = debugger.getOS();
+        setupJVMLibNames(os);
+        cpu = debugger.getCPU();
+    }
+
+    private void setupJVMLibNames(String os) {
         if (os.equals("solaris")) {
             setupJVMLibNamesSolaris();
         } else if (os.equals("win32")) {
@@ -479,8 +537,6 @@
         } else {
             throw new RuntimeException("Unknown OS type");
         }
-
-        cpu = debugger.getCPU();
     }
 
     private void setupJVMLibNamesSolaris() {
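HotSpotAgent now accepts a ready-made debugger two ways: directly through the new `attach(JVMDebugger)` overload, or by naming a class in the `sa.altDebugger` system property, which `setupDebuggerAlternate()` instantiates reflectively through its no-arg constructor. A sketch of the direct route; `AgentHandOff` is a hypothetical helper and `d` is assumed already attached. The property route would be enabled with something like `-Dsa.altDebugger=com.example.MyDebugger`, where `MyDebugger` is a hypothetical `JVMDebugger` implementation:

```java
import sun.jvm.hotspot.HotSpotAgent;
import sun.jvm.hotspot.debugger.JVMDebugger;

public class AgentHandOff {
    public static HotSpotAgent wrap(JVMDebugger d) {
        HotSpotAgent agent = new HotSpotAgent();
        agent.attach(d);   // os, cpu and machine description come from d
        return agent;
    }
}
```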
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxAddress.java
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxAddress.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxAddress.java Tue Jul 16 12:20:08 2013 -0400
@@ -26,11 +26,11 @@
 
 import sun.jvm.hotspot.debugger.*;
 
-class LinuxAddress implements Address {
+public class LinuxAddress implements Address {
     protected LinuxDebugger debugger;
     protected long addr;
 
-    LinuxAddress(LinuxDebugger debugger, long addr) {
+    public LinuxAddress(LinuxDebugger debugger, long addr) {
         this.debugger = debugger;
         this.addr = addr;
     }
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxOopHandle.java
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxOopHandle.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxOopHandle.java Tue Jul 16 12:20:08 2013 -0400
@@ -26,8 +26,8 @@
 
 import sun.jvm.hotspot.debugger.*;
 
-class LinuxOopHandle extends LinuxAddress implements OopHandle {
-    LinuxOopHandle(LinuxDebugger debugger, long addr) {
+public class LinuxOopHandle extends LinuxAddress implements OopHandle {
+    public LinuxOopHandle(LinuxDebugger debugger, long addr) {
         super(debugger, addr);
     }
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/oops/ArrayKlass.java
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ArrayKlass.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ArrayKlass.java Tue Jul 16 12:20:08 2013 -0400
@@ -49,7 +49,6 @@
     higherDimension = new MetadataField(type.getAddressField("_higher_dimension"), 0);
     lowerDimension = new MetadataField(type.getAddressField("_lower_dimension"), 0);
     vtableLen = new CIntField(type.getCIntegerField("_vtable_len"), 0);
-    allocSize = new CIntField(type.getCIntegerField("_alloc_size"), 0);
     componentMirror = new OopField(type.getOopField("_component_mirror"), 0);
     javaLangCloneableName = null;
     javaLangObjectName = null;
@@ -64,7 +63,6 @@
   private static MetadataField higherDimension;
   private static MetadataField lowerDimension;
   private static CIntField vtableLen;
-  private static CIntField allocSize;
   private static OopField componentMirror;
 
   public Klass getJavaSuper() {
@@ -76,7 +74,6 @@
   public Klass getHigherDimension() { return (Klass) higherDimension.getValue(this); }
   public Klass getLowerDimension()  { return (Klass) lowerDimension.getValue(this); }
   public long  getVtableLen()       { return vtableLen.getValue(this); }
-  public long  getAllocSize()       { return allocSize.getValue(this); }
   public Oop   getComponentMirror() { return componentMirror.getValue(this); }
 
   // constant class names - javaLangCloneable, javaIoSerializable, javaLangObject
@@ -147,7 +144,6 @@
     visitor.doMetadata(higherDimension, true);
     visitor.doMetadata(lowerDimension, true);
     visitor.doCInt(vtableLen, true);
-    visitor.doCInt(allocSize, true);
     visitor.doOop(componentMirror, true);
   }
 }
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java Tue Jul 16 12:20:08 2013 -0400
@@ -57,7 +57,6 @@
     accessFlags = new CIntField(type.getCIntegerField("_access_flags"), 0);
     subklass = new MetadataField(type.getAddressField("_subklass"), 0);
     nextSibling = new MetadataField(type.getAddressField("_next_sibling"), 0);
-    allocCount = new CIntField(type.getCIntegerField("_alloc_count"), 0);
     LH_INSTANCE_SLOW_PATH_BIT = db.lookupIntConstant("Klass::_lh_instance_slow_path_bit").intValue();
     LH_LOG2_ELEMENT_SIZE_SHIFT = db.lookupIntConstant("Klass::_lh_log2_element_size_shift").intValue();
@@ -87,7 +86,6 @@
   private static CIntField accessFlags;
   private static MetadataField subklass;
   private static MetadataField nextSibling;
-  private static CIntField allocCount;
 
   private Address getValue(AddressField field) {
     return addr.getAddressAt(field.getOffset());
@@ -108,7 +106,6 @@
   public AccessFlags getAccessFlagsObj(){ return new AccessFlags(getAccessFlags()); }
   public Klass getSubklassKlass()    { return (Klass) subklass.getValue(this); }
   public Klass getNextSiblingKlass() { return (Klass) nextSibling.getValue(this); }
-  public long  getAllocCount()       { return allocCount.getValue(this); }
 
   // computed access flags - takes care of inner classes etc.
   // This is closer to actual source level than getAccessFlags() etc.
@@ -172,7 +169,6 @@
     visitor.doCInt(accessFlags, true);
     visitor.doMetadata(subklass, true);
     visitor.doMetadata(nextSibling, true);
-    visitor.doCInt(allocCount, true);
   }
 
   public long getObjectSize() {
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Tue Jul 16 12:20:08 2013 -0400
@@ -246,7 +246,7 @@
       }
   }
 
-  private static final boolean disableDerivedPrinterTableCheck;
+  private static final boolean disableDerivedPointerTableCheck;
   private static final Properties saProps;
 
   static {
@@ -256,12 +256,12 @@
         url = VM.class.getClassLoader().getResource("sa.properties");
         saProps.load(new BufferedInputStream(url.openStream()));
     } catch (Exception e) {
-        throw new RuntimeException("Unable to load properties " +
+        System.err.println("Unable to load properties " +
                                    (url == null ? "null" : url.toString()) +
                                    ": " + e.getMessage());
     }
 
-    disableDerivedPrinterTableCheck = System.getProperty("sun.jvm.hotspot.runtime.VM.disableDerivedPointerTableCheck") != null;
+    disableDerivedPointerTableCheck = System.getProperty("sun.jvm.hotspot.runtime.VM.disableDerivedPointerTableCheck") != null;
   }
 
   private VM(TypeDataBase db, JVMDebugger debugger, boolean isBigEndian) {
@@ -371,7 +371,8 @@
   /** This is used by the debugging system */
   public static void initialize(TypeDataBase db, JVMDebugger debugger) {
     if (soleInstance != null) {
-      throw new RuntimeException("Attempt to initialize VM twice");
+      // Using multiple SA Tool classes in the same process creates a call here.
+      return;
     }
     soleInstance = new VM(db, debugger, debugger.getMachineDescription().isBigEndian());
@@ -683,7 +684,7 @@
 
   /** Returns true if C2 derived pointer table should be used, false otherwise */
   public boolean useDerivedPointerTable() {
-    return !disableDerivedPrinterTableCheck;
+    return !disableDerivedPointerTableCheck;
  }
 
  /** Returns the code cache; should not be used if is core build */
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/tools/ClassLoaderStats.java
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/ClassLoaderStats.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/ClassLoaderStats.java Tue Jul 16 12:20:08 2013 -0400
@@ -41,6 +41,14 @@
 public class ClassLoaderStats extends Tool {
     boolean verbose = true;
 
+    public ClassLoaderStats() {
+        super();
+    }
+
+    public ClassLoaderStats(JVMDebugger d) {
+        super(d);
+    }
+
     public static void main(String[] args) {
         ClassLoaderStats cls = new ClassLoaderStats();
         cls.start(args);
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/tools/FinalizerInfo.java
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/FinalizerInfo.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/FinalizerInfo.java Tue Jul 16 12:20:08 2013 -0400
@@ -24,6 +24,7 @@
 
 package sun.jvm.hotspot.tools;
 
+import sun.jvm.hotspot.debugger.JVMDebugger;
 import sun.jvm.hotspot.tools.*;
 import sun.jvm.hotspot.oops.*;
@@ -42,6 +43,15 @@
  * summary of these objects in the form of a histogram.
  */
 public class FinalizerInfo extends Tool {
+
+    public FinalizerInfo() {
+        super();
+    }
+
+    public FinalizerInfo(JVMDebugger d) {
+        super(d);
+    }
+
     public static void main(String[] args) {
         FinalizerInfo finfo = new FinalizerInfo();
         finfo.start(args);
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/tools/FlagDumper.java
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/FlagDumper.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/FlagDumper.java Tue Jul 16 12:20:08 2013 -0400
@@ -25,10 +25,19 @@
 package sun.jvm.hotspot.tools;
 
 import java.io.PrintStream;
+import sun.jvm.hotspot.debugger.JVMDebugger;
 import sun.jvm.hotspot.runtime.*;
 
 public class FlagDumper extends Tool {
+
+    public FlagDumper() {
+        super();
+    }
+
+    public FlagDumper(JVMDebugger d) {
+        super(d);
+    }
+
     public void run() {
         VM.Flag[] flags = VM.getVM().getCommandLineFlags();
         PrintStream out = System.out;
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/tools/HeapDumper.java
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapDumper.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapDumper.java Tue Jul 16 12:20:08 2013 -0400
@@ -25,6 +25,7 @@
 package sun.jvm.hotspot.tools;
 
 import sun.jvm.hotspot.utilities.HeapHprofBinWriter;
+import sun.jvm.hotspot.debugger.JVMDebugger;
 import java.io.IOException;
 
 /*
@@ -42,6 +43,11 @@
         this.dumpFile = dumpFile;
     }
 
+    public HeapDumper(String dumpFile, JVMDebugger d) {
+        super(d);
+        this.dumpFile = dumpFile;
+    }
+
     protected void printFlagsUsage() {
         System.out.println("    <no option>\tto dump heap to " + DEFAULT_DUMP_FILE);
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java Tue Jul 16 12:20:08 2013 -0400
@@ -29,12 +29,21 @@
 import sun.jvm.hotspot.gc_implementation.g1.*;
 import sun.jvm.hotspot.gc_implementation.parallelScavenge.*;
 import sun.jvm.hotspot.gc_implementation.shared.*;
+import sun.jvm.hotspot.debugger.JVMDebugger;
 import sun.jvm.hotspot.memory.*;
 import sun.jvm.hotspot.oops.*;
 import sun.jvm.hotspot.runtime.*;
 
 public class HeapSummary extends Tool {
 
+    public HeapSummary() {
+        super();
+    }
+
+    public HeapSummary(JVMDebugger d) {
+        super(d);
+    }
+
     public static void main(String[] args) {
         HeapSummary hs = new HeapSummary();
         hs.start(args);
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/tools/JInfo.java
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/JInfo.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JInfo.java Tue Jul 16 12:20:08 2013 -0400
@@ -25,12 +25,21 @@
 package sun.jvm.hotspot.tools;
 
 import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.debugger.JVMDebugger;
 
 public class JInfo extends Tool {
+    public JInfo() {
+        super();
+    }
+
     public JInfo(int m) {
         mode = m;
     }
 
+    public JInfo(JVMDebugger d) {
+        super(d);
+    }
+
     protected boolean needsJavaPrefix() {
         return false;
     }
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java Tue Jul 16 12:20:08 2013 -0400
@@ -25,6 +25,7 @@
 package sun.jvm.hotspot.tools;
 
 import java.io.*;
+import sun.jvm.hotspot.debugger.JVMDebugger;
 import sun.jvm.hotspot.utilities.*;
 
 public class JMap extends Tool {
@@ -36,6 +37,10 @@
         this(MODE_PMAP);
     }
 
+    public JMap(JVMDebugger d) {
+        super(d);
+    }
+
     protected boolean needsJavaPrefix() {
         return false;
     }
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/tools/JSnap.java
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/JSnap.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JSnap.java Tue Jul 16 12:20:08 2013 -0400
@@ -25,9 +25,19 @@
 package sun.jvm.hotspot.tools;
 
 import java.io.*;
+import sun.jvm.hotspot.debugger.JVMDebugger;
 import sun.jvm.hotspot.runtime.*;
 
 public class JSnap extends Tool {
+
+    public JSnap() {
+        super();
+    }
+
+    public JSnap(JVMDebugger d) {
+        super(d);
+    }
+
     public void run() {
         final PrintStream out = System.out;
         if (PerfMemory.initialized()) {
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/tools/JStack.java
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/JStack.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JStack.java Tue Jul 16 12:20:08 2013 -0400
@@ -24,6 +24,8 @@
 
 package sun.jvm.hotspot.tools;
 
+import sun.jvm.hotspot.debugger.JVMDebugger;
+
 public class JStack extends Tool {
     public JStack(boolean mixedMode, boolean concurrentLocks) {
         this.mixedMode = mixedMode;
@@ -34,6 +36,10 @@
         this(true, true);
     }
 
+    public JStack(JVMDebugger d) {
+        super(d);
+    }
+
     protected boolean needsJavaPrefix() {
         return false;
     }
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/tools/ObjectHistogram.java
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/ObjectHistogram.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/ObjectHistogram.java Tue Jul 16 12:20:08 2013 -0400
@@ -33,6 +33,14 @@
    an object histogram from a remote or crashed VM. */
 public class ObjectHistogram extends Tool {
+
+    public ObjectHistogram() {
+        super();
+    }
+
+    public ObjectHistogram(JVMDebugger d) {
+        super(d);
+    }
+
     public void run() {
         run(System.out, System.err);
     }
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/tools/PMap.java
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/PMap.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/PMap.java Tue Jul 16 12:20:08 2013 -0400
@@ -31,6 +31,15 @@
 import sun.jvm.hotspot.runtime.*;
 
 public class PMap extends Tool {
+
+    public PMap() {
+        super();
+    }
+
+    public PMap(JVMDebugger d) {
+        super(d);
+    }
+
     public void run() {
         run(System.out);
     }
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/tools/PStack.java
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/PStack.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/PStack.java Tue Jul 16 12:20:08 2013 -0400
@@ -45,6 +45,10 @@
         this(true, true);
     }
 
+    public PStack(JVMDebugger d) {
+        super(d);
+    }
+
     public void run() {
         run(System.out);
     }
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/tools/StackTrace.java
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/StackTrace.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/StackTrace.java Tue Jul 16 12:20:08 2013 -0400
@@ -45,6 +45,16 @@
         run(System.out);
     }
 
+    public StackTrace(JVMDebugger d) {
+        super(d);
+    }
+
+    public StackTrace(JVMDebugger d, boolean v, boolean concurrentLocks) {
+        super(d);
+        this.verbose = v;
+        this.concurrentLocks = concurrentLocks;
+    }
+
     public void run(java.io.PrintStream tty) {
         // Ready to go with the database...
         try {
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/tools/SysPropsDumper.java
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/SysPropsDumper.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/SysPropsDumper.java Tue Jul 16 12:20:08 2013 -0400
@@ -27,10 +27,19 @@
 import java.io.PrintStream;
 import java.util.*;
 
+import sun.jvm.hotspot.debugger.JVMDebugger;
 import sun.jvm.hotspot.runtime.*;
 
 public class SysPropsDumper extends Tool {
+
+    public SysPropsDumper() {
+        super();
+    }
+
+    public SysPropsDumper(JVMDebugger d) {
+        super(d);
+    }
+
     public void run() {
         Properties sysProps = VM.getVM().getSystemProperties();
         PrintStream out = System.out;
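Every tool touched above gains the same constructor pair: a no-arg constructor for the classic command-line path and a `JVMDebugger` one that simply delegates to `super(d)`. A new SA tool would follow the same shape; `UptimeReporter` below is a hypothetical example, not part of this change:

```java
import sun.jvm.hotspot.debugger.JVMDebugger;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.tools.Tool;

public class UptimeReporter extends Tool {   // hypothetical example tool
    public UptimeReporter() {
        super();                             // CLI path: start(String[])
    }

    public UptimeReporter(JVMDebugger d) {
        super(d);                            // embedded, pre-attached path
    }

    public void run() {
        // Invoked once the agent is attached, via either start() variant.
        System.out.println("VM: " + VM.getVM().getVMRelease());
    }
}
```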
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/tools/Tool.java
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/Tool.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/Tool.java Tue Jul 16 12:20:08 2013 -0400
@@ -35,6 +35,7 @@
 
 public abstract class Tool implements Runnable {
     private HotSpotAgent agent;
+    private JVMDebugger jvmDebugger;
     private int debugeeType;
 
     // debugeeType is one of constants below
@@ -42,6 +43,13 @@
     protected static final int DEBUGEE_CORE   = 1;
     protected static final int DEBUGEE_REMOTE = 2;
 
+    public Tool() {
+    }
+
+    public Tool(JVMDebugger d) {
+        jvmDebugger = d;
+    }
+
     public String getName() {
         return getClass().getName();
     }
@@ -90,7 +98,6 @@
 
     protected void usage() {
         printUsage();
-        System.exit(1);
     }
 
     /*
@@ -106,13 +113,13 @@
     protected void stop() {
         if (agent != null) {
             agent.detach();
-            System.exit(0);
         }
     }
 
     protected void start(String[] args) {
         if ((args.length < 1) || (args.length > 2)) {
             usage();
+            return;
         }
 
         // Attempt to handle -h or -help or some invalid flag
@@ -185,13 +192,31 @@
         }
 
         if (e.getMessage() != null) {
             err.print(e.getMessage());
+            e.printStackTrace();
         }
         err.println();
-        System.exit(1);
+        return;
      }
 
      err.println("Debugger attached successfully.");
+     startInternal();
+  }
+
+  // When using an existing JVMDebugger.
+  public void start() {
+
+      if (jvmDebugger == null) {
+         throw new RuntimeException("Tool.start() called with no JVMDebugger set.");
+      }
+      agent = new HotSpotAgent();
+      agent.attach(jvmDebugger);
+      startInternal();
+  }
+
+  // Remains of the start mechanism, common to both start methods.
+  private void startInternal() {
+
+      PrintStream err = System.err;
      VM vm = VM.getVM();
      if (vm.isCore()) {
         err.println("Core build detected.");
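The new no-arg `start()` is what those constructors feed into: it wraps the stored `JVMDebugger` in a fresh `HotSpotAgent` and falls through to `startInternal()`, so several tools can share one attached debugger in a single process. The relaxed `VM.initialize()` earlier in this change is what lets the second and later tools get past VM setup. A sketch, assuming `d` is already attached and `MultiTool` is a hypothetical driver:

```java
import sun.jvm.hotspot.debugger.JVMDebugger;
import sun.jvm.hotspot.tools.FlagDumper;
import sun.jvm.hotspot.tools.HeapSummary;
import sun.jvm.hotspot.tools.JStack;

public class MultiTool {
    public static void report(JVMDebugger d) {
        new HeapSummary(d).start();   // each start() reuses the same debugger
        new JStack(d).start();
        new FlagDumper(d).start();
    }
}
```

Note also that `usage()` and `stop()` no longer call `System.exit`, so a failed or finished tool returns to its caller instead of killing the embedding process.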
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java Tue Jul 16 12:20:08 2013 -0400
@@ -25,6 +25,7 @@
 package sun.jvm.hotspot.tools.jcore;
 
 import java.io.*;
+import java.lang.reflect.Constructor;
 import java.util.jar.JarOutputStream;
 import java.util.jar.JarEntry;
 import java.util.jar.Manifest;
@@ -38,6 +39,16 @@
     private ClassFilter classFilter;
     private String outputDirectory;
     private JarOutputStream jarStream;
+    private String pkgList;
+
+    public ClassDump() {
+        super();
+    }
+
+    public ClassDump(JVMDebugger d, String pkgList) {
+        super(d);
+        this.pkgList = pkgList;
+    }
 
     public void setClassFilter(ClassFilter cf) {
         classFilter = cf;
@@ -63,6 +74,25 @@
     public void run() {
         // Ready to go with the database...
         try {
+            // The name of the filter always comes from a System property.
+            // If we have a pkgList, pass it, otherwise let the filter read
+            // its own System property for the list of classes.
+            String filterClassName = System.getProperty("sun.jvm.hotspot.tools.jcore.filter",
+                                                        "sun.jvm.hotspot.tools.jcore.PackageNameFilter");
+            try {
+                Class filterClass = Class.forName(filterClassName);
+                if (pkgList == null) {
+                    classFilter = (ClassFilter) filterClass.newInstance();
+                } else {
+                    Constructor con = filterClass.getConstructor(String.class);
+                    classFilter = (ClassFilter) con.newInstance(pkgList);
+                }
+            } catch(Exception exp) {
+                System.err.println("Warning: Can not create class filter!");
+            }
+
+            String outputDirectory = System.getProperty("sun.jvm.hotspot.tools.jcore.outputDir", ".");
+            setOutputDirectory(outputDirectory);
 
             // walk through the system dictionary
             SystemDictionary dict = VM.getVM().getSystemDictionary();
@@ -139,26 +169,8 @@
     }
 
     public static void main(String[] args) {
-        // load class filters
-        ClassFilter classFilter = null;
-        String filterClassName = System.getProperty("sun.jvm.hotspot.tools.jcore.filter");
-        if (filterClassName != null) {
-            try {
-                Class filterClass = Class.forName(filterClassName);
-                classFilter = (ClassFilter) filterClass.newInstance();
-            } catch(Exception exp) {
-                System.err.println("Warning: Can not create class filter!");
-            }
-        }
-
-        String outputDirectory = System.getProperty("sun.jvm.hotspot.tools.jcore.outputDir");
-        if (outputDirectory == null)
-            outputDirectory = ".";
-
         ClassDump cd = new ClassDump();
-        cd.setClassFilter(classFilter);
-        cd.setOutputDirectory(outputDirectory);
         cd.start(args);
         cd.stop();
     }
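ClassDump now resolves its filter and output directory inside `run()`, so the embedded path is configured the same way as the command line: through system properties, or by handing a package list to the new constructor, which `run()` forwards to the filter's `String` constructor. An illustrative sketch; the property value and package list are examples, and `DumpClasses` is hypothetical:

```java
import sun.jvm.hotspot.debugger.JVMDebugger;
import sun.jvm.hotspot.tools.jcore.ClassDump;

public class DumpClasses {
    public static void dump(JVMDebugger d) {
        // Classes are written under ./dumped; the default filter is
        // sun.jvm.hotspot.tools.jcore.PackageNameFilter.
        System.setProperty("sun.jvm.hotspot.tools.jcore.outputDir", "dumped");
        ClassDump cd = new ClassDump(d, "java.lang,java.util");
        cd.start();
        cd.stop();
    }
}
```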
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/tools/soql/JSDB.java
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/soql/JSDB.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/soql/JSDB.java Tue Jul 16 12:20:08 2013 -0400
@@ -24,12 +24,22 @@
 
 package sun.jvm.hotspot.tools.soql;
 
+import sun.jvm.hotspot.debugger.JVMDebugger;
 import sun.jvm.hotspot.tools.*;
 import sun.jvm.hotspot.utilities.*;
 import sun.jvm.hotspot.utilities.soql.*;
 
 /** This is command line JavaScript debugger console */
 public class JSDB extends Tool {
+
+    public JSDB() {
+        super();
+    }
+
+    public JSDB(JVMDebugger d) {
+        super(d);
+    }
+
     public static void main(String[] args) {
         JSDB jsdb = new JSDB();
         jsdb.start(args);
diff -r 16b10327b00d -r 90d6c221d4e5 agent/src/share/classes/sun/jvm/hotspot/tools/soql/SOQL.java
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/soql/SOQL.java Tue Jul 16 10:55:48 2013 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/soql/SOQL.java Tue Jul 16 12:20:08 2013 -0400
@@ -44,6 +44,14 @@
         soql.stop();
     }
 
+    public SOQL() {
+        super();
+    }
+
+    public SOQL(JVMDebugger d) {
+        super(d);
+    }
+
     protected SOQLEngine soqlEngine;
     protected BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
     protected PrintStream out = System.out;
diff -r 16b10327b00d -r 90d6c221d4e5 make/Makefile
--- a/make/Makefile Tue Jul 16 10:55:48 2013 -0400
+++ b/make/Makefile Tue Jul 16 12:20:08 2013 -0400
@@ -486,7 +486,7 @@
 JFR_EXISTS=$(shell if [ -d $(HS_ALT_SRC) ]; then echo 1; else echo 0; fi)
 # export jfr.h
 ifeq ($(JFR_EXISTS),1)
-$(EXPORT_INCLUDE_DIR)/%: $(HS_ALT_SRC)/share/vm/jfr/agent/%
+$(EXPORT_INCLUDE_DIR)/%: $(HS_ALT_SRC)/share/vm/jfr/%
	$(install-file)
 else
 $(EXPORT_INCLUDE_DIR)/jfr.h:
diff -r 16b10327b00d -r 90d6c221d4e5 make/bsd/makefiles/build_vm_def.sh
--- a/make/bsd/makefiles/build_vm_def.sh Tue Jul 16 10:55:48 2013 -0400
+++ b/make/bsd/makefiles/build_vm_def.sh Tue Jul 16 12:20:08 2013 -0400
@@ -7,6 +7,6 @@
 NM=nm
 fi
 
-$NM --defined-only $* | awk '
-   { if ($3 ~ /^_ZTV/ || $3 ~ /^gHotSpotVM/) print "\t" $3 ";" }
+$NM -Uj $* | awk '
+   { if ($3 ~ /^_ZTV/ || $3 ~ /^gHotSpotVM/) print "\t" $3 }
 '
diff -r 16b10327b00d -r 90d6c221d4e5 make/bsd/makefiles/buildtree.make
--- a/make/bsd/makefiles/buildtree.make Tue Jul 16 10:55:48 2013 -0400
+++ b/make/bsd/makefiles/buildtree.make Tue Jul 16 12:20:08 2013 -0400
@@ -47,6 +47,7 @@
 # flags.make	- with macro settings
 # vm.make	- to support making "$(MAKE) -v vm.make" in makefiles
 # adlc.make	-
+# trace.make	- generate tracing event and type definitions
 # jvmti.make	- generate JVMTI bindings from the spec (JSR-163)
 # sa.make	- generate SA jar file and natives
 #
@@ -119,6 +120,7 @@
	$(PLATFORM_DIR)/generated/dependencies \
	$(PLATFORM_DIR)/generated/adfiles \
	$(PLATFORM_DIR)/generated/jvmtifiles \
+	$(PLATFORM_DIR)/generated/tracefiles \
	$(PLATFORM_DIR)/generated/dtracefiles
 
 TARGETS      = debug fastdebug optimized product
@@ -128,7 +130,7 @@
 BUILDTREE_MAKE	= $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
 
 # dtrace.make is used on BSD versions that implement Dtrace (like MacOS X)
-BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make dtrace.make
+BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make dtrace.make
 
 BUILDTREE_VARS	= GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
	SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
@@ -331,6 +333,16 @@
	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
	) > $@
 
+trace.make: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
+	) > $@
+
 sa.make: $(BUILDTREE_MAKE)
	@echo Creating $@ ...
	$(QUIETLY) ( \
diff -r 16b10327b00d -r 90d6c221d4e5 make/bsd/makefiles/gcc.make
--- a/make/bsd/makefiles/gcc.make Tue Jul 16 10:55:48 2013 -0400
+++ b/make/bsd/makefiles/gcc.make Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -368,8 +368,8 @@
 # Standard linker flags
 LFLAGS +=
 
-  # Darwin doesn't use ELF and doesn't support version scripts
-  LDNOMAP = true
+  # The apple linker has its own variant of mapfiles/version-scripts
+  MAPFLAG = -Xlinker -exported_symbols_list -Xlinker FILENAME
 
   # Use $(SONAMEFLAG:SONAME=soname) to specify the intrinsic name of a shared obj
   SONAMEFLAG =
diff -r 16b10327b00d -r 90d6c221d4e5 make/bsd/makefiles/mapfile-vers-debug
--- a/make/bsd/makefiles/mapfile-vers-debug Tue Jul 16 10:55:48 2013 -0400
+++ b/make/bsd/makefiles/mapfile-vers-debug Tue Jul 16 12:20:08 2013 -0400
@@ -1,7 +1,3 @@
-#
-# @(#)mapfile-vers-debug	1.18 07/10/25 16:47:35
-#
-
 #
 # Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -23,273 +19,243 @@
 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 # or visit www.oracle.com if you need additional information or have any
 # questions.
-#
 #
+# Only used for OSX/Darwin builds
 
 # Define public interface.
-
-SUNWprivate_1.1 {
-  global:
-    # JNI
-    JNI_CreateJavaVM;
-    JNI_GetCreatedJavaVMs;
-    JNI_GetDefaultJavaVMInitArgs;
+    # _JNI
+    _JNI_CreateJavaVM
+    _JNI_GetCreatedJavaVMs
+    _JNI_GetDefaultJavaVMInitArgs
 
-    # JVM
-    JVM_Accept;
-    JVM_ActiveProcessorCount;
-    JVM_AllocateNewArray;
-    JVM_AllocateNewObject;
-    JVM_ArrayCopy;
-    JVM_AssertionStatusDirectives;
-    JVM_Available;
-    JVM_Bind;
-    JVM_ClassDepth;
-    JVM_ClassLoaderDepth;
-    JVM_Clone;
-    JVM_Close;
-    JVM_CX8Field;
-    JVM_CompileClass;
-    JVM_CompileClasses;
-    JVM_CompilerCommand;
-    JVM_Connect;
-    JVM_ConstantPoolGetClassAt;
-    JVM_ConstantPoolGetClassAtIfLoaded;
-    JVM_ConstantPoolGetDoubleAt;
-    JVM_ConstantPoolGetFieldAt;
-    JVM_ConstantPoolGetFieldAtIfLoaded;
-    JVM_ConstantPoolGetFloatAt;
-    JVM_ConstantPoolGetIntAt;
-    JVM_ConstantPoolGetLongAt;
-    JVM_ConstantPoolGetMethodAt;
-    JVM_ConstantPoolGetMethodAtIfLoaded;
-    JVM_ConstantPoolGetMemberRefInfoAt;
-    JVM_ConstantPoolGetSize;
-    JVM_ConstantPoolGetStringAt;
-    JVM_ConstantPoolGetUTF8At;
-    JVM_CountStackFrames;
-    JVM_CurrentClassLoader;
-    JVM_CurrentLoadedClass;
-    JVM_CurrentThread;
-    JVM_CurrentTimeMillis;
-    JVM_DefineClass;
-    JVM_DefineClassWithSource;
-    JVM_DefineClassWithSourceCond;
-    JVM_DesiredAssertionStatus;
-    JVM_DisableCompiler;
-    JVM_DoPrivileged;
-    JVM_DTraceGetVersion;
-    JVM_DTraceActivate;
-    JVM_DTraceIsProbeEnabled;
-    JVM_DTraceIsSupported;
-    JVM_DTraceDispose;
-    JVM_DumpAllStacks;
-    JVM_DumpThreads;
-    JVM_EnableCompiler;
-    JVM_Exit;
-    JVM_FillInStackTrace;
-    JVM_FindClassFromClass;
-    JVM_FindClassFromClassLoader;
-    JVM_FindClassFromBootLoader;
-    JVM_FindLibraryEntry;
-    JVM_FindLoadedClass;
-    JVM_FindPrimitiveClass;
-    JVM_FindSignal;
-    JVM_FreeMemory;
-    JVM_GC;
-    JVM_GetAllThreads;
-    JVM_GetArrayElement;
-    JVM_GetArrayLength;
-    JVM_GetCPClassNameUTF;
-    JVM_GetCPFieldClassNameUTF;
-    JVM_GetCPFieldModifiers;
-    JVM_GetCPFieldNameUTF;
-    JVM_GetCPFieldSignatureUTF;
-    JVM_GetCPMethodClassNameUTF;
-    JVM_GetCPMethodModifiers;
-    JVM_GetCPMethodNameUTF;
-    JVM_GetCPMethodSignatureUTF;
-    JVM_GetCallerClass;
-    JVM_GetClassAccessFlags;
-    JVM_GetClassAnnotations;
-    JVM_GetClassCPEntriesCount;
-    JVM_GetClassCPTypes;
-    JVM_GetClassConstantPool;
-    JVM_GetClassContext;
-    JVM_GetClassDeclaredConstructors;
-    JVM_GetClassDeclaredFields;
-    JVM_GetClassDeclaredMethods;
-    JVM_GetClassFieldsCount;
-    JVM_GetClassInterfaces;
-    JVM_GetClassLoader;
-    JVM_GetClassMethodsCount;
-    JVM_GetClassModifiers;
-    JVM_GetClassName;
-    JVM_GetClassNameUTF;
-    JVM_GetClassSignature;
-    JVM_GetClassSigners;
-    JVM_GetClassTypeAnnotations;
-    JVM_GetComponentType;
-    JVM_GetDeclaredClasses;
-    JVM_GetDeclaringClass;
-    JVM_GetEnclosingMethodInfo;
-    JVM_GetFieldAnnotations;
-    JVM_GetFieldIxModifiers;
-    JVM_GetFieldTypeAnnotations;
-    JVM_GetHostName;
-    JVM_GetInheritedAccessControlContext;
-    JVM_GetInterfaceVersion;
-    JVM_GetLastErrorString;
-    JVM_GetManagement;
-    JVM_GetMethodAnnotations;
-    JVM_GetMethodDefaultAnnotationValue;
-    JVM_GetMethodIxArgsSize;
-    JVM_GetMethodIxByteCode;
-    JVM_GetMethodIxByteCodeLength;
-    JVM_GetMethodIxExceptionIndexes;
-    JVM_GetMethodIxExceptionTableEntry;
-    JVM_GetMethodIxExceptionTableLength;
-    JVM_GetMethodIxExceptionsCount;
-    JVM_GetMethodIxLocalsCount;
-    JVM_GetMethodIxMaxStack;
-    JVM_GetMethodIxModifiers;
-    JVM_GetMethodIxNameUTF;
-    JVM_GetMethodIxSignatureUTF;
-    JVM_GetMethodParameterAnnotations;
-    JVM_GetMethodParameters;
-    JVM_GetMethodTypeAnnotations;
-    JVM_GetPrimitiveArrayElement;
-    JVM_GetProtectionDomain;
-    JVM_GetSockName;
-    JVM_GetSockOpt;
-    JVM_GetStackAccessControlContext;
-    JVM_GetStackTraceDepth;
-    JVM_GetStackTraceElement;
-    JVM_GetSystemPackage;
-    JVM_GetSystemPackages;
-    JVM_GetThreadStateNames;
-    JVM_GetThreadStateValues;
-    JVM_GetVersionInfo;
-    JVM_Halt;
-    JVM_HoldsLock;
-    JVM_IHashCode;
-    JVM_InitAgentProperties;
-    JVM_InitProperties;
-    JVM_InitializeCompiler;
-    JVM_InitializeSocketLibrary;
-    JVM_InternString;
-    JVM_Interrupt;
-    JVM_InvokeMethod;
-    JVM_IsArrayClass;
-    JVM_IsConstructorIx;
-    JVM_IsInterface;
-    JVM_IsInterrupted;
-    JVM_IsNaN;
-    JVM_IsPrimitiveClass;
-    JVM_IsSameClassPackage;
-    JVM_IsSilentCompiler;
-    JVM_IsSupportedJNIVersion;
-    JVM_IsThreadAlive;
-    JVM_IsVMGeneratedMethodIx;
-    JVM_LatestUserDefinedLoader;
-    JVM_Listen;
-    JVM_LoadClass0;
-    JVM_LoadLibrary;
-    JVM_Lseek;
-    JVM_MaxObjectInspectionAge;
-    JVM_MaxMemory;
-    JVM_MonitorNotify;
-    JVM_MonitorNotifyAll;
-    JVM_MonitorWait;
-    JVM_NanoTime;
-    JVM_NativePath;
-    JVM_NewArray;
-    JVM_NewInstanceFromConstructor;
-    JVM_NewMultiArray;
-    JVM_OnExit;
-    JVM_Open;
-    JVM_RaiseSignal;
-    JVM_RawMonitorCreate;
-    JVM_RawMonitorDestroy;
-    JVM_RawMonitorEnter;
-    JVM_RawMonitorExit;
-    JVM_Read;
-    JVM_Recv;
-    JVM_RecvFrom;
-    JVM_RegisterSignal;
-    JVM_ReleaseUTF;
-    JVM_ResolveClass;
-    JVM_ResumeThread;
-    JVM_Send;
-    JVM_SendTo;
-    JVM_SetArrayElement;
-    JVM_SetClassSigners;
-    JVM_SetLength;
-    JVM_SetPrimitiveArrayElement;
-    JVM_SetProtectionDomain;
-    JVM_SetSockOpt;
-    JVM_SetThreadPriority;
-    JVM_Sleep;
-    JVM_Socket;
-    JVM_SocketAvailable;
-    JVM_SocketClose;
-    JVM_SocketShutdown;
-    JVM_StartThread;
-    JVM_StopThread;
-    JVM_SuspendThread;
-    JVM_SupportsCX8;
-    JVM_Sync;
-    JVM_Timeout;
-    JVM_TotalMemory;
-    JVM_TraceInstructions;
-    JVM_TraceMethodCalls;
-    JVM_UnloadLibrary;
-    JVM_Write;
-    JVM_Yield;
-    JVM_handle_bsd_signal;
+    # _JVM
+    _JVM_Accept
+    _JVM_ActiveProcessorCount
+    _JVM_AllocateNewArray
+    _JVM_AllocateNewObject
+    _JVM_ArrayCopy
+    _JVM_AssertionStatusDirectives
+    _JVM_Available
+    _JVM_Bind
+    _JVM_ClassDepth
+    _JVM_ClassLoaderDepth
+    _JVM_Clone
+    _JVM_Close
+    _JVM_CX8Field
+    _JVM_CompileClass
+    _JVM_CompileClasses
+    _JVM_CompilerCommand
+    _JVM_Connect
+    _JVM_ConstantPoolGetClassAt
+    _JVM_ConstantPoolGetClassAtIfLoaded
+    _JVM_ConstantPoolGetDoubleAt
+    _JVM_ConstantPoolGetFieldAt
+    _JVM_ConstantPoolGetFieldAtIfLoaded
+    _JVM_ConstantPoolGetFloatAt
+    _JVM_ConstantPoolGetIntAt
+    _JVM_ConstantPoolGetLongAt
+    _JVM_ConstantPoolGetMethodAt
+    _JVM_ConstantPoolGetMethodAtIfLoaded
+    _JVM_ConstantPoolGetMemberRefInfoAt
+    _JVM_ConstantPoolGetSize
+    _JVM_ConstantPoolGetStringAt
+    _JVM_ConstantPoolGetUTF8At
+    _JVM_CountStackFrames
+    _JVM_CurrentClassLoader
+    _JVM_CurrentLoadedClass
+    _JVM_CurrentThread
+    _JVM_CurrentTimeMillis
+    _JVM_DefineClass
+    _JVM_DefineClassWithSource
+    _JVM_DefineClassWithSourceCond
+    _JVM_DesiredAssertionStatus
+    _JVM_DisableCompiler
+    _JVM_DoPrivileged
+    _JVM_DTraceGetVersion
+    _JVM_DTraceActivate
+    _JVM_DTraceIsProbeEnabled
+    _JVM_DTraceIsSupported
+    _JVM_DTraceDispose
+    _JVM_DumpAllStacks
+    _JVM_DumpThreads
+    _JVM_EnableCompiler
+    _JVM_Exit
+    _JVM_FillInStackTrace
+    _JVM_FindClassFromClass
+    _JVM_FindClassFromClassLoader
+    _JVM_FindClassFromBootLoader
+    _JVM_FindLibraryEntry
+    _JVM_FindLoadedClass
+    _JVM_FindPrimitiveClass
+    _JVM_FindSignal
+    _JVM_FreeMemory
+    _JVM_GC
+    _JVM_GetAllThreads
+    _JVM_GetArrayElement
+    _JVM_GetArrayLength
+    _JVM_GetCPClassNameUTF
+    _JVM_GetCPFieldClassNameUTF
+    _JVM_GetCPFieldModifiers
+    _JVM_GetCPFieldNameUTF
+    _JVM_GetCPFieldSignatureUTF
+    _JVM_GetCPMethodClassNameUTF
+    _JVM_GetCPMethodModifiers
+    _JVM_GetCPMethodNameUTF
+    _JVM_GetCPMethodSignatureUTF
+    _JVM_GetCallerClass
+    _JVM_GetClassAccessFlags
+    _JVM_GetClassAnnotations
+    _JVM_GetClassCPEntriesCount
+    _JVM_GetClassCPTypes
+    _JVM_GetClassConstantPool
+    _JVM_GetClassContext
+    _JVM_GetClassDeclaredConstructors
+    _JVM_GetClassDeclaredFields
+    _JVM_GetClassDeclaredMethods
+    _JVM_GetClassFieldsCount
+    _JVM_GetClassInterfaces
+    _JVM_GetClassLoader
+    _JVM_GetClassMethodsCount
+    _JVM_GetClassModifiers
+    _JVM_GetClassName
+    _JVM_GetClassNameUTF
+    _JVM_GetClassSignature
+    _JVM_GetClassSigners
+    _JVM_GetClassTypeAnnotations
+    _JVM_GetComponentType
+    _JVM_GetDeclaredClasses
+    _JVM_GetDeclaringClass
+    _JVM_GetEnclosingMethodInfo
+    _JVM_GetFieldAnnotations
+    _JVM_GetFieldIxModifiers
+    _JVM_GetFieldTypeAnnotations
+    _JVM_GetHostName
+    _JVM_GetInheritedAccessControlContext
+    _JVM_GetInterfaceVersion
+    _JVM_GetLastErrorString
+    _JVM_GetManagement
+    _JVM_GetMethodAnnotations
+    _JVM_GetMethodDefaultAnnotationValue
+    _JVM_GetMethodIxArgsSize
+    _JVM_GetMethodIxByteCode
+    _JVM_GetMethodIxByteCodeLength
+    _JVM_GetMethodIxExceptionIndexes
+    _JVM_GetMethodIxExceptionTableEntry
+    _JVM_GetMethodIxExceptionTableLength
+    _JVM_GetMethodIxExceptionsCount
+    _JVM_GetMethodIxLocalsCount
+    _JVM_GetMethodIxMaxStack
+    _JVM_GetMethodIxModifiers
+    _JVM_GetMethodIxNameUTF
+    _JVM_GetMethodIxSignatureUTF
+    _JVM_GetMethodParameterAnnotations
+    _JVM_GetMethodParameters
+    _JVM_GetMethodTypeAnnotations
+    _JVM_GetPrimitiveArrayElement
+    _JVM_GetProtectionDomain
+    _JVM_GetSockName
+    _JVM_GetSockOpt
+    _JVM_GetStackAccessControlContext
+    _JVM_GetStackTraceDepth
+    _JVM_GetStackTraceElement
+    _JVM_GetSystemPackage
+    _JVM_GetSystemPackages
+    _JVM_GetThreadStateNames
+    _JVM_GetThreadStateValues
+    _JVM_GetVersionInfo
+    _JVM_Halt
+    _JVM_HoldsLock
+    _JVM_IHashCode
+    _JVM_InitAgentProperties
+    _JVM_InitProperties
+    _JVM_InitializeCompiler
+    _JVM_InitializeSocketLibrary
+    _JVM_InternString
+    _JVM_Interrupt
+    _JVM_InvokeMethod
+    _JVM_IsArrayClass
+    _JVM_IsConstructorIx
+    _JVM_IsInterface
+    _JVM_IsInterrupted
+    _JVM_IsNaN
+    _JVM_IsPrimitiveClass
+    _JVM_IsSameClassPackage
+    _JVM_IsSilentCompiler
+    _JVM_IsSupportedJNIVersion
+    _JVM_IsThreadAlive
+    _JVM_IsVMGeneratedMethodIx
+    _JVM_LatestUserDefinedLoader
+    _JVM_Listen
+    _JVM_LoadClass0
+    _JVM_LoadLibrary
+    _JVM_Lseek
+    _JVM_MaxObjectInspectionAge
+    _JVM_MaxMemory
+    _JVM_MonitorNotify
+    _JVM_MonitorNotifyAll
+    _JVM_MonitorWait
+    _JVM_NanoTime
+    _JVM_NativePath
+    _JVM_NewArray
+    _JVM_NewInstanceFromConstructor
+    _JVM_NewMultiArray
+    _JVM_OnExit
+    _JVM_Open
+    _JVM_RaiseSignal
+    _JVM_RawMonitorCreate
+    _JVM_RawMonitorDestroy
+    _JVM_RawMonitorEnter
+    _JVM_RawMonitorExit
+    _JVM_Read
+    _JVM_Recv
+    _JVM_RecvFrom
+    _JVM_RegisterSignal
+    _JVM_ReleaseUTF
+    _JVM_ResolveClass
+    _JVM_ResumeThread
+    _JVM_Send
+    _JVM_SendTo
+    _JVM_SetArrayElement
+    _JVM_SetClassSigners
+    _JVM_SetLength
+    _JVM_SetNativeThreadName
+    _JVM_SetPrimitiveArrayElement
+    _JVM_SetSockOpt
+    _JVM_SetThreadPriority
+    _JVM_Sleep
+    _JVM_Socket
+    _JVM_SocketAvailable
+    _JVM_SocketClose
+    _JVM_SocketShutdown
+    _JVM_StartThread
+    _JVM_StopThread
+    _JVM_SuspendThread
+    _JVM_SupportsCX8
+    _JVM_Sync
+    _JVM_Timeout
+    _JVM_TotalMemory
+    _JVM_TraceInstructions
+    _JVM_TraceMethodCalls
+    _JVM_UnloadLibrary
+    _JVM_Write
+    _JVM_Yield
+    _JVM_handle_bsd_signal
 
-    # Old reflection routines
-    # These do not need to be present in the product build in JDK 1.4
-    # but their code has not been removed yet because there will not
-    # be a substantial code savings until JVM_InvokeMethod and
-    # JVM_NewInstanceFromConstructor can also be removed; see
-    # reflectionCompat.hpp.
-    JVM_GetClassConstructor;
-    JVM_GetClassConstructors;
-    JVM_GetClassField;
-    JVM_GetClassFields;
-    JVM_GetClassMethod;
-    JVM_GetClassMethods;
-    JVM_GetField;
-    JVM_GetPrimitiveField;
-    JVM_NewInstance;
-    JVM_SetField;
-    JVM_SetPrimitiveField;
-
-    # debug JVM
-    JVM_AccessVMBooleanFlag;
-    JVM_AccessVMIntFlag;
-    JVM_VMBreakPoint;
+    # debug _JVM
+    _JVM_AccessVMBooleanFlag
+    _JVM_AccessVMIntFlag
+    _JVM_VMBreakPoint
 
     # miscellaneous functions
-    jio_fprintf;
-    jio_printf;
-    jio_snprintf;
-    jio_vfprintf;
-    jio_vsnprintf;
-    fork1;
-    numa_warn;
-    numa_error;
-
-    # Needed because there is no JVM interface for this.
-    sysThreadAvailableStackWithSlack;
+    _jio_fprintf
+    _jio_printf
+    _jio_snprintf
+    _jio_vfprintf
+    _jio_vsnprintf
 
     # This is for Forte Analyzer profiling support.
-    AsyncGetCallTrace;
+    _AsyncGetCallTrace
 
     # INSERT VTABLE SYMBOLS HERE
 
-  local:
-    *;
-};
-
diff -r 16b10327b00d -r 90d6c221d4e5 make/bsd/makefiles/mapfile-vers-product
--- a/make/bsd/makefiles/mapfile-vers-product Tue Jul 16 10:55:48 2013 -0400
+++ b/make/bsd/makefiles/mapfile-vers-product Tue Jul 16 12:20:08 2013 -0400
@@ -1,7 +1,3 @@
-#
-# @(#)mapfile-vers-product	1.19 08/02/12 10:56:37
-#
-
 #
 # Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -23,268 +19,238 @@
 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 # or visit www.oracle.com if you need additional information or have any
 # questions.
-#
 #
+# Only used for OSX/Darwin builds
 
 # Define public interface.
-
-SUNWprivate_1.1 {
-  global:
-    # JNI
-    JNI_CreateJavaVM;
-    JNI_GetCreatedJavaVMs;
-    JNI_GetDefaultJavaVMInitArgs;
+    # _JNI
+    _JNI_CreateJavaVM
+    _JNI_GetCreatedJavaVMs
+    _JNI_GetDefaultJavaVMInitArgs
 
-    # JVM
-    JVM_Accept;
-    JVM_ActiveProcessorCount;
-    JVM_AllocateNewArray;
-    JVM_AllocateNewObject;
-    JVM_ArrayCopy;
-    JVM_AssertionStatusDirectives;
-    JVM_Available;
-    JVM_Bind;
-    JVM_ClassDepth;
-    JVM_ClassLoaderDepth;
-    JVM_Clone;
-    JVM_Close;
-    JVM_CX8Field;
-    JVM_CompileClass;
-    JVM_CompileClasses;
-    JVM_CompilerCommand;
-    JVM_Connect;
-    JVM_ConstantPoolGetClassAt;
-    JVM_ConstantPoolGetClassAtIfLoaded;
-    JVM_ConstantPoolGetDoubleAt;
-    JVM_ConstantPoolGetFieldAt;
-    JVM_ConstantPoolGetFieldAtIfLoaded;
-    JVM_ConstantPoolGetFloatAt;
-    JVM_ConstantPoolGetIntAt;
-    JVM_ConstantPoolGetLongAt;
-    JVM_ConstantPoolGetMethodAt;
-    JVM_ConstantPoolGetMethodAtIfLoaded;
-    JVM_ConstantPoolGetMemberRefInfoAt;
-    JVM_ConstantPoolGetSize;
-    JVM_ConstantPoolGetStringAt;
-    JVM_ConstantPoolGetUTF8At;
-    JVM_CountStackFrames;
-    JVM_CurrentClassLoader;
-    JVM_CurrentLoadedClass;
-    JVM_CurrentThread;
-    JVM_CurrentTimeMillis;
-    JVM_DefineClass;
-    JVM_DefineClassWithSource;
-    JVM_DefineClassWithSourceCond;
-    JVM_DesiredAssertionStatus;
-    JVM_DisableCompiler;
-    JVM_DoPrivileged;
-    JVM_DTraceGetVersion;
-    JVM_DTraceActivate;
-    JVM_DTraceIsProbeEnabled;
-    JVM_DTraceIsSupported;
-    JVM_DTraceDispose;
-    JVM_DumpAllStacks;
-    JVM_DumpThreads;
-    JVM_EnableCompiler;
-    JVM_Exit;
-    JVM_FillInStackTrace;
-    JVM_FindClassFromClass;
-    JVM_FindClassFromClassLoader;
-    JVM_FindClassFromBootLoader;
-    JVM_FindLibraryEntry;
-    JVM_FindLoadedClass;
-    JVM_FindPrimitiveClass;
-    JVM_FindSignal;
-    JVM_FreeMemory;
-    JVM_GC;
-    JVM_GetAllThreads;
-    JVM_GetArrayElement;
-    JVM_GetArrayLength;
-    JVM_GetCPClassNameUTF;
-    JVM_GetCPFieldClassNameUTF;
-    JVM_GetCPFieldModifiers;
-    JVM_GetCPFieldNameUTF;
-    JVM_GetCPFieldSignatureUTF;
-    JVM_GetCPMethodClassNameUTF;
-    JVM_GetCPMethodModifiers;
-    JVM_GetCPMethodNameUTF;
-    JVM_GetCPMethodSignatureUTF;
-    JVM_GetCallerClass;
-    JVM_GetClassAccessFlags;
-    JVM_GetClassAnnotations;
-    JVM_GetClassCPEntriesCount;
-    JVM_GetClassCPTypes;
-    JVM_GetClassConstantPool;
-    JVM_GetClassContext;
-    JVM_GetClassDeclaredConstructors;
-    JVM_GetClassDeclaredFields;
-    JVM_GetClassDeclaredMethods;
-    JVM_GetClassFieldsCount;
-    JVM_GetClassInterfaces;
-    JVM_GetClassLoader;
-    JVM_GetClassMethodsCount;
-    JVM_GetClassModifiers;
-    JVM_GetClassName;
-    JVM_GetClassNameUTF;
-    JVM_GetClassSignature;
-    JVM_GetClassSigners;
-    JVM_GetClassTypeAnnotations;
-    JVM_GetComponentType;
-    JVM_GetDeclaredClasses;
-    JVM_GetDeclaringClass;
-    JVM_GetEnclosingMethodInfo;
-    JVM_GetFieldAnnotations;
-    JVM_GetFieldIxModifiers;
-    JVM_GetFieldTypeAnnotations;
-    JVM_GetHostName;
-    JVM_GetInheritedAccessControlContext;
-    JVM_GetInterfaceVersion;
-    JVM_GetLastErrorString;
-    JVM_GetManagement;
-    JVM_GetMethodAnnotations;
-    JVM_GetMethodDefaultAnnotationValue;
-    JVM_GetMethodIxArgsSize;
-    JVM_GetMethodIxByteCode;
-    JVM_GetMethodIxByteCodeLength;
-    JVM_GetMethodIxExceptionIndexes;
-    JVM_GetMethodIxExceptionTableEntry;
-    JVM_GetMethodIxExceptionTableLength;
-    JVM_GetMethodIxExceptionsCount;
-    JVM_GetMethodIxLocalsCount;
-    JVM_GetMethodIxMaxStack;
-    JVM_GetMethodIxModifiers;
-    JVM_GetMethodIxNameUTF;
-    JVM_GetMethodIxSignatureUTF;
-    JVM_GetMethodParameterAnnotations;
-    JVM_GetMethodParameters;
-    JVM_GetMethodTypeAnnotations;
-    JVM_GetPrimitiveArrayElement;
-    JVM_GetProtectionDomain;
-    JVM_GetSockName;
-    JVM_GetSockOpt;
-    JVM_GetStackAccessControlContext;
-    JVM_GetStackTraceDepth;
-    JVM_GetStackTraceElement;
-    JVM_GetSystemPackage;
-    JVM_GetSystemPackages;
-    JVM_GetThreadStateNames;
-    JVM_GetThreadStateValues;
-    JVM_GetVersionInfo;
-    JVM_Halt;
-    JVM_HoldsLock;
-    JVM_IHashCode;
-    JVM_InitAgentProperties;
-    JVM_InitProperties;
-    JVM_InitializeCompiler;
-    JVM_InitializeSocketLibrary;
-    JVM_InternString;
-    JVM_Interrupt;
-    JVM_InvokeMethod;
-    JVM_IsArrayClass;
-    JVM_IsConstructorIx;
-    JVM_IsInterface;
-    JVM_IsInterrupted;
-    JVM_IsNaN;
-    JVM_IsPrimitiveClass;
-    JVM_IsSameClassPackage;
-    JVM_IsSilentCompiler;
-    JVM_IsSupportedJNIVersion;
-    JVM_IsThreadAlive;
-    JVM_IsVMGeneratedMethodIx;
-    JVM_LatestUserDefinedLoader;
-    JVM_Listen;
-    JVM_LoadClass0;
-    JVM_LoadLibrary;
-    JVM_Lseek;
-    JVM_MaxObjectInspectionAge;
-    JVM_MaxMemory;
-    JVM_MonitorNotify;
-    JVM_MonitorNotifyAll;
-    JVM_MonitorWait;
-    JVM_NanoTime;
-    JVM_NativePath;
-    JVM_NewArray;
-    JVM_NewInstanceFromConstructor;
-    JVM_NewMultiArray;
-    JVM_OnExit;
-    JVM_Open;
-    JVM_RaiseSignal;
-    JVM_RawMonitorCreate;
-    JVM_RawMonitorDestroy;
-    JVM_RawMonitorEnter;
-    JVM_RawMonitorExit;
-    JVM_Read;
-    JVM_Recv;
-    JVM_RecvFrom;
-    JVM_RegisterSignal;
-    JVM_ReleaseUTF;
-    JVM_ResolveClass;
-    JVM_ResumeThread;
-    JVM_Send;
-    JVM_SendTo;
-    JVM_SetArrayElement;
-    JVM_SetClassSigners;
-    JVM_SetLength;
-    JVM_SetPrimitiveArrayElement;
-    JVM_SetProtectionDomain;
-    JVM_SetSockOpt;
-    JVM_SetThreadPriority;
-    JVM_Sleep;
-    JVM_Socket;
-    JVM_SocketAvailable;
-    JVM_SocketClose;
-    JVM_SocketShutdown;
-    JVM_StartThread;
-    JVM_StopThread;
-    JVM_SuspendThread;
-    JVM_SupportsCX8;
-    JVM_Sync;
-    JVM_Timeout;
-    JVM_TotalMemory;
-    JVM_TraceInstructions;
-    JVM_TraceMethodCalls;
-    JVM_UnloadLibrary;
-    JVM_Write;
-    JVM_Yield;
-    JVM_handle_bsd_signal;
-
-    # Old reflection routines
-    # These do not need to be present in the product build in JDK 1.4
-    # but their code has not been removed yet because there will not
-    # be a substantial code savings until JVM_InvokeMethod and
-    # JVM_NewInstanceFromConstructor can also be removed; see
-    # reflectionCompat.hpp.
-    JVM_GetClassConstructor;
-    JVM_GetClassConstructors;
-    JVM_GetClassField;
-    JVM_GetClassFields;
-    JVM_GetClassMethod;
-    JVM_GetClassMethods;
-    JVM_GetField;
-    JVM_GetPrimitiveField;
-    JVM_NewInstance;
-    JVM_SetField;
-    JVM_SetPrimitiveField;
+    # _JVM
+    _JVM_Accept
+    _JVM_ActiveProcessorCount
+    _JVM_AllocateNewArray
+    _JVM_AllocateNewObject
+    _JVM_ArrayCopy
+    _JVM_AssertionStatusDirectives
+    _JVM_Available
+    _JVM_Bind
+    _JVM_ClassDepth
+    _JVM_ClassLoaderDepth
+    _JVM_Clone
+    _JVM_Close
+    _JVM_CX8Field
+    _JVM_CompileClass
+    _JVM_CompileClasses
+    _JVM_CompilerCommand
+    _JVM_Connect
+    _JVM_ConstantPoolGetClassAt
+    _JVM_ConstantPoolGetClassAtIfLoaded
+    _JVM_ConstantPoolGetDoubleAt
+    _JVM_ConstantPoolGetFieldAt
+    _JVM_ConstantPoolGetFieldAtIfLoaded
+    _JVM_ConstantPoolGetFloatAt
+    _JVM_ConstantPoolGetIntAt
+    _JVM_ConstantPoolGetLongAt
+    _JVM_ConstantPoolGetMethodAt
+    _JVM_ConstantPoolGetMethodAtIfLoaded
+    _JVM_ConstantPoolGetMemberRefInfoAt
+    _JVM_ConstantPoolGetSize
+    _JVM_ConstantPoolGetStringAt
+    _JVM_ConstantPoolGetUTF8At
+    _JVM_CountStackFrames
+    _JVM_CurrentClassLoader
+    _JVM_CurrentLoadedClass
+    _JVM_CurrentThread
+    _JVM_CurrentTimeMillis
+    _JVM_DefineClass
+    _JVM_DefineClassWithSource
+    _JVM_DefineClassWithSourceCond
+    _JVM_DesiredAssertionStatus
+    _JVM_DisableCompiler
+    _JVM_DoPrivileged
+    _JVM_DTraceGetVersion
+    _JVM_DTraceActivate
+    _JVM_DTraceIsProbeEnabled
+    _JVM_DTraceIsSupported
+    _JVM_DTraceDispose
+    _JVM_DumpAllStacks
+    _JVM_DumpThreads
+    _JVM_EnableCompiler
+    _JVM_Exit
+    _JVM_FillInStackTrace
+    _JVM_FindClassFromClass
+    _JVM_FindClassFromClassLoader
+    _JVM_FindClassFromBootLoader
+    _JVM_FindLibraryEntry
+    _JVM_FindLoadedClass
+    _JVM_FindPrimitiveClass
+    _JVM_FindSignal
+    _JVM_FreeMemory
+    _JVM_GC
+    _JVM_GetAllThreads
+    _JVM_GetArrayElement
+    _JVM_GetArrayLength
+    _JVM_GetCPClassNameUTF
+    _JVM_GetCPFieldClassNameUTF
+    _JVM_GetCPFieldModifiers
+    _JVM_GetCPFieldNameUTF
+    _JVM_GetCPFieldSignatureUTF
+    _JVM_GetCPMethodClassNameUTF
+    _JVM_GetCPMethodModifiers
+    _JVM_GetCPMethodNameUTF
+    _JVM_GetCPMethodSignatureUTF
+    _JVM_GetCallerClass
+    _JVM_GetClassAccessFlags
+    _JVM_GetClassAnnotations
+    _JVM_GetClassCPEntriesCount
+    _JVM_GetClassCPTypes
+    _JVM_GetClassConstantPool
+    _JVM_GetClassContext
+    _JVM_GetClassDeclaredConstructors
+    _JVM_GetClassDeclaredFields
+    _JVM_GetClassDeclaredMethods
+    _JVM_GetClassFieldsCount
+    _JVM_GetClassInterfaces
+    _JVM_GetClassLoader
+    _JVM_GetClassMethodsCount
+    _JVM_GetClassModifiers
+    _JVM_GetClassName
+    _JVM_GetClassNameUTF
+    _JVM_GetClassSignature
+    _JVM_GetClassSigners
+    _JVM_GetClassTypeAnnotations
+    _JVM_GetComponentType
+    _JVM_GetDeclaredClasses
+    _JVM_GetDeclaringClass
+    _JVM_GetEnclosingMethodInfo
+    _JVM_GetFieldAnnotations
+    _JVM_GetFieldIxModifiers
+    _JVM_GetFieldTypeAnnotations
+    _JVM_GetHostName
+    _JVM_GetInheritedAccessControlContext
+    _JVM_GetInterfaceVersion
+    _JVM_GetLastErrorString
+    _JVM_GetManagement
+    _JVM_GetMethodAnnotations
+    _JVM_GetMethodDefaultAnnotationValue
+    _JVM_GetMethodIxArgsSize
+    _JVM_GetMethodIxByteCode
+    _JVM_GetMethodIxByteCodeLength
+    _JVM_GetMethodIxExceptionIndexes
+    _JVM_GetMethodIxExceptionTableEntry
+    _JVM_GetMethodIxExceptionTableLength
+    _JVM_GetMethodIxExceptionsCount
+    _JVM_GetMethodIxLocalsCount
+    _JVM_GetMethodIxMaxStack
+    _JVM_GetMethodIxModifiers
+    _JVM_GetMethodIxNameUTF
+    _JVM_GetMethodIxSignatureUTF
+    _JVM_GetMethodParameterAnnotations
+    _JVM_GetMethodParameters
+    _JVM_GetMethodTypeAnnotations
_JVM_GetPrimitiveArrayElement + _JVM_GetProtectionDomain + _JVM_GetSockName + _JVM_GetSockOpt + _JVM_GetStackAccessControlContext + _JVM_GetStackTraceDepth + _JVM_GetStackTraceElement + _JVM_GetSystemPackage + _JVM_GetSystemPackages + _JVM_GetThreadStateNames + _JVM_GetThreadStateValues + _JVM_GetVersionInfo + _JVM_Halt + _JVM_HoldsLock + _JVM_IHashCode + _JVM_InitAgentProperties + _JVM_InitProperties + _JVM_InitializeCompiler + _JVM_InitializeSocketLibrary + _JVM_InternString + _JVM_Interrupt + _JVM_InvokeMethod + _JVM_IsArrayClass + _JVM_IsConstructorIx + _JVM_IsInterface + _JVM_IsInterrupted + _JVM_IsNaN + _JVM_IsPrimitiveClass + _JVM_IsSameClassPackage + _JVM_IsSilentCompiler + _JVM_IsSupportedJNIVersion + _JVM_IsThreadAlive + _JVM_IsVMGeneratedMethodIx + _JVM_LatestUserDefinedLoader + _JVM_Listen + _JVM_LoadClass0 + _JVM_LoadLibrary + _JVM_Lseek + _JVM_MaxObjectInspectionAge + _JVM_MaxMemory + _JVM_MonitorNotify + _JVM_MonitorNotifyAll + _JVM_MonitorWait + _JVM_NanoTime + _JVM_NativePath + _JVM_NewArray + _JVM_NewInstanceFromConstructor + _JVM_NewMultiArray + _JVM_OnExit + _JVM_Open + _JVM_RaiseSignal + _JVM_RawMonitorCreate + _JVM_RawMonitorDestroy + _JVM_RawMonitorEnter + _JVM_RawMonitorExit + _JVM_Read + _JVM_Recv + _JVM_RecvFrom + _JVM_RegisterSignal + _JVM_ReleaseUTF + _JVM_ResolveClass + _JVM_ResumeThread + _JVM_Send + _JVM_SendTo + _JVM_SetArrayElement + _JVM_SetClassSigners + _JVM_SetLength + _JVM_SetNativeThreadName + _JVM_SetPrimitiveArrayElement + _JVM_SetSockOpt + _JVM_SetThreadPriority + _JVM_Sleep + _JVM_Socket + _JVM_SocketAvailable + _JVM_SocketClose + _JVM_SocketShutdown + _JVM_StartThread + _JVM_StopThread + _JVM_SuspendThread + _JVM_SupportsCX8 + _JVM_Sync + _JVM_Timeout + _JVM_TotalMemory + _JVM_TraceInstructions + _JVM_TraceMethodCalls + _JVM_UnloadLibrary + _JVM_Write + _JVM_Yield + _JVM_handle_bsd_signal # miscellaneous functions - jio_fprintf; - jio_printf; - jio_snprintf; - jio_vfprintf; - jio_vsnprintf; - fork1; - numa_warn; - numa_error; - - # Needed because there is no JVM interface for this. - sysThreadAvailableStackWithSlack; + _jio_fprintf + _jio_printf + _jio_snprintf + _jio_vfprintf + _jio_vsnprintf # This is for Forte Analyzer profiling support. - AsyncGetCallTrace; - - # INSERT VTABLE SYMBOLS HERE + _AsyncGetCallTrace - local: - *; -}; + # INSERT VTABLE SYMBOLS HERE diff -r 16b10327b00d -r 90d6c221d4e5 make/bsd/makefiles/minimal1.make --- a/make/bsd/makefiles/minimal1.make Tue Jul 16 10:55:48 2013 -0400 +++ b/make/bsd/makefiles/minimal1.make Tue Jul 16 12:20:08 2013 -0400 @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # TYPE=MINIMAL1 @@ -32,6 +32,7 @@ INCLUDE_MANAGEMENT ?= false INCLUDE_ALL_GCS ?= false INCLUDE_NMT ?= false +INCLUDE_TRACE ?= false INCLUDE_CDS ?= false CXXFLAGS += -DMINIMAL_JVM -DCOMPILER1 -DVMTYPE=\"Minimal\" diff -r 16b10327b00d -r 90d6c221d4e5 make/bsd/makefiles/top.make --- a/make/bsd/makefiles/top.make Tue Jul 16 10:55:48 2013 -0400 +++ b/make/bsd/makefiles/top.make Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -80,7 +80,7 @@ @echo All done. 
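[Editor's sketch] The minimal1.make hunk above adds `INCLUDE_TRACE ?= false`. The `?=` form only supplies a default, so tracing can still be switched back on from the make command line, and the vm.make hunks later in this patch gate the JFR source paths on the same flag. A minimal sketch of the pattern, assuming GNU make (paths as used elsewhere in this patch):

    # Default only: `make INCLUDE_TRACE=true` on the command line overrides it.
    INCLUDE_TRACE ?= false

    # Pick up the alternate (closed) JFR sources only while tracing is enabled.
    ifneq ($(INCLUDE_TRACE), false)
    CORE_PATHS += $(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
                    find $(HS_ALT_SRC)/share/vm/jfr -type d; fi)
    endif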
 # This is an explicit dependency for the sake of parallel makes.
-vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff sa_stuff dtrace_stuff
+vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff dtrace_stuff
 	@# We need a null action here, so implicit rules don't get consulted.
 
 $(Cached_plat): $(Plat_File)
@@ -94,6 +94,10 @@
 jvmti_stuff: $(Cached_plat) $(adjust-mflags)
 	@$(MAKE) -f jvmti.make $(MFLAGS-adjusted)
 
+# generate trace files
+trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags)
+	@$(MAKE) -f trace.make $(MFLAGS-adjusted)
+
 ifeq ($(OS_VENDOR), Darwin)
 # generate dtrace header files
 dtrace_stuff: $(Cached_plat) $(adjust-mflags)
diff -r 16b10327b00d -r 90d6c221d4e5 make/bsd/makefiles/trace.make
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/bsd/makefiles/trace.make	Tue Jul 16 12:20:08 2013 -0400
@@ -0,0 +1,121 @@
+#
+# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# This makefile (trace.make) is included from the trace.make in the
+# build directories.
+#
+# It knows how to build and run the tools to generate trace files.
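[Editor's sketch] All of the generation rules in this new file funnel through one shared GENERATE_CODE command. A minimal sketch of the `$(word n,$^)` idiom it relies on, with illustrative file names (out.hpp, model.xml, style.xsl are not from the patch): `$^` is the full prerequisite list, so word 1 names the XML model and word 2 the stylesheet, while trailing prerequisites such as DTDs only force rebuilds.

    out.hpp: model.xml style.xsl model.dtd
    	@echo Generating $@
    	$(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@
    	test -f $@    # fail the rule if the generator exited 0 without writing output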
+ +include $(GAMMADIR)/make/bsd/makefiles/rules.make +include $(GAMMADIR)/make/altsrc.make + +# ######################################################################### + +HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \ + echo "true"; else echo "false";\ + fi) + +TOPDIR = $(shell echo `pwd`) +GENERATED = $(TOPDIR)/../generated +JvmtiOutDir = $(GENERATED)/jvmtifiles +TraceOutDir = $(GENERATED)/tracefiles + +TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace +TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace + +# set VPATH so make knows where to look for source files +Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir) +VPATH += $(Src_Dirs_V:%=%:) + +TraceGeneratedNames = \ + traceEventClasses.hpp \ + traceEventIds.hpp \ + traceTypes.hpp + +ifeq ($(HAS_ALT_SRC), true) +TraceGeneratedNames += \ + traceRequestables.hpp \ + traceEventControl.hpp + +ifneq ($(INCLUDE_TRACE), false) +TraceGeneratedNames += traceProducer.cpp +endif + +endif + + +TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%) + +XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen + +XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \ + $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod +ifeq ($(HAS_ALT_SRC), true) + XML_DEPS += $(TraceAltSrcDir)/traceevents.xml +endif + +.PHONY: all clean cleanall + +# ######################################################################### + +all: $(TraceGeneratedFiles) + +GENERATE_CODE= \ + $(QUIETLY) echo Generating $@; \ + $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \ + test -f $@ + +$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS) + $(GENERATE_CODE) + +ifeq ($(HAS_ALT_SRC), false) + +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS) + $(GENERATE_CODE) + +else + +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS) + $(GENERATE_CODE) + +endif + +# ######################################################################### + + +clean cleanall: + rm $(TraceGeneratedFiles) + diff -r 16b10327b00d -r 90d6c221d4e5 make/bsd/makefiles/vm.make --- a/make/bsd/makefiles/vm.make Tue Jul 16 10:55:48 2013 -0400 +++ b/make/bsd/makefiles/vm.make Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. 
-# +# # # Rules to build JVM and related libraries, included from vm.make in the build @@ -52,7 +52,7 @@ # Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm # The adfiles directory contains ad_.[ch]pp. # The jvmtifiles directory contains jvmti*.[ch]pp -Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles +Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles VPATH += $(Src_Dirs_V:%=%:) # set INCLUDES for C preprocessor. @@ -66,7 +66,7 @@ SYMFLAG = endif -# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined +# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined # in $(GAMMADIR)/make/defs.make ifeq ($(HOTSPOT_BUILD_VERSION),) BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HOTSPOT_RELEASE_VERSION)\"" @@ -93,7 +93,7 @@ # This is VERY important! The version define must only be supplied to vm_version.o # If not, ccache will not re-use the cache at all, since the version string might contain -# a time and date. +# a time and date. CXXFLAGS/vm_version.o += ${JRE_VERSION} CXXFLAGS/BYFILE = $(CXXFLAGS/$@) @@ -105,10 +105,6 @@ CXXFLAGS += -DDEFAULT_LIBPATH="\"$(DEFAULT_LIBPATH)\"" endif -ifndef JAVASE_EMBEDDED -CFLAGS += -DINCLUDE_TRACE -endif - # CFLAGS_WARN holds compiler options to suppress/enable warnings. CFLAGS += $(CFLAGS_WARN/BYFILE) @@ -165,15 +161,15 @@ SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm -ifndef JAVASE_EMBEDDED -SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \ +CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path)) +CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles + +ifneq ($(INCLUDE_TRACE), false) +CORE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \ find $(HS_ALT_SRC)/share/vm/jfr -type d; \ fi) endif -CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path)) -CORE_PATHS+=$(GENERATED)/jvmtifiles - COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1) COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1 diff -r 16b10327b00d -r 90d6c221d4e5 make/defs.make --- a/make/defs.make Tue Jul 16 10:55:48 2013 -0400 +++ b/make/defs.make Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ # -# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # The common definitions for hotspot builds. @@ -236,7 +236,7 @@ JDK_IMAGE_DIR=$(ALT_JDK_IMAGE_DIR) endif -# The platform dependent defs.make defines platform specific variable such +# The platform dependent defs.make defines platform specific variable such # as ARCH, EXPORT_LIST etc. We must place the include here after BOOTDIR is defined. include $(GAMMADIR)/make/$(OSNAME)/makefiles/defs.make @@ -258,7 +258,7 @@ # LIBARCH - directory name in JDK/JRE # Use uname output for SRCARCH, but deal with platform differences. If ARCH - # is not explicitly listed below, it is treated as x86. + # is not explicitly listed below, it is treated as x86. 
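[Editor's sketch] The SRCARCH assignment that opens the next hunk uses computed variable names: `$(filter ...)` keeps `$(ARCH)` only when it is in the known list, so any other value expands to nothing and selects the empty-keyed `ARCH/` entry, which is the x86 fallback. A minimal sketch of the idiom (table entries illustrative):

    ARCH/      = x86     # empty key: fallback for unrecognized ARCH values
    ARCH/sparc = sparc
    ARCH/amd64 = x86
    SRCARCH = $(ARCH/$(filter sparc amd64,$(ARCH)))   # e.g. ARCH=mips -> $(ARCH/) -> x86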
SRCARCH = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 arm ppc zero,$(ARCH))) ARCH/ = x86 ARCH/sparc = sparc @@ -337,8 +337,5 @@ EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/$(JDK_INCLUDE_SUBDIR)/jni_md.h EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jmm.h -ifndef JAVASE_EMBEDDED -EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jfr.h -endif +.PHONY: $(HS_ALT_MAKE)/defs.make -.PHONY: $(HS_ALT_MAKE)/defs.make diff -r 16b10327b00d -r 90d6c221d4e5 make/excludeSrc.make --- a/make/excludeSrc.make Tue Jul 16 10:55:48 2013 -0400 +++ b/make/excludeSrc.make Tue Jul 16 12:20:08 2013 -0400 @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # ifeq ($(INCLUDE_JVMTI), false) CXXFLAGS += -DINCLUDE_JVMTI=0 @@ -100,7 +100,7 @@ parCardTableModRefBS.cpp parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp \ gSpaceCounters.cpp allocationStats.cpp spaceCounters.cpp gcAdaptivePolicyCounters.cpp \ mutableNUMASpace.cpp immutableSpace.cpp yieldingWorkGroup.cpp -endif +endif ifeq ($(INCLUDE_NMT), false) CXXFLAGS += -DINCLUDE_NMT=0 @@ -110,3 +110,7 @@ memBaseline.cpp memPtr.cpp memRecorder.cpp memReporter.cpp memSnapshot.cpp memTrackWorker.cpp \ memTracker.cpp nmtDCmd.cpp endif + +-include $(HS_ALT_MAKE)/excludeSrc.make + +.PHONY: $(HS_ALT_MAKE)/excludeSrc.make diff -r 16b10327b00d -r 90d6c221d4e5 make/hotspot_version --- a/make/hotspot_version Tue Jul 16 10:55:48 2013 -0400 +++ b/make/hotspot_version Tue Jul 16 12:20:08 2013 -0400 @@ -35,7 +35,7 @@ HS_MAJOR_VER=25 HS_MINOR_VER=0 -HS_BUILD_NUMBER=37 +HS_BUILD_NUMBER=42 JDK_MAJOR_VER=1 JDK_MINOR_VER=8 diff -r 16b10327b00d -r 90d6c221d4e5 make/linux/makefiles/buildtree.make --- a/make/linux/makefiles/buildtree.make Tue Jul 16 10:55:48 2013 -0400 +++ b/make/linux/makefiles/buildtree.make Tue Jul 16 12:20:08 2013 -0400 @@ -47,6 +47,7 @@ # flags.make - with macro settings # vm.make - to support making "$(MAKE) -v vm.make" in makefiles # adlc.make - +# trace.make - generate tracing event and type definitions # jvmti.make - generate JVMTI bindings from the spec (JSR-163) # sa.make - generate SA jar file and natives # @@ -114,7 +115,8 @@ SIMPLE_DIRS = \ $(PLATFORM_DIR)/generated/dependencies \ $(PLATFORM_DIR)/generated/adfiles \ - $(PLATFORM_DIR)/generated/jvmtifiles + $(PLATFORM_DIR)/generated/jvmtifiles \ + $(PLATFORM_DIR)/generated/tracefiles TARGETS = debug fastdebug optimized product SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS)) @@ -122,7 +124,7 @@ # For dependencies and recursive makes. BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make -BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make +BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \ SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT) @@ -269,6 +271,8 @@ echo && \ echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \ echo "SYSDEFS += \$$(HOTSPOT_EXTRA_SYSDEFS)"; \ + [ -n "$(INCLUDE_TRACE)" ] && \ + echo && echo "INCLUDE_TRACE = $(INCLUDE_TRACE)"; \ echo; \ [ -n "$(SPEC)" ] && \ echo "include $(SPEC)"; \ @@ -337,6 +341,16 @@ echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ ) > $@ +trace.make: $(BUILDTREE_MAKE) + @echo Creating $@ ... 
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
+	) > $@
+
 sa.make: $(BUILDTREE_MAKE)
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
diff -r 16b10327b00d -r 90d6c221d4e5 make/linux/makefiles/gcc.make
--- a/make/linux/makefiles/gcc.make	Tue Jul 16 10:55:48 2013 -0400
+++ b/make/linux/makefiles/gcc.make	Tue Jul 16 12:20:08 2013 -0400
@@ -214,7 +214,7 @@
   WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
 endif
 
-WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function
+WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value
 
 ifeq ($(USE_CLANG),)
   # Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
@@ -350,9 +350,9 @@
 ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
   ifeq ($(USE_CLANG), true)
     # Clang doesn't understand -gstabs
-    OPT_CFLAGS += -g
+    DEBUG_CFLAGS += -g
   else
-    OPT_CFLAGS += -gstabs
+    DEBUG_CFLAGS += -gstabs
   endif
 endif
 
@@ -365,9 +365,9 @@
 ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
   ifeq ($(USE_CLANG), true)
     # Clang doesn't understand -gstabs
-    OPT_CFLAGS += -g
+    FASTDEBUG_CFLAGS += -g
   else
-    OPT_CFLAGS += -gstabs
+    FASTDEBUG_CFLAGS += -gstabs
   endif
 endif
 
diff -r 16b10327b00d -r 90d6c221d4e5 make/linux/makefiles/mapfile-vers-debug
--- a/make/linux/makefiles/mapfile-vers-debug	Tue Jul 16 10:55:48 2013 -0400
+++ b/make/linux/makefiles/mapfile-vers-debug	Tue Jul 16 12:20:08 2013 -0400
@@ -223,7 +223,6 @@
                 JVM_SetLength;
                 JVM_SetNativeThreadName;
                 JVM_SetPrimitiveArrayElement;
-                JVM_SetProtectionDomain;
                 JVM_SetSockOpt;
                 JVM_SetThreadPriority;
                 JVM_Sleep;
diff -r 16b10327b00d -r 90d6c221d4e5 make/linux/makefiles/mapfile-vers-product
--- a/make/linux/makefiles/mapfile-vers-product	Tue Jul 16 10:55:48 2013 -0400
+++ b/make/linux/makefiles/mapfile-vers-product	Tue Jul 16 12:20:08 2013 -0400
@@ -223,7 +223,6 @@
                 JVM_SetLength;
                 JVM_SetNativeThreadName;
                 JVM_SetPrimitiveArrayElement;
-                JVM_SetProtectionDomain;
                 JVM_SetSockOpt;
                 JVM_SetThreadPriority;
                 JVM_Sleep;
diff -r 16b10327b00d -r 90d6c221d4e5 make/linux/makefiles/minimal1.make
--- a/make/linux/makefiles/minimal1.make	Tue Jul 16 10:55:48 2013 -0400
+++ b/make/linux/makefiles/minimal1.make	Tue Jul 16 12:20:08 2013 -0400
@@ -19,7 +19,7 @@
 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 # or visit www.oracle.com if you need additional information or have any
 # questions.
-# 
+#
 #
 
 TYPE=MINIMAL1
@@ -32,6 +32,7 @@
 INCLUDE_MANAGEMENT ?= false
 INCLUDE_ALL_GCS ?= false
 INCLUDE_NMT ?= false
+INCLUDE_TRACE ?= false
 INCLUDE_CDS ?= false
 
 CXXFLAGS += -DMINIMAL_JVM -DCOMPILER1 -DVMTYPE=\"Minimal\"
diff -r 16b10327b00d -r 90d6c221d4e5 make/linux/makefiles/top.make
--- a/make/linux/makefiles/top.make	Tue Jul 16 10:55:48 2013 -0400
+++ b/make/linux/makefiles/top.make	Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -80,7 +80,7 @@
 	@echo All done.
 
 # This is an explicit dependency for the sake of parallel makes.
-vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff sa_stuff
+vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) trace_stuff jvmti_stuff sa_stuff
 	@# We need a null action here, so implicit rules don't get consulted.
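[Editor's sketch] The lone `@#` recipe above is deliberate: a recipe that exists but does nothing keeps make from consulting implicit rules for the target, while the prerequisites still do all the real work. A sketch of the idiom with a hypothetical target name:

    # "@#" runs a shell comment, i.e. a silent, successful no-op. With no
    # recipe at all, make could go searching implicit rules for the target.
    preliminaries: generated_headers generated_stubs
    	@# null action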
$(Cached_plat): $(Plat_File) @@ -94,6 +94,10 @@ jvmti_stuff: $(Cached_plat) $(adjust-mflags) @$(MAKE) -f jvmti.make $(MFLAGS-adjusted) +# generate trace files +trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags) + @$(MAKE) -f trace.make $(MFLAGS-adjusted) + # generate SA jar files and native header sa_stuff: @$(MAKE) -f sa.make $(MFLAGS-adjusted) diff -r 16b10327b00d -r 90d6c221d4e5 make/linux/makefiles/trace.make --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/linux/makefiles/trace.make Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,120 @@ +# +# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# + +# This makefile (trace.make) is included from the trace.make in the +# build directories. +# +# It knows how to build and run the tools to generate trace files. + +include $(GAMMADIR)/make/linux/makefiles/rules.make +include $(GAMMADIR)/make/altsrc.make + +# ######################################################################### + +HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \ + echo "true"; else echo "false";\ + fi) + +TOPDIR = $(shell echo `pwd`) +GENERATED = $(TOPDIR)/../generated +JvmtiOutDir = $(GENERATED)/jvmtifiles +TraceOutDir = $(GENERATED)/tracefiles + +TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace +TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace + +# set VPATH so make knows where to look for source files +Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir) +VPATH += $(Src_Dirs_V:%=%:) + +TraceGeneratedNames = \ + traceEventClasses.hpp \ + traceEventIds.hpp \ + traceTypes.hpp + +ifeq ($(HAS_ALT_SRC), true) +TraceGeneratedNames += \ + traceRequestables.hpp \ + traceEventControl.hpp + +ifneq ($(INCLUDE_TRACE), false) +TraceGeneratedNames += traceProducer.cpp +endif + +endif + +TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%) + +XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen + +XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \ + $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod +ifeq ($(HAS_ALT_SRC), true) + XML_DEPS += $(TraceAltSrcDir)/traceevents.xml +endif + +.PHONY: all clean cleanall + +# ######################################################################### + +all: $(TraceGeneratedFiles) + +GENERATE_CODE= \ + $(QUIETLY) echo Generating $@; \ + $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \ + test -f $@ + +$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl 
$(XML_DEPS) + $(GENERATE_CODE) + +ifeq ($(HAS_ALT_SRC), false) + +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS) + $(GENERATE_CODE) + +else + +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS) + $(GENERATE_CODE) + +endif + +# ######################################################################### + +clean cleanall: + rm $(TraceGeneratedFiles) + + diff -r 16b10327b00d -r 90d6c221d4e5 make/linux/makefiles/vm.make --- a/make/linux/makefiles/vm.make Tue Jul 16 10:55:48 2013 -0400 +++ b/make/linux/makefiles/vm.make Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # Rules to build JVM and related libraries, included from vm.make in the build @@ -53,7 +53,7 @@ # Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm # The adfiles directory contains ad_.[ch]pp. # The jvmtifiles directory contains jvmti*.[ch]pp -Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles +Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles VPATH += $(Src_Dirs_V:%=%:) # set INCLUDES for C preprocessor. @@ -73,7 +73,7 @@ endif endif -# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined +# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined # in $(GAMMADIR)/make/defs.make ifeq ($(HOTSPOT_BUILD_VERSION),) BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HOTSPOT_RELEASE_VERSION)\"" @@ -100,7 +100,7 @@ # This is VERY important! The version define must only be supplied to vm_version.o # If not, ccache will not re-use the cache at all, since the version string might contain -# a time and date. +# a time and date. CXXFLAGS/vm_version.o += ${JRE_VERSION} CXXFLAGS/BYFILE = $(CXXFLAGS/$@) @@ -108,12 +108,10 @@ # File specific flags CXXFLAGS += $(CXXFLAGS/BYFILE) - -ifndef JAVASE_EMBEDDED -ifneq (${ARCH},arm) -CFLAGS += -DINCLUDE_TRACE -endif -endif +# Large File Support +ifneq ($(LP64), 1) +CXXFLAGS/ostream.o += -D_FILE_OFFSET_BITS=64 +endif # ifneq ($(LP64), 1) # CFLAGS_WARN holds compiler options to suppress/enable warnings. 
CFLAGS += $(CFLAGS_WARN/BYFILE) @@ -159,16 +157,14 @@ SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm -ifndef JAVASE_EMBEDDED -ifneq (${ARCH},arm) -SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \ +CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path)) +CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles + +ifneq ($(INCLUDE_TRACE), false) +CORE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \ find $(HS_ALT_SRC)/share/vm/jfr -type d; \ fi) endif -endif - -CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path)) -CORE_PATHS+=$(GENERATED)/jvmtifiles COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1) COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1 @@ -317,7 +313,7 @@ # With more recent Redhat releases (or the cutting edge version Fedora), if # SELinux is configured to be enabled, the runtime linker will fail to apply # the text relocation to libjvm.so considering that it is built as a non-PIC -# DSO. To workaround that, we run chcon to libjvm.so after it is built. See +# DSO. To workaround that, we run chcon to libjvm.so after it is built. See # details in bug 6538311. $(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT) $(QUIETLY) { \ diff -r 16b10327b00d -r 90d6c221d4e5 make/solaris/makefiles/buildtree.make --- a/make/solaris/makefiles/buildtree.make Tue Jul 16 10:55:48 2013 -0400 +++ b/make/solaris/makefiles/buildtree.make Tue Jul 16 12:20:08 2013 -0400 @@ -47,6 +47,7 @@ # flags.make - with macro settings # vm.make - to support making "$(MAKE) -v vm.make" in makefiles # adlc.make - +# trace.make - generate tracing event and type definitions # jvmti.make - generate JVMTI bindings from the spec (JSR-163) # sa.make - generate SA jar file and natives # @@ -107,7 +108,8 @@ SIMPLE_DIRS = \ $(PLATFORM_DIR)/generated/dependencies \ $(PLATFORM_DIR)/generated/adfiles \ - $(PLATFORM_DIR)/generated/jvmtifiles + $(PLATFORM_DIR)/generated/jvmtifiles \ + $(PLATFORM_DIR)/generated/tracefiles TARGETS = debug fastdebug optimized product SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS)) @@ -115,7 +117,7 @@ # For dependencies and recursive makes. BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make -BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make +BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \ ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT) @@ -327,6 +329,16 @@ echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ ) > $@ +trace.make: $(BUILDTREE_MAKE) + @echo Creating $@ ... + $(QUIETLY) ( \ + $(BUILDTREE_COMMENT); \ + echo; \ + echo include flags.make; \ + echo; \ + echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ + ) > $@ + sa.make: $(BUILDTREE_MAKE) @echo Creating $@ ... 
$(QUIETLY) ( \ diff -r 16b10327b00d -r 90d6c221d4e5 make/solaris/makefiles/mapfile-vers --- a/make/solaris/makefiles/mapfile-vers Tue Jul 16 10:55:48 2013 -0400 +++ b/make/solaris/makefiles/mapfile-vers Tue Jul 16 12:20:08 2013 -0400 @@ -223,7 +223,6 @@ JVM_SetLength; JVM_SetNativeThreadName; JVM_SetPrimitiveArrayElement; - JVM_SetProtectionDomain; JVM_SetSockOpt; JVM_SetThreadPriority; JVM_Sleep; diff -r 16b10327b00d -r 90d6c221d4e5 make/solaris/makefiles/top.make --- a/make/solaris/makefiles/top.make Tue Jul 16 10:55:48 2013 -0400 +++ b/make/solaris/makefiles/top.make Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ # -# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -73,7 +73,7 @@ @echo All done. # This is an explicit dependency for the sake of parallel makes. -vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff sa_stuff +vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff @# We need a null action here, so implicit rules don't get consulted. $(Cached_plat): $(Plat_File) @@ -87,6 +87,10 @@ jvmti_stuff: $(Cached_plat) $(adjust-mflags) @$(MAKE) -f jvmti.make $(MFLAGS-adjusted) +# generate trace files +trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags) + @$(MAKE) -f trace.make $(MFLAGS-adjusted) + # generate SA jar files and native header sa_stuff: @$(MAKE) -f sa.make $(MFLAGS-adjusted) @@ -127,5 +131,5 @@ rm -fr $(GENERATED) .PHONY: default vm_build_preliminaries -.PHONY: lists ad_stuff jvmti_stuff sa_stuff the_vm clean realclean +.PHONY: lists ad_stuff jvmti_stuff trace_stuff sa_stuff the_vm clean realclean .PHONY: checks check_os_version install diff -r 16b10327b00d -r 90d6c221d4e5 make/solaris/makefiles/trace.make --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/solaris/makefiles/trace.make Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,116 @@ +# +# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# + +# This makefile (trace.make) is included from the trace.make in the +# build directories. +# +# It knows how to build and run the tools to generate trace files. 
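[Editor's sketch] As in the BSD and Linux copies of this file, the body below opens by probing for the closed trace sources. A sketch of that `$(shell ...)` probe as used a few lines down: it runs once at parse time and yields the literal string "true" or "false" for the later `ifeq` tests.

    HAS_ALT_SRC := $(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
                     echo "true"; else echo "false"; fi)

    ifeq ($(HAS_ALT_SRC), true)
    TraceGeneratedNames += traceRequestables.hpp traceEventControl.hpp
    endif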
+ +include $(GAMMADIR)/make/solaris/makefiles/rules.make +include $(GAMMADIR)/make/altsrc.make + +# ######################################################################### + +HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \ + echo "true"; else echo "false";\ + fi) + +TOPDIR = $(shell echo `pwd`) +GENERATED = $(TOPDIR)/../generated +JvmtiOutDir = $(GENERATED)/jvmtifiles +TraceOutDir = $(GENERATED)/tracefiles + +TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace +TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace + +# set VPATH so make knows where to look for source files +Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir) +VPATH += $(Src_Dirs_V:%=%:) + +TraceGeneratedNames = \ + traceEventClasses.hpp \ + traceEventIds.hpp \ + traceTypes.hpp + +ifeq ($(HAS_ALT_SRC), true) +TraceGeneratedNames += \ + traceRequestables.hpp \ + traceEventControl.hpp \ + traceProducer.cpp +endif + +TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%) + +XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen + +XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \ + $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod +ifeq ($(HAS_ALT_SRC), true) + XML_DEPS += $(TraceAltSrcDir)/traceevents.xml +endif + +.PHONY: all clean cleanall + +# ######################################################################### + +all: $(TraceGeneratedFiles) + +GENERATE_CODE= \ + $(QUIETLY) echo Generating $@; \ + $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \ + test -f $@ + +$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS) + $(GENERATE_CODE) + +ifeq ($(HAS_ALT_SRC), false) + +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS) + $(GENERATE_CODE) + +else + +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS) + $(GENERATE_CODE) + +$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS) + $(GENERATE_CODE) + +endif + +# ######################################################################### + +clean cleanall: + rm $(TraceGeneratedFiles) + + diff -r 16b10327b00d -r 90d6c221d4e5 make/solaris/makefiles/vm.make --- a/make/solaris/makefiles/vm.make Tue Jul 16 10:55:48 2013 -0400 +++ b/make/solaris/makefiles/vm.make Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ # -# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # Rules to build JVM and related libraries, included from vm.make in the build @@ -48,7 +48,7 @@ # Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm # The adfiles directory contains ad_.[ch]pp. 
# The jvmtifiles directory contains jvmti*.[ch]pp -Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles +Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles VPATH += $(Src_Dirs_V:%=%:) # set INCLUDES for C preprocessor @@ -87,7 +87,7 @@ # This is VERY important! The version define must only be supplied to vm_version.o # If not, ccache will not re-use the cache at all, since the version string might contain -# a time and date. +# a time and date. CXXFLAGS/vm_version.o += ${JRE_VERSION} CXXFLAGS/BYFILE = $(CXXFLAGS/$@) @@ -95,6 +95,10 @@ # File specific flags CXXFLAGS += $(CXXFLAGS/BYFILE) +# Large File Support +ifneq ($(LP64), 1) +CXXFLAGS/ostream.o += -D_FILE_OFFSET_BITS=64 +endif # ifneq ($(LP64), 1) # CFLAGS_WARN holds compiler options to suppress/enable warnings. CFLAGS += $(CFLAGS_WARN) @@ -103,7 +107,7 @@ CFLAGS += $(CFLAGS/NOEX) # Extra flags from gnumake's invocation or environment -CFLAGS += $(EXTRA_CFLAGS) -DINCLUDE_TRACE +CFLAGS += $(EXTRA_CFLAGS) # Math Library (libm.so), do not use -lm. # There might be two versions of libm.so on the build system: @@ -137,9 +141,7 @@ LIBS += -lsocket -lsched -ldl $(LIBM) -lthread -lc -ldemangle endif # sparcWorks -ifeq ("${Platform_arch}", "sparc") LIBS += -lkstat -endif # By default, link the *.o into the library, not the executable. LINK_INTO$(LINK_INTO) = LIBJVM @@ -177,12 +179,14 @@ SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm -SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \ +CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path)) +CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles + +ifneq ($(INCLUDE_TRACE), false) +CORE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \ find $(HS_ALT_SRC)/share/vm/jfr -type d; \ fi) - -CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path)) -CORE_PATHS+=$(GENERATED)/jvmtifiles +endif COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1) COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1 @@ -287,7 +291,7 @@ LINK_VM = $(LINK_LIB.CXX) endif # making the library: -$(LIBJVM): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(LIBJVM.o) $(LIBJVM_MAPFILE) +$(LIBJVM): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(LIBJVM.o) $(LIBJVM_MAPFILE) ifeq ($(filter -sbfast -xsbfast, $(CFLAGS_BROWSE)),) @echo Linking vm... 
$(QUIETLY) $(LINK_LIB.CXX/PRE_HOOK) diff -r 16b10327b00d -r 90d6c221d4e5 make/windows/build.make --- a/make/windows/build.make Tue Jul 16 10:55:48 2013 -0400 +++ b/make/windows/build.make Tue Jul 16 12:20:08 2013 -0400 @@ -196,6 +196,12 @@ # End VERSIONINFO parameters +# if hotspot-only build and/or OPENJDK isn't passed down, need to set OPENJDK +!ifndef OPENJDK +!if !exists($(WorkSpace)\src\closed) +OPENJDK=true +!endif +!endif # We don't support SA on ia64, and we can't # build it if we are using a version of Vis Studio @@ -273,6 +279,7 @@ @ echo HS_COMPANY=$(COMPANY_NAME) >> $@ @ echo HS_FILEDESC=$(HS_FILEDESC) >> $@ @ echo HOTSPOT_VM_DISTRO=$(HOTSPOT_VM_DISTRO) >> $@ + @ if "$(OPENJDK)" NEQ "" echo OPENJDK=$(OPENJDK) >> $@ @ echo HS_COPYRIGHT=$(HOTSPOT_VM_COPYRIGHT) >> $@ @ echo HS_NAME=$(PRODUCT_NAME) $(JDK_MKTG_VERSION) >> $@ @ echo HS_BUILD_VER=$(HS_BUILD_VER) >> $@ diff -r 16b10327b00d -r 90d6c221d4e5 make/windows/create_obj_files.sh --- a/make/windows/create_obj_files.sh Tue Jul 16 10:55:48 2013 -0400 +++ b/make/windows/create_obj_files.sh Tue Jul 16 12:20:08 2013 -0400 @@ -71,13 +71,11 @@ BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/${sd}" done -BASE_PATHS="${BASE_PATHS} ${GENERATED}/jvmtifiles" +BASE_PATHS="${BASE_PATHS} ${GENERATED}/jvmtifiles ${GENERATED}/tracefiles" if [ -d "${ALTSRC}/share/vm/jfr" ]; then - BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/agent" - BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/agent/isolated_deps/util" - BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/jvm" - BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr" + BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr" + BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/buffers" fi BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/share/vm/prims/wbtestmethods" diff -r 16b10327b00d -r 90d6c221d4e5 make/windows/makefiles/generated.make --- a/make/windows/makefiles/generated.make Tue Jul 16 10:55:48 2013 -0400 +++ b/make/windows/makefiles/generated.make Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ # -# Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -30,15 +30,19 @@ JvmtiOutDir=jvmtifiles !include $(WorkSpace)/make/windows/makefiles/jvmti.make +# Pick up rules for building trace +TraceOutDir=tracefiles +!include $(WorkSpace)/make/windows/makefiles/trace.make + # Pick up rules for building SA !include $(WorkSpace)/make/windows/makefiles/sa.make AdlcOutDir=adfiles !if ("$(Variant)" == "compiler2") || ("$(Variant)" == "tiered") -default:: $(AdlcOutDir)/ad_$(Platform_arch_model).cpp $(AdlcOutDir)/dfa_$(Platform_arch_model).cpp $(JvmtiGeneratedFiles) buildobjfiles +default:: $(AdlcOutDir)/ad_$(Platform_arch_model).cpp $(AdlcOutDir)/dfa_$(Platform_arch_model).cpp $(JvmtiGeneratedFiles) $(TraceGeneratedFiles) buildobjfiles !else -default:: $(JvmtiGeneratedFiles) buildobjfiles +default:: $(JvmtiGeneratedFiles) $(TraceGeneratedFiles) buildobjfiles !endif buildobjfiles: diff -r 16b10327b00d -r 90d6c221d4e5 make/windows/makefiles/projectcreator.make --- a/make/windows/makefiles/projectcreator.make Tue Jul 16 10:55:48 2013 -0400 +++ b/make/windows/makefiles/projectcreator.make Tue Jul 16 12:20:08 2013 -0400 @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. 
-# +# # !include $(WorkSpace)/make/windows/makefiles/rules.make @@ -72,7 +72,7 @@ -ignorePath ppc \ -ignorePath zero \ -hidePath .hg - + # This is referenced externally by both the IDE and batch builds ProjectCreatorOptions= @@ -89,7 +89,7 @@ -disablePch bytecodeInterpreter.cpp \ -disablePch bytecodeInterpreterWithChecks.cpp \ -disablePch getThread_windows_$(Platform_arch).cpp \ - -disablePch_compiler2 opcodes.cpp + -disablePch_compiler2 opcodes.cpp # Common options for the IDE builds for core, c1, and c2 ProjectCreatorIDEOptions=\ @@ -115,7 +115,7 @@ -define TARGET_OS_ARCH_windows_x86 \ -define TARGET_OS_FAMILY_windows \ -define TARGET_COMPILER_visCPP \ - -define INCLUDE_TRACE \ + -define INCLUDE_TRACE=1 \ $(ProjectCreatorIncludesPRIVATE) # Add in build-specific options @@ -203,4 +203,12 @@ -additionalFile jvmtiEnter.cpp \ -additionalFile jvmtiEnterTrace.cpp \ -additionalFile jvmti.h \ - -additionalFile bytecodeInterpreterWithChecks.cpp + -additionalFile bytecodeInterpreterWithChecks.cpp \ + -additionalFile traceEventClasses.hpp \ + -additionalFile traceEventIds.hpp \ +!if "$(OPENJDK)" != "true" + -additionalFile traceRequestables.hpp \ + -additionalFile traceEventControl.hpp \ + -additionalFile traceProducer.cpp \ +!endif + -additionalFile traceTypes.hpp diff -r 16b10327b00d -r 90d6c221d4e5 make/windows/makefiles/trace.make --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/windows/makefiles/trace.make Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,121 @@ +# +# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# + +# This makefile (trace.make) is included from the trace.make in the +# build directories. +# +# It knows how to build and run the tools to generate trace files. + +!include $(WorkSpace)/make/windows/makefiles/rules.make + +# ######################################################################### + + +TraceAltSrcDir = $(WorkSpace)/src/closed/share/vm/trace +TraceSrcDir = $(WorkSpace)/src/share/vm/trace + +TraceGeneratedNames = \ + traceEventClasses.hpp \ + traceEventIds.hpp \ + traceTypes.hpp + + +!if "$(OPENJDK)" != "true" +TraceGeneratedNames = $(TraceGeneratedNames) \ + traceRequestables.hpp \ + traceEventControl.hpp \ + traceProducer.cpp +!endif + + +#Note: TraceGeneratedFiles must be kept in sync with TraceGeneratedNames by hand. 
+#Should be equivalent to "TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)" +TraceGeneratedFiles = \ + $(TraceOutDir)/traceEventClasses.hpp \ + $(TraceOutDir)/traceEventIds.hpp \ + $(TraceOutDir)/traceTypes.hpp + +!if "$(OPENJDK)" != "true" +TraceGeneratedFiles = $(TraceGeneratedFiles) \ + $(TraceOutDir)/traceRequestables.hpp \ + $(TraceOutDir)/traceEventControl.hpp \ + $(TraceOutDir)/traceProducer.cpp +!endif + +XSLT = $(QUIETLY) $(REMOTE) $(RUN_JAVA) -classpath $(JvmtiOutDir) jvmtiGen + +XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \ + $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod + +!if "$(OPENJDK)" != "true" +XML_DEPS = $(XML_DEPS) $(TraceAltSrcDir)/traceevents.xml +!endif + +.PHONY: all clean cleanall + +# ######################################################################### + +default:: + @if not exist $(TraceOutDir) mkdir $(TraceOutDir) + +$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS) + @echo Generating $@ + @$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventIds.xsl -OUT $(TraceOutDir)/traceEventIds.hpp + +$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS) + @echo Generating $@ + @$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceTypes.xsl -OUT $(TraceOutDir)/traceTypes.hpp + +!if "$(OPENJDK)" == "true" + +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS) + @echo Generating $@ + @$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp + +!else + +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS) + @echo Generating $@ + @$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp + +$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS) + @echo Generating $@ + @$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceProducer.xsl -OUT $(TraceOutDir)/traceProducer.cpp + +$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS) + @echo Generating $@ + @$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceRequestables.xsl -OUT $(TraceOutDir)/traceRequestables.hpp + +$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS) + @echo Generating $@ + @$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventControl.xsl -OUT $(TraceOutDir)/traceEventControl.hpp + +!endif + +# ######################################################################### + +cleanall : + rm $(TraceGeneratedFiles) + + diff -r 16b10327b00d -r 90d6c221d4e5 make/windows/makefiles/vm.make --- a/make/windows/makefiles/vm.make Tue Jul 16 10:55:48 2013 -0400 +++ b/make/windows/makefiles/vm.make Tue Jul 16 12:20:08 2013 -0400 @@ -66,10 +66,6 @@ CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_BUILD_USER=\"$(BuildUser)\"" CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_VM_DISTRO=\"$(HOTSPOT_VM_DISTRO)\"" -!ifndef JAVASE_EMBEDDED -CXX_FLAGS=$(CXX_FLAGS) /D "INCLUDE_TRACE" -!endif - CXX_FLAGS=$(CXX_FLAGS) $(CXX_INCLUDE_DIRS) # Define that so jni.h is on correct side @@ -144,6 +140,7 @@ VM_PATH=../generated VM_PATH=$(VM_PATH);../generated/adfiles VM_PATH=$(VM_PATH);../generated/jvmtifiles 
+VM_PATH=$(VM_PATH);../generated/tracefiles VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/c1 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/compiler VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/code @@ -172,10 +169,8 @@ VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/opto !if exists($(ALTSRC)\share\vm\jfr) -VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/agent -VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/agent/isolated_deps/util -VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/jvm VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr +VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/buffers !endif VM_PATH={$(VM_PATH)} @@ -384,16 +379,13 @@ {..\generated\jvmtifiles}.cpp.obj:: $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< +{..\generated\tracefiles}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + {$(ALTSRC)\share\vm\jfr}.cpp.obj:: $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< -{$(ALTSRC)\share\vm\jfr\agent}.cpp.obj:: - $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< - -{$(ALTSRC)\share\vm\jfr\agent\isolated_deps\util}.cpp.obj:: - $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< - -{$(ALTSRC)\share\vm\jfr\jvm}.cpp.obj:: +{$(ALTSRC)\share\vm\jfr\buffers}.cpp.obj:: $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< default:: diff -r 16b10327b00d -r 90d6c221d4e5 make/windows/projectfiles/common/Makefile --- a/make/windows/projectfiles/common/Makefile Tue Jul 16 10:55:48 2013 -0400 +++ b/make/windows/projectfiles/common/Makefile Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -45,6 +45,12 @@ !endif !endif +# if hotspot-only build and/or OPENJDK isn't passed down, need to set OPENJDK +!ifndef OPENJDK +!if !exists($(WorkSpace)\src\closed) +OPENJDK=true +!endif +!endif !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/projectcreator.make @@ -54,6 +60,10 @@ JvmtiOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\jvmtifiles !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/jvmti.make +# Pick up rules for building trace +TraceOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\tracefiles +!include $(HOTSPOTWORKSPACE)/make/windows/makefiles/trace.make + !if "$(Variant)" == "compiler2" # Pick up rules for building adlc !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/adlc.make @@ -66,7 +76,7 @@ HS_INTERNAL_NAME=jvm -default:: $(AdditionalTargets) $(JvmtiGeneratedFiles) +default:: $(AdditionalTargets) $(JvmtiGeneratedFiles) $(TraceGeneratedFiles) !include $(HOTSPOTWORKSPACE)/make/hotspot_version diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/assembler_sparc.hpp --- a/src/cpu/sparc/vm/assembler_sparc.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/sparc/vm/assembler_sparc.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -57,7 +57,6 @@ fbp_op2 = 5, br_op2 = 2, bp_op2 = 1, - cb_op2 = 7, // V8 sethi_op2 = 4 }; @@ -145,7 +144,6 @@ ldsh_op3 = 0x0a, ldx_op3 = 0x0b, - ldstub_op3 = 0x0d, stx_op3 = 0x0e, swap_op3 = 0x0f, @@ -163,15 +161,6 @@ prefetch_op3 = 0x2d, - - ldc_op3 = 0x30, - ldcsr_op3 = 0x31, - lddc_op3 = 0x33, - stc_op3 = 0x34, - stcsr_op3 = 0x35, - stdcq_op3 = 0x36, - stdc_op3 = 0x37, - casa_op3 = 0x3c, casxa_op3 = 0x3e, @@ -574,17 +563,11 @@ static void vis3_only() { assert( VM_Version::has_vis3(), "This instruction only works on SPARC with VIS3"); } // instruction only in v9 - static void v9_only() { assert( VM_Version::v9_instructions_work(), "This instruction only works on 
SPARC V9"); } - - // instruction only in v8 - static void v8_only() { assert( VM_Version::v8_instructions_work(), "This instruction only works on SPARC V8"); } + static void v9_only() { } // do nothing // instruction deprecated in v9 static void v9_dep() { } // do nothing for now - // some float instructions only exist for single prec. on v8 - static void v8_s_only(FloatRegisterImpl::Width w) { if (w != FloatRegisterImpl::S) v9_only(); } - // v8 has no CC field static void v8_no_cc(CC cc) { if (cc) v9_only(); } @@ -730,11 +713,6 @@ inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none ); inline void bp( Condition c, bool a, CC cc, Predict p, Label& L ); - // pp 121 (V8) - - inline void cb( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none ); - inline void cb( Condition c, bool a, Label& L ); - // pp 149 inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type ); @@ -775,8 +753,8 @@ // pp 157 - void fcmp( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc); emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); } - void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc); emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); } + void fcmp( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); } + void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); } // pp 159 @@ -794,21 +772,11 @@ // pp 162 - void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); } - - void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); } - - // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fnegs is the only instruction available - // on v8 to do negation of single, double and quad precision floats. + void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); } - void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(sd, w)); else emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x05) | fs2(sd, w)); } - - void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); } + void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); } - // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fabss is the only instruction available - // on v8 to do abs operation on single/double/quad precision floats. 
- - void fabs( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(sd, w)); else emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x09) | fs2(sd, w)); } + void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); } // pp 163 @@ -839,11 +807,6 @@ void impdep1( int id1, int const19a ) { v9_only(); emit_int32( op(arith_op) | fcn(id1) | op3(impdep1_op3) | u_field(const19a, 18, 0)); } void impdep2( int id1, int const19a ) { v9_only(); emit_int32( op(arith_op) | fcn(id1) | op3(impdep2_op3) | u_field(const19a, 18, 0)); } - // pp 149 (v8) - - void cpop1( int opc, int cr1, int cr2, int crd ) { v8_only(); emit_int32( op(arith_op) | fcn(crd) | op3(impdep1_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); } - void cpop2( int opc, int cr1, int cr2, int crd ) { v8_only(); emit_int32( op(arith_op) | fcn(crd) | op3(impdep2_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); } - // pp 170 void jmpl( Register s1, Register s2, Register d ); @@ -860,16 +823,6 @@ inline void ldxfsr( Register s1, Register s2 ); inline void ldxfsr( Register s1, int simm13a); - // pp 94 (v8) - - inline void ldc( Register s1, Register s2, int crd ); - inline void ldc( Register s1, int simm13a, int crd); - inline void lddc( Register s1, Register s2, int crd ); - inline void lddc( Register s1, int simm13a, int crd); - inline void ldcsr( Register s1, Register s2, int crd ); - inline void ldcsr( Register s1, int simm13a, int crd); - - // 173 void ldfa( FloatRegisterImpl::Width w, Register s1, Register s2, int ia, FloatRegister d ) { v9_only(); emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); } @@ -910,18 +863,6 @@ void lduwa( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } void ldxa( Register s1, Register s2, int ia, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); } void ldxa( Register s1, int simm13a, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } - void ldda( Register s1, Register s2, int ia, Register d ) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); } - void ldda( Register s1, int simm13a, Register d ) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } - - // pp 179 - - inline void ldstub( Register s1, Register s2, Register d ); - inline void ldstub( Register s1, int simm13a, Register d); - - // pp 180 - - void ldstuba( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); } - void ldstuba( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } // pp 181 @@ -992,11 +933,6 @@ void smulcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); } void smulcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | 
immed(true) | simm(simm13a, 13) ); } - // pp 199 - - void mulscc( Register s1, Register s2, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | rs2(s2) ); } - void mulscc( Register s1, int simm13a, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } - // pp 201 void nop() { emit_int32( op(branch_op) | op2(sethi_op2) ); } @@ -1116,17 +1052,6 @@ void stda( Register d, Register s1, Register s2, int ia ) { emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); } void stda( Register d, Register s1, int simm13a ) { emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } - // pp 97 (v8) - - inline void stc( int crd, Register s1, Register s2 ); - inline void stc( int crd, Register s1, int simm13a); - inline void stdc( int crd, Register s1, Register s2 ); - inline void stdc( int crd, Register s1, int simm13a); - inline void stcsr( int crd, Register s1, Register s2 ); - inline void stcsr( int crd, Register s1, int simm13a); - inline void stdcq( int crd, Register s1, Register s2 ); - inline void stdcq( int crd, Register s1, int simm13a); - // pp 230 void sub( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | rs2(s2) ); } @@ -1153,20 +1078,16 @@ void taddcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3 ) | rs1(s1) | rs2(s2) ); } void taddcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } - void taddcctv( Register s1, Register s2, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | rs2(s2) ); } - void taddcctv( Register s1, int simm13a, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } // pp 235 void tsubcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3 ) | rs1(s1) | rs2(s2) ); } void tsubcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } - void tsubcctv( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | rs2(s2) ); } - void tsubcctv( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } // pp 237 - void trap( Condition c, CC cc, Register s1, Register s2 ) { v8_no_cc(cc); emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); } - void trap( Condition c, CC cc, Register s1, int trapa ) { v8_no_cc(cc); emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); } + void trap( Condition c, CC cc, Register s1, Register s2 ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); } + void trap( Condition c, CC cc, Register s1, int trapa ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); } // simple uncond. 
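Each Assembler method above builds one 32-bit SPARC instruction word by OR-ing fixed bit fields (primary opcode, destination register, op3 selector, source registers or a 13-bit immediate) and handing the result to emit_int32. A minimal C++ sketch of that pattern, with illustrative field packers that stand in for the HotSpot helpers and follow the SPARC V9 format-3 layout:

  #include <cstdint>

  static inline uint32_t op(uint32_t v)  { return v << 30; }  // bits 31:30, primary opcode
  static inline uint32_t rd(uint32_t v)  { return v << 25; }  // bits 29:25, destination register
  static inline uint32_t op3(uint32_t v) { return v << 19; }  // bits 24:19, operation selector
  static inline uint32_t rs1(uint32_t v) { return v << 14; }  // bits 18:14, first source register
  static inline uint32_t rs2(uint32_t v) { return v;       }  // bits  4:0, second source register

  // For example, an "add rs1, rs2, rd" style arithmetic instruction word:
  uint32_t encode_add(uint32_t d, uint32_t s1, uint32_t s2) {
    return op(2) | rd(d) | op3(0x00) | rs1(s1) | rs2(s2);
  }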
diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/assembler_sparc.inline.hpp
--- a/src/cpu/sparc/vm/assembler_sparc.inline.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/cpu/sparc/vm/assembler_sparc.inline.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -63,9 +63,6 @@
 inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
 inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { fbp(c, a, cc, p, target(L)); }
 
-inline void Assembler::cb( Condition c, bool a, address d, relocInfo::relocType rt ) { v8_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(cb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
-inline void Assembler::cb( Condition c, bool a, Label& L ) { cb(c, a, target(L)); }
-
 inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
 inline void Assembler::br( Condition c, bool a, Label& L ) { br(c, a, target(L)); }
@@ -88,18 +85,9 @@
 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }
 
-inline void Assembler::ldfsr( Register s1, Register s2) { v9_dep(); emit_int32( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 inline void Assembler::ldxfsr( Register s1, Register s2) { v9_only(); emit_int32( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
-inline void Assembler::ldc( Register s1, Register s2, int crd) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(ldc_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldc( Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(ldc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::lddc( Register s1, Register s2, int crd) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::lddc( Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::ldcsr( Register s1, Register s2, int crd) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldcsr( Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-
 inline void Assembler::ldsb( Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldsb( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
@@ -119,9 +107,6 @@
 inline void Assembler::ldd( Register s1, Register s2, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldd( Register s1, int simm13a, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
-inline void Assembler::ldstub( Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldstub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-
 inline void Assembler::rett( Register s1, Register s2 ) { cti(); emit_int32( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
 inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { cti(); emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt); has_delay_slot(); }
@@ -132,8 +117,6 @@
 inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
-inline void Assembler::stfsr( Register s1, Register s2) { v9_dep(); emit_int32( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 inline void Assembler::stxfsr( Register s1, Register s2) { v9_only(); emit_int32( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::stxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
@@ -152,17 +135,6 @@
 inline void Assembler::std( Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::std( Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
-// v8 p 99
-
-inline void Assembler::stc( int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stc( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::stdc( int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stdc( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::stcsr( int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stcsr( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::stdcq( int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stdcq( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-
 // pp 231
 
 inline void Assembler::swap( Register s1, Register s2, Register d) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); }
diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -597,13 +597,6 @@
   __ sra(Rdividend, 31, Rscratch);
   __ wry(Rscratch);
-  if (!VM_Version::v9_instructions_work()) {
-    // v9 doesn't require these nops
-    __ nop();
-    __ nop();
-    __ nop();
-    __ nop();
-  }
 
   add_debug_info_for_div0_here(op->info());
@@ -652,10 +645,6 @@
       case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual    : Assembler::f_lessOrEqual);    break;
       case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
       default :                         ShouldNotReachHere();
-    };
-
-    if (!VM_Version::v9_instructions_work()) {
-      __ nop();
     }
     __ fb( acond, false, Assembler::pn, *(op->label()));
   } else {
@@ -725,9 +714,6 @@
     Label L;
     // result must be 0 if value is NaN; test by comparing value to itself
     __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
-    if (!VM_Version::v9_instructions_work()) {
-      __ nop();
-    }
     __ fb(Assembler::f_unordered, true, Assembler::pn, L);
     __ delayed()->st(G0, addr); // annuled if contents of rsrc is not NaN
     __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
@@ -1909,7 +1895,7 @@
       switch (code) {
         case lir_add:  __ add  (lreg, rreg, res); break;
         case lir_sub:  __ sub  (lreg, rreg, res); break;
-        case lir_mul:  __ mult (lreg, rreg, res); break;
+        case lir_mul:  __ mulx (lreg, rreg, res); break;
         default: ShouldNotReachHere();
       }
     }
@@ -1924,7 +1910,7 @@
       switch (code) {
        case lir_add:  __ add  (lreg, simm13, res); break;
        case lir_sub:  __ sub  (lreg, simm13, res); break;
-        case lir_mul:  __ mult (lreg, simm13, res); break;
+        case lir_mul:  __ mulx (lreg, simm13, res); break;
        default: ShouldNotReachHere();
       }
     } else {
@@ -1936,7 +1922,7 @@
       switch (code) {
        case lir_add:  __ add  (lreg, (int)con, res); break;
        case lir_sub:  __ sub  (lreg, (int)con, res); break;
-        case lir_mul:  __ mult (lreg, (int)con, res); break;
+        case lir_mul:  __ mulx (lreg, (int)con, res); break;
        default: ShouldNotReachHere();
       }
     }
@@ -2960,6 +2946,9 @@
   }
 }
 
+void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
+  fatal("CRC32 intrinsic is not implemented on this platform");
+}
 
 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
   Register obj = op->obj_opr()->as_register();
@@ -3234,48 +3223,26 @@
     Register base = mem_addr->base()->as_register();
     if (src->is_register() && dest->is_address()) {
       // G4 is high half, G5 is low half
-      if (VM_Version::v9_instructions_work()) {
-        // clear the top bits of G5, and scale up G4
-        __ srl (src->as_register_lo(), 0, G5);
-        __ sllx(src->as_register_hi(), 32, G4);
-        // combine the two halves into the 64 bits of G4
-        __ or3(G4, G5, G4);
-        null_check_offset = __ offset();
-        if (idx == noreg) {
-          __ stx(G4, base, disp);
-        } else {
-          __ stx(G4, base, idx);
-        }
+      // clear the top bits of G5, and scale up G4
+      __ srl (src->as_register_lo(), 0, G5);
+      __ sllx(src->as_register_hi(), 32, G4);
+      // combine the two halves into the 64 bits of G4
+      __ or3(G4, G5, G4);
+      null_check_offset = __ offset();
+      if (idx == noreg) {
+        __ stx(G4, base, disp);
       } else {
-        __ mov (src->as_register_hi(), G4);
-        __ mov (src->as_register_lo(), G5);
-        null_check_offset = __ offset();
-        if (idx == noreg) {
-          __ std(G4, base, disp);
-        } else {
-          __ std(G4, base, idx);
-        }
+        __ stx(G4, base, idx);
       }
     } else if (src->is_address() && dest->is_register()) {
       null_check_offset = __ offset();
-      if (VM_Version::v9_instructions_work()) {
-        if (idx == noreg) {
-          __ ldx(base, disp, G5);
-        } else {
-          __ ldx(base, idx, G5);
-        }
-        __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
-        __ mov (G5, dest->as_register_lo());     // copy low half into lo
+      if (idx == noreg) {
+        __ ldx(base, disp, G5);
       } else {
-        if (idx == noreg) {
-          __ ldd(base, disp, G4);
-        } else {
-          __ ldd(base, idx, G4);
-        }
-        // G4 is high half, G5 is low half
-        __ mov (G4, dest->as_register_hi());
-        __ mov (G5, dest->as_register_lo());
+        __ ldx(base, idx, G5);
       }
+      __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
+      __ mov (G5, dest->as_register_lo());     // copy low half into lo
     } else {
       Unimplemented();
     }
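The srl/sllx/or3 sequence introduced above is the standard way to fuse two 32-bit register halves into one 64-bit value so a single stx can store a volatile long atomically. The same computation in C++, as a hedged illustration (the function name is ours, not HotSpot's):

  #include <cstdint>

  uint64_t combine_halves(uint32_t hi, uint32_t lo) {
    // sllx: scale the high half up; srl: zero-extend the low half; or3: merge.
    return (static_cast<uint64_t>(hi) << 32) | static_cast<uint64_t>(lo);
  }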
diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
--- a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -784,6 +784,10 @@
   set_no_result(x);
 }
 
+void LIRGenerator::do_update_CRC32(Intrinsic* x) {
+  fatal("CRC32 intrinsic is not implemented on this platform");
+}
+
 // _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
 // _i2b, _i2c, _i2s
 void LIRGenerator::do_Convert(Convert* x) {
diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp
--- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -108,7 +108,7 @@
 
   // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
   assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-  casx_under_lock(mark_addr.base(), Rmark, Rscratch, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+  cas_ptr(mark_addr.base(), Rmark, Rscratch);
   // if compare/exchange succeeded we found an unlocked object and we now have locked it
   // hence we are done
   cmp(Rmark, Rscratch);
@@ -149,7 +149,7 @@
 
   // Check if it is still a light weight lock, this is is true if we see
   // the stack address of the basicLock in the markOop of the object
-  casx_under_lock(mark_addr.base(), Rbox, Rmark, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+  cas_ptr(mark_addr.base(), Rbox, Rmark);
   cmp(Rbox, Rmark);
   brx(Assembler::notEqual, false, Assembler::pn, slow_case);
@@ -276,7 +276,7 @@
     sub(var_size_in_bytes, hdr_size_in_bytes, t2); // compute size of body
     initialize_body(t1, t2);
 #ifndef _LP64
-  } else if (VM_Version::v9_instructions_work() && con_size_in_bytes < threshold * 2) {
+  } else if (con_size_in_bytes < threshold * 2) {
     // on v9 we can do double word stores to fill twice as much space.
     assert(hdr_size_in_bytes % 8 == 0, "double word aligned");
     assert(con_size_in_bytes % 8 == 0, "double word aligned");
diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/c1_globals_sparc.hpp
--- a/src/cpu/sparc/vm/c1_globals_sparc.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/cpu/sparc/vm/c1_globals_sparc.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -49,8 +49,9 @@
 define_pd_global(bool, ResizeTLAB, true );
 define_pd_global(intx, ReservedCodeCacheSize, 32*M );
 define_pd_global(intx, CodeCacheExpansionSize, 32*K );
-define_pd_global(uintx,CodeCacheMinBlockLength, 1);
-define_pd_global(uintx,MetaspaceSize, 12*M );
+define_pd_global(uintx, CodeCacheMinBlockLength, 1);
+define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
+define_pd_global(uintx, MetaspaceSize, 12*M );
 define_pd_global(bool, NeverActAsServerClassMachine, true );
 define_pd_global(intx, NewSizeThreadIncrease, 16*K );
 define_pd_global(uint64_t,MaxRAM, 1ULL*G);
diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/c2_globals_sparc.hpp
--- a/src/cpu/sparc/vm/c2_globals_sparc.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/cpu/sparc/vm/c2_globals_sparc.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -86,7 +86,8 @@
 // Ergonomics related flags
 define_pd_global(uint64_t,MaxRAM, 4ULL*G);
 #endif
-define_pd_global(uintx,CodeCacheMinBlockLength, 4);
+define_pd_global(uintx, CodeCacheMinBlockLength, 4);
+define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
 
 // Heap related flags
 define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(16*M));
diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/c2_init_sparc.cpp
--- a/src/cpu/sparc/vm/c2_init_sparc.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/cpu/sparc/vm/c2_init_sparc.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -30,5 +30,4 @@
 
 void Compile::pd_compiler2_init() {
   guarantee(CodeEntryAlignment >= InteriorEntryAlignment, "" );
-  guarantee( VM_Version::v9_instructions_work(), "Server compiler does not run on V8 systems" );
 }
"" : "v9only") : "v8only"); + return "v9only"; } #endif // CPU_SPARC_VM_DISASSEMBLER_SPARC_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/frame_sparc.cpp --- a/src/cpu/sparc/vm/frame_sparc.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/sparc/vm/frame_sparc.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -252,6 +252,11 @@ return false; } + // Could be a zombie method + if (sender_blob->is_zombie() || sender_blob->is_unloaded()) { + return false; + } + // It should be safe to construct the sender though it might not be valid frame sender(_SENDER_SP, younger_sp, adjusted_stack); @@ -294,10 +299,10 @@ return jcw_safe; } - // If the frame size is 0 something is bad because every nmethod has a non-zero frame size + // If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size // because you must allocate window space - if (sender_blob->frame_size() == 0) { + if (sender_blob->frame_size() <= 0) { assert(!sender_blob->is_nmethod(), "should count return address at least"); return false; } @@ -670,7 +675,7 @@ // validate ConstantPoolCache* ConstantPoolCache* cp = *interpreter_frame_cache_addr(); - if (cp == NULL || !cp->is_metadata()) return false; + if (cp == NULL || !cp->is_metaspace_object()) return false; // validate locals diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/globals_sparc.hpp --- a/src/cpu/sparc/vm/globals_sparc.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/sparc/vm/globals_sparc.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -110,8 +110,5 @@ \ product(uintx, ArraycopyDstPrefetchDistance, 0, \ "Distance to prefetch destination array in arracopy") \ - \ - develop(intx, V8AtomicOperationUnderLockSpinCount, 50, \ - "Number of times to spin wait on a v8 atomic operation lock") \ #endif // CPU_SPARC_VM_GLOBALS_SPARC_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/interp_masm_sparc.cpp --- a/src/cpu/sparc/vm/interp_masm_sparc.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1210,8 +1210,7 @@ st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes()); // compare and exchange object_addr, markOop | 1, stack address of basicLock assert(mark_addr.disp() == 0, "cas must take a zero displacement"); - casx_under_lock(mark_addr.base(), mark_reg, temp_reg, - (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); + cas_ptr(mark_addr.base(), mark_reg, temp_reg); // if the compare and exchange succeeded we are done (we saw an unlocked object) cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done); @@ -1291,8 +1290,7 @@ // we expect to see the stack address of the basicLock in case the // lock is still a light weight lock (lock_reg) assert(mark_addr.disp() == 0, "cas must take a zero displacement"); - casx_under_lock(mark_addr.base(), lock_reg, displaced_header_reg, - (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); + cas_ptr(mark_addr.base(), lock_reg, displaced_header_reg); cmp(lock_reg, displaced_header_reg); brx(Assembler::equal, true, Assembler::pn, done); delayed()->st_ptr(G0, lockobj_addr); // free entry diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/macroAssembler_sparc.cpp --- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -118,7 +118,6 @@ case bp_op2: m = wdisp( word_aligned_ones, 0, 19); v = wdisp( dest_pos, inst_pos, 19); break; case fb_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( 
diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/macroAssembler_sparc.cpp
--- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -118,7 +118,6 @@
   case bp_op2:  m = wdisp( word_aligned_ones, 0, 19); v = wdisp( dest_pos, inst_pos, 19); break;
   case fb_op2:  m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
   case br_op2:  m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
-  case cb_op2:  m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
   case bpr_op2: {
     if (is_cbcond(inst)) {
       m = wdisp10(word_aligned_ones, 0);
@@ -149,7 +148,6 @@
   case bp_op2:  r = inv_wdisp( inst, pos, 19); break;
   case fb_op2:  r = inv_wdisp( inst, pos, 22); break;
   case br_op2:  r = inv_wdisp( inst, pos, 22); break;
-  case cb_op2:  r = inv_wdisp( inst, pos, 22); break;
   case bpr_op2: {
     if (is_cbcond(inst)) {
       r = inv_wdisp10(inst, pos);
@@ -325,12 +323,6 @@
   trap(ST_RESERVED_FOR_USER_0);
 }
 
-// flush windows (except current) using flushw instruction if avail.
-void MacroAssembler::flush_windows() {
-  if (VM_Version::v9_instructions_work())  flushw();
-  else                                     flush_windows_trap();
-}
-
 // Write serialization page so VM thread can do a pseudo remote membar
 // We use the current thread pointer to calculate a thread specific
 // offset to write to within the page. This minimizes bus traffic
@@ -358,88 +350,6 @@
   Unimplemented();
 }
 
-void MacroAssembler::mult(Register s1, Register s2, Register d) {
-  if(VM_Version::v9_instructions_work()) {
-    mulx (s1, s2, d);
-  } else {
-    smul (s1, s2, d);
-  }
-}
-
-void MacroAssembler::mult(Register s1, int simm13a, Register d) {
-  if(VM_Version::v9_instructions_work()) {
-    mulx (s1, simm13a, d);
-  } else {
-    smul (s1, simm13a, d);
-  }
-}
-
-
-#ifdef ASSERT
-void MacroAssembler::read_ccr_v8_assert(Register ccr_save) {
-  const Register s1 = G3_scratch;
-  const Register s2 = G4_scratch;
-  Label get_psr_test;
-  // Get the condition codes the V8 way.
-  read_ccr_trap(s1);
-  mov(ccr_save, s2);
-  // This is a test of V8 which has icc but not xcc
-  // so mask off the xcc bits
-  and3(s2, 0xf, s2);
-  // Compare condition codes from the V8 and V9 ways.
-  subcc(s2, s1, G0);
-  br(Assembler::notEqual, true, Assembler::pt, get_psr_test);
-  delayed()->breakpoint_trap();
-  bind(get_psr_test);
-}
-
-void MacroAssembler::write_ccr_v8_assert(Register ccr_save) {
-  const Register s1 = G3_scratch;
-  const Register s2 = G4_scratch;
-  Label set_psr_test;
-  // Write out the saved condition codes the V8 way
-  write_ccr_trap(ccr_save, s1, s2);
-  // Read back the condition codes using the V9 instruction
-  rdccr(s1);
-  mov(ccr_save, s2);
-  // This is a test of V8 which has icc but not xcc
-  // so mask off the xcc bits
-  and3(s2, 0xf, s2);
-  and3(s1, 0xf, s1);
-  // Compare the V8 way with the V9 way.
-  subcc(s2, s1, G0);
-  br(Assembler::notEqual, true, Assembler::pt, set_psr_test);
-  delayed()->breakpoint_trap();
-  bind(set_psr_test);
-}
-#else
-#define read_ccr_v8_assert(x)
-#define write_ccr_v8_assert(x)
-#endif // ASSERT
-
-void MacroAssembler::read_ccr(Register ccr_save) {
-  if (VM_Version::v9_instructions_work()) {
-    rdccr(ccr_save);
-    // Test code sequence used on V8. Do not move above rdccr.
-    read_ccr_v8_assert(ccr_save);
-  } else {
-    read_ccr_trap(ccr_save);
-  }
-}
-
-void MacroAssembler::write_ccr(Register ccr_save) {
-  if (VM_Version::v9_instructions_work()) {
-    // Test code sequence used on V8. Do not move below wrccr.
-    write_ccr_v8_assert(ccr_save);
-    wrccr(ccr_save);
-  } else {
-    const Register temp_reg1 = G3_scratch;
-    const Register temp_reg2 = G4_scratch;
-    write_ccr_trap(ccr_save, temp_reg1, temp_reg2);
-  }
-}
-
-
 // Calls to C land
 
 #ifdef ASSERT
@@ -465,8 +375,8 @@
 #ifdef ASSERT
   AddressLiteral last_get_thread_addrlit(&last_get_thread);
   set(last_get_thread_addrlit, L3);
-  inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
-  st_ptr(L4, L3, 0);
+  rdpc(L4);
+  inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call
+  st_ptr(L4, L3, 0);
 #endif
   call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
   delayed()->nop();
@@ -1251,12 +1161,6 @@
   while (offset() % modulus != 0) nop();
 }
 
-
-void MacroAssembler::safepoint() {
-  relocate(breakpoint_Relocation::spec(breakpoint_Relocation::safepoint));
-}
-
-
 void RegistersForDebugging::print(outputStream* s) {
   FlagSetting fs(Debugging, true);
   int j;
@@ -1327,7 +1231,7 @@
 
 void RegistersForDebugging::save_registers(MacroAssembler* a) {
   a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
-  a->flush_windows();
+  a->flushw();
   int i;
   for (i = 0; i < 8; ++i) {
     a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, i_offset(i));
@@ -1338,7 +1242,7 @@
   for (i = 0; i < 32; ++i) {
     a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
   }
-  for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
+  for (i = 0; i < 64; i += 2) {
     a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
   }
 }
@@ -1350,7 +1254,7 @@
   for (int j = 0; j < 32; ++j) {
     a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
   }
-  for (int k = 0; k < (VM_Version::v9_instructions_work() ? 64 : 32); k += 2) {
+  for (int k = 0; k < 64; k += 2) {
     a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
   }
 }
@@ -1465,8 +1369,6 @@
 // the high bits of the O-regs if they contain Long values. Acts as a 'leaf'
 // call.
 void MacroAssembler::verify_oop_subroutine() {
-  assert( VM_Version::v9_instructions_work(), "VerifyOops not supported for V8" );
-
   // Leaf call; no frame.
   Label succeed, fail, null_or_fail;
@@ -1870,26 +1772,17 @@
   // And the equals case for the high part does not need testing,
   // since that triplet is reached only after finding the high halves differ.
 
-  if (VM_Version::v9_instructions_work()) {
-    mov(-1, Rresult);
-    ba(done);  delayed()-> movcc(greater, false, icc, 1, Rresult);
-  } else {
-    br(less,    true, pt, done); delayed()-> set(-1, Rresult);
-    br(greater, true, pt, done); delayed()-> set( 1, Rresult);
-  }
-
-  bind( check_low_parts );
-
-  if (VM_Version::v9_instructions_work()) {
-    mov( -1, Rresult);
-    movcc(equal,           false, icc, 0, Rresult);
-    movcc(greaterUnsigned, false, icc, 1, Rresult);
-  } else {
-    set(-1, Rresult);
-    br(equal,           true, pt, done); delayed()->set( 0, Rresult);
-    br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult);
-  }
-  bind( done );
+  mov(-1, Rresult);
+  ba(done);
+  delayed()->movcc(greater, false, icc, 1, Rresult);
+
+  bind(check_low_parts);
+
+  mov( -1, Rresult);
+  movcc(equal,           false, icc, 0, Rresult);
+  movcc(greaterUnsigned, false, icc, 1, Rresult);
+
+  bind(done);
 }
 
 void MacroAssembler::lneg( Register Rhi, Register Rlow ) {
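The rewritten lcmp relies on movcc (conditional move) so the three-way result is produced without branches: seed -1, then conditionally overwrite with 0 on equal and 1 on greater. A plain C++ sketch of the value being computed (the movcc form merely lets the hardware predicate the assignments instead of branching; greaterUnsigned applies to the low halves):

  #include <cstdint>

  int three_way_compare(int64_t a, int64_t b) {
    int result = -1;          // mov(-1, Rresult)
    if (a == b) result = 0;   // movcc(equal, ...)
    if (a > b)  result = 1;   // movcc(greater / greaterUnsigned, ...)
    return result;
  }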
@@ -2117,118 +2010,23 @@
 
 void MacroAssembler::float_cmp( bool is_float, int unordered_result,
                                 FloatRegister Fa, FloatRegister Fb,
                                 Register Rresult) {
-
-  fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb);
-
-  Condition lt = unordered_result == -1 ? f_unorderedOrLess    : f_less;
-  Condition eq =                          f_equal;
-  Condition gt = unordered_result ==  1 ? f_unorderedOrGreater : f_greater;
-
-  if (VM_Version::v9_instructions_work()) {
-
-    mov(-1, Rresult);
-    movcc(eq, true, fcc0, 0, Rresult);
-    movcc(gt, true, fcc0, 1, Rresult);
-
+  if (is_float) {
+    fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb);
   } else {
-    Label done;
-
-    set( -1, Rresult );
-    //fb(lt, true, pn, done); delayed()->set( -1, Rresult );
-    fb( eq, true, pn, done); delayed()->set(  0, Rresult );
-    fb( gt, true, pn, done); delayed()->set(  1, Rresult );
-
-    bind (done);
+    fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb);
   }
-}
-
-
-void MacroAssembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
-{
-  if (VM_Version::v9_instructions_work()) {
-    Assembler::fneg(w, s, d);
+
+  if (unordered_result == 1) {
+    mov(                                    -1, Rresult);
+    movcc(f_equal,              true, fcc0,  0, Rresult);
+    movcc(f_unorderedOrGreater, true, fcc0,  1, Rresult);
   } else {
-    if (w == FloatRegisterImpl::S) {
-      Assembler::fneg(w, s, d);
-    } else if (w == FloatRegisterImpl::D) {
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
-        ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
-
-      Assembler::fneg(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-    } else {
-      assert(w == FloatRegisterImpl::Q, "Invalid float register width");
-
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
-        ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
-
-      Assembler::fneg(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
-    }
+    mov(                       -1, Rresult);
+    movcc(f_equal,   true, fcc0,  0, Rresult);
+    movcc(f_greater, true, fcc0,  1, Rresult);
   }
 }
 
-void MacroAssembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
-{
-  if (VM_Version::v9_instructions_work()) {
-    Assembler::fmov(w, s, d);
-  } else {
-    if (w == FloatRegisterImpl::S) {
-      Assembler::fmov(w, s, d);
-    } else if (w == FloatRegisterImpl::D) {
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
-        ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
-
-      Assembler::fmov(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-    } else {
-      assert(w == FloatRegisterImpl::Q, "Invalid float register width");
-
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
-        ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
-
-      Assembler::fmov(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
-    }
-  }
-}
-
-void MacroAssembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
-{
-  if (VM_Version::v9_instructions_work()) {
-    Assembler::fabs(w, s, d);
-  } else {
-    if (w == FloatRegisterImpl::S) {
-      Assembler::fabs(w, s, d);
-    } else if (w == FloatRegisterImpl::D) {
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
-        ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
-
-      Assembler::fabs(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-    } else {
-      assert(w == FloatRegisterImpl::Q, "Invalid float register width");
-
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
-        ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
-
-      Assembler::fabs(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
-    }
-  }
-}
 
 void MacroAssembler::save_all_globals_into_locals() {
   mov(G1,L1);
@@ -2250,135 +2048,6 @@
   mov(L7,G7);
 }
 
-// Use for 64 bit operation.
-void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
-{
-  // store ptr_reg as the new top value
-#ifdef _LP64
-  casx(top_ptr_reg, top_reg, ptr_reg);
-#else
-  cas_under_lock(top_ptr_reg, top_reg, ptr_reg, lock_addr, use_call_vm);
-#endif // _LP64
-}
-
-// [RGV] This routine does not handle 64 bit operations.
-//       use casx_under_lock() or casx directly!!!
-void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
-{
-  // store ptr_reg as the new top value
-  if (VM_Version::v9_instructions_work()) {
-    cas(top_ptr_reg, top_reg, ptr_reg);
-  } else {
-
-    // If the register is not an out nor global, it is not visible
-    // after the save. Allocate a register for it, save its
-    // value in the register save area (the save may not flush
-    // registers to the save area).
-
-    Register top_ptr_reg_after_save;
-    Register top_reg_after_save;
-    Register ptr_reg_after_save;
-
-    if (top_ptr_reg->is_out() || top_ptr_reg->is_global()) {
-      top_ptr_reg_after_save = top_ptr_reg->after_save();
-    } else {
-      Address reg_save_addr = top_ptr_reg->address_in_saved_window();
-      top_ptr_reg_after_save = L0;
-      st(top_ptr_reg, reg_save_addr);
-    }
-
-    if (top_reg->is_out() || top_reg->is_global()) {
-      top_reg_after_save = top_reg->after_save();
-    } else {
-      Address reg_save_addr = top_reg->address_in_saved_window();
-      top_reg_after_save = L1;
-      st(top_reg, reg_save_addr);
-    }
-
-    if (ptr_reg->is_out() || ptr_reg->is_global()) {
-      ptr_reg_after_save = ptr_reg->after_save();
-    } else {
-      Address reg_save_addr = ptr_reg->address_in_saved_window();
-      ptr_reg_after_save = L2;
-      st(ptr_reg, reg_save_addr);
-    }
-
-    const Register& lock_reg = L3;
-    const Register& lock_ptr_reg = L4;
-    const Register& value_reg = L5;
-    const Register& yield_reg = L6;
-    const Register& yieldall_reg = L7;
-
-    save_frame();
-
-    if (top_ptr_reg_after_save == L0) {
-      ld(top_ptr_reg->address_in_saved_window().after_save(), top_ptr_reg_after_save);
-    }
-
-    if (top_reg_after_save == L1) {
-      ld(top_reg->address_in_saved_window().after_save(), top_reg_after_save);
-    }
-
-    if (ptr_reg_after_save == L2) {
-      ld(ptr_reg->address_in_saved_window().after_save(), ptr_reg_after_save);
-    }
-
-    Label(retry_get_lock);
-    Label(not_same);
-    Label(dont_yield);
-
-    assert(lock_addr, "lock_address should be non null for v8");
-    set((intptr_t)lock_addr, lock_ptr_reg);
-    // Initialize yield counter
-    mov(G0,yield_reg);
-    mov(G0, yieldall_reg);
-    set(StubRoutines::Sparc::locked, lock_reg);
-
-    bind(retry_get_lock);
-    cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dont_yield);
-
-    if(use_call_vm) {
-      Untested("Need to verify global reg consistancy");
-      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::yield_all), yieldall_reg);
-    } else {
-      // Save the regs and make space for a C call
-      save(SP, -96, SP);
-      save_all_globals_into_locals();
-      call(CAST_FROM_FN_PTR(address,os::yield_all));
-      delayed()->mov(yieldall_reg, O0);
-      restore_globals_from_locals();
-      restore();
-    }
-
-    // reset the counter
-    mov(G0,yield_reg);
-    add(yieldall_reg, 1, yieldall_reg);
-
-    bind(dont_yield);
-    // try to get lock
-    Assembler::swap(lock_ptr_reg, 0, lock_reg);
-
-    // did we get the lock?
-    cmp(lock_reg, StubRoutines::Sparc::unlocked);
-    br(Assembler::notEqual, true, Assembler::pn, retry_get_lock);
-    delayed()->add(yield_reg,1,yield_reg);
-
-    // yes, got lock.  do we have the same top?
-    ld(top_ptr_reg_after_save, 0, value_reg);
-    cmp_and_br_short(value_reg, top_reg_after_save, Assembler::notEqual, Assembler::pn, not_same);
-
-    // yes, same top.
-    st(ptr_reg_after_save, top_ptr_reg_after_save, 0);
-    membar(Assembler::StoreStore);
-
-    bind(not_same);
-    mov(value_reg, ptr_reg_after_save);
-    st(lock_reg, lock_ptr_reg, 0); // unlock
-
-    restore();
-  }
-}
-
 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                       Register tmp,
                                                       int offset) {
@@ -2970,7 +2639,7 @@
       markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place, mark_reg);
     or3(G2_thread, mark_reg, temp_reg);
-    casn(mark_addr.base(), mark_reg, temp_reg);
+    cas_ptr(mark_addr.base(), mark_reg, temp_reg);
     // If the biasing toward our thread failed, this means that
     // another thread succeeded in biasing it toward itself and we
     // need to revoke that bias. The revocation will occur in the
@@ -2998,7 +2667,7 @@
     load_klass(obj_reg, temp_reg);
     ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
     or3(G2_thread, temp_reg, temp_reg);
-    casn(mark_addr.base(), mark_reg, temp_reg);
+    cas_ptr(mark_addr.base(), mark_reg, temp_reg);
     // If the biasing toward our thread failed, this means that
     // another thread succeeded in biasing it toward itself and we
     // need to revoke that bias. The revocation will occur in the
@@ -3027,7 +2696,7 @@
     // bits in this situation. Should attempt to preserve them.
     load_klass(obj_reg, temp_reg);
     ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
-    casn(mark_addr.base(), mark_reg, temp_reg);
+    cas_ptr(mark_addr.base(), mark_reg, temp_reg);
     // Fall through to the normal CAS-based lock, because no matter what
     // the result of the above CAS, some thread must have succeeded in
     // removing the bias bit from the object's header.
@@ -3058,15 +2727,6 @@
   }
 }
 
-// CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by
-// Solaris/SPARC's "as".  Another apt name would be cas_ptr()
-
-void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) {
-  casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
-}
-
-
-
 // compiler_lock_object() and compiler_unlock_object() are direct transliterations
 // of i486.ad fast_lock() and fast_unlock().  See those methods for detailed comments.
 // The code could be tightened up considerably.
@@ -3129,8 +2789,7 @@
 
   // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
   assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-  casx_under_lock(mark_addr.base(), Rmark, Rscratch,
-    (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+  cas_ptr(mark_addr.base(), Rmark, Rscratch);
 
   // if compare/exchange succeeded we found an unlocked object and we now have locked it
   // hence we are done
@@ -3176,7 +2835,7 @@
       mov(Rbox,  Rscratch);
       or3(Rmark, markOopDesc::unlocked_value, Rmark);
       assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-      casn(mark_addr.base(), Rmark, Rscratch);
+      cas_ptr(mark_addr.base(), Rmark, Rscratch);
       cmp(Rmark, Rscratch);
       brx(Assembler::equal, false, Assembler::pt, done);
       delayed()->sub(Rscratch, SP, Rscratch);
@@ -3207,7 +2866,7 @@
       // Invariant: if we acquire the lock then _recursions should be 0.
       add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
       mov(G2_thread, Rscratch);
-      casn(Rmark, G0, Rscratch);
+      cas_ptr(Rmark, G0, Rscratch);
      cmp(Rscratch, G0);
      // Intentional fall-through into done
   } else {
@@ -3240,7 +2899,7 @@
      mov(0, Rscratch);
      or3(Rmark, markOopDesc::unlocked_value, Rmark);
      assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-      casn(mark_addr.base(), Rmark, Rscratch);
+      cas_ptr(mark_addr.base(), Rmark, Rscratch);
// prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
      cmp(Rscratch, Rmark);
      brx(Assembler::notZero, false, Assembler::pn, Recursive);
@@ -3266,7 +2925,7 @@
      // the fast-path stack-lock code from the interpreter and always passed
      // control to the "slow" operators in synchronizer.cpp.
 
-      // RScratch contains the fetched obj->mark value from the failed CASN.
+      // RScratch contains the fetched obj->mark value from the failed CAS.
#ifdef _LP64
      sub(Rscratch, STACK_BIAS, Rscratch);
#endif
@@ -3300,7 +2959,7 @@
      // Invariant: if we acquire the lock then _recursions should be 0.
      add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
      mov(G2_thread, Rscratch);
-      casn(Rmark, G0, Rscratch);
+      cas_ptr(Rmark, G0, Rscratch);
      cmp(Rscratch, G0);
      // ST box->displaced_header = NonZero.
      // Any non-zero value suffices:
@@ -3336,8 +2995,7 @@
      // Check if it is still a light weight lock, this is is true if we see
      // the stack address of the basicLock in the markOop of the object
      assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-      casx_under_lock(mark_addr.base(), Rbox, Rmark,
-        (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+      cas_ptr(mark_addr.base(), Rbox, Rmark);
      ba(done);
      delayed()->cmp(Rbox, Rmark);
      bind(done);
@@ -3398,7 +3056,7 @@
      delayed()->andcc(G0, G0, G0);
      add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
      mov(G2_thread, Rscratch);
-      casn(Rmark, G0, Rscratch);
+      cas_ptr(Rmark, G0, Rscratch);
      // invert icc.zf and goto done
      br_notnull(Rscratch, false, Assembler::pt, done);
      delayed()->cmp(G0, G0);
@@ -3440,7 +3098,7 @@
      // A prototype implementation showed excellent results, although
      // the scavenger and timeout code was rather involved.
 
-      casn(mark_addr.base(), Rbox, Rscratch);
+      cas_ptr(mark_addr.base(), Rbox, Rscratch);
      cmp(Rbox, Rscratch);
      // Intentional fall through into done ...
@@ -3540,7 +3198,8 @@
 
    if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
      // No allocation in the shared eden.
-      ba_short(slow_case);
+      ba(slow_case);
+      delayed()->nop();
    } else {
      // get eden boundaries
      // note: we need both top & top_addr!
@@ -3583,7 +3242,7 @@
    // Compare obj with the value at top_addr; if still equal, swap the value of
    // end with the value at top_addr. If not equal, read the value at top_addr
    // into end.
-    casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+    cas_ptr(top_addr, obj, end);
    // if someone beat us on the allocation, try again, otherwise continue
    cmp(obj, end);
    brx(Assembler::notEqual, false, Assembler::pn, retry);
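The eden allocation path above keeps its lock-free shape after the rewrite: read the current top, compute the new end, and cas_ptr the new value in, retrying if another thread won the race. A compact C++ model of that retry loop, with illustrative names and a GCC/Clang builtin standing in for the generated CAS:

  #include <cstddef>
  #include <cstdint>

  char* allocate_from_eden(volatile intptr_t* top_addr, intptr_t end_limit, size_t size) {
    for (;;) {
      intptr_t obj = *top_addr;                    // load current top
      intptr_t new_top = obj + (intptr_t)size;
      if (new_top > end_limit) return nullptr;     // fall back to the slow case
      // cas_ptr: publish new_top only if top is still obj
      if (__sync_val_compare_and_swap(top_addr, obj, new_top) == obj) {
        return (char*)obj;                         // we own [obj, new_top)
      }
      // someone beat us on the allocation, try again
    }
  }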
diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/macroAssembler_sparc.hpp
--- a/src/cpu/sparc/vm/macroAssembler_sparc.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -963,7 +963,7 @@
   inline void sub(Register s1, RegisterOrConstant s2, Register d, int offset = 0);
 
   using Assembler::swap;
-  inline void swap(Address& a, Register d, int offset = 0);
+  inline void swap(const Address& a, Register d, int offset = 0);
 
   // address pseudos: make these names unlike instruction names to avoid confusion
   inline intptr_t load_pc_address( Register reg, int bytes_to_skip );
@@ -1056,13 +1056,6 @@
   void breakpoint_trap();
   void breakpoint_trap(Condition c, CC cc);
-  void flush_windows_trap();
-  void clean_windows_trap();
-  void get_psr_trap();
-  void set_psr_trap();
-
-  // V8/V9 flush_windows
-  void flush_windows();
 
   // Support for serializing memory accesses between threads
   void serialize_memory(Register thread, Register tmp1, Register tmp2);
@@ -1071,14 +1064,6 @@
   void enter();
   void leave();
 
-  // V8/V9 integer multiply
-  void mult(Register s1, Register s2, Register d);
-  void mult(Register s1, int simm13a, Register d);
-
-  // V8/V9 read and write of condition codes.
-  void read_ccr(Register d);
-  void write_ccr(Register s);
-
   // Manipulation of C++ bools
   // These are idioms to flag the need for care with accessing bools but on
   // this platform we assume byte size
@@ -1162,21 +1147,6 @@
   // check_and_forward_exception to handle exceptions when it is safe
   void check_and_forward_exception(Register scratch_reg);
 
- private:
-  // For V8
-  void read_ccr_trap(Register ccr_save);
-  void write_ccr_trap(Register ccr_save1, Register scratch1, Register scratch2);
-
-#ifdef ASSERT
-  // For V8 debugging. Uses V8 instruction sequence and checks
-  // result with V9 insturctions rdccr and wrccr.
-  // Uses Gscatch and Gscatch2
-  void read_ccr_v8_assert(Register ccr_save);
-  void write_ccr_v8_assert(Register ccr_save);
-#endif // ASSERT
-
- public:
-
   // Write to card table for - register is destroyed afterwards.
   void card_table_write(jbyte* byte_map_base, Register tmp, Register obj);
@@ -1314,20 +1284,9 @@
                  FloatRegister Fa, FloatRegister Fb,
                  Register Rresult);
 
-  void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
-  void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { Assembler::fneg(w, sd); }
-  void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
-  void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
-
   void save_all_globals_into_locals();
   void restore_globals_from_locals();
 
-  void casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
-    address lock_addr=0, bool use_call_vm=false);
-  void cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
-    address lock_addr=0, bool use_call_vm=false);
-  void casn (Register addr_reg, Register cmp_reg, Register set_reg) ;
-
   // These set the icc condition code to equal if the lock succeeded
   // and notEqual if it failed and requires a slow case
   void compiler_lock_object(Register Roop, Register Rmark, Register Rbox,
diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp
--- a/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -229,10 +229,7 @@
 
 // Use the right branch for the platform
 inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
-  if (VM_Version::v9_instructions_work())
-    Assembler::bp(c, a, icc, p, d, rt);
-  else
-    Assembler::br(c, a, d, rt);
+  Assembler::bp(c, a, icc, p, d, rt);
 }
 
 inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
@@ -268,10 +265,7 @@
 }
 
 inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
-  if (VM_Version::v9_instructions_work())
-    fbp(c, a, fcc0, p, d, rt);
-  else
-    Assembler::fb(c, a, d, rt);
+  fbp(c, a, fcc0, p, d, rt);
 }
 
 inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
@@ -334,7 +328,6 @@
 
 // prefetch instruction
 inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
-  if (VM_Version::v9_instructions_work())
-    Assembler::bp( never, true, xcc, pt, d, rt );
+  Assembler::bp( never, true, xcc, pt, d, rt );
 }
 inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
@@ -344,15 +338,7 @@
 
 // returns delta from gotten pc to addr after
 inline int MacroAssembler::get_pc( Register d ) {
   int x = offset();
-  if (VM_Version::v9_instructions_work())
-    rdpc(d);
-  else {
-    Label lbl;
-    Assembler::call(lbl, relocInfo::none);  // No relocation as this is call to pc+0x8
-    if (d == O7)  delayed()->nop();
-    else          delayed()->mov(O7, d);
-    bind(lbl);
-  }
+  rdpc(d);
   return offset() - x;
 }
@@ -646,41 +632,26 @@
 
 // returns if membar generates anything, obviously this code should mirror
 // membar below.
 inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
-  if( !os::is_MP() ) return false;  // Not needed on single CPU
-  if( VM_Version::v9_instructions_work() ) {
-    const Membar_mask_bits effective_mask =
-        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
-    return (effective_mask != 0);
-  } else {
-    return true;
-  }
+  if (!os::is_MP())
+    return false;  // Not needed on single CPU
+  const Membar_mask_bits effective_mask =
+      Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
+  return (effective_mask != 0);
 }
 
 inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
   // Uniprocessors do not need memory barriers
-  if (!os::is_MP()) return;
+  if (!os::is_MP())
+    return;
   // Weakened for current Sparcs and TSO.  See the v9 manual, sections 8.4.3,
   // 8.4.4.3, a.31 and a.50.
-  if( VM_Version::v9_instructions_work() ) {
-    // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
-    // of the mmask subfield of const7a that does anything that isn't done
-    // implicitly is StoreLoad.
-    const Membar_mask_bits effective_mask =
-        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
-    if ( effective_mask != 0 ) {
-      Assembler::membar( effective_mask );
-    }
-  } else {
-    // stbar is the closest there is on v8.  Equivalent to membar(StoreStore).  We
-    // do not issue the stbar because to my knowledge all v8 machines implement TSO,
-    // which guarantees that all stores behave as if an stbar were issued just after
-    // each one of them.  On these machines, stbar ought to be a nop.  There doesn't
-    // appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
-    // it can't be specified by stbar, nor have I come up with a way to simulate it.
-    //
-    // Addendum.  Dave says that ldstub guarantees a write buffer flush to coherent
-    // space.  Put one here to be on the safe side.
-    Assembler::ldstub(SP, 0, G0);
+  // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
+  // of the mmask subfield of const7a that does anything that isn't done
+  // implicitly is StoreLoad.
+  const Membar_mask_bits effective_mask =
+      Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
+  if (effective_mask != 0) {
+    Assembler::membar(effective_mask);
   }
 }
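With V8 gone, membar can assume TSO unconditionally: LoadLoad, LoadStore and StoreStore orderings are already guaranteed by the memory model, so the mask is stripped down and an actual membar instruction is emitted only when StoreLoad survives. The mask arithmetic spelled out in C++ (the enum values are illustrative; HotSpot's Membar_mask_bits uses its own encoding):

  enum MembarBits { LoadLoad = 1, StoreLoad = 2, LoadStore = 4, StoreStore = 8 };

  bool membar_needed_under_tso(unsigned const7a) {
    // Everything except StoreLoad is implicit under TSO.
    unsigned effective_mask = const7a & ~(LoadLoad | LoadStore | StoreStore);
    return effective_mask != 0;   // mirrors membar_has_effect() above
  }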
@@ -748,7 +719,7 @@
   if (offset != 0)       sub(d,  offset,                    d);
 }
 
-inline void MacroAssembler::swap(Address& a, Register d, int offset) {
+inline void MacroAssembler::swap(const Address& a, Register d, int offset) {
   relocate(a.rspec(offset));
   if (a.has_index()) { assert(offset == 0, ""); swap(a.base(), a.index(), d        ); }
   else               {                          swap(a.base(), a.disp() + offset, d); }
diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/nativeInst_sparc.cpp
--- a/src/cpu/sparc/vm/nativeInst_sparc.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/cpu/sparc/vm/nativeInst_sparc.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -162,7 +162,7 @@
   int i1 = ((int*)code_buffer)[1];
   int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
-         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
+         *contention_addr == nop_instruction(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   n_call->set_long_at(1*BytesPerInstWord, i1);
@@ -181,7 +181,7 @@
   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
-         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
+         *contention_addr == nop_instruction(),
          "must not interfere with original call");
 }
@@ -933,11 +933,7 @@
   int code_size = 1 * BytesPerInstWord;
   CodeBuffer cb(verified_entry, code_size + 1);
   MacroAssembler* a = new MacroAssembler(&cb);
-  if (VM_Version::v9_instructions_work()) {
-    a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
-  } else {
-    a->lduw(G0, 0, O7); // "ld" must agree with code in the signal handler
-  }
+  a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
   ICache::invalidate_range(verified_entry, code_size);
 }
@@ -1024,7 +1020,7 @@
   int i1 = ((int*)code_buffer)[1];
   int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
-         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
+         *contention_addr == nop_instruction(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   h_jump->set_long_at(1*BytesPerInstWord, i1);
@@ -1043,6 +1039,6 @@
   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
-         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
+         *contention_addr == nop_instruction(),
          "must not interfere with original call");
 }
diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/nativeInst_sparc.hpp
--- a/src/cpu/sparc/vm/nativeInst_sparc.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/cpu/sparc/vm/nativeInst_sparc.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -70,8 +70,7 @@
   bool is_zombie() {
     int x = long_at(0);
     return is_op3(x,
-                  VM_Version::v9_instructions_work() ?
-                    Assembler::ldsw_op3 : Assembler::lduw_op3,
+                  Assembler::ldsw_op3,
                   Assembler::ldst_op)
       && Assembler::inv_rs1(x) == G0
       && Assembler::inv_rd(x) == O7;
diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/register_sparc.hpp
--- a/src/cpu/sparc/vm/register_sparc.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/cpu/sparc/vm/register_sparc.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -249,12 +249,10 @@
     case D:
       assert(c < 64 && (c & 1) == 0, "bad double float register");
-      assert(c < 32 || VM_Version::v9_instructions_work(), "V9 float work only on V9 platform");
       return (c & 0x1e) | ((c & 0x20) >> 5);
 
     case Q:
       assert(c < 64 && (c & 3) == 0, "bad quad float register");
-      assert(c < 32 || VM_Version::v9_instructions_work(), "V9 float work only on V9 platform");
       return (c & 0x1c) | ((c & 0x20) >> 5);
   }
   ShouldNotReachHere();
diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/relocInfo_sparc.cpp
--- a/src/cpu/sparc/vm/relocInfo_sparc.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/cpu/sparc/vm/relocInfo_sparc.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -193,36 +193,6 @@
   return *(address*)addr();
 }
 
-
-int Relocation::pd_breakpoint_size() {
-  // minimum breakpoint size, in short words
-  return NativeIllegalInstruction::instruction_size / sizeof(short);
-}
-
-void Relocation::pd_swap_in_breakpoint(address x, short* instrs, int instrlen) {
-  Untested("pd_swap_in_breakpoint");
-  // %%% probably do not need a general instrlen; just use the trap size
-  if (instrs != NULL) {
-    assert(instrlen * sizeof(short) == NativeIllegalInstruction::instruction_size, "enough instrlen in reloc. data");
-    for (int i = 0; i < instrlen; i++) {
-      instrs[i] = ((short*)x)[i];
-    }
-  }
-  NativeIllegalInstruction::insert(x);
-}
-
-
-void Relocation::pd_swap_out_breakpoint(address x, short* instrs, int instrlen) {
-  Untested("pd_swap_out_breakpoint");
-  assert(instrlen * sizeof(short) == sizeof(int), "enough buf");
-  union { int l; short s[1]; } u;
-  for (int i = 0; i < instrlen; i++) {
-    u.s[i] = instrs[i];
-  }
-  NativeInstruction* ni = nativeInstruction_at(x);
-  ni->set_long_at(0, u.l);
-}
-
 void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
 }
diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/sharedRuntime_sparc.cpp
--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -2459,7 +2459,7 @@
 
   // Finally just about ready to make the JNI call
 
-  __ flush_windows();
+  __ flushw();
   if (inner_frame_created) {
     __ restore();
   } else {
diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/sparc.ad
--- a/src/cpu/sparc/vm/sparc.ad Tue Jul 16 10:55:48 2013 -0400
+++ b/src/cpu/sparc/vm/sparc.ad Tue Jul 16 12:20:08 2013 -0400
@@ -2778,10 +2778,7 @@
     Register Rold = reg_to_register_object($old$$reg);
     Register Rnew = reg_to_register_object($new$$reg);
 
-    // casx_under_lock picks 1 of 3 encodings:
-    //  For 32-bit pointers you get a 32-bit CAS
-    //  For 64-bit pointers you get a 64-bit CASX
-    __ casn(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
+    __ cas_ptr(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
     __ cmp( Rold, Rnew );
   %}
@@ -3067,7 +3064,7 @@
     AddressLiteral last_rethrow_addrlit(&last_rethrow);
     __ sethi(last_rethrow_addrlit, L1);
     Address addr(L1, last_rethrow_addrlit.low10());
-    __ get_pc(L2);
+    __ rdpc(L2);
     __ inc(L2, 3 * BytesPerInstWord);  // skip this & 2 more insns to point at jump_to
     __ st_ptr(L2, addr);
     __ restore();
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -410,6 +410,51 @@ return start; } + // Safefetch stubs. + void generate_safefetch(const char* name, int size, address* entry, + address* fault_pc, address* continuation_pc) { + // safefetch signatures: + // int SafeFetch32(int* adr, int errValue); + // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); + // + // arguments: + // o0 = adr + // o1 = errValue + // + // result: + // o0 = *adr or errValue + + StubCodeMark mark(this, "StubRoutines", name); + + // Entry point, pc or function descriptor. + __ align(CodeEntryAlignment); + *entry = __ pc(); + + __ mov(O0, G1); // g1 = o0 + __ mov(O1, O0); // o0 = o1 + // Load *adr into O0, may fault. + *fault_pc = __ pc(); + switch (size) { + case 4: + // int32_t + __ ldsw(G1, 0, O0); // o0 = [g1] + break; + case 8: + // int64_t + __ ldx(G1, 0, O0); // o0 = [g1] + break; + default: + ShouldNotReachHere(); + } + + // return errValue or *adr + *continuation_pc = __ pc(); + // By convention with the trap handler we ensure there is a non-CTI + // instruction in the trap shadow. + __ nop(); + __ retl(); + __ delayed()->nop(); + } //------------------------------------------------------------------------------------------------------------------------ // Continuation point for throwing of implicit exceptions that are not handled in @@ -566,7 +611,7 @@ StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows"); address start = __ pc(); - __ flush_windows(); + __ flushw(); __ retl(false); __ delayed()->add( FP, STACK_BIAS, O0 ); // The returned value must be a stack pointer whose register save area @@ -575,67 +620,9 @@ return start; } - // Helper functions for v8 atomic operations. - // - void get_v8_oop_lock_ptr(Register lock_ptr_reg, Register mark_oop_reg, Register scratch_reg) { - if (mark_oop_reg == noreg) { - address lock_ptr = (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(); - __ set((intptr_t)lock_ptr, lock_ptr_reg); - } else { - assert(scratch_reg != noreg, "just checking"); - address lock_ptr = (address)StubRoutines::Sparc::_v8_oop_lock_cache; - __ set((intptr_t)lock_ptr, lock_ptr_reg); - __ and3(mark_oop_reg, StubRoutines::Sparc::v8_oop_lock_mask_in_place, scratch_reg); - __ add(lock_ptr_reg, scratch_reg, lock_ptr_reg); - } - } - - void generate_v8_lock_prologue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) { - - get_v8_oop_lock_ptr(lock_ptr_reg, mark_oop_reg, scratch_reg); - __ set(StubRoutines::Sparc::locked, lock_reg); - // Initialize yield counter - __ mov(G0,yield_reg); - - __ BIND(retry); - __ cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dontyield); - - // This code can only be called from inside the VM, this - // stub is only invoked from Atomic::add(). We do not - // want to use call_VM, because _last_java_sp and such - // must already be set. - // - // Save the regs and make space for a C call - __ save(SP, -96, SP); - __ save_all_globals_into_locals(); - BLOCK_COMMENT("call os::naked_sleep"); - __ call(CAST_FROM_FN_PTR(address, os::naked_sleep)); - __ delayed()->nop(); - __ restore_globals_from_locals(); - __ restore(); - // reset the counter - __ mov(G0,yield_reg); - - __ BIND(dontyield); - - // try to get lock - __ swap(lock_ptr_reg, 0, lock_reg); - - // did we get the lock?
- __ cmp(lock_reg, StubRoutines::Sparc::unlocked); - __ br(Assembler::notEqual, true, Assembler::pn, retry); - __ delayed()->add(yield_reg,1,yield_reg); - - // yes, got lock. do the operation here. - } - - void generate_v8_lock_epilogue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) { - __ st(lock_reg, lock_ptr_reg, 0); // unlock - } - // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest). // - // Arguments : + // Arguments: // // exchange_value: O0 // dest: O1 @@ -656,33 +643,14 @@ __ mov(O0, O3); // scratch copy of exchange value __ ld(O1, 0, O2); // observe the previous value // try to replace O2 with O3 - __ cas_under_lock(O1, O2, O3, - (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false); + __ cas(O1, O2, O3); __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry); __ retl(false); __ delayed()->mov(O2, O0); // report previous value to caller - } else { - if (VM_Version::v9_instructions_work()) { - __ retl(false); - __ delayed()->swap(O1, 0, O0); - } else { - const Register& lock_reg = O2; - const Register& lock_ptr_reg = O3; - const Register& yield_reg = O4; - - Label retry; - Label dontyield; - - generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield); - // got the lock, do the swap - __ swap(O1, 0, O0); - - generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield); - __ retl(false); - __ delayed()->nop(); - } + __ retl(false); + __ delayed()->swap(O1, 0, O0); } return start; @@ -691,7 +659,7 @@ // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) // - // Arguments : + // Arguments: // // exchange_value: O0 // dest: O1 @@ -701,15 +669,12 @@ // // O0: the value previously stored in dest // - // Overwrites (v8): O3,O4,O5 - // address generate_atomic_cmpxchg() { StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg"); address start = __ pc(); // cmpxchg(dest, compare_value, exchange_value) - __ cas_under_lock(O1, O2, O0, - (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false); + __ cas(O1, O2, O0); __ retl(false); __ delayed()->nop(); @@ -718,7 +683,7 @@ // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value) // - // Arguments : + // Arguments: // // exchange_value: O1:O0 // dest: O2 @@ -728,17 +693,12 @@ // // O1:O0: the value previously stored in dest // - // This only works on V9, on V8 we don't generate any - // code and just return NULL. - // // Overwrites: G1,G2,G3 // address generate_atomic_cmpxchg_long() { StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long"); address start = __ pc(); - if (!VM_Version::supports_cx8()) - return NULL;; __ sllx(O0, 32, O0); __ srl(O1, 0, O1); __ or3(O0,O1,O0); // O0 holds 64-bit value from compare_value @@ -756,7 +716,7 @@ // Support for jint Atomic::add(jint add_value, volatile jint* dest). 
// - // Arguments : + // Arguments: // // add_value: O0 (e.g., +1 or -1) // dest: O1 @@ -765,47 +725,22 @@ // // O0: the new value stored in dest // - // Overwrites (v9): O3 - // Overwrites (v8): O3,O4,O5 + // Overwrites: O3 // address generate_atomic_add() { StubCodeMark mark(this, "StubRoutines", "atomic_add"); address start = __ pc(); __ BIND(_atomic_add_stub); - if (VM_Version::v9_instructions_work()) { - Label(retry); - __ BIND(retry); - - __ lduw(O1, 0, O2); - __ add(O0, O2, O3); - __ cas(O1, O2, O3); - __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry); - __ retl(false); - __ delayed()->add(O0, O2, O0); // note that cas made O2==O3 - } else { - const Register& lock_reg = O2; - const Register& lock_ptr_reg = O3; - const Register& value_reg = O4; - const Register& yield_reg = O5; - - Label(retry); - Label(dontyield); - - generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield); - // got lock, do the increment - __ ld(O1, 0, value_reg); - __ add(O0, value_reg, value_reg); - __ st(value_reg, O1, 0); - - // %%% only for RMO and PSO - __ membar(Assembler::StoreStore); - - generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield); - - __ retl(false); - __ delayed()->mov(value_reg, O0); - } + Label(retry); + __ BIND(retry); + + __ lduw(O1, 0, O2); + __ add(O0, O2, O3); + __ cas(O1, O2, O3); + __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry); + __ retl(false); + __ delayed()->add(O0, O2, O0); // note that cas made O2==O3 return start; } @@ -841,7 +776,7 @@ __ mov(G3, L3); __ mov(G4, L4); __ mov(G5, L5); - for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) { + for (i = 0; i < 64; i += 2) { __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize); } @@ -855,7 +790,7 @@ __ mov(L3, G3); __ mov(L4, G4); __ mov(L5, G5); - for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) { + for (i = 0; i < 64; i += 2) { __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize); } @@ -3425,6 +3360,14 @@ // Don't initialize the platform math functions since sparc // doesn't have intrinsics for these operations. + + // Safefetch stubs. 
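The two generate_safefetch calls that follow wire the SPARC stubs into StubRoutines. As a minimal caller-side sketch (assuming the SafeFetch32 wrapper in stubRoutines.hpp that dispatches through _safefetch32_entry; names outside this hunk are taken on trust): when the load of *adr faults, the trap handler resumes at the registered continuation_pc, so the caller simply gets errValue back instead of crashing.

// Sketch only, not part of the patch: probing an address without crashing.
// Two sentinel values guard against *adr legitimately holding one of them.
static bool is_readable_int(int* adr) {
  const int err1 = 0x10000000;
  const int err2 = 0x20000000;
  return SafeFetch32(adr, err1) != err1 || SafeFetch32(adr, err2) != err2;
}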
+ generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, + &StubRoutines::_safefetch32_fault_pc, + &StubRoutines::_safefetch32_continuation_pc); + generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, + &StubRoutines::_safefetchN_fault_pc, + &StubRoutines::_safefetchN_continuation_pc); } diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/stubRoutines_sparc.cpp --- a/src/cpu/sparc/vm/stubRoutines_sparc.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/sparc/vm/stubRoutines_sparc.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -52,7 +52,3 @@ address StubRoutines::Sparc::_flush_callers_register_windows_entry = CAST_FROM_FN_PTR(address, bootstrap_flush_windows); address StubRoutines::Sparc::_partial_subtype_check = NULL; - -int StubRoutines::Sparc::_atomic_memory_operation_lock = StubRoutines::Sparc::unlocked; - -int StubRoutines::Sparc::_v8_oop_lock_cache[StubRoutines::Sparc::nof_v8_oop_lock_cache_entries]; diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/stubRoutines_sparc.hpp --- a/src/cpu/sparc/vm/stubRoutines_sparc.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/sparc/vm/stubRoutines_sparc.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -47,46 +47,14 @@ class Sparc { friend class StubGenerator; - public: - enum { nof_instance_allocators = 10 }; - - // allocator lock values - enum { - unlocked = 0, - locked = 1 - }; - - enum { - v8_oop_lock_ignore_bits = 2, - v8_oop_lock_bits = 4, - nof_v8_oop_lock_cache_entries = 1 << (v8_oop_lock_bits+v8_oop_lock_ignore_bits), - v8_oop_lock_mask = right_n_bits(v8_oop_lock_bits), - v8_oop_lock_mask_in_place = v8_oop_lock_mask << v8_oop_lock_ignore_bits - }; - - static int _v8_oop_lock_cache[nof_v8_oop_lock_cache_entries]; - private: static address _test_stop_entry; static address _stop_subroutine_entry; static address _flush_callers_register_windows_entry; - static int _atomic_memory_operation_lock; - static address _partial_subtype_check; public: - // %%% global lock for everyone who needs to use atomic_compare_and_exchange - // %%% or atomic_increment -- should probably use more locks for more - // %%% scalability-- for instance one for each eden space or group of - - // address of the lock for atomic_compare_and_exchange - static int* atomic_memory_operation_lock_addr() { return &_atomic_memory_operation_lock; } - - // accessor and mutator for _atomic_memory_operation_lock - static int atomic_memory_operation_lock() { return _atomic_memory_operation_lock; } - static void set_atomic_memory_operation_lock(int value) { _atomic_memory_operation_lock = value; } - // test assembler stop routine by setting registers static void (*test_stop_entry()) () { return CAST_TO_FN_PTR(void (*)(void), _test_stop_entry); } diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/templateInterpreter_sparc.cpp --- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1054,7 +1054,7 @@ // flush the windows now. 
We don't care about the current (protection) frame // only the outer frames - __ flush_windows(); + __ flushw(); // mark windows as flushed Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset()); diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/templateTable_sparc.cpp --- a/src/cpu/sparc/vm/templateTable_sparc.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/sparc/vm/templateTable_sparc.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1338,14 +1338,13 @@ void TemplateTable::fneg() { transition(ftos, ftos); - __ fneg(FloatRegisterImpl::S, Ftos_f); + __ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f); } void TemplateTable::dneg() { transition(dtos, dtos); - // v8 has fnegd if source and dest are the same - __ fneg(FloatRegisterImpl::D, Ftos_f); + __ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f); } @@ -1470,19 +1469,10 @@ __ st_long(Otos_l, __ d_tmp); __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d); - if (VM_Version::v9_instructions_work()) { - if (bytecode() == Bytecodes::_l2f) { - __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f); - } else { - __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d); - } + if (bytecode() == Bytecodes::_l2f) { + __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f); } else { - __ call_VM_leaf( - Lscratch, - bytecode() == Bytecodes::_l2f - ? CAST_FROM_FN_PTR(address, SharedRuntime::l2f) - : CAST_FROM_FN_PTR(address, SharedRuntime::l2d) - ); + __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d); } break; @@ -1490,11 +1480,6 @@ Label isNaN; // result must be 0 if value is NaN; test by comparing value to itself __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f); - // According to the v8 manual, you have to have a non-fp instruction - // between fcmp and fb. - if (!VM_Version::v9_instructions_work()) { - __ nop(); - } __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN); __ delayed()->clr(Otos_i); // NaN __ ftoi(FloatRegisterImpl::S, Ftos_f, F30); @@ -1537,16 +1522,7 @@ break; case Bytecodes::_d2f: - if (VM_Version::v9_instructions_work()) { __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f); - } - else { - // must uncache tos - __ push_d(); - __ pop_i(O0); - __ pop_i(O1); - __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::d2f)); - } break; default: ShouldNotReachHere(); @@ -1956,17 +1932,8 @@ __ ld( Rarray, Rscratch, Rscratch ); // (Rscratch is already in the native byte-ordering.) __ cmp( Rkey, Rscratch ); - if ( VM_Version::v9_instructions_work() ) { - __ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match()) - __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match()) - } - else { - Label end_of_if; - __ br( Assembler::less, true, Assembler::pt, end_of_if ); - __ delayed()->mov( Rh, Rj ); // if (<) Rj = Rh - __ mov( Rh, Ri ); // else i = h - __ bind(end_of_if); // } - } + __ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match()) + __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match()) // while (i+1 < j) __ bind( entry ); @@ -3418,9 +3385,7 @@ // has been allocated. __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case); - __ casx_under_lock(RtopAddr, RoldTopValue, RnewTopValue, - VM_Version::v9_instructions_work() ? 
NULL : - (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); + __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue); // if someone beat us on the allocation, try again, otherwise continue __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry); @@ -3701,14 +3666,7 @@ __ verify_oop(O4); // verify each monitor's oop __ tst(O4); // is this entry unused? - if (VM_Version::v9_instructions_work()) - __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1); - else { - Label L; - __ br( Assembler::zero, true, Assembler::pn, L ); - __ delayed()->mov(O3, O1); // rememeber this one if match - __ bind(L); - } + __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1); __ cmp(O4, O0); // check if current entry is for same object __ brx( Assembler::equal, false, Assembler::pn, exit ); diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/vm_version_sparc.cpp --- a/src/cpu/sparc/vm/vm_version_sparc.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/sparc/vm/vm_version_sparc.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -75,23 +75,14 @@ FLAG_SET_DEFAULT(AllocatePrefetchStyle, 1); } - if (has_v9()) { - assert(ArraycopySrcPrefetchDistance < 4096, "invalid value"); - if (ArraycopySrcPrefetchDistance >= 4096) - ArraycopySrcPrefetchDistance = 4064; - assert(ArraycopyDstPrefetchDistance < 4096, "invalid value"); - if (ArraycopyDstPrefetchDistance >= 4096) - ArraycopyDstPrefetchDistance = 4064; - } else { - if (ArraycopySrcPrefetchDistance > 0) { - warning("prefetch instructions are not available on this CPU"); - FLAG_SET_DEFAULT(ArraycopySrcPrefetchDistance, 0); - } - if (ArraycopyDstPrefetchDistance > 0) { - warning("prefetch instructions are not available on this CPU"); - FLAG_SET_DEFAULT(ArraycopyDstPrefetchDistance, 0); - } - } + guarantee(VM_Version::has_v9(), "only SPARC v9 is supported"); + + assert(ArraycopySrcPrefetchDistance < 4096, "invalid value"); + if (ArraycopySrcPrefetchDistance >= 4096) + ArraycopySrcPrefetchDistance = 4064; + assert(ArraycopyDstPrefetchDistance < 4096, "invalid value"); + if (ArraycopyDstPrefetchDistance >= 4096) + ArraycopyDstPrefetchDistance = 4064; UseSSE = 0; // Only on x86 and x64 diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/sparc/vm/vm_version_sparc.hpp --- a/src/cpu/sparc/vm/vm_version_sparc.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/sparc/vm/vm_version_sparc.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -177,10 +177,6 @@ return AllocatePrefetchDistance > 0 ? AllocatePrefetchStyle : 0; } - // Legacy - static bool v8_instructions_work() { return has_v8() && !has_v9(); } - static bool v9_instructions_work() { return has_v9(); } - // Assembler testing static void allow_all(); static void revert(); diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/assembler_x86.cpp --- a/src/cpu/x86/vm/assembler_x86.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/x86/vm/assembler_x86.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -1673,6 +1673,11 @@ emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66); } +void Assembler::movdqa(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66); +} + void Assembler::movdqu(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3); @@ -2286,6 +2291,38 @@ emit_int8(imm8); } +void Assembler::pextrd(Register dst, XMMRegister src, int imm8) { + assert(VM_Version::supports_sse4_1(), ""); + int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, false); + emit_int8(0x16); + emit_int8((unsigned char)(0xC0 | encode)); + emit_int8(imm8); +} + +void Assembler::pextrq(Register dst, XMMRegister src, int imm8) { + assert(VM_Version::supports_sse4_1(), ""); + int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, true); + emit_int8(0x16); + emit_int8((unsigned char)(0xC0 | encode)); + emit_int8(imm8); +} + +void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) { + assert(VM_Version::supports_sse4_1(), ""); + int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, false); + emit_int8(0x22); + emit_int8((unsigned char)(0xC0 | encode)); + emit_int8(imm8); +} + +void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) { + assert(VM_Version::supports_sse4_1(), ""); + int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, true); + emit_int8(0x22); + emit_int8((unsigned char)(0xC0 | encode)); + emit_int8(imm8); +} + void Assembler::pmovzxbw(XMMRegister dst, Address src) { assert(VM_Version::supports_sse4_1(), ""); InstructionMark im(this); @@ -3691,6 +3728,16 @@ emit_int8((unsigned char)(0xC0 | encode)); } +// Carry-Less Multiplication Quadword +void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) { + assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), ""); + bool vector256 = false; + int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A); + emit_int8(0x44); + emit_int8((unsigned char)(0xC0 | encode)); + emit_int8((unsigned char)mask); +} + void Assembler::vzeroupper() { assert(VM_Version::supports_avx(), ""); (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE); diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/assembler_x86.hpp --- a/src/cpu/x86/vm/assembler_x86.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/x86/vm/assembler_x86.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1266,6 +1266,7 @@ // Move Aligned Double Quadword void movdqa(XMMRegister dst, XMMRegister src); + void movdqa(XMMRegister dst, Address src); // Move Unaligned Double Quadword void movdqu(Address dst, XMMRegister src); @@ -1404,6 +1405,14 @@ void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8); void pcmpestri(XMMRegister xmm1, Address src, int imm8); + // SSE 4.1 extract + void pextrd(Register dst, XMMRegister src, int imm8); + void pextrq(Register dst, XMMRegister src, int imm8); + + // SSE 4.1 insert + void pinsrd(XMMRegister dst, Register src, int imm8); + void pinsrq(XMMRegister dst, Register src, int imm8); + // SSE4.1 packed move void pmovzxbw(XMMRegister dst, XMMRegister src); void pmovzxbw(XMMRegister dst, Address src); @@ -1764,6 +1773,9 @@ // duplicate 
4-bytes integer data from src into 8 locations in dest void vpbroadcastd(XMMRegister dst, XMMRegister src); + // Carry-Less Multiplication Quadword + void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask); + // AVX instruction which is used to clear upper 128 bits of YMM registers and // to avoid transaction penalty between AVX and SSE states. There is no // penalty if legacy SSE instructions are encoded using VEX prefix because diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/c1_LIRAssembler_x86.cpp --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -3512,6 +3512,22 @@ __ bind(*stub->continuation()); } +void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { + assert(op->crc()->is_single_cpu(), "crc must be register"); + assert(op->val()->is_single_cpu(), "byte value must be register"); + assert(op->result_opr()->is_single_cpu(), "result must be register"); + Register crc = op->crc()->as_register(); + Register val = op->val()->as_register(); + Register res = op->result_opr()->as_register(); + + assert_different_registers(val, crc, res); + + __ lea(res, ExternalAddress(StubRoutines::crc_table_addr())); + __ notl(crc); // ~crc + __ update_byte_crc32(crc, val, res); + __ notl(crc); // ~crc + __ mov(res, crc); +} void LIR_Assembler::emit_lock(LIR_OpLock* op) { Register obj = op->obj_opr()->as_register(); // may not be an oop diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/c1_LIRGenerator_x86.cpp --- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -932,6 +932,81 @@ __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint } +void LIRGenerator::do_update_CRC32(Intrinsic* x) { + assert(UseCRC32Intrinsics, "need AVX and CLMUL instruction support"); + // Make all state_for calls early since they can emit code + LIR_Opr result = rlock_result(x); + int flags = 0; + switch (x->id()) { + case vmIntrinsics::_updateCRC32: { + LIRItem crc(x->argument_at(0), this); + LIRItem val(x->argument_at(1), this); + crc.load_item(); + val.load_item(); + __ update_crc32(crc.result(), val.result(), result); + break; + } + case vmIntrinsics::_updateBytesCRC32: + case vmIntrinsics::_updateByteBufferCRC32: { + bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32); + + LIRItem crc(x->argument_at(0), this); + LIRItem buf(x->argument_at(1), this); + LIRItem off(x->argument_at(2), this); + LIRItem len(x->argument_at(3), this); + buf.load_item(); + off.load_nonconstant(); + + LIR_Opr index = off.result(); + int offset = is_updateBytes ?
arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0; + if(off.result()->is_constant()) { + index = LIR_OprFact::illegalOpr; + offset += off.result()->as_jint(); + } + LIR_Opr base_op = buf.result(); + +#ifndef _LP64 + if (!is_updateBytes) { // long b raw address + base_op = new_register(T_INT); + __ convert(Bytecodes::_l2i, buf.result(), base_op); + } +#else + if (index->is_valid()) { + LIR_Opr tmp = new_register(T_LONG); + __ convert(Bytecodes::_i2l, index, tmp); + index = tmp; + } +#endif + + LIR_Address* a = new LIR_Address(base_op, + index, + LIR_Address::times_1, + offset, + T_BYTE); + BasicTypeList signature(3); + signature.append(T_INT); + signature.append(T_ADDRESS); + signature.append(T_INT); + CallingConvention* cc = frame_map()->c_calling_convention(&signature); + const LIR_Opr result_reg = result_register_for(x->type()); + + LIR_Opr addr = new_pointer_register(); + __ leal(LIR_OprFact::address(a), addr); + + crc.load_item_force(cc->at(0)); + __ move(addr, cc->at(1)); + len.load_item_force(cc->at(2)); + + __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args()); + __ move(result_reg, result); + + break; + } + default: { + ShouldNotReachHere(); + } + } +} // _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f // _i2b, _i2c, _i2s diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/c1_globals_x86.hpp --- a/src/cpu/x86/vm/c1_globals_x86.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/x86/vm/c1_globals_x86.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -50,8 +50,9 @@ define_pd_global(intx, ReservedCodeCacheSize, 32*M ); define_pd_global(bool, ProfileInterpreter, false); define_pd_global(intx, CodeCacheExpansionSize, 32*K ); -define_pd_global(uintx,CodeCacheMinBlockLength, 1); -define_pd_global(uintx,MetaspaceSize, 12*M ); +define_pd_global(uintx, CodeCacheMinBlockLength, 1); +define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K); +define_pd_global(uintx, MetaspaceSize, 12*M ); define_pd_global(bool, NeverActAsServerClassMachine, true ); define_pd_global(uint64_t,MaxRAM, 1ULL*G); define_pd_global(bool, CICompileOSR, true ); diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/c2_globals_x86.hpp --- a/src/cpu/x86/vm/c2_globals_x86.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/x86/vm/c2_globals_x86.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -85,7 +85,8 @@ define_pd_global(bool, OptoBundling, false); define_pd_global(intx, ReservedCodeCacheSize, 48*M); -define_pd_global(uintx,CodeCacheMinBlockLength, 4); +define_pd_global(uintx, CodeCacheMinBlockLength, 4); +define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K); // Heap related flags define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(16*M)); diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/frame_x86.cpp --- a/src/cpu/x86/vm/frame_x86.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/x86/vm/frame_x86.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,7 @@ #include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" #include "runtime/monitorChunk.hpp" +#include "runtime/os.hpp" #include "runtime/signature.hpp" #include "runtime/stubCodeGenerator.hpp" #include "runtime/stubRoutines.hpp" @@ -54,16 +55,22 @@ address sp = (address)_sp; address fp = (address)_fp; address unextended_sp = (address)_unextended_sp; - // sp must be within the stack - bool sp_safe = (sp <= thread->stack_base()) && - (sp >= thread->stack_base() - thread->stack_size()); + + // consider stack guards when trying to determine "safe" stack pointers + static size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackYellowPages + StackRedPages) * os::vm_page_size() : 0; + size_t usable_stack_size = thread->stack_size() - stack_guard_size; + + // sp must be within the usable part of the stack (not in guards) + bool sp_safe = (sp < thread->stack_base()) && + (sp >= thread->stack_base() - usable_stack_size); + if (!sp_safe) { return false; } // unextended sp must be within the stack and above or equal sp - bool unextended_sp_safe = (unextended_sp <= thread->stack_base()) && + bool unextended_sp_safe = (unextended_sp < thread->stack_base()) && (unextended_sp >= sp); if (!unextended_sp_safe) { @@ -71,7 +78,8 @@ } // an fp must be within the stack and above (but not equal) sp - bool fp_safe = (fp <= thread->stack_base()) && (fp > sp); + // second evaluation on fp+ is added to handle situation where fp is -1 + bool fp_safe = (fp < thread->stack_base() && (fp > sp) && (((fp + (return_addr_offset * sizeof(void*))) < thread->stack_base()))); // We know sp/unextended_sp are safe only fp is questionable here @@ -86,6 +94,13 @@ // other generic buffer blobs are more problematic so we just assume they are // ok. adapter blobs never have a frame complete and are never ok. + // check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc + + if (!Interpreter::contains(_pc) && _cb->frame_size() <= 0) { + //assert(0, "Invalid frame_size"); + return false; + } + if (!_cb->is_frame_complete_at(_pc)) { if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) { return false; @@ -107,7 +122,7 @@ address jcw = (address)entry_frame_call_wrapper(); - bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > fp); + bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > fp); return jcw_safe; @@ -134,12 +149,6 @@ sender_pc = (address) *(sender_sp-1); } - // We must always be able to find a recognizable pc - CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc); - if (sender_pc == NULL || sender_blob == NULL) { - return false; - } - // If the potential sender is the interpreter then we can do some more checking if (Interpreter::contains(sender_pc)) { @@ -149,7 +158,7 @@ // is really a frame pointer. 
intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset); - bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp); + bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp); if (!saved_fp_safe) { return false; @@ -163,6 +172,17 @@ } + // We must always be able to find a recognizable pc + CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc); + if (sender_pc == NULL || sender_blob == NULL) { + return false; + } + + // Could be a zombie method + if (sender_blob->is_zombie() || sender_blob->is_unloaded()) { + return false; + } + // Could just be some random pointer within the codeBlob if (!sender_blob->code_contains(sender_pc)) { return false; @@ -174,10 +194,9 @@ } // Could be the call_stub - if (StubRoutines::returns_to_call_stub(sender_pc)) { intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset); - bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp); + bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp); if (!saved_fp_safe) { return false; @@ -190,15 +209,24 @@ // Validate the JavaCallWrapper an entry frame must have address jcw = (address)sender.entry_frame_call_wrapper(); - bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > (address)sender.fp()); + bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > (address)sender.fp()); return jcw_safe; } - // If the frame size is 0 something is bad because every nmethod has a non-zero frame size + if (sender_blob->is_nmethod()) { + nmethod* nm = sender_blob->as_nmethod_or_null(); + if (nm != NULL) { + if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc)) { + return false; + } + } + } + + // If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size // because the return address counts against the callee's frame. - if (sender_blob->frame_size() == 0) { + if (sender_blob->frame_size() <= 0) { assert(!sender_blob->is_nmethod(), "should count return address at least"); return false; } @@ -208,7 +236,9 @@ // should not be anything but the call stub (already covered), the interpreter (already covered) // or an nmethod. - assert(sender_blob->is_nmethod(), "Impossible call chain"); + if (!sender_blob->is_nmethod()) { + return false; + } // Could put some more validation for the potential non-interpreted sender // frame we'd create by calling sender if I could think of any. Wait for next crash in forte... @@ -557,7 +587,7 @@ // validate ConstantPoolCache* ConstantPoolCache* cp = *interpreter_frame_cache_addr(); - if (cp == NULL || !cp->is_metadata()) return false; + if (cp == NULL || !cp->is_metaspace_object()) return false; // validate locals diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/globals_x86.hpp --- a/src/cpu/x86/vm/globals_x86.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/x86/vm/globals_x86.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -55,7 +55,7 @@ define_pd_global(intx, InlineFrequencyCount, 100); define_pd_global(intx, InlineSmallCode, 1000); -define_pd_global(intx, StackYellowPages, 2); +define_pd_global(intx, StackYellowPages, NOT_WINDOWS(2) WINDOWS_ONLY(3)); define_pd_global(intx, StackRedPages, 1); #ifdef AMD64 // Very large C++ stack frames using solaris-amd64 optimized builds @@ -96,6 +96,9 @@ product(intx, UseAVX, 99, \ "Highest supported AVX instructions set on x86/x64") \ \ + product(bool, UseCLMUL, false, \ + "Control whether CLMUL instructions can be used on x86/x64") \ + \ diagnostic(bool, UseIncDec, true, \ "Use INC, DEC instructions on x86") \ \ diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/interpreterGenerator_x86.hpp --- a/src/cpu/x86/vm/interpreterGenerator_x86.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/x86/vm/interpreterGenerator_x86.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,6 +39,8 @@ address generate_empty_entry(void); address generate_accessor_entry(void); address generate_Reference_get_entry(); + address generate_CRC32_update_entry(); + address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind); void lock_method(void); void generate_stack_overflow_check(void); diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/macroAssembler_x86.cpp --- a/src/cpu/x86/vm/macroAssembler_x86.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/x86/vm/macroAssembler_x86.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -2794,6 +2794,15 @@ } } +void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src) { + if (reachable(src)) { + Assembler::movdqa(dst, as_Address(src)); + } else { + lea(rscratch1, src); + Assembler::movdqa(dst, Address(rscratch1, 0)); + } +} + void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) { if (reachable(src)) { Assembler::movsd(dst, as_Address(src)); @@ -6388,6 +6397,193 @@ bind(L_done); } +/** + * Emits code to update CRC-32 with a byte value according to constants in table + * + * @param [in,out]crc Register containing the crc. + * @param [in]val Register containing the byte to fold into the CRC. + * @param [in]table Register containing the table of crc constants. 
+ * + * uint32_t crc; + * val = crc_table[(val ^ crc) & 0xFF]; + * crc = val ^ (crc >> 8); + * + */ +void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { + xorl(val, crc); + andl(val, 0xFF); + shrl(crc, 8); // unsigned shift + xorl(crc, Address(table, val, Address::times_4, 0)); +} + +/** + * Fold 128-bit data chunk + */ +void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) { + vpclmulhdq(xtmp, xK, xcrc); // [123:64] + vpclmulldq(xcrc, xK, xcrc); // [63:0] + vpxor(xcrc, xcrc, Address(buf, offset), false /* vector256 */); + pxor(xcrc, xtmp); +} + +void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) { + vpclmulhdq(xtmp, xK, xcrc); + vpclmulldq(xcrc, xK, xcrc); + pxor(xcrc, xbuf); + pxor(xcrc, xtmp); +} + +/** + * 8-bit folds to compute 32-bit CRC + * + * uint64_t xcrc; + * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8); + */ +void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) { + movdl(tmp, xcrc); + andl(tmp, 0xFF); + movdl(xtmp, Address(table, tmp, Address::times_4, 0)); + psrldq(xcrc, 1); // unsigned shift one byte + pxor(xcrc, xtmp); +} + +/** + * uint32_t crc; + * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); + */ +void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { + movl(tmp, crc); + andl(tmp, 0xFF); + shrl(crc, 8); + xorl(crc, Address(table, tmp, Address::times_4, 0)); +} + +/** + * @param crc register containing existing CRC (32-bit) + * @param buf register pointing to input byte buffer (byte*) + * @param len register containing number of bytes + * @param table register that will contain address of CRC table + * @param tmp scratch register + */ +void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) { + assert_different_registers(crc, buf, len, table, tmp, rax); + + Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; + Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; + + lea(table, ExternalAddress(StubRoutines::crc_table_addr())); + notl(crc); // ~crc + cmpl(len, 16); + jcc(Assembler::less, L_tail); + + // Align buffer to 16 bytes + movl(tmp, buf); + andl(tmp, 0xF); + jccb(Assembler::zero, L_aligned); + subl(tmp, 16); + addl(len, tmp); + + align(4); + BIND(L_align_loop); + movsbl(rax, Address(buf, 0)); // load byte with sign extension + update_byte_crc32(crc, rax, table); + increment(buf); + incrementl(tmp); + jccb(Assembler::less, L_align_loop); + + BIND(L_aligned); + movl(tmp, len); // save + shrl(len, 4); + jcc(Assembler::zero, L_tail_restore); + + // Fold crc into first bytes of vector + movdqa(xmm1, Address(buf, 0)); + movdl(rax, xmm1); + xorl(crc, rax); + pinsrd(xmm1, crc, 0); + addptr(buf, 16); + subl(len, 4); // len > 0 + jcc(Assembler::less, L_fold_tail); + + movdqa(xmm2, Address(buf, 0)); + movdqa(xmm3, Address(buf, 16)); + movdqa(xmm4, Address(buf, 32)); + addptr(buf, 48); + subl(len, 3); + jcc(Assembler::lessEqual, L_fold_512b); + + // Fold total 512 bits of polynomial on each iteration, + // 128 bits per each of 4 parallel streams. 
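The head-alignment loop above and the tail loop further down both lean on update_byte_crc32, whose per-byte step is exactly the recurrence quoted in its header comment. A minimal C sketch of that scalar step (helper name hypothetical, not part of the patch):

// Sketch only: one table-driven step of the reflected CRC-32, as
// update_byte_crc32 performs for each unaligned head or tail byte.
static uint32_t crc32_step(uint32_t crc, uint8_t val, const uint32_t table[256]) {
  return table[(crc ^ val) & 0xFF] ^ (crc >> 8);
}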
+ movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32)); + + align(32); + BIND(L_fold_512b_loop); + fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); + fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16); + fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32); + fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48); + addptr(buf, 64); + subl(len, 4); + jcc(Assembler::greater, L_fold_512b_loop); + + // Fold 512 bits to 128 bits. + BIND(L_fold_512b); + movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16)); + fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2); + fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3); + fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4); + + // Fold the rest of 128 bits data chunks + BIND(L_fold_tail); + addl(len, 3); + jccb(Assembler::lessEqual, L_fold_128b); + movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16)); + + BIND(L_fold_tail_loop); + fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); + addptr(buf, 16); + decrementl(len); + jccb(Assembler::greater, L_fold_tail_loop); + + // Fold 128 bits in xmm1 down into 32 bits in crc register. + BIND(L_fold_128b); + movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr())); + vpclmulqdq(xmm2, xmm0, xmm1, 0x1); + vpand(xmm3, xmm0, xmm2, false /* vector256 */); + vpclmulqdq(xmm0, xmm0, xmm3, 0x1); + psrldq(xmm1, 8); + psrldq(xmm2, 4); + pxor(xmm0, xmm1); + pxor(xmm0, xmm2); + + // 8 8-bit folds to compute 32-bit CRC. + for (int j = 0; j < 4; j++) { + fold_8bit_crc32(xmm0, table, xmm1, rax); + } + movdl(crc, xmm0); // mov 32 bits to general register + for (int j = 0; j < 4; j++) { + fold_8bit_crc32(crc, table, rax); + } + + BIND(L_tail_restore); + movl(len, tmp); // restore + BIND(L_tail); + andl(len, 0xf); + jccb(Assembler::zero, L_exit); + + // Fold the rest of bytes + align(4); + BIND(L_tail_loop); + movsbl(rax, Address(buf, 0)); // load byte with sign extension + update_byte_crc32(crc, rax, table); + increment(buf); + decrementl(len); + jccb(Assembler::greater, L_tail_loop); + + BIND(L_exit); + notl(crc); // ~c +} + #undef BIND #undef BLOCK_COMMENT diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/macroAssembler_x86.hpp --- a/src/cpu/x86/vm/macroAssembler_x86.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/x86/vm/macroAssembler_x86.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -899,6 +899,11 @@ void movdqu(XMMRegister dst, XMMRegister src) { Assembler::movdqu(dst, src); } void movdqu(XMMRegister dst, AddressLiteral src); + // Move Aligned Double Quadword + void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); } + void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); } + void movdqa(XMMRegister dst, AddressLiteral src); + void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); } void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); } void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); } @@ -1027,6 +1032,16 @@ Assembler::vinsertf128h(dst, nds, src); } + // Carry-Less Multiplication Quadword + void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) { + // 0x00 - multiply lower 64 bits [0:63] + Assembler::vpclmulqdq(dst, nds, src, 0x00); + } + void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { + // 0x11 - multiply upper 64 bits [64:127] + Assembler::vpclmulqdq(dst, nds, src, 0x11); + } + // Data void cmov32( Condition cc, Register dst, Address src); @@ -1143,6 +1158,16 @@ XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3, XMMRegister tmp4, Register tmp5, Register result); + // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic. + void update_byte_crc32(Register crc, Register val, Register table); + void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp); + // Fold 128-bit data chunk + void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset); + void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf); + // Fold 8-bit data + void fold_8bit_crc32(Register crc, Register table, Register tmp); + void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp); + #undef VIRTUAL }; diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/relocInfo_x86.cpp --- a/src/cpu/x86/vm/relocInfo_x86.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/x86/vm/relocInfo_x86.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -177,30 +177,6 @@ return *pd_address_in_code(); } -int Relocation::pd_breakpoint_size() { - // minimum breakpoint size, in short words - return NativeIllegalInstruction::instruction_size / sizeof(short); - } - -void Relocation::pd_swap_in_breakpoint(address x, short* instrs, int instrlen) { - Untested("pd_swap_in_breakpoint"); - if (instrs != NULL) { - assert(instrlen * sizeof(short) == NativeIllegalInstruction::instruction_size, "enough instrlen in reloc.
data"); - for (int i = 0; i < instrlen; i++) { - instrs[i] = ((short*)x)[i]; - } - } - NativeIllegalInstruction::insert(x); -} - - -void Relocation::pd_swap_out_breakpoint(address x, short* instrs, int instrlen) { - Untested("pd_swap_out_breakpoint"); - assert(NativeIllegalInstruction::instruction_size == sizeof(short), "right address unit for update"); - NativeInstruction* ni = nativeInstruction_at(x); - *(short*)ni->addr_at(0) = instrs[0]; -} - void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) { #ifdef _LP64 if (!Assembler::is_polling_page_far()) { diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/sharedRuntime_x86_64.cpp --- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1429,6 +1429,8 @@ assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg, "possible collision"); + __ block_comment("unpack_array_argument {"); + // Pass the length, ptr pair Label is_null, done; VMRegPair tmp; @@ -1453,6 +1455,8 @@ move_ptr(masm, tmp, body_arg); move32_64(masm, tmp, length_arg); __ bind(done); + + __ block_comment("} unpack_array_argument"); } @@ -2170,27 +2174,34 @@ } } - // point c_arg at the first arg that is already loaded in case we - // need to spill before we call out - int c_arg = total_c_args - total_in_args; + int c_arg; // Pre-load a static method's oop into r14. Used both by locking code and // the normal JNI call code. - if (method->is_static() && !is_critical_native) { - - // load oop into a register - __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror())); - - // Now handlize the static class mirror it's known not-null. - __ movptr(Address(rsp, klass_offset), oop_handle_reg); - map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); - - // Now get the handle - __ lea(oop_handle_reg, Address(rsp, klass_offset)); - // store the klass handle as second argument - __ movptr(c_rarg1, oop_handle_reg); - // and protect the arg if we must spill - c_arg--; + if (!is_critical_native) { + // point c_arg at the first arg that is already loaded in case we + // need to spill before we call out + c_arg = total_c_args - total_in_args; + + if (method->is_static()) { + + // load oop into a register + __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror())); + + // Now handlize the static class mirror it's known not-null. + __ movptr(Address(rsp, klass_offset), oop_handle_reg); + map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); + + // Now get the handle + __ lea(oop_handle_reg, Address(rsp, klass_offset)); + // store the klass handle as second argument + __ movptr(c_rarg1, oop_handle_reg); + // and protect the arg if we must spill + c_arg--; + } + } else { + // For JNI critical methods we need to save all registers in save_args. + c_arg = 0; } // Change state to native (we save the return address in the thread, since it might not diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/stubGenerator_x86_32.cpp --- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -83,7 +83,7 @@ private: #ifdef PRODUCT -#define inc_counter_np(counter) (0) +#define inc_counter_np(counter) ((void)0) #else void inc_counter_np_(int& counter) { __ incrementl(ExternalAddress((address)&counter)); } @@ -2713,6 +2713,92 @@ return start; } + /** + * Arguments: + * + * Inputs: + * rsp(4) - int crc + * rsp(8) - byte* buf + * rsp(12) - int length + * + * Output: + * rax - int crc result + */ + address generate_updateBytesCRC32() { + assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions"); + + __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); + + address start = __ pc(); + + const Register crc = rdx; // crc + const Register buf = rsi; // source java byte array address + const Register len = rcx; // length + const Register table = rdi; // crc_table address (reuse register) + const Register tmp = rbx; + assert_different_registers(crc, buf, len, table, tmp, rax); + + BLOCK_COMMENT("Entry:"); + __ enter(); // required for proper stackwalking of RuntimeStub frame + __ push(rsi); + __ push(rdi); + __ push(rbx); + + Address crc_arg(rbp, 8 + 0); + Address buf_arg(rbp, 8 + 4); + Address len_arg(rbp, 8 + 8); + + // Load up: + __ movl(crc, crc_arg); + __ movptr(buf, buf_arg); + __ movl(len, len_arg); + + __ kernel_crc32(crc, buf, len, table, tmp); + + __ movl(rax, crc); + __ pop(rbx); + __ pop(rdi); + __ pop(rsi); + __ leave(); // required for proper stackwalking of RuntimeStub frame + __ ret(0); + + return start; + } + + // Safefetch stubs. + void generate_safefetch(const char* name, int size, address* entry, + address* fault_pc, address* continuation_pc) { + // safefetch signatures: + // int SafeFetch32(int* adr, int errValue); + // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); + + StubCodeMark mark(this, "StubRoutines", name); + + // Entry point, pc or function descriptor. + *entry = __ pc(); + + __ movl(rax, Address(rsp, 0x8)); + __ movl(rcx, Address(rsp, 0x4)); + // Load *adr into eax, may fault. + *fault_pc = __ pc(); + switch (size) { + case 4: + // int32_t + __ movl(rax, Address(rcx, 0)); + break; + case 8: + // int64_t + Unimplemented(); + break; + default: + ShouldNotReachHere(); + } + + // Return errValue or *adr. + *continuation_pc = __ pc(); + __ ret(0); + } public: // Information about frame layout at time of blocking runtime call. @@ -2887,6 +2973,12 @@ // Build this early so it's available for the interpreter StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError)); + + if (UseCRC32Intrinsics) { + // set table address before stub generation which uses it + StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table; + StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32(); + } } @@ -2919,6 +3011,14 @@ StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt(); StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt(); } + + // Safefetch stubs. 
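On 32-bit x86 only the 4-byte flavor is generated (the size-8 case above is Unimplemented()), so the registration that follows simply aliases the SafeFetchN entry points to the SafeFetch32 ones. That is sound because intptr_t is 32 bits wide on this target; in sketch form (static_assert is illustrative modern C++, not something in this 2013 code base):

// Sketch only: on an ILP32 target, intptr_t and int32_t share size and
// representation, so a single stub can back both SafeFetch entry points.
static_assert(sizeof(intptr_t) == sizeof(int32_t),
              "SafeFetchN may alias SafeFetch32 only when pointers are 32 bits");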
+ generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, + &StubRoutines::_safefetch32_fault_pc, + &StubRoutines::_safefetch32_continuation_pc); + StubRoutines::_safefetchN_entry = StubRoutines::_safefetch32_entry; + StubRoutines::_safefetchN_fault_pc = StubRoutines::_safefetch32_fault_pc; + StubRoutines::_safefetchN_continuation_pc = StubRoutines::_safefetch32_continuation_pc; } diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/stubGenerator_x86_64.cpp --- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -81,7 +81,7 @@ private: #ifdef PRODUCT -#define inc_counter_np(counter) (0) +#define inc_counter_np(counter) ((void)0) #else void inc_counter_np_(int& counter) { // This can destroy rscratch1 if counter is far from the code cache @@ -3357,7 +3357,45 @@ return start; } - + // Safefetch stubs. + void generate_safefetch(const char* name, int size, address* entry, + address* fault_pc, address* continuation_pc) { + // safefetch signatures: + // int SafeFetch32(int* adr, int errValue); + // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); + // + // arguments: + // c_rarg0 = adr + // c_rarg1 = errValue + // + // result: + // rax = *adr or errValue + + StubCodeMark mark(this, "StubRoutines", name); + + // Entry point, pc or function descriptor. + *entry = __ pc(); + + // Load *adr into c_rarg1, may fault. + *fault_pc = __ pc(); + switch (size) { + case 4: + // int32_t + __ movl(c_rarg1, Address(c_rarg0, 0)); + break; + case 8: + // int64_t + __ movq(c_rarg1, Address(c_rarg0, 0)); + break; + default: + ShouldNotReachHere(); + } + + // return errValue or *adr + *continuation_pc = __ pc(); + __ movq(rax, c_rarg1); + __ ret(0); + } // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time // to hide instruction latency @@ -3584,7 +3622,45 @@ return start; } - + /** + * Arguments: + * + * Inputs: + * c_rarg0 - int crc + * c_rarg1 - byte* buf + * c_rarg2 - int length + * + * Output: + * rax - int crc result + */ + address generate_updateBytesCRC32() { + assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions"); + + __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); + + address start = __ pc(); + // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) + // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
+ // rscratch1: r10 + const Register crc = c_rarg0; // crc + const Register buf = c_rarg1; // source java byte array address + const Register len = c_rarg2; // length + const Register table = c_rarg3; // crc_table address (reuse register) + const Register tmp = r11; + assert_different_registers(crc, buf, len, table, tmp, rax); + + BLOCK_COMMENT("Entry:"); + __ enter(); // required for proper stackwalking of RuntimeStub frame + + __ kernel_crc32(crc, buf, len, table, tmp); + + __ movl(rax, crc); + __ leave(); // required for proper stackwalking of RuntimeStub frame + __ ret(0); + + return start; + } #undef __ #define __ masm-> @@ -3736,6 +3812,11 @@ CAST_FROM_FN_PTR(address, SharedRuntime:: throw_StackOverflowError)); + if (UseCRC32Intrinsics) { + // set table address before stub generation which uses it + StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table; + StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32(); + } } void generate_all() { @@ -3790,6 +3871,14 @@ StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt(); StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel(); } + + // Safefetch stubs. + generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, + &StubRoutines::_safefetch32_fault_pc, + &StubRoutines::_safefetch32_continuation_pc); + generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, + &StubRoutines::_safefetchN_fault_pc, + &StubRoutines::_safefetchN_continuation_pc); } public: diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/stubRoutines_x86.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/cpu/x86/vm/stubRoutines_x86.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "runtime/deoptimization.hpp" +#include "runtime/frame.inline.hpp" +#include "runtime/stubRoutines.hpp" +#include "runtime/thread.inline.hpp" + +// Implementation of the platform-specific part of StubRoutines - for +// a description of how to extend it, see the stubRoutines.hpp file. + +address StubRoutines::x86::_verify_mxcsr_entry = NULL; +address StubRoutines::x86::_key_shuffle_mask_addr = NULL; + +uint64_t StubRoutines::x86::_crc_by128_masks[] = +{ + /* The fields in this structure are arranged so that they can be + * picked up two at a time with 128-bit loads. 
+ * + * Because of flipped bit order for this CRC polynomials + * the constant for X**N is left-shifted by 1. This is because + * a 64 x 64 polynomial multiply produces a 127-bit result + * but the highest term is always aligned to bit 0 in the container. + * Pre-shifting by one fixes this, at the cost of potentially making + * the 32-bit constant no longer fit in a 32-bit container (thus the + * use of uint64_t, though this is also the size used by the carry- + * less multiply instruction. + * + * In addition, the flipped bit order and highest-term-at-least-bit + * multiply changes the constants used. The 96-bit result will be + * aligned to the high-term end of the target 128-bit container, + * not the low-term end; that is, instead of a 512-bit or 576-bit fold, + * instead it is a 480 (=512-32) or 544 (=512+64-32) bit fold. + * + * This cause additional problems in the 128-to-64-bit reduction; see the + * code for details. By storing a mask in the otherwise unused half of + * a 128-bit constant, bits can be cleared before multiplication without + * storing and reloading. Note that staying on a 128-bit datapath means + * that some data is uselessly stored and some unused data is intersected + * with an irrelevant constant. + */ + + ((uint64_t) 0xffffffffUL), /* low of K_M_64 */ + ((uint64_t) 0xb1e6b092U << 1), /* high of K_M_64 */ + ((uint64_t) 0xba8ccbe8U << 1), /* low of K_160_96 */ + ((uint64_t) 0x6655004fU << 1), /* high of K_160_96 */ + ((uint64_t) 0xaa2215eaU << 1), /* low of K_544_480 */ + ((uint64_t) 0xe3720acbU << 1) /* high of K_544_480 */ +}; + +/** + * crc_table[] from jdk/src/share/native/java/util/zip/zlib-1.2.5/crc32.h + */ +juint StubRoutines::x86::_crc_table[] = +{ + 0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL, + 0x706af48fUL, 0xe963a535UL, 0x9e6495a3UL, 0x0edb8832UL, 0x79dcb8a4UL, + 0xe0d5e91eUL, 0x97d2d988UL, 0x09b64c2bUL, 0x7eb17cbdUL, 0xe7b82d07UL, + 0x90bf1d91UL, 0x1db71064UL, 0x6ab020f2UL, 0xf3b97148UL, 0x84be41deUL, + 0x1adad47dUL, 0x6ddde4ebUL, 0xf4d4b551UL, 0x83d385c7UL, 0x136c9856UL, + 0x646ba8c0UL, 0xfd62f97aUL, 0x8a65c9ecUL, 0x14015c4fUL, 0x63066cd9UL, + 0xfa0f3d63UL, 0x8d080df5UL, 0x3b6e20c8UL, 0x4c69105eUL, 0xd56041e4UL, + 0xa2677172UL, 0x3c03e4d1UL, 0x4b04d447UL, 0xd20d85fdUL, 0xa50ab56bUL, + 0x35b5a8faUL, 0x42b2986cUL, 0xdbbbc9d6UL, 0xacbcf940UL, 0x32d86ce3UL, + 0x45df5c75UL, 0xdcd60dcfUL, 0xabd13d59UL, 0x26d930acUL, 0x51de003aUL, + 0xc8d75180UL, 0xbfd06116UL, 0x21b4f4b5UL, 0x56b3c423UL, 0xcfba9599UL, + 0xb8bda50fUL, 0x2802b89eUL, 0x5f058808UL, 0xc60cd9b2UL, 0xb10be924UL, + 0x2f6f7c87UL, 0x58684c11UL, 0xc1611dabUL, 0xb6662d3dUL, 0x76dc4190UL, + 0x01db7106UL, 0x98d220bcUL, 0xefd5102aUL, 0x71b18589UL, 0x06b6b51fUL, + 0x9fbfe4a5UL, 0xe8b8d433UL, 0x7807c9a2UL, 0x0f00f934UL, 0x9609a88eUL, + 0xe10e9818UL, 0x7f6a0dbbUL, 0x086d3d2dUL, 0x91646c97UL, 0xe6635c01UL, + 0x6b6b51f4UL, 0x1c6c6162UL, 0x856530d8UL, 0xf262004eUL, 0x6c0695edUL, + 0x1b01a57bUL, 0x8208f4c1UL, 0xf50fc457UL, 0x65b0d9c6UL, 0x12b7e950UL, + 0x8bbeb8eaUL, 0xfcb9887cUL, 0x62dd1ddfUL, 0x15da2d49UL, 0x8cd37cf3UL, + 0xfbd44c65UL, 0x4db26158UL, 0x3ab551ceUL, 0xa3bc0074UL, 0xd4bb30e2UL, + 0x4adfa541UL, 0x3dd895d7UL, 0xa4d1c46dUL, 0xd3d6f4fbUL, 0x4369e96aUL, + 0x346ed9fcUL, 0xad678846UL, 0xda60b8d0UL, 0x44042d73UL, 0x33031de5UL, + 0xaa0a4c5fUL, 0xdd0d7cc9UL, 0x5005713cUL, 0x270241aaUL, 0xbe0b1010UL, + 0xc90c2086UL, 0x5768b525UL, 0x206f85b3UL, 0xb966d409UL, 0xce61e49fUL, + 0x5edef90eUL, 0x29d9c998UL, 0xb0d09822UL, 0xc7d7a8b4UL, 0x59b33d17UL, + 0x2eb40d81UL, 0xb7bd5c3bUL, 
0xc0ba6cadUL, 0xedb88320UL, 0x9abfb3b6UL, + 0x03b6e20cUL, 0x74b1d29aUL, 0xead54739UL, 0x9dd277afUL, 0x04db2615UL, + 0x73dc1683UL, 0xe3630b12UL, 0x94643b84UL, 0x0d6d6a3eUL, 0x7a6a5aa8UL, + 0xe40ecf0bUL, 0x9309ff9dUL, 0x0a00ae27UL, 0x7d079eb1UL, 0xf00f9344UL, + 0x8708a3d2UL, 0x1e01f268UL, 0x6906c2feUL, 0xf762575dUL, 0x806567cbUL, + 0x196c3671UL, 0x6e6b06e7UL, 0xfed41b76UL, 0x89d32be0UL, 0x10da7a5aUL, + 0x67dd4accUL, 0xf9b9df6fUL, 0x8ebeeff9UL, 0x17b7be43UL, 0x60b08ed5UL, + 0xd6d6a3e8UL, 0xa1d1937eUL, 0x38d8c2c4UL, 0x4fdff252UL, 0xd1bb67f1UL, + 0xa6bc5767UL, 0x3fb506ddUL, 0x48b2364bUL, 0xd80d2bdaUL, 0xaf0a1b4cUL, + 0x36034af6UL, 0x41047a60UL, 0xdf60efc3UL, 0xa867df55UL, 0x316e8eefUL, + 0x4669be79UL, 0xcb61b38cUL, 0xbc66831aUL, 0x256fd2a0UL, 0x5268e236UL, + 0xcc0c7795UL, 0xbb0b4703UL, 0x220216b9UL, 0x5505262fUL, 0xc5ba3bbeUL, + 0xb2bd0b28UL, 0x2bb45a92UL, 0x5cb36a04UL, 0xc2d7ffa7UL, 0xb5d0cf31UL, + 0x2cd99e8bUL, 0x5bdeae1dUL, 0x9b64c2b0UL, 0xec63f226UL, 0x756aa39cUL, + 0x026d930aUL, 0x9c0906a9UL, 0xeb0e363fUL, 0x72076785UL, 0x05005713UL, + 0x95bf4a82UL, 0xe2b87a14UL, 0x7bb12baeUL, 0x0cb61b38UL, 0x92d28e9bUL, + 0xe5d5be0dUL, 0x7cdcefb7UL, 0x0bdbdf21UL, 0x86d3d2d4UL, 0xf1d4e242UL, + 0x68ddb3f8UL, 0x1fda836eUL, 0x81be16cdUL, 0xf6b9265bUL, 0x6fb077e1UL, + 0x18b74777UL, 0x88085ae6UL, 0xff0f6a70UL, 0x66063bcaUL, 0x11010b5cUL, + 0x8f659effUL, 0xf862ae69UL, 0x616bffd3UL, 0x166ccf45UL, 0xa00ae278UL, + 0xd70dd2eeUL, 0x4e048354UL, 0x3903b3c2UL, 0xa7672661UL, 0xd06016f7UL, + 0x4969474dUL, 0x3e6e77dbUL, 0xaed16a4aUL, 0xd9d65adcUL, 0x40df0b66UL, + 0x37d83bf0UL, 0xa9bcae53UL, 0xdebb9ec5UL, 0x47b2cf7fUL, 0x30b5ffe9UL, + 0xbdbdf21cUL, 0xcabac28aUL, 0x53b39330UL, 0x24b4a3a6UL, 0xbad03605UL, + 0xcdd70693UL, 0x54de5729UL, 0x23d967bfUL, 0xb3667a2eUL, 0xc4614ab8UL, + 0x5d681b02UL, 0x2a6f2b94UL, 0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL, + 0x2d02ef8dUL +}; diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/stubRoutines_x86.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/cpu/x86/vm/stubRoutines_x86.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_X86_VM_STUBROUTINES_X86_HPP +#define CPU_X86_VM_STUBROUTINES_X86_HPP + +// This file holds the platform specific parts of the StubRoutines +// definition. See stubRoutines.hpp for a description on how to +// extend it. 
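For reference, the classic byte-at-a-time loop that a 256-entry table like _crc_table above drives; a minimal sketch only (crc_table stands in for StubRoutines::x86::_crc_table), matching the reflected CRC-32 of java.util.zip.CRC32. The hardware CLMUL path computes the same function, just many bytes per fold, and the interpreter entries below bracket the loop with the same pre/post inversion (the notl pairs):

    #include <cstdint>
    #include <cstddef>

    extern const uint32_t crc_table[256];  // stand-in for StubRoutines::x86::_crc_table

    // One-byte-at-a-time CRC-32 (reflected polynomial 0xEDB88320), the
    // algorithm the table above encodes.
    uint32_t crc32_bytes(uint32_t crc, const uint8_t* buf, size_t len) {
      crc = ~crc;                    // CRC-32 state is kept inverted
      for (size_t i = 0; i < len; i++) {
        crc = crc_table[(crc ^ buf[i]) & 0xff] ^ (crc >> 8);
      }
      return ~crc;                   // undo the inversion on the way out
    }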
+
+ private:
+  static address _verify_mxcsr_entry;
+  // shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers
+  static address _key_shuffle_mask_addr;
+  // masks and table for CRC32
+  static uint64_t _crc_by128_masks[];
+  static juint    _crc_table[];
+
+ public:
+  static address verify_mxcsr_entry()    { return _verify_mxcsr_entry; }
+  static address key_shuffle_mask_addr() { return _key_shuffle_mask_addr; }
+  static address crc_by128_masks_addr()  { return (address)_crc_by128_masks; }
+
+#endif // CPU_X86_VM_STUBROUTINES_X86_HPP
diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/stubRoutines_x86_32.cpp
--- a/src/cpu/x86/vm/stubRoutines_x86_32.cpp	Tue Jul 16 10:55:48 2013 -0400
+++ b/src/cpu/x86/vm/stubRoutines_x86_32.cpp	Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,4 @@
 // Implementation of the platform-specific part of StubRoutines - for
 // a description of how to extend it, see the stubRoutines.hpp file.
 
-address StubRoutines::x86::_verify_mxcsr_entry = NULL;
 address StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = NULL;
-address StubRoutines::x86::_key_shuffle_mask_addr = NULL;
diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/stubRoutines_x86_32.hpp
--- a/src/cpu/x86/vm/stubRoutines_x86_32.hpp	Tue Jul 16 10:55:48 2013 -0400
+++ b/src/cpu/x86/vm/stubRoutines_x86_32.hpp	Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -39,15 +39,12 @@
  friend class VMStructs;
 
 private:
-  static address _verify_mxcsr_entry;
  static address _verify_fpu_cntrl_wrd_entry;
-  // shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers
-  static address _key_shuffle_mask_addr;
 
 public:
-  static address verify_mxcsr_entry() { return _verify_mxcsr_entry; }
  static address verify_fpu_cntrl_wrd_entry() { return _verify_fpu_cntrl_wrd_entry; }
-  static address key_shuffle_mask_addr() { return _key_shuffle_mask_addr; }
+
+# include "stubRoutines_x86.hpp"
 };
diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/stubRoutines_x86_64.cpp
--- a/src/cpu/x86/vm/stubRoutines_x86_64.cpp	Tue Jul 16 10:55:48 2013 -0400
+++ b/src/cpu/x86/vm/stubRoutines_x86_64.cpp	Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -34,8 +34,6 @@ address StubRoutines::x86::_get_previous_fp_entry = NULL; address StubRoutines::x86::_get_previous_sp_entry = NULL; -address StubRoutines::x86::_verify_mxcsr_entry = NULL; - address StubRoutines::x86::_f2i_fixup = NULL; address StubRoutines::x86::_f2l_fixup = NULL; address StubRoutines::x86::_d2i_fixup = NULL; @@ -45,4 +43,3 @@ address StubRoutines::x86::_double_sign_mask = NULL; address StubRoutines::x86::_double_sign_flip = NULL; address StubRoutines::x86::_mxcsr_std = NULL; -address StubRoutines::x86::_key_shuffle_mask_addr = NULL; diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/stubRoutines_x86_64.hpp --- a/src/cpu/x86/vm/stubRoutines_x86_64.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/x86/vm/stubRoutines_x86_64.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,7 +42,6 @@ private: static address _get_previous_fp_entry; static address _get_previous_sp_entry; - static address _verify_mxcsr_entry; static address _f2i_fixup; static address _f2l_fixup; @@ -54,8 +53,6 @@ static address _double_sign_mask; static address _double_sign_flip; static address _mxcsr_std; - // shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers - static address _key_shuffle_mask_addr; public: @@ -69,11 +66,6 @@ return _get_previous_sp_entry; } - static address verify_mxcsr_entry() - { - return _verify_mxcsr_entry; - } - static address f2i_fixup() { return _f2i_fixup; @@ -119,7 +111,7 @@ return _mxcsr_std; } - static address key_shuffle_mask_addr() { return _key_shuffle_mask_addr; } +# include "stubRoutines_x86.hpp" }; diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/templateInterpreter_x86_32.cpp --- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -868,6 +868,120 @@ return generate_accessor_entry(); } +/** + * Method entry for static native methods: + * int java.util.zip.CRC32.update(int crc, int b) + */ +address InterpreterGenerator::generate_CRC32_update_entry() { + if (UseCRC32Intrinsics) { + address entry = __ pc(); + + // rbx,: Method* + // rsi: senderSP must preserved for slow path, set SP to it on fast path + // rdx: scratch + // rdi: scratch + + Label slow_path; + // If we need a safepoint check, generate full interpreter entry. + ExternalAddress state(SafepointSynchronize::address_of_state()); + __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), + SafepointSynchronize::_not_synchronized); + __ jcc(Assembler::notEqual, slow_path); + + // We don't generate local frame and don't align stack because + // we call stub code and there is no safepoint on this path. 
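Both CRC32 entries open with the same guard; its logic in C-like form, as a sketch only (the helper names below are placeholders, while the real check compares SafepointSynchronize::address_of_state() against _not_synchronized):

    // Placeholder declarations for the sketch; not HotSpot functions.
    bool safepoint_pending();
    int  slow_path_native_entry(int crc, int b);
    int  fast_crc32_update(int crc, int b);

    // Shape of every frameless interpreter fast path.
    int crc32_update_entry(int crc, int b) {
      if (safepoint_pending()) {
        // Full native entry: builds a frame and can stop at a safepoint.
        return slow_path_native_entry(crc, b);
      }
      // No frame and no safepoint poll: must not block before returning.
      return fast_crc32_update(crc, b);
    }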
+ + // Load parameters + const Register crc = rax; // crc + const Register val = rdx; // source java byte value + const Register tbl = rdi; // scratch + + // Arguments are reversed on java expression stack + __ movl(val, Address(rsp, wordSize)); // byte value + __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC + + __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr())); + __ notl(crc); // ~crc + __ update_byte_crc32(crc, val, tbl); + __ notl(crc); // ~crc + // result in rax + + // _areturn + __ pop(rdi); // get return address + __ mov(rsp, rsi); // set sp to sender sp + __ jmp(rdi); + + // generate a vanilla native entry as the slow path + __ bind(slow_path); + + (void) generate_native_entry(false); + + return entry; + } + return generate_native_entry(false); +} + +/** + * Method entry for static native methods: + * int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len) + * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len) + */ +address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { + if (UseCRC32Intrinsics) { + address entry = __ pc(); + + // rbx,: Method* + // rsi: senderSP must preserved for slow path, set SP to it on fast path + // rdx: scratch + // rdi: scratch + + Label slow_path; + // If we need a safepoint check, generate full interpreter entry. + ExternalAddress state(SafepointSynchronize::address_of_state()); + __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), + SafepointSynchronize::_not_synchronized); + __ jcc(Assembler::notEqual, slow_path); + + // We don't generate local frame and don't align stack because + // we call stub code and there is no safepoint on this path. + + // Load parameters + const Register crc = rax; // crc + const Register buf = rdx; // source java byte array address + const Register len = rdi; // length + + // Arguments are reversed on java expression stack + __ movl(len, Address(rsp, wordSize)); // Length + // Calculate address of start element + if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) { + __ movptr(buf, Address(rsp, 3*wordSize)); // long buf + __ addptr(buf, Address(rsp, 2*wordSize)); // + offset + __ movl(crc, Address(rsp, 5*wordSize)); // Initial CRC + } else { + __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array + __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size + __ addptr(buf, Address(rsp, 2*wordSize)); // + offset + __ movl(crc, Address(rsp, 4*wordSize)); // Initial CRC + } + + __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len); + // result in rax + + // _areturn + __ pop(rdi); // get return address + __ mov(rsp, rsi); // set sp to sender sp + __ jmp(rdi); + + // generate a vanilla native entry as the slow path + __ bind(slow_path); + + (void) generate_native_entry(false); + + return entry; + } + return generate_native_entry(false); +} + // // Interpreter stub for calling a native method. 
(asm interpreter) // This sets up a somewhat different looking stack for calling the native method @@ -1501,15 +1615,16 @@ // determine code generation flags bool synchronized = false; address entry_point = NULL; + InterpreterGenerator* ig_this = (InterpreterGenerator*)this; switch (kind) { - case Interpreter::zerolocals : break; - case Interpreter::zerolocals_synchronized: synchronized = true; break; - case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break; - case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break; - case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break; - case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break; - case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break; + case Interpreter::zerolocals : break; + case Interpreter::zerolocals_synchronized: synchronized = true; break; + case Interpreter::native : entry_point = ig_this->generate_native_entry(false); break; + case Interpreter::native_synchronized : entry_point = ig_this->generate_native_entry(true); break; + case Interpreter::empty : entry_point = ig_this->generate_empty_entry(); break; + case Interpreter::accessor : entry_point = ig_this->generate_accessor_entry(); break; + case Interpreter::abstract : entry_point = ig_this->generate_abstract_entry(); break; case Interpreter::java_lang_math_sin : // fall thru case Interpreter::java_lang_math_cos : // fall thru @@ -1519,9 +1634,15 @@ case Interpreter::java_lang_math_log10 : // fall thru case Interpreter::java_lang_math_sqrt : // fall thru case Interpreter::java_lang_math_pow : // fall thru - case Interpreter::java_lang_math_exp : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break; + case Interpreter::java_lang_math_exp : entry_point = ig_this->generate_math_entry(kind); break; case Interpreter::java_lang_ref_reference_get - : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break; + : entry_point = ig_this->generate_Reference_get_entry(); break; + case Interpreter::java_util_zip_CRC32_update + : entry_point = ig_this->generate_CRC32_update_entry(); break; + case Interpreter::java_util_zip_CRC32_updateBytes + : // fall thru + case Interpreter::java_util_zip_CRC32_updateByteBuffer + : entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break; default: fatal(err_msg("unexpected method kind: %d", kind)); break; @@ -1529,7 +1650,7 @@ if (entry_point) return entry_point; - return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized); + return ig_this->generate_normal_entry(synchronized); } diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/templateInterpreter_x86_64.cpp --- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -840,6 +840,117 @@ return generate_accessor_entry(); } +/** + * Method entry for static native methods: + * int java.util.zip.CRC32.update(int crc, int b) + */ +address InterpreterGenerator::generate_CRC32_update_entry() { + if (UseCRC32Intrinsics) { + address entry = __ pc(); + + // rbx,: Method* + // rsi: senderSP must preserved for slow path, set SP to it on fast path + // rdx: scratch + // rdi: scratch + + Label slow_path; + // If we need a safepoint check, generate full interpreter entry. 
+ ExternalAddress state(SafepointSynchronize::address_of_state()); + __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), + SafepointSynchronize::_not_synchronized); + __ jcc(Assembler::notEqual, slow_path); + + // We don't generate local frame and don't align stack because + // we call stub code and there is no safepoint on this path. + + // Load parameters + const Register crc = rax; // crc + const Register val = rdx; // source java byte value + const Register tbl = rdi; // scratch + + // Arguments are reversed on java expression stack + __ movl(val, Address(rsp, wordSize)); // byte value + __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC + + __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr())); + __ notl(crc); // ~crc + __ update_byte_crc32(crc, val, tbl); + __ notl(crc); // ~crc + // result in rax + + // _areturn + __ pop(rdi); // get return address + __ mov(rsp, rsi); // set sp to sender sp + __ jmp(rdi); + + // generate a vanilla native entry as the slow path + __ bind(slow_path); + + (void) generate_native_entry(false); + + return entry; + } + return generate_native_entry(false); +} + +/** + * Method entry for static native methods: + * int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len) + * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len) + */ +address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { + if (UseCRC32Intrinsics) { + address entry = __ pc(); + + // rbx,: Method* + // r13: senderSP must preserved for slow path, set SP to it on fast path + + Label slow_path; + // If we need a safepoint check, generate full interpreter entry. + ExternalAddress state(SafepointSynchronize::address_of_state()); + __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), + SafepointSynchronize::_not_synchronized); + __ jcc(Assembler::notEqual, slow_path); + + // We don't generate local frame and don't align stack because + // we call stub code and there is no safepoint on this path. + + // Load parameters + const Register crc = c_rarg0; // crc + const Register buf = c_rarg1; // source java byte array address + const Register len = c_rarg2; // length + + // Arguments are reversed on java expression stack + __ movl(len, Address(rsp, wordSize)); // Length + // Calculate address of start element + if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) { + __ movptr(buf, Address(rsp, 3*wordSize)); // long buf + __ addptr(buf, Address(rsp, 2*wordSize)); // + offset + __ movl(crc, Address(rsp, 5*wordSize)); // Initial CRC + } else { + __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array + __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size + __ addptr(buf, Address(rsp, 2*wordSize)); // + offset + __ movl(crc, Address(rsp, 4*wordSize)); // Initial CRC + } + + __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len); + // result in rax + + // _areturn + __ pop(rdi); // get return address + __ mov(rsp, r13); // set sp to sender sp + __ jmp(rdi); + + // generate a vanilla native entry as the slow path + __ bind(slow_path); + + (void) generate_native_entry(false); + + return entry; + } + return generate_native_entry(false); +} // Interpreter stub for calling a native method. 
(asm interpreter) // This sets up a somewhat different looking stack for calling the @@ -1510,15 +1621,16 @@ // determine code generation flags bool synchronized = false; address entry_point = NULL; + InterpreterGenerator* ig_this = (InterpreterGenerator*)this; switch (kind) { - case Interpreter::zerolocals : break; - case Interpreter::zerolocals_synchronized: synchronized = true; break; - case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break; - case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break; - case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break; - case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break; - case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break; + case Interpreter::zerolocals : break; + case Interpreter::zerolocals_synchronized: synchronized = true; break; + case Interpreter::native : entry_point = ig_this->generate_native_entry(false); break; + case Interpreter::native_synchronized : entry_point = ig_this->generate_native_entry(true); break; + case Interpreter::empty : entry_point = ig_this->generate_empty_entry(); break; + case Interpreter::accessor : entry_point = ig_this->generate_accessor_entry(); break; + case Interpreter::abstract : entry_point = ig_this->generate_abstract_entry(); break; case Interpreter::java_lang_math_sin : // fall thru case Interpreter::java_lang_math_cos : // fall thru @@ -1528,9 +1640,15 @@ case Interpreter::java_lang_math_log10 : // fall thru case Interpreter::java_lang_math_sqrt : // fall thru case Interpreter::java_lang_math_pow : // fall thru - case Interpreter::java_lang_math_exp : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break; + case Interpreter::java_lang_math_exp : entry_point = ig_this->generate_math_entry(kind); break; case Interpreter::java_lang_ref_reference_get - : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break; + : entry_point = ig_this->generate_Reference_get_entry(); break; + case Interpreter::java_util_zip_CRC32_update + : entry_point = ig_this->generate_CRC32_update_entry(); break; + case Interpreter::java_util_zip_CRC32_updateBytes + : // fall thru + case Interpreter::java_util_zip_CRC32_updateByteBuffer + : entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break; default: fatal(err_msg("unexpected method kind: %d", kind)); break; @@ -1540,8 +1658,7 @@ return entry_point; } - return ((InterpreterGenerator*) this)-> - generate_normal_entry(synchronized); + return ig_this->generate_normal_entry(synchronized); } // These should never be compiled since the interpreter will prefer diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/vm_version_x86.cpp --- a/src/cpu/x86/vm/vm_version_x86.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/x86/vm/vm_version_x86.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -446,6 +446,7 @@ (supports_avx() ? ", avx" : ""), (supports_avx2() ? ", avx2" : ""), (supports_aes() ? ", aes" : ""), + (supports_clmul() ? ", clmul" : ""), (supports_erms() ? 
", erms" : ""), (supports_mmx_ext() ? ", mmxext" : ""), (supports_3dnow_prefetch() ? ", 3dnowpref" : ""), @@ -489,6 +490,27 @@ FLAG_SET_DEFAULT(UseAES, false); } + // Use CLMUL instructions if available. + if (supports_clmul()) { + if (FLAG_IS_DEFAULT(UseCLMUL)) { + UseCLMUL = true; + } + } else if (UseCLMUL) { + if (!FLAG_IS_DEFAULT(UseCLMUL)) + warning("CLMUL instructions not available on this CPU (AVX may also be required)"); + FLAG_SET_DEFAULT(UseCLMUL, false); + } + + if (UseCLMUL && (UseAVX > 0) && (UseSSE > 2)) { + if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) { + UseCRC32Intrinsics = true; + } + } else if (UseCRC32Intrinsics) { + if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics)) + warning("CRC32 Intrinsics requires AVX and CLMUL instructions (not available on this CPU)"); + FLAG_SET_DEFAULT(UseCRC32Intrinsics, false); + } + // The AES intrinsic stubs require AES instruction support (of course) // but also require sse3 mode for instructions it use. if (UseAES && (UseSSE > 2)) { diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/x86/vm/vm_version_x86.hpp --- a/src/cpu/x86/vm/vm_version_x86.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/x86/vm/vm_version_x86.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,7 +61,8 @@ uint32_t value; struct { uint32_t sse3 : 1, - : 2, + clmul : 1, + : 1, monitor : 1, : 1, vmx : 1, @@ -249,7 +250,8 @@ CPU_AVX = (1 << 17), CPU_AVX2 = (1 << 18), CPU_AES = (1 << 19), - CPU_ERMS = (1 << 20) // enhanced 'rep movsb/stosb' instructions + CPU_ERMS = (1 << 20), // enhanced 'rep movsb/stosb' instructions + CPU_CLMUL = (1 << 21) // carryless multiply for CRC } cpuFeatureFlags; enum { @@ -429,6 +431,8 @@ result |= CPU_AES; if (_cpuid_info.sef_cpuid7_ebx.bits.erms != 0) result |= CPU_ERMS; + if (_cpuid_info.std_cpuid1_ecx.bits.clmul != 0) + result |= CPU_CLMUL; // AMD features. 
if (is_amd()) { @@ -555,6 +559,7 @@ static bool supports_tsc() { return (_cpuFeatures & CPU_TSC) != 0; } static bool supports_aes() { return (_cpuFeatures & CPU_AES) != 0; } static bool supports_erms() { return (_cpuFeatures & CPU_ERMS) != 0; } + static bool supports_clmul() { return (_cpuFeatures & CPU_CLMUL) != 0; } // Intel features static bool is_intel_family_core() { return is_intel() && diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/zero/vm/relocInfo_zero.cpp --- a/src/cpu/zero/vm/relocInfo_zero.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/zero/vm/relocInfo_zero.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -52,22 +52,6 @@ return (address *) addr(); } -int Relocation::pd_breakpoint_size() { - ShouldNotCallThis(); -} - -void Relocation::pd_swap_in_breakpoint(address x, - short* instrs, - int instrlen) { - ShouldNotCallThis(); -} - -void Relocation::pd_swap_out_breakpoint(address x, - short* instrs, - int instrlen) { - ShouldNotCallThis(); -} - void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dst) { ShouldNotCallThis(); diff -r 16b10327b00d -r 90d6c221d4e5 src/cpu/zero/vm/shark_globals_zero.hpp --- a/src/cpu/zero/vm/shark_globals_zero.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/cpu/zero/vm/shark_globals_zero.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -58,7 +58,9 @@ define_pd_global(bool, ProfileInterpreter, false); define_pd_global(intx, CodeCacheExpansionSize, 32*K ); define_pd_global(uintx, CodeCacheMinBlockLength, 1 ); -define_pd_global(uintx, MetaspaceSize, 12*M ); +define_pd_global(uintx, CodeCacheMinimumUseSpace, 200*K); + +define_pd_global(uintx, MetaspaceSize, 12*M ); define_pd_global(bool, NeverActAsServerClassMachine, true ); define_pd_global(uint64_t, MaxRAM, 1ULL*G); define_pd_global(bool, CICompileOSR, true ); diff -r 16b10327b00d -r 90d6c221d4e5 src/os/bsd/dtrace/jvm_dtrace.c --- a/src/os/bsd/dtrace/jvm_dtrace.c Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os/bsd/dtrace/jvm_dtrace.c Tue Jul 16 12:20:08 2013 -0400 @@ -122,9 +122,7 @@ } static int file_close(int fd) { - int ret; - RESTARTABLE(close(fd), ret); - return ret; + return close(fd); } static int file_read(int fd, char* buf, int len) { diff -r 16b10327b00d -r 90d6c221d4e5 src/os/bsd/vm/attachListener_bsd.cpp --- a/src/os/bsd/vm/attachListener_bsd.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os/bsd/vm/attachListener_bsd.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -199,7 +199,7 @@ ::unlink(initial_path); int res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr)); if (res == -1) { - RESTARTABLE(::close(listener), res); + ::close(listener); return -1; } @@ -217,7 +217,7 @@ } } if (res == -1) { - RESTARTABLE(::close(listener), res); + ::close(listener); ::unlink(initial_path); return -1; } @@ -345,24 +345,21 @@ uid_t puid; gid_t pgid; if (::getpeereid(s, &puid, &pgid) != 0) { - int res; - RESTARTABLE(::close(s), res); + ::close(s); continue; } uid_t euid = geteuid(); gid_t egid = getegid(); if (puid != euid || pgid != egid) { - int res; - RESTARTABLE(::close(s), res); + ::close(s); continue; } // peer credential look okay so we read the request BsdAttachOperation* op = read_request(s); if (op == NULL) { - int res; - RESTARTABLE(::close(s), res); + ::close(s); continue; } else { return op; @@ -413,7 +410,7 @@ } // done - RESTARTABLE(::close(this->socket()), rc); + ::close(this->socket()); // were we externally suspended while we were waiting? 
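The RESTARTABLE removals around ::close() in this file and the ones below encode a POSIX subtlety rather than a mere simplification; a sketch of the reasoning, assuming Linux/BSD semantics where the descriptor is released even when close() fails with EINTR:

    #include <unistd.h>
    #include <cerrno>

    // Why close() must not be retried on EINTR: the kernel has already
    // released the fd, so looping races with other threads:
    //
    //   T1: close(fd)  -> signal arrives, returns -1/EINTR, fd already freed
    //   T2: open(...)  -> kernel reuses the same numeric fd
    //   T1: close(fd)  -> the retry silently closes T2's file
    //
    // Correct pattern: call close() once and treat EINTR as done.
    int close_once(int fd) {
      int res = ::close(fd);
      if (res == -1 && errno == EINTR) {
        res = 0;  // fd is gone; do NOT retry
      }
      return res;
    }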
thread->check_and_wait_while_suspended(); diff -r 16b10327b00d -r 90d6c221d4e5 src/os/bsd/vm/osThread_bsd.hpp --- a/src/os/bsd/vm/osThread_bsd.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os/bsd/vm/osThread_bsd.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -94,7 +94,7 @@ // flags that support signal based suspend/resume on Bsd are in a // separate class to avoid confusion with many flags in OSThread that // are used by VM level suspend/resume. - os::Bsd::SuspendResume sr; + os::SuspendResume sr; // _ucontext and _siginfo are used by SR_handler() to save thread context, // and they will later be used to walk the stack or reposition thread PC. diff -r 16b10327b00d -r 90d6c221d4e5 src/os/bsd/vm/os_bsd.cpp --- a/src/os/bsd/vm/os_bsd.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os/bsd/vm/os_bsd.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1234,12 +1234,13 @@ Dl_info dlinfo; if (libjvm_base_addr == NULL) { - dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo); - libjvm_base_addr = (address)dlinfo.dli_fbase; + if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) { + libjvm_base_addr = (address)dlinfo.dli_fbase; + } assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm"); } - if (dladdr((void *)addr, &dlinfo)) { + if (dladdr((void *)addr, &dlinfo) != 0) { if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true; } @@ -1251,35 +1252,40 @@ bool os::dll_address_to_function_name(address addr, char *buf, int buflen, int *offset) { + // buf is not optional, but offset is optional + assert(buf != NULL, "sanity check"); + Dl_info dlinfo; char localbuf[MACH_MAXSYMLEN]; - // dladdr will find names of dynamic functions only, but does - // it set dli_fbase with mach_header address when it "fails" ? 
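The dladdr() changes that follow all enforce the same two rules: dladdr() returns zero on failure and nonzero on success (the opposite of most libc calls), and even a successful call may leave individual Dl_info fields NULL. The checking pattern the patch converges on, as a standalone sketch (on glibc, compile with -D_GNU_SOURCE):

    #include <dlfcn.h>
    #include <cstdio>

    // Look up the symbol containing addr; returns false if dladdr() fails
    // or no symbol covers the address.
    bool describe_address(void* addr) {
      Dl_info info;
      if (dladdr(addr, &info) == 0) {
        return false;                 // lookup failed outright
      }
      if (info.dli_sname == NULL || info.dli_saddr == NULL) {
        return false;                 // library found, but no symbol
      }
      std::printf("%s+%#lx in %s\n", info.dli_sname,
                  (unsigned long)((char*)addr - (char*)info.dli_saddr),
                  info.dli_fname != NULL ? info.dli_fname : "?");
      return true;
    }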
- if (dladdr((void*)addr, &dlinfo) && dlinfo.dli_sname != NULL) { - if (buf != NULL) { - if(!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) { + if (dladdr((void*)addr, &dlinfo) != 0) { + // see if we have a matching symbol + if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) { + if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) { jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname); } + if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr; + return true; + } + // no matching symbol so try for just file info + if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) { + if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase), + buf, buflen, offset, dlinfo.dli_fname)) { + return true; + } } - if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr; - return true; - } else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) { - if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase), - buf, buflen, offset, dlinfo.dli_fname)) { - return true; + + // Handle non-dynamic manually: + if (dlinfo.dli_fbase != NULL && + Decoder::decode(addr, localbuf, MACH_MAXSYMLEN, offset, + dlinfo.dli_fbase)) { + if (!Decoder::demangle(localbuf, buf, buflen)) { + jio_snprintf(buf, buflen, "%s", localbuf); + } + return true; } } - - // Handle non-dymanic manually: - if (dlinfo.dli_fbase != NULL && - Decoder::decode(addr, localbuf, MACH_MAXSYMLEN, offset, dlinfo.dli_fbase)) { - if(!Decoder::demangle(localbuf, buf, buflen)) { - jio_snprintf(buf, buflen, "%s", localbuf); - } - return true; - } - if (buf != NULL) buf[0] = '\0'; + buf[0] = '\0'; if (offset != NULL) *offset = -1; return false; } @@ -1287,17 +1293,24 @@ // ported from solaris version bool os::dll_address_to_library_name(address addr, char* buf, int buflen, int* offset) { + // buf is not optional, but offset is optional + assert(buf != NULL, "sanity check"); + Dl_info dlinfo; - if (dladdr((void*)addr, &dlinfo)){ - if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname); - if (offset) *offset = addr - (address)dlinfo.dli_fbase; - return true; - } else { - if (buf) buf[0] = '\0'; - if (offset) *offset = -1; - return false; + if (dladdr((void*)addr, &dlinfo) != 0) { + if (dlinfo.dli_fname != NULL) { + jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname); + } + if (dlinfo.dli_fbase != NULL && offset != NULL) { + *offset = addr - (address)dlinfo.dli_fbase; + } + return true; } + + buf[0] = '\0'; + if (offset) *offset = -1; + return false; } // Loads .dll/.so and @@ -1520,49 +1533,50 @@ } void os::print_dll_info(outputStream *st) { - st->print_cr("Dynamic libraries:"); + st->print_cr("Dynamic libraries:"); #ifdef RTLD_DI_LINKMAP - Dl_info dli; - void *handle; - Link_map *map; - Link_map *p; - - if (!dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli)) { - st->print_cr("Error: Cannot print dynamic libraries."); - return; - } - handle = dlopen(dli.dli_fname, RTLD_LAZY); - if (handle == NULL) { - st->print_cr("Error: Cannot print dynamic libraries."); - return; - } - dlinfo(handle, RTLD_DI_LINKMAP, &map); - if (map == NULL) { - st->print_cr("Error: Cannot print dynamic libraries."); - return; - } - - while (map->l_prev != NULL) - map = map->l_prev; - - while (map != NULL) { - st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name); - map = map->l_next; - } - - dlclose(handle); + Dl_info dli; + void *handle; + Link_map *map; + Link_map *p; + + if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 || + dli.dli_fname == NULL) { + st->print_cr("Error: Cannot print dynamic libraries."); + return; + 
} + handle = dlopen(dli.dli_fname, RTLD_LAZY); + if (handle == NULL) { + st->print_cr("Error: Cannot print dynamic libraries."); + return; + } + dlinfo(handle, RTLD_DI_LINKMAP, &map); + if (map == NULL) { + st->print_cr("Error: Cannot print dynamic libraries."); + return; + } + + while (map->l_prev != NULL) + map = map->l_prev; + + while (map != NULL) { + st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name); + map = map->l_next; + } + + dlclose(handle); #elif defined(__APPLE__) - uint32_t count; - uint32_t i; - - count = _dyld_image_count(); - for (i = 1; i < count; i++) { - const char *name = _dyld_get_image_name(i); - intptr_t slide = _dyld_get_image_vmaddr_slide(i); - st->print_cr(PTR_FORMAT " \t%s", slide, name); - } + uint32_t count; + uint32_t i; + + count = _dyld_image_count(); + for (i = 1; i < count; i++) { + const char *name = _dyld_get_image_name(i); + intptr_t slide = _dyld_get_image_vmaddr_slide(i); + st->print_cr(PTR_FORMAT " \t%s", slide, name); + } #else - st->print_cr("Error: Cannot print dynamic libraries."); + st->print_cr("Error: Cannot print dynamic libraries."); #endif } @@ -1707,8 +1721,11 @@ bool ret = dll_address_to_library_name( CAST_FROM_FN_PTR(address, os::jvm_path), dli_fname, sizeof(dli_fname), NULL); - assert(ret != 0, "cannot locate libjvm"); - char *rp = realpath(dli_fname, buf); + assert(ret, "cannot locate libjvm"); + char *rp = NULL; + if (ret && dli_fname[0] != '\0') { + rp = realpath(dli_fname, buf); + } if (rp == NULL) return; @@ -1852,17 +1869,118 @@ // Bsd(POSIX) specific hand shaking semaphore. #ifdef __APPLE__ -static semaphore_t sig_sem; +typedef semaphore_t os_semaphore_t; #define SEM_INIT(sem, value) semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, value) -#define SEM_WAIT(sem) semaphore_wait(sem); -#define SEM_POST(sem) semaphore_signal(sem); +#define SEM_WAIT(sem) semaphore_wait(sem) +#define SEM_POST(sem) semaphore_signal(sem) +#define SEM_DESTROY(sem) semaphore_destroy(mach_task_self(), sem) #else -static sem_t sig_sem; +typedef sem_t os_semaphore_t; #define SEM_INIT(sem, value) sem_init(&sem, 0, value) -#define SEM_WAIT(sem) sem_wait(&sem); -#define SEM_POST(sem) sem_post(&sem); +#define SEM_WAIT(sem) sem_wait(&sem) +#define SEM_POST(sem) sem_post(&sem) +#define SEM_DESTROY(sem) sem_destroy(&sem) #endif +class Semaphore : public StackObj { + public: + Semaphore(); + ~Semaphore(); + void signal(); + void wait(); + bool trywait(); + bool timedwait(unsigned int sec, int nsec); + private: + jlong currenttime() const; + semaphore_t _semaphore; +}; + +Semaphore::Semaphore() : _semaphore(0) { + SEM_INIT(_semaphore, 0); +} + +Semaphore::~Semaphore() { + SEM_DESTROY(_semaphore); +} + +void Semaphore::signal() { + SEM_POST(_semaphore); +} + +void Semaphore::wait() { + SEM_WAIT(_semaphore); +} + +jlong Semaphore::currenttime() const { + struct timeval tv; + gettimeofday(&tv, NULL); + return (tv.tv_sec * NANOSECS_PER_SEC) + (tv.tv_usec * 1000); +} + +#ifdef __APPLE__ +bool Semaphore::trywait() { + return timedwait(0, 0); +} + +bool Semaphore::timedwait(unsigned int sec, int nsec) { + kern_return_t kr = KERN_ABORTED; + mach_timespec_t waitspec; + waitspec.tv_sec = sec; + waitspec.tv_nsec = nsec; + + jlong starttime = currenttime(); + + kr = semaphore_timedwait(_semaphore, waitspec); + while (kr == KERN_ABORTED) { + jlong totalwait = (sec * NANOSECS_PER_SEC) + nsec; + + jlong current = currenttime(); + jlong passedtime = current - starttime; + + if (passedtime >= totalwait) { + waitspec.tv_sec = 0; + waitspec.tv_nsec = 0; + } else { + 
jlong waittime = totalwait - (current - starttime); + waitspec.tv_sec = waittime / NANOSECS_PER_SEC; + waitspec.tv_nsec = waittime % NANOSECS_PER_SEC; + } + + kr = semaphore_timedwait(_semaphore, waitspec); + } + + return kr == KERN_SUCCESS; +} + +#else + +bool Semaphore::trywait() { + return sem_trywait(&_semaphore) == 0; +} + +bool Semaphore::timedwait(unsigned int sec, int nsec) { + struct timespec ts; + jlong endtime = unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec); + + while (1) { + int result = sem_timedwait(&_semaphore, &ts); + if (result == 0) { + return true; + } else if (errno == EINTR) { + continue; + } else if (errno == ETIMEDOUT) { + return false; + } else { + return false; + } + } +} + +#endif // __APPLE__ + +static os_semaphore_t sig_sem; +static Semaphore sr_semaphore; + void os::signal_init_pd() { // Initialize signal structures ::memset((void*)pending_signals, 0, sizeof(pending_signals)); @@ -1973,6 +2091,13 @@ } } +static void warn_fail_commit_memory(char* addr, size_t size, bool exec, + int err) { + warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT + ", %d) failed; error='%s' (errno=%d)", addr, size, exec, + strerror(err), err); +} + // NOTE: Bsd kernel does not really reserve the pages for us. // All it does is to check if there are enough free pages // left at the time of mmap(). This could be a potential @@ -1981,18 +2106,45 @@ int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; #ifdef __OpenBSD__ // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD - return ::mprotect(addr, size, prot) == 0; + if (::mprotect(addr, size, prot) == 0) { + return true; + } #else uintptr_t res = (uintptr_t) ::mmap(addr, size, prot, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0); - return res != (uintptr_t) MAP_FAILED; + if (res != (uintptr_t) MAP_FAILED) { + return true; + } #endif + + // Warn about any commit errors we see in non-product builds just + // in case mmap() doesn't work as described on the man page. 
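A sketch of what pd_commit_memory does underneath on the non-OpenBSD path: committing re-maps the reserved range with real backing, so a failure means the kernel refused backing store, which the new _or_exit variants then convert into a clean native-OOM exit. The helper below is illustrative, not the HotSpot function (and some BSDs spell MAP_ANONYMOUS as MAP_ANON):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdio>
    #include <cstring>
    #include <cerrno>

    // Commit a previously reserved range by mapping anonymous memory over it.
    bool commit_over_reservation(char* addr, size_t size, bool exec) {
      int prot = exec ? PROT_READ | PROT_WRITE | PROT_EXEC
                      : PROT_READ | PROT_WRITE;
      void* res = mmap(addr, size, prot,
                       MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
      if (res != MAP_FAILED) return true;
      // mirror warn_fail_commit_memory(): keep the errno-derived detail
      std::fprintf(stderr, "commit(%p, %zu, %d) failed: %s (errno=%d)\n",
                   (void*)addr, size, (int)exec, std::strerror(errno), errno);
      return false;
    }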
+ NOT_PRODUCT(warn_fail_commit_memory(addr, size, exec, errno);) + + return false; } - bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) { - return commit_memory(addr, size, exec); + // alignment_hint is ignored on this OS + return pd_commit_memory(addr, size, exec); +} + +void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec, + const char* mesg) { + assert(mesg != NULL, "mesg must be specified"); + if (!pd_commit_memory(addr, size, exec)) { + // add extra info in product mode for vm_exit_out_of_memory(): + PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);) + vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg); + } +} + +void os::pd_commit_memory_or_exit(char* addr, size_t size, + size_t alignment_hint, bool exec, + const char* mesg) { + // alignment_hint is ignored on this OS + pd_commit_memory_or_exit(addr, size, exec, mesg); } void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { @@ -2047,7 +2199,7 @@ } bool os::pd_create_stack_guard_pages(char* addr, size_t size) { - return os::commit_memory(addr, size); + return os::commit_memory(addr, size, !ExecMem); } // If this is a growable mapping, remove the guard pages entirely by @@ -2219,21 +2371,20 @@ } // The memory is committed - address pc = CALLER_PC; - MemTracker::record_virtual_memory_reserve((address)addr, bytes, pc); - MemTracker::record_virtual_memory_commit((address)addr, bytes, pc); + MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC); return addr; } bool os::release_memory_special(char* base, size_t bytes) { + MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); // detaching the SHM segment will also delete it, see reserve_memory_special() int rslt = shmdt(base); if (rslt == 0) { - MemTracker::record_virtual_memory_uncommit((address)base, bytes); - MemTracker::record_virtual_memory_release((address)base, bytes); + tkr.record((address)base, bytes); return true; } else { + tkr.discard(); return false; } @@ -2616,9 +2767,6 @@ static void resume_clear_context(OSThread *osthread) { osthread->set_ucontext(NULL); osthread->set_siginfo(NULL); - - // notify the suspend action is completed, we have now resumed - osthread->sr.clear_suspended(); } static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) { @@ -2638,7 +2786,7 @@ // its signal handlers run and prevents sigwait()'s use with the // mutex granting granting signal. // -// Currently only ever called on the VMThread +// Currently only ever called on the VMThread or JavaThread // static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) { // Save and restore errno to avoid confusing native code with EINTR @@ -2647,38 +2795,48 @@ Thread* thread = Thread::current(); OSThread* osthread = thread->osthread(); - assert(thread->is_VM_thread(), "Must be VMThread"); - // read current suspend action - int action = osthread->sr.suspend_action(); - if (action == os::Bsd::SuspendResume::SR_SUSPEND) { + assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread"); + + os::SuspendResume::State current = osthread->sr.state(); + if (current == os::SuspendResume::SR_SUSPEND_REQUEST) { suspend_save_context(osthread, siginfo, context); - // Notify the suspend action is about to be completed. do_suspend() - // waits until SR_SUSPENDED is set and then returns. We will wait - // here for a resume signal and that completes the suspend-other - // action. 
do_suspend/do_resume is always called as a pair from - // the same thread - so there are no races - - // notify the caller - osthread->sr.set_suspended(); - - sigset_t suspend_set; // signals for sigsuspend() - - // get current set of blocked signals and unblock resume signal - pthread_sigmask(SIG_BLOCK, NULL, &suspend_set); - sigdelset(&suspend_set, SR_signum); - - // wait here until we are resumed - do { - sigsuspend(&suspend_set); - // ignore all returns until we get a resume signal - } while (osthread->sr.suspend_action() != os::Bsd::SuspendResume::SR_CONTINUE); + // attempt to switch the state, we assume we had a SUSPEND_REQUEST + os::SuspendResume::State state = osthread->sr.suspended(); + if (state == os::SuspendResume::SR_SUSPENDED) { + sigset_t suspend_set; // signals for sigsuspend() + + // get current set of blocked signals and unblock resume signal + pthread_sigmask(SIG_BLOCK, NULL, &suspend_set); + sigdelset(&suspend_set, SR_signum); + + sr_semaphore.signal(); + // wait here until we are resumed + while (1) { + sigsuspend(&suspend_set); + + os::SuspendResume::State result = osthread->sr.running(); + if (result == os::SuspendResume::SR_RUNNING) { + sr_semaphore.signal(); + break; + } else if (result != os::SuspendResume::SR_SUSPENDED) { + ShouldNotReachHere(); + } + } + + } else if (state == os::SuspendResume::SR_RUNNING) { + // request was cancelled, continue + } else { + ShouldNotReachHere(); + } resume_clear_context(osthread); - + } else if (current == os::SuspendResume::SR_RUNNING) { + // request was cancelled, continue + } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) { + // ignore } else { - assert(action == os::Bsd::SuspendResume::SR_CONTINUE, "unexpected sr action"); - // nothing special to do - just leave the handler + // ignore } errno = old_errno; @@ -2722,42 +2880,82 @@ return 0; } +static int sr_notify(OSThread* osthread) { + int status = pthread_kill(osthread->pthread_id(), SR_signum); + assert_status(status == 0, status, "pthread_kill"); + return status; +} + +// "Randomly" selected value for how long we want to spin +// before bailing out on suspending a thread, also how often +// we send a signal to a thread we want to resume +static const int RANDOMLY_LARGE_INTEGER = 1000000; +static const int RANDOMLY_LARGE_INTEGER2 = 100; // returns true on success and false on error - really an error is fatal // but this seems the normal response to library errors static bool do_suspend(OSThread* osthread) { + assert(osthread->sr.is_running(), "thread should be running"); + assert(!sr_semaphore.trywait(), "semaphore has invalid state"); + // mark as suspended and send signal - osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_SUSPEND); - int status = pthread_kill(osthread->pthread_id(), SR_signum); - assert_status(status == 0, status, "pthread_kill"); - - // check status and wait until notified of suspension - if (status == 0) { - for (int i = 0; !osthread->sr.is_suspended(); i++) { - os::yield_all(i); - } - osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_NONE); - return true; - } - else { - osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_NONE); + if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) { + // failed to switch, state wasn't running? 
+ ShouldNotReachHere(); return false; } + + if (sr_notify(osthread) != 0) { + ShouldNotReachHere(); + } + + // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED + while (true) { + if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) { + break; + } else { + // timeout + os::SuspendResume::State cancelled = osthread->sr.cancel_suspend(); + if (cancelled == os::SuspendResume::SR_RUNNING) { + return false; + } else if (cancelled == os::SuspendResume::SR_SUSPENDED) { + // make sure that we consume the signal on the semaphore as well + sr_semaphore.wait(); + break; + } else { + ShouldNotReachHere(); + return false; + } + } + } + + guarantee(osthread->sr.is_suspended(), "Must be suspended"); + return true; } static void do_resume(OSThread* osthread) { assert(osthread->sr.is_suspended(), "thread should be suspended"); - osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_CONTINUE); - - int status = pthread_kill(osthread->pthread_id(), SR_signum); - assert_status(status == 0, status, "pthread_kill"); - // check status and wait unit notified of resumption - if (status == 0) { - for (int i = 0; osthread->sr.is_suspended(); i++) { - os::yield_all(i); + assert(!sr_semaphore.trywait(), "invalid semaphore state"); + + if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) { + // failed to switch to WAKEUP_REQUEST + ShouldNotReachHere(); + return; + } + + while (true) { + if (sr_notify(osthread) == 0) { + if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) { + if (osthread->sr.is_running()) { + return; + } + } + } else { + ShouldNotReachHere(); } } - osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_NONE); + + guarantee(osthread->sr.is_running(), "Must be running!"); } //////////////////////////////////////////////////////////////////////////////// @@ -3364,7 +3562,7 @@ if (!UseMembar) { address mem_serialize_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); - guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page"); + guarantee( mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page"); os::set_memory_serialize_page( mem_serialize_page ); #ifndef PRODUCT @@ -3508,7 +3706,40 @@ return false; } +void os::SuspendedThreadTask::internal_do_task() { + if (do_suspend(_thread->osthread())) { + SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext()); + do_task(context); + do_resume(_thread->osthread()); + } +} + /// +class PcFetcher : public os::SuspendedThreadTask { +public: + PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {} + ExtendedPC result(); +protected: + void do_task(const os::SuspendedThreadTaskContext& context); +private: + ExtendedPC _epc; +}; + +ExtendedPC PcFetcher::result() { + guarantee(is_done(), "task is not done yet."); + return _epc; +} + +void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) { + Thread* thread = context.thread(); + OSThread* osthread = thread->osthread(); + if (osthread->ucontext() != NULL) { + _epc = os::Bsd::ucontext_get_pc((ucontext_t *) context.ucontext()); + } else { + // NULL context is unexpected, double-check this is the VMThread + guarantee(thread->is_VM_thread(), "can only be called for VMThread"); + } +} // Suspends the target using the signal mechanism and then grabs the PC before // resuming the target. 
Used by the flat-profiler only @@ -3517,22 +3748,9 @@ assert(Thread::current()->is_Watcher_thread(), "Must be watcher"); assert(thread->is_VM_thread(), "Can only be called for VMThread"); - ExtendedPC epc; - - OSThread* osthread = thread->osthread(); - if (do_suspend(osthread)) { - if (osthread->ucontext() != NULL) { - epc = os::Bsd::ucontext_get_pc(osthread->ucontext()); - } else { - // NULL context is unexpected, double-check this is the VMThread - guarantee(thread->is_VM_thread(), "can only be called for VMThread"); - } - do_resume(osthread); - } - // failure means pthread_kill failed for some reason - arguably this is - // a fatal problem, but such problems are ignored elsewhere - - return epc; + PcFetcher fetcher(thread); + fetcher.run(); + return fetcher.result(); } int os::Bsd::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) @@ -3546,20 +3764,20 @@ bool os::find(address addr, outputStream* st) { Dl_info dlinfo; memset(&dlinfo, 0, sizeof(dlinfo)); - if (dladdr(addr, &dlinfo)) { + if (dladdr(addr, &dlinfo) != 0) { st->print(PTR_FORMAT ": ", addr); - if (dlinfo.dli_sname != NULL) { + if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) { st->print("%s+%#x", dlinfo.dli_sname, addr - (intptr_t)dlinfo.dli_saddr); - } else if (dlinfo.dli_fname) { + } else if (dlinfo.dli_fbase != NULL) { st->print("", addr - (intptr_t)dlinfo.dli_fbase); } else { st->print(""); } - if (dlinfo.dli_fname) { + if (dlinfo.dli_fname != NULL) { st->print(" in %s", dlinfo.dli_fname); } - if (dlinfo.dli_fbase) { + if (dlinfo.dli_fbase != NULL) { st->print(" at " PTR_FORMAT, dlinfo.dli_fbase); } st->cr(); @@ -3572,7 +3790,7 @@ if (!lowest) lowest = (address) dlinfo.dli_fbase; if (begin < lowest) begin = lowest; Dl_info dlinfo2; - if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr + if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) end = (address) dlinfo2.dli_saddr; Disassembler::decode(begin, end, st); @@ -4517,3 +4735,4 @@ return n; } + diff -r 16b10327b00d -r 90d6c221d4e5 src/os/bsd/vm/os_bsd.hpp --- a/src/os/bsd/vm/os_bsd.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os/bsd/vm/os_bsd.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -145,36 +145,6 @@ // BsdThreads work-around for 6292965 static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime); - - // Bsd suspend/resume support - this helper is a shadow of its former - // self now that low-level suspension is barely used, and old workarounds - // for BsdThreads are no longer needed. 
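For orientation, the os::SuspendResume class that replaces the one removed below is a small CAS-driven state machine; a compilable sketch of its transitions follows, where the state and method names match the patch but std::atomic stands in for HotSpot's Atomic::cmpxchg:

    #include <atomic>

    class SuspendResume {
     public:
      enum State {
        SR_RUNNING, SR_SUSPEND_REQUEST, SR_SUSPENDED, SR_WAKEUP_REQUEST
      };

      State state() const { return _state.load(); }

      // do_suspend():          RUNNING -> SUSPEND_REQUEST
      State request_suspend() { return switch_state(SR_RUNNING, SR_SUSPEND_REQUEST); }
      // SR_handler():          SUSPEND_REQUEST -> SUSPENDED
      State suspended()       { return switch_state(SR_SUSPEND_REQUEST, SR_SUSPENDED); }
      // do_suspend() timeout:  SUSPEND_REQUEST -> RUNNING (cancel)
      State cancel_suspend()  { return switch_state(SR_SUSPEND_REQUEST, SR_RUNNING); }
      // do_resume():           SUSPENDED -> WAKEUP_REQUEST
      State request_wakeup()  { return switch_state(SR_SUSPENDED, SR_WAKEUP_REQUEST); }
      // SR_handler() wakeup:   WAKEUP_REQUEST -> RUNNING
      State running()         { return switch_state(SR_WAKEUP_REQUEST, SR_RUNNING); }

      bool is_running()   const { return state() == SR_RUNNING; }
      bool is_suspended() const { return state() == SR_SUSPENDED; }

     private:
      // Attempt from->to; return the state actually reached or observed, so
      // callers can tell a successful transition from a lost race, which is
      // exactly how do_suspend()/SR_handler() above detect cancellation.
      State switch_state(State from, State to) {
        State expected = from;
        if (_state.compare_exchange_strong(expected, to)) return to;
        return expected;
      }

      std::atomic<State> _state{SR_RUNNING};
    };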
- class SuspendResume { - private: - volatile int _suspend_action; - volatile jint _state; - public: - // values for suspend_action: - enum { - SR_NONE = 0x00, - SR_SUSPEND = 0x01, // suspend request - SR_CONTINUE = 0x02, // resume request - SR_SUSPENDED = 0x20 // values for _state: + SR_NONE - }; - - SuspendResume() { _suspend_action = SR_NONE; _state = SR_NONE; } - - int suspend_action() const { return _suspend_action; } - void set_suspend_action(int x) { _suspend_action = x; } - - // atomic updates for _state - inline void set_suspended(); - inline void clear_suspended(); - bool is_suspended() { return _state & SR_SUSPENDED; } - - #undef SR_SUSPENDED - }; - private: typedef int (*sched_getcpu_func_t)(void); typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen); @@ -250,7 +220,7 @@ int TryPark () ; int park (jlong millis) ; void SetAssociation (Thread * a) { _Assoc = a ; } -} ; +}; class PlatformParker : public CHeapObj { protected: @@ -268,6 +238,6 @@ status = pthread_mutex_init (_mutex, NULL); assert_status(status == 0, status, "mutex_init"); } -} ; +}; #endif // OS_BSD_VM_OS_BSD_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/os/bsd/vm/os_bsd.inline.hpp --- a/src/os/bsd/vm/os_bsd.inline.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os/bsd/vm/os_bsd.inline.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -178,11 +178,11 @@ } inline int os::close(int fd) { - RESTARTABLE_RETURN_INT(::close(fd)); + return ::close(fd); } inline int os::socket_close(int fd) { - RESTARTABLE_RETURN_INT(::close(fd)); + return ::close(fd); } inline int os::socket(int domain, int type, int protocol) { @@ -286,20 +286,4 @@ return ::setsockopt(fd, level, optname, optval, optlen); } -inline void os::Bsd::SuspendResume::set_suspended() { - jint temp, temp2; - do { - temp = _state; - temp2 = Atomic::cmpxchg(temp | SR_SUSPENDED, &_state, temp); - } while (temp2 != temp); -} - -inline void os::Bsd::SuspendResume::clear_suspended() { - jint temp, temp2; - do { - temp = _state; - temp2 = Atomic::cmpxchg(temp & ~SR_SUSPENDED, &_state, temp); - } while (temp2 != temp); -} - #endif // OS_BSD_VM_OS_BSD_INLINE_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/os/bsd/vm/perfMemory_bsd.cpp --- a/src/os/bsd/vm/perfMemory_bsd.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os/bsd/vm/perfMemory_bsd.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
diff -r 16b10327b00d -r 90d6c221d4e5 src/os/bsd/vm/perfMemory_bsd.cpp
--- a/src/os/bsd/vm/perfMemory_bsd.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/os/bsd/vm/perfMemory_bsd.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@
   }
 
   // commit memory
-  if (!os::commit_memory(mapAddress, size)) {
+  if (!os::commit_memory(mapAddress, size, !ExecMem)) {
     if (PrintMiscellaneous && Verbose) {
       warning("Could not commit PerfData memory\n");
     }
@@ -120,7 +120,7 @@
     addr += result;
   }
 
-  RESTARTABLE(::close(fd), result);
+  result = ::close(fd);
   if (PrintMiscellaneous && Verbose) {
     if (result == OS_ERR) {
       warning("Could not close %s: %s\n", destfile, strerror(errno));
@@ -632,7 +632,7 @@
     if (PrintMiscellaneous && Verbose) {
       warning("could not set shared memory file size: %s\n", strerror(errno));
     }
-    RESTARTABLE(::close(fd), result);
+    ::close(fd);
     return -1;
   }
@@ -656,7 +656,7 @@
   if (result != -1) {
     return fd;
   } else {
-    RESTARTABLE(::close(fd), result);
+    ::close(fd);
     return -1;
   }
 }
@@ -734,9 +734,7 @@
   mapAddress = (char*)::mmap((char*)0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
 
-  // attempt to close the file - restart it if it was interrupted,
-  // but ignore other failures
-  RESTARTABLE(::close(fd), result);
+  result = ::close(fd);
   assert(result != OS_ERR, "could not close file");
 
   if (mapAddress == MAP_FAILED) {
@@ -755,8 +753,7 @@
   (void)::memset((void*) mapAddress, 0, size);
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
-  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
 
   return mapAddress;
 }
@@ -909,7 +906,7 @@
 
   // attempt to close the file - restart if it gets interrupted,
   // but ignore other failures
-  RESTARTABLE(::close(fd), result);
+  result = ::close(fd);
   assert(result != OS_ERR, "could not close file");
 
   if (mapAddress == MAP_FAILED) {
@@ -921,8 +918,7 @@
   }
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
-  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
 
   *addr = mapAddress;
   *sizep = size;
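The perfMemory hunks above also show why closing the descriptor immediately after mmap() is fine: a MAP_SHARED mapping keeps the file open independently of the fd. A standalone sketch of that create/size/map/close lifecycle (the path and size are invented for the example):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <cstring>

int main() {
  const size_t size = 4096;  // assume one page, like a tiny PerfData region
  int fd = ::open("/tmp/perfdata.demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
  if (fd < 0) return 1;
  if (::ftruncate(fd, (off_t)size) != 0) return 1;   // size the backing file

  char* mapAddress = (char*)::mmap(0, size, PROT_READ | PROT_WRITE,
                                   MAP_SHARED, fd, 0);
  int result = ::close(fd);            // the mapping survives this close
  if (mapAddress == MAP_FAILED || result == -1) return 1;

  ::memset(mapAddress, 0, size);       // zero-fill, as the code above does
  ::munmap(mapAddress, size);
  return 0;
}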
diff -r 16b10327b00d -r 90d6c221d4e5 src/os/linux/vm/attachListener_linux.cpp
--- a/src/os/linux/vm/attachListener_linux.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/os/linux/vm/attachListener_linux.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -199,7 +199,7 @@
   ::unlink(initial_path);
   int res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
   if (res == -1) {
-    RESTARTABLE(::close(listener), res);
+    ::close(listener);
     return -1;
   }
 
@@ -212,7 +212,7 @@
     }
   }
   if (res == -1) {
-    RESTARTABLE(::close(listener), res);
+    ::close(listener);
     ::unlink(initial_path);
     return -1;
   }
@@ -340,24 +340,21 @@
     struct ucred cred_info;
     socklen_t optlen = sizeof(cred_info);
     if (::getsockopt(s, SOL_SOCKET, SO_PEERCRED, (void*)&cred_info, &optlen) == -1) {
-      int res;
-      RESTARTABLE(::close(s), res);
+      ::close(s);
       continue;
     }
     uid_t euid = geteuid();
     gid_t egid = getegid();
 
     if (cred_info.uid != euid || cred_info.gid != egid) {
-      int res;
-      RESTARTABLE(::close(s), res);
+      ::close(s);
       continue;
     }
 
     // peer credentials look okay so we read the request
     LinuxAttachOperation* op = read_request(s);
     if (op == NULL) {
-      int res;
-      RESTARTABLE(::close(s), res);
+      ::close(s);
       continue;
     } else {
       return op;
@@ -408,7 +405,7 @@
   }
 
   // done
-  RESTARTABLE(::close(this->socket()), rc);
+  ::close(this->socket());
 
   // were we externally suspended while we were waiting?
   thread->check_and_wait_while_suspended();
diff -r 16b10327b00d -r 90d6c221d4e5 src/os/linux/vm/osThread_linux.hpp
--- a/src/os/linux/vm/osThread_linux.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/os/linux/vm/osThread_linux.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -77,7 +77,7 @@
   // flags that support signal based suspend/resume on Linux are in a
   // separate class to avoid confusion with many flags in OSThread that
   // are used by VM level suspend/resume.
-  os::Linux::SuspendResume sr;
+  os::SuspendResume sr;
 
   // _ucontext and _siginfo are used by SR_handler() to save thread context,
   // and they will later be used to walk the stack or reposition thread PC.
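The one-line change above (os::Linux::SuspendResume becomes os::SuspendResume) is the visible edge of a larger refactor: each platform's ad-hoc suspend flags are replaced by one shared state machine whose states and transitions (SR_RUNNING, SR_SUSPEND_REQUEST, SR_SUSPENDED, SR_WAKEUP_REQUEST; request_suspend(), suspended(), cancel_suspend(), request_wakeup(), running()) appear in the os_linux.cpp hunks below. A minimal self-contained sketch of such a CAS-driven state machine -- the names come from this diff, but the body is an assumed approximation, not the class this changeset adds to os.hpp:

#include <atomic>
#include <cstdio>

class SuspendResume {
 public:
  enum State { SR_RUNNING, SR_SUSPEND_REQUEST, SR_SUSPENDED, SR_WAKEUP_REQUEST };

  State request_suspend() { return switch_state(SR_RUNNING, SR_SUSPEND_REQUEST); }
  State suspended()       { return switch_state(SR_SUSPEND_REQUEST, SR_SUSPENDED); }
  State cancel_suspend()  { return switch_state(SR_SUSPEND_REQUEST, SR_RUNNING); }
  State request_wakeup()  { return switch_state(SR_SUSPENDED, SR_WAKEUP_REQUEST); }
  State running()         { return switch_state(SR_WAKEUP_REQUEST, SR_RUNNING); }

  bool is_running()   const { return _state.load() == SR_RUNNING; }
  bool is_suspended() const { return _state.load() == SR_SUSPENDED; }
  State state()       const { return _state.load(); }

 private:
  // Try to move from 'from' to 'to'. Returns the state now in effect:
  // 'to' on success, or the competing state when another thread won the race.
  State switch_state(State from, State to) {
    State expected = from;
    if (_state.compare_exchange_strong(expected, to)) return to;
    return expected;
  }
  std::atomic<State> _state{SR_RUNNING};
};

int main() {
  SuspendResume sr;
  if (sr.request_suspend() == SuspendResume::SR_SUSPEND_REQUEST) {
    sr.suspended();   // what SR_handler does on the target side
  }
  std::printf("suspended? %d\n", (int)sr.is_suspended());
  return 0;
}

Returning the resulting state from every transition is the point of the design: a lost race tells the caller exactly which competing transition won, which is what lets do_suspend() below distinguish "request cancelled" from "already suspended".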
diff -r 16b10327b00d -r 90d6c221d4e5 src/os/linux/vm/os_linux.cpp
--- a/src/os/linux/vm/os_linux.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/os/linux/vm/os_linux.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -151,6 +151,9 @@
 /* Used to protect dlsym() calls */
 static pthread_mutex_t dl_mutex;
 
+// Declarations
+static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
+
 #ifdef JAVASE_EMBEDDED
 class MemNotifyThread: public Thread {
   friend class VMStructs;
@@ -1679,12 +1682,13 @@
   Dl_info dlinfo;
 
   if (libjvm_base_addr == NULL) {
-    dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo);
-    libjvm_base_addr = (address)dlinfo.dli_fbase;
+    if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
+      libjvm_base_addr = (address)dlinfo.dli_fbase;
+    }
     assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
   }
 
-  if (dladdr((void *)addr, &dlinfo)) {
+  if (dladdr((void *)addr, &dlinfo) != 0) {
     if (libjvm_base_addr == (address)dlinfo.dli_fbase)
       return true;
   }
@@ -1693,24 +1697,30 @@
 
 bool os::dll_address_to_function_name(address addr, char *buf,
                                       int buflen, int *offset) {
+  // buf is not optional, but offset is optional
+  assert(buf != NULL, "sanity check");
+
   Dl_info dlinfo;
 
-  if (dladdr((void*)addr, &dlinfo) && dlinfo.dli_sname != NULL) {
-    if (buf != NULL) {
-      if(!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
+  if (dladdr((void*)addr, &dlinfo) != 0) {
+    // see if we have a matching symbol
+    if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
+      if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
         jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
       }
+      if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
+      return true;
+    }
+    // no matching symbol so try for just file info
+    if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
+      if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
+                          buf, buflen, offset, dlinfo.dli_fname)) {
+        return true;
+      }
     }
-    if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
-    return true;
-  } else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
-    if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
-        buf, buflen, offset, dlinfo.dli_fname)) {
-      return true;
-    }
-  }
-
-  if (buf != NULL) buf[0] = '\0';
+  }
+
+  buf[0] = '\0';
   if (offset != NULL) *offset = -1;
   return false;
 }
@@ -1761,6 +1771,9 @@
 
 bool os::dll_address_to_library_name(address addr, char* buf,
                                      int buflen, int* offset) {
+  // buf is not optional, but offset is optional
+  assert(buf != NULL, "sanity check");
+
   Dl_info dlinfo;
   struct _address_to_library_name data;
 
@@ -1779,15 +1792,20 @@
     // buf already contains library name
     if (offset) *offset = addr - data.base;
     return true;
-  } else if (dladdr((void*)addr, &dlinfo)){
-    if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
-    if (offset) *offset = addr - (address)dlinfo.dli_fbase;
-    return true;
-  } else {
-    if (buf) buf[0] = '\0';
-    if (offset) *offset = -1;
-    return false;
-  }
+  }
+  if (dladdr((void*)addr, &dlinfo) != 0) {
+    if (dlinfo.dli_fname != NULL) {
+      jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
+    }
+    if (dlinfo.dli_fbase != NULL && offset != NULL) {
+      *offset = addr - (address)dlinfo.dli_fbase;
+    }
+    return true;
+  }
+
+  buf[0] = '\0';
+  if (offset) *offset = -1;
+  return false;
 }
 
 // Loads .dll/.so and
@@ -2314,8 +2332,11 @@
   bool ret = dll_address_to_library_name(
                 CAST_FROM_FN_PTR(address, os::jvm_path),
                 dli_fname, sizeof(dli_fname), NULL);
-  assert(ret != 0, "cannot locate libjvm");
-  char *rp = realpath(dli_fname, buf);
+  assert(ret, "cannot locate libjvm");
+  char *rp = NULL;
+  if (ret && dli_fname[0] != '\0') {
+    rp = realpath(dli_fname, buf);
+  }
   if (rp == NULL)
     return;
 
@@ -2407,6 +2428,57 @@
   return CAST_FROM_FN_PTR(void*, UserHandler);
 }
 
+class Semaphore : public StackObj {
+  public:
+    Semaphore();
+    ~Semaphore();
+    void signal();
+    void wait();
+    bool trywait();
+    bool timedwait(unsigned int sec, int nsec);
+  private:
+    sem_t _semaphore;
+};
+
+
+Semaphore::Semaphore() {
+  sem_init(&_semaphore, 0, 0);
+}
+
+Semaphore::~Semaphore() {
+  sem_destroy(&_semaphore);
+}
+
+void Semaphore::signal() {
+  sem_post(&_semaphore);
+}
+
+void Semaphore::wait() {
+  sem_wait(&_semaphore);
+}
+
+bool Semaphore::trywait() {
+  return sem_trywait(&_semaphore) == 0;
+}
+
+bool Semaphore::timedwait(unsigned int sec, int nsec) {
+  struct timespec ts;
+  unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
+
+  while (1) {
+    int result = sem_timedwait(&_semaphore, &ts);
+    if (result == 0) {
+      return true;
+    } else if (errno == EINTR) {
+      continue;
+    } else if (errno == ETIMEDOUT) {
+      return false;
+    } else {
+      return false;
+    }
+  }
+}
+
 extern "C" {
   typedef void (*sa_handler_t)(int);
   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
@@ -2446,6 +2518,7 @@
 
 // Linux(POSIX) specific hand shaking semaphore.
 static sem_t sig_sem;
+static Semaphore sr_semaphore;
 
 void os::signal_init_pd() {
   // Initialize signal structures
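The Semaphore wrapper just added retries sem_timedwait() on EINTR so a stray signal cannot cut a timed wait short. unpackTime() is HotSpot-internal, so this standalone sketch of the same retry loop computes the absolute deadline with clock_gettime() instead (Linux; link with -pthread):

#include <cerrno>
#include <ctime>
#include <cstdio>
#include <semaphore.h>

static bool timedwait_ms(sem_t* sem, long millis) {
  struct timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);          // sem_timedwait takes an
  ts.tv_sec  += millis / 1000;                 // absolute CLOCK_REALTIME time
  ts.tv_nsec += (millis % 1000) * 1000000L;
  if (ts.tv_nsec >= 1000000000L) { ts.tv_sec++; ts.tv_nsec -= 1000000000L; }

  while (true) {
    if (sem_timedwait(sem, &ts) == 0) return true;   // signalled
    if (errno == EINTR) continue;                    // interrupted: retry
    return false;                                    // ETIMEDOUT or real error
  }
}

int main() {
  sem_t sem;
  sem_init(&sem, 0, 0);
  std::printf("got it? %d\n", (int)timedwait_ms(&sem, 10));  // times out: 0
  sem_destroy(&sem);
  return 0;
}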
@@ -2557,11 +2630,49 @@
   }
 }
 
+static bool recoverable_mmap_error(int err) {
+  // See if the error is one we can let the caller handle. This
+  // list of errno values comes from JBS-6843484. I can't find a
+  // Linux man page that documents this specific set of errno
+  // values so while this list currently matches Solaris, it may
+  // change as we gain experience with this failure mode.
+  switch (err) {
+  case EBADF:
+  case EINVAL:
+  case ENOTSUP:
+    // let the caller deal with these errors
+    return true;
+
+  default:
+    // Any remaining errors on this OS can cause our reserved mapping
+    // to be lost. That can cause confusion where different data
+    // structures think they have the same memory mapped. The worst
+    // scenario is if both the VM and a library think they have the
+    // same memory mapped.
+    return false;
+  }
+}
+
+static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
+                                    int err) {
+  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
+          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
+          strerror(err), err);
+}
+
+static void warn_fail_commit_memory(char* addr, size_t size,
+                                    size_t alignment_hint, bool exec,
+                                    int err) {
+  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
+          ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, size,
+          alignment_hint, exec, strerror(err), err);
+}
+
 // NOTE: Linux kernel does not really reserve the pages for us.
 //       All it does is to check if there are enough free pages
 //       left at the time of mmap(). This could be a potential
 //       problem.
-bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
+int os::Linux::commit_memory_impl(char* addr, size_t size, bool exec) {
   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
   uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
                                      MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
@@ -2569,9 +2680,32 @@
     if (UseNUMAInterleaving) {
       numa_make_global(addr, size);
     }
-    return true;
-  }
-  return false;
+    return 0;
+  }
+
+  int err = errno;  // save errno from mmap() call above
+
+  if (!recoverable_mmap_error(err)) {
+    warn_fail_commit_memory(addr, size, exec, err);
+    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "committing reserved memory.");
+  }
+
+  return err;
+}
+
+bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
+  return os::Linux::commit_memory_impl(addr, size, exec) == 0;
+}
+
+void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
+                                  const char* mesg) {
+  assert(mesg != NULL, "mesg must be specified");
+  int err = os::Linux::commit_memory_impl(addr, size, exec);
+  if (err != 0) {
+    // the caller wants all commit errors to exit with the specified mesg:
+    warn_fail_commit_memory(addr, size, exec, err);
+    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
+  }
+}
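The new shape of the commit path: commit_memory_impl() reports the raw errno, pd_commit_memory() folds that to bool for callers that can recover, and pd_commit_memory_or_exit() aborts with the caller's message when failure is unacceptable. A standalone sketch of the same three-layer split, using plain mmap() and exit() in place of HotSpot's warning/vm_exit machinery:

#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sys/mman.h>

static int commit_memory_impl(char* addr, size_t size, bool exec) {
  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  void* res = ::mmap(addr, size, prot,
                     MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
  return (res != MAP_FAILED) ? 0 : errno;   // 0 on success, else the errno
}

static bool commit_memory(char* addr, size_t size, bool exec) {
  return commit_memory_impl(addr, size, exec) == 0;  // caller handles failure
}

static void commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  int err = commit_memory_impl(addr, size, exec);
  if (err != 0) {                                    // failure is fatal here
    std::fprintf(stderr, "commit failed (%s): %s\n", std::strerror(err), mesg);
    std::exit(1);
  }
}

int main() {
  // Reserve an address range first (PROT_NONE), then commit over it.
  size_t sz = 1 << 20;
  char* base = (char*)::mmap(0, sz, PROT_NONE,
                             MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
  if (base == (char*)MAP_FAILED) return 1;
  commit_memory_or_exit(base, sz, false, "demo region");
  std::printf("committed %zu bytes at %p\n", sz, (void*)base);
  return 0;
}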
// Define MAP_HUGETLB here so we can build HotSpot on old systems.
@@ -2584,8 +2718,9 @@
 #define MADV_HUGEPAGE 14
 #endif
 
-bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
-                          bool exec) {
+int os::Linux::commit_memory_impl(char* addr, size_t size,
+                                  size_t alignment_hint, bool exec) {
+  int err;
   if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
     int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
     uintptr_t res =
@@ -2596,16 +2731,46 @@
       if (UseNUMAInterleaving) {
         numa_make_global(addr, size);
       }
-      return true;
+      return 0;
+    }
+
+    err = errno;  // save errno from mmap() call above
+
+    if (!recoverable_mmap_error(err)) {
+      // However, it is not clear that this loss of our reserved mapping
+      // happens with large pages on Linux or that we cannot recover
+      // from the loss. For now, we just issue a warning and we don't
+      // call vm_exit_out_of_memory(). This issue is being tracked by
+      // JBS-8007074.
+      warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
+//    vm_exit_out_of_memory(size, OOM_MMAP_ERROR,
+//                          "committing reserved memory.");
     }
     // Fall through and try to use small pages
   }
 
-  if (commit_memory(addr, size, exec)) {
+  err = os::Linux::commit_memory_impl(addr, size, exec);
+  if (err == 0) {
     realign_memory(addr, size, alignment_hint);
-    return true;
-  }
-  return false;
+  }
+  return err;
+}
+
+bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
+                          bool exec) {
+  return os::Linux::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
+}
+
+void os::pd_commit_memory_or_exit(char* addr, size_t size,
+                                  size_t alignment_hint, bool exec,
+                                  const char* mesg) {
+  assert(mesg != NULL, "mesg must be specified");
+  int err = os::Linux::commit_memory_impl(addr, size, alignment_hint, exec);
+  if (err != 0) {
+    // the caller wants all commit errors to exit with the specified mesg:
+    warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
+    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
+  }
 }
 
 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
@@ -2623,7 +2788,7 @@
   // small pages on top of the SHM segment. This method always works for small pages, so we
   // allow that in any case.
   if (alignment_hint <= (size_t)os::vm_page_size() || !UseSHM) {
-    commit_memory(addr, bytes, alignment_hint, false);
+    commit_memory(addr, bytes, alignment_hint, !ExecMem);
   }
 }
 
@@ -2876,7 +3041,7 @@
     ::munmap((void*)stack_extent, (uintptr_t)addr - stack_extent);
   }
 
-  return os::commit_memory(addr, size);
+  return os::commit_memory(addr, size, !ExecMem);
 }
 
 // If this is a growable mapping, remove the guard pages entirely by
@@ -2998,7 +3163,7 @@
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
                   -1, 0);
 
-  if (p != (void *) -1) {
+  if (p != MAP_FAILED) {
     // We don't know if this really is a huge page or not.
     FILE *fp = fopen("/proc/self/maps", "r");
     if (fp) {
@@ -3216,22 +3381,21 @@
   }
 
   // The memory is committed
-  address pc = CALLER_PC;
-  MemTracker::record_virtual_memory_reserve((address)addr, bytes, pc);
-  MemTracker::record_virtual_memory_commit((address)addr, bytes, pc);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
 
   return addr;
 }
 
 bool os::release_memory_special(char* base, size_t bytes) {
+  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
   // detaching the SHM segment will also delete it, see reserve_memory_special()
   int rslt = shmdt(base);
   if (rslt == 0) {
-    MemTracker::record_virtual_memory_uncommit((address)base, bytes);
-    MemTracker::record_virtual_memory_release((address)base, bytes);
+    tkr.record((address)base, bytes);
     return true;
   } else {
+    tkr.discard();
     return false;
   }
 }
@@ -3559,9 +3723,6 @@
 static void resume_clear_context(OSThread *osthread) {
   osthread->set_ucontext(NULL);
   osthread->set_siginfo(NULL);
-
-  // notify the suspend action is completed, we have now resumed
-  osthread->sr.clear_suspended();
 }
 
 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
@@ -3581,7 +3742,7 @@
 // its signal handlers run and prevents sigwait()'s use with the
 // mutex granting the signal.
 //
-// Currently only ever called on the VMThread
+// Currently only ever called on the VMThread and JavaThreads (PC sampling)
 //
 static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
   // Save and restore errno to avoid confusing native code with EINTR
@@ -3590,38 +3751,46 @@
   Thread* thread = Thread::current();
   OSThread* osthread = thread->osthread();
 
-  assert(thread->is_VM_thread(), "Must be VMThread");
-  // read current suspend action
-  int action = osthread->sr.suspend_action();
-  if (action == os::Linux::SuspendResume::SR_SUSPEND) {
+  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
+
+  os::SuspendResume::State current = osthread->sr.state();
+  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
     suspend_save_context(osthread, siginfo, context);
 
-    // Notify the suspend action is about to be completed. do_suspend()
-    // waits until SR_SUSPENDED is set and then returns. We will wait
-    // here for a resume signal and that completes the suspend-other
-    // action. do_suspend/do_resume is always called as a pair from
-    // the same thread - so there are no races
-
-    // notify the caller
-    osthread->sr.set_suspended();
-
-    sigset_t suspend_set;  // signals for sigsuspend()
-
-    // get current set of blocked signals and unblock resume signal
-    pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
-    sigdelset(&suspend_set, SR_signum);
-
-    // wait here until we are resumed
-    do {
-      sigsuspend(&suspend_set);
-      // ignore all returns until we get a resume signal
-    } while (osthread->sr.suspend_action() != os::Linux::SuspendResume::SR_CONTINUE);
+    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
+    os::SuspendResume::State state = osthread->sr.suspended();
+    if (state == os::SuspendResume::SR_SUSPENDED) {
+      sigset_t suspend_set;  // signals for sigsuspend()
+
+      // get current set of blocked signals and unblock resume signal
+      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
+      sigdelset(&suspend_set, SR_signum);
+
+      sr_semaphore.signal();
+      // wait here until we are resumed
+      while (1) {
+        sigsuspend(&suspend_set);
+
+        os::SuspendResume::State result = osthread->sr.running();
+        if (result == os::SuspendResume::SR_RUNNING) {
+          sr_semaphore.signal();
+          break;
+        }
+      }
+
+    } else if (state == os::SuspendResume::SR_RUNNING) {
+      // request was cancelled, continue
+    } else {
+      ShouldNotReachHere();
+    }
 
     resume_clear_context(osthread);
-
+  } else if (current == os::SuspendResume::SR_RUNNING) {
+    // request was cancelled, continue
+  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
+    // ignore
   } else {
-    assert(action == os::Linux::SuspendResume::SR_CONTINUE, "unexpected sr action");
-    // nothing special to do - just leave the handler
+    // ignore
   }
 
   errno = old_errno;
@@ -3665,42 +3834,82 @@
   return 0;
 }
 
+static int sr_notify(OSThread* osthread) {
+  int status = pthread_kill(osthread->pthread_id(), SR_signum);
+  assert_status(status == 0, status, "pthread_kill");
+  return status;
+}
+
+// "Randomly" selected value for how long we want to spin
+// before bailing out on suspending a thread, also how often
+// we send a signal to a thread we want to resume
+static const int RANDOMLY_LARGE_INTEGER = 1000000;
+static const int RANDOMLY_LARGE_INTEGER2 = 100;
 
 // returns true on success and false on error - really an error is fatal
 // but this seems the normal response to library errors
 static bool do_suspend(OSThread* osthread) {
+  assert(osthread->sr.is_running(), "thread should be running");
+  assert(!sr_semaphore.trywait(), "semaphore has invalid state");
+
   // mark as suspended and send signal
-  osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_SUSPEND);
-  int status = pthread_kill(osthread->pthread_id(), SR_signum);
-  assert_status(status == 0, status, "pthread_kill");
-
-  // check status and wait until notified of suspension
-  if (status == 0) {
-    for (int i = 0; !osthread->sr.is_suspended(); i++) {
-      os::yield_all(i);
+  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
+    // failed to switch, state wasn't running?
+    ShouldNotReachHere();
+    return false;
+  }
+
+  if (sr_notify(osthread) != 0) {
+    ShouldNotReachHere();
+  }
+
+  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
+  while (true) {
+    if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
+      break;
+    } else {
+      // timeout
+      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
+      if (cancelled == os::SuspendResume::SR_RUNNING) {
+        return false;
+      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
+        // make sure that we consume the signal on the semaphore as well
+        sr_semaphore.wait();
+        break;
+      } else {
+        ShouldNotReachHere();
+        return false;
+      }
     }
-    osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_NONE);
-    return true;
-  }
-  else {
-    osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_NONE);
-    return false;
-  }
+  }
+
+  guarantee(osthread->sr.is_suspended(), "Must be suspended");
+  return true;
 }
 
 static void do_resume(OSThread* osthread) {
   assert(osthread->sr.is_suspended(), "thread should be suspended");
-  osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_CONTINUE);
-
-  int status = pthread_kill(osthread->pthread_id(), SR_signum);
-  assert_status(status == 0, status, "pthread_kill");
-  // check status and wait until notified of resumption
-  if (status == 0) {
-    for (int i = 0; osthread->sr.is_suspended(); i++) {
-      os::yield_all(i);
+  assert(!sr_semaphore.trywait(), "invalid semaphore state");
+
+  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
+    // failed to switch to WAKEUP_REQUEST
+    ShouldNotReachHere();
+    return;
+  }
+
+  while (true) {
+    if (sr_notify(osthread) == 0) {
+      if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
+        if (osthread->sr.is_running()) {
+          return;
+        }
+      }
+    } else {
+      ShouldNotReachHere();
     }
   }
-  osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_NONE);
+
+  guarantee(osthread->sr.is_running(), "Must be running!");
 }
 
////////////////////////////////////////////////////////////////////////////////
@@ -4293,7 +4502,7 @@
   if (!UseMembar) {
     address mem_serialize_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-    guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
+    guarantee( mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
     os::set_memory_serialize_page( mem_serialize_page );
 
 #ifndef PRODUCT
@@ -4472,6 +4681,40 @@
 
 ///
 
+void os::SuspendedThreadTask::internal_do_task() {
+  if (do_suspend(_thread->osthread())) {
+    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
+    do_task(context);
+    do_resume(_thread->osthread());
+  }
+}
+
+class PcFetcher : public os::SuspendedThreadTask {
+public:
+  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
+  ExtendedPC result();
+protected:
+  void do_task(const os::SuspendedThreadTaskContext& context);
+private:
+  ExtendedPC _epc;
+};
+
+ExtendedPC PcFetcher::result() {
+  guarantee(is_done(), "task is not done yet.");
+  return _epc;
+}
+
+void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
+  Thread* thread = context.thread();
+  OSThread* osthread = thread->osthread();
+  if (osthread->ucontext() != NULL) {
+    _epc = os::Linux::ucontext_get_pc((ucontext_t *) context.ucontext());
+  } else {
+    // NULL context is unexpected, double-check this is the VMThread
+    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
+  }
+}
+
 // Suspends the target using the signal mechanism and then grabs the PC before
 // resuming the target. Used by the flat-profiler only
 ExtendedPC os::get_thread_pc(Thread* thread) {
@@ -4479,22 +4722,9 @@
   assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
   assert(thread->is_VM_thread(), "Can only be called for VMThread");
 
-  ExtendedPC epc;
-
-  OSThread* osthread = thread->osthread();
-  if (do_suspend(osthread)) {
-    if (osthread->ucontext() != NULL) {
-      epc = os::Linux::ucontext_get_pc(osthread->ucontext());
-    } else {
-      // NULL context is unexpected, double-check this is the VMThread
-      guarantee(thread->is_VM_thread(), "can only be called for VMThread");
-    }
-    do_resume(osthread);
-  }
-  // failure means pthread_kill failed for some reason - arguably this is
-  // a fatal problem, but such problems are ignored elsewhere
-
-  return epc;
+  PcFetcher fetcher(thread);
+  fetcher.run();
+  return fetcher.result();
 }
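os::get_thread_pc() now delegates to PcFetcher, whose base class os::SuspendedThreadTask owns the suspend/do_task/resume sequence. A stripped-down model of that pattern (no signals; the suspension step is stubbed, and the is_done() bookkeeping is assumed to live in the base class, as this diff suggests):

#include <cassert>
#include <cstdio>

struct Context { void* pc; };   // stand-in for the saved ucontext

class SuspendedThreadTask {
 public:
  void run() {
    // The real implementation suspends the target thread here (do_suspend),
    // lets the subclass inspect its saved context, then resumes it.
    Context ctx = { (void*)0x1234 };   // pretend-saved program counter
    do_task(ctx);
    _done = true;
  }
  bool is_done() const { return _done; }
  virtual ~SuspendedThreadTask() {}
 protected:
  virtual void do_task(const Context& ctx) = 0;
 private:
  bool _done = false;
};

class PcFetcher : public SuspendedThreadTask {
 public:
  void* result() const {
    assert(is_done() && "task is not done yet");
    return _pc;
  }
 protected:
  void do_task(const Context& ctx) override { _pc = ctx.pc; }
 private:
  void* _pc = nullptr;
};

int main() {
  PcFetcher fetcher;
  fetcher.run();
  std::printf("pc = %p\n", fetcher.result());
  return 0;
}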
 
 int os::Linux::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime)
@@ -4518,20 +4748,20 @@
 bool os::find(address addr, outputStream* st) {
   Dl_info dlinfo;
   memset(&dlinfo, 0, sizeof(dlinfo));
-  if (dladdr(addr, &dlinfo)) {
+  if (dladdr(addr, &dlinfo) != 0) {
     st->print(PTR_FORMAT ": ", addr);
-    if (dlinfo.dli_sname != NULL) {
+    if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
       st->print("%s+%#x", dlinfo.dli_sname, addr - (intptr_t)dlinfo.dli_saddr);
-    } else if (dlinfo.dli_fname) {
+    } else if (dlinfo.dli_fbase != NULL) {
      st->print("<offset %#x>", addr - (intptr_t)dlinfo.dli_fbase);
     } else {
       st->print("<absolute address>");
     }
-    if (dlinfo.dli_fname) {
+    if (dlinfo.dli_fname != NULL) {
       st->print(" in %s", dlinfo.dli_fname);
     }
-    if (dlinfo.dli_fbase) {
+    if (dlinfo.dli_fbase != NULL) {
       st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
     }
     st->cr();
@@ -4544,7 +4774,7 @@
     if (!lowest)  lowest = (address) dlinfo.dli_fbase;
     if (begin < lowest)  begin = lowest;
     Dl_info dlinfo2;
-    if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr
+    if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
         && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
       end = (address) dlinfo2.dli_saddr;
     Disassembler::decode(begin, end, st);
@@ -5616,4 +5846,5 @@
     new MemNotifyThread(fd);
   }
 }
+
 #endif // JAVASE_EMBEDDED
diff -r 16b10327b00d -r 90d6c221d4e5 src/os/linux/vm/os_linux.hpp
--- a/src/os/linux/vm/os_linux.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/os/linux/vm/os_linux.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -76,6 +76,10 @@
   static julong physical_memory() { return _physical_memory; }
   static void initialize_system_info();
 
+  static int commit_memory_impl(char* addr, size_t bytes, bool exec);
+  static int commit_memory_impl(char* addr, size_t bytes,
+                                size_t alignment_hint, bool exec);
+
   static void set_glibc_version(const char *s)      { _glibc_version = s; }
   static void set_libpthread_version(const char *s) { _libpthread_version = s; }
 
@@ -210,35 +214,6 @@
   // LinuxThreads work-around for 6292965
   static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime);
-
-  // Linux suspend/resume support - this helper is a shadow of its former
-  // self now that low-level suspension is barely used, and old workarounds
-  // for LinuxThreads are no longer needed.
-  class SuspendResume {
-  private:
-    volatile int  _suspend_action;
-    volatile jint _state;
-  public:
-    // values for suspend_action:
-    enum {
-      SR_NONE      = 0x00,
-      SR_SUSPEND   = 0x01,  // suspend request
-      SR_CONTINUE  = 0x02,  // resume request
-      SR_SUSPENDED = 0x20   // values for _state: + SR_NONE
-    };
-
-    SuspendResume() { _suspend_action = SR_NONE; _state = SR_NONE; }
-
-    int suspend_action() const     { return _suspend_action; }
-    void set_suspend_action(int x) { _suspend_action = x;    }
-
-    // atomic updates for _state
-    inline void set_suspended();
-    inline void clear_suspended();
-    bool is_suspended()            { return _state & SR_SUSPENDED; }
-
-  };
-
 private:
   typedef int (*sched_getcpu_func_t)(void);
   typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
@@ -333,6 +308,6 @@
     status = pthread_mutex_init (_mutex, NULL);
     assert_status(status == 0, status, "mutex_init");
   }
-} ;
+};
 
 #endif // OS_LINUX_VM_OS_LINUX_HPP
diff -r 16b10327b00d -r 90d6c221d4e5 src/os/linux/vm/os_linux.inline.hpp
--- a/src/os/linux/vm/os_linux.inline.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/os/linux/vm/os_linux.inline.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -288,20 +288,4 @@
   return ::setsockopt(fd, level, optname, optval, optlen);
 }
 
-inline void os::Linux::SuspendResume::set_suspended() {
-  jint temp, temp2;
-  do {
-    temp = _state;
-    temp2 = Atomic::cmpxchg(temp | SR_SUSPENDED, &_state, temp);
-  } while (temp2 != temp);
-}
-
-inline void os::Linux::SuspendResume::clear_suspended() {
-  jint temp, temp2;
-  do {
-    temp = _state;
-    temp2 = Atomic::cmpxchg(temp & ~SR_SUSPENDED, &_state, temp);
-  } while (temp2 != temp);
-}
-
 #endif // OS_LINUX_VM_OS_LINUX_INLINE_HPP
diff -r 16b10327b00d -r 90d6c221d4e5 src/os/linux/vm/perfMemory_linux.cpp
--- a/src/os/linux/vm/perfMemory_linux.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/os/linux/vm/perfMemory_linux.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@
   }
 
   // commit memory
-  if (!os::commit_memory(mapAddress, size)) {
+  if (!os::commit_memory(mapAddress, size, !ExecMem)) {
     if (PrintMiscellaneous && Verbose) {
       warning("Could not commit PerfData memory\n");
     }
@@ -120,7 +120,7 @@
     addr += result;
   }
 
-  RESTARTABLE(::close(fd), result);
+  result = ::close(fd);
   if (PrintMiscellaneous && Verbose) {
     if (result == OS_ERR) {
       warning("Could not close %s: %s\n", destfile, strerror(errno));
@@ -632,7 +632,7 @@
     if (PrintMiscellaneous && Verbose) {
       warning("could not set shared memory file size: %s\n", strerror(errno));
     }
-    RESTARTABLE(::close(fd), result);
+    ::close(fd);
     return -1;
   }
@@ -656,7 +656,7 @@
   if (result != -1) {
     return fd;
   } else {
-    RESTARTABLE(::close(fd), result);
+    ::close(fd);
     return -1;
   }
 }
@@ -734,9 +734,7 @@
   mapAddress = (char*)::mmap((char*)0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
 
-  // attempt to close the file - restart it if it was interrupted,
-  // but ignore other failures
-  RESTARTABLE(::close(fd), result);
+  result = ::close(fd);
   assert(result != OS_ERR, "could not close file");
 
   if (mapAddress == MAP_FAILED) {
@@ -755,8 +753,7 @@
   (void)::memset((void*) mapAddress, 0, size);
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
-  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
 
   return mapAddress;
 }
@@ -907,9 +904,7 @@
   mapAddress = (char*)::mmap((char*)0, size, mmap_prot, MAP_SHARED, fd, 0);
 
-  // attempt to close the file - restart if it gets interrupted,
-  // but ignore other failures
-  RESTARTABLE(::close(fd), result);
+  result = ::close(fd);
   assert(result != OS_ERR, "could not close file");
 
   if (mapAddress == MAP_FAILED) {
@@ -921,8 +916,7 @@
   }
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
-  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
 
   *addr = mapAddress;
   *sizep = size;
diff -r 16b10327b00d -r 90d6c221d4e5 src/os/solaris/dtrace/jhelper.d
--- a/src/os/solaris/dtrace/jhelper.d Tue Jul 16 10:55:48 2013 -0400
+++ b/src/os/solaris/dtrace/jhelper.d Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -332,12 +332,15 @@
 
   this->nameSymbol = copyin_ptr(this->constantPool +
       this->nameIndex * sizeof (pointer) + SIZE_ConstantPool);
+  /* The symbol is a CPSlot and has lower bit set to indicate metadata */
+  this->nameSymbol &= (~1); /* remove metadata lsb */
 
   this->nameSymbolLength = copyin_uint16(this->nameSymbol +
       OFFSET_Symbol_length);
 
   this->signatureSymbol = copyin_ptr(this->constantPool +
       this->signatureIndex * sizeof (pointer) + SIZE_ConstantPool);
+  this->signatureSymbol &= (~1); /* remove metadata lsb */
 
   this->signatureSymbolLength = copyin_uint16(this->signatureSymbol +
       OFFSET_Symbol_length);
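The jhelper.d fix masks off bit 0 before dereferencing the symbol: a constant-pool CPSlot stores a Symbol* with its low bit set as a metadata tag, so the raw slot value is not a valid pointer until the tag is stripped. A tiny standalone illustration of reading such a low-bit-tagged pointer (the struct is invented for the example):

#include <cassert>
#include <cstdint>
#include <cstdio>

struct Symbol { unsigned short length; };

int main() {
  Symbol s = { 5 };
  uintptr_t slot = reinterpret_cast<uintptr_t>(&s) | 1;        // tag the pointer
  Symbol* sym = reinterpret_cast<Symbol*>(slot & ~(uintptr_t)1); // the `&= ~1` above
  assert(sym == &s);
  std::printf("length=%u\n", (unsigned)sym->length);
  return 0;
}

This works because Symbol objects are at least 2-byte aligned, leaving the low bit free to carry the flag.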
diff -r 16b10327b00d -r 90d6c221d4e5 src/os/solaris/dtrace/jvm_dtrace.c
--- a/src/os/solaris/dtrace/jvm_dtrace.c Tue Jul 16 10:55:48 2013 -0400
+++ b/src/os/solaris/dtrace/jvm_dtrace.c Tue Jul 16 12:20:08 2013 -0400
@@ -122,9 +122,7 @@
 }
 
 static int file_close(int fd) {
-    int ret;
-    RESTARTABLE(close(fd), ret);
-    return ret;
+    return close(fd);
 }
 
 static int file_read(int fd, char* buf, int len) {
diff -r 16b10327b00d -r 90d6c221d4e5 src/os/solaris/vm/attachListener_solaris.cpp
--- a/src/os/solaris/vm/attachListener_solaris.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/os/solaris/vm/attachListener_solaris.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -392,7 +392,7 @@
     return -1;
   }
   assert(fd >= 0, "bad file descriptor");
-  RESTARTABLE(::close(fd), res);
+  ::close(fd);
 
   // attach the door descriptor to the file
   if ((res = ::fattach(dd, initial_path)) == -1) {
@@ -410,7 +410,7 @@
   // rename file so that clients can attach
   if (dd >= 0) {
     if (::rename(initial_path, door_path) == -1) {
-      RESTARTABLE(::close(dd), res);
+      ::close(dd);
       ::fdetach(initial_path);
       dd = -1;
     }
@@ -549,7 +549,7 @@
   }
 
   // close socket and we're done
-  RESTARTABLE(::close(this->socket()), rc);
+  ::close(this->socket());
 
   // were we externally suspended while we were waiting?
   thread->check_and_wait_while_suspended();
diff -r 16b10327b00d -r 90d6c221d4e5 src/os/solaris/vm/globals_solaris.hpp
--- a/src/os/solaris/vm/globals_solaris.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/os/solaris/vm/globals_solaris.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -30,15 +30,6 @@
 //
 #define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
                                                                                \
-  product(bool, UseISM, false,                                                 \
-          "Use Intimate Shared Memory (Solaris Only)")                         \
-                                                                               \
-  product(bool, UsePermISM, false,                                             \
-          "Obsolete flag for compatibility (same as UseISM)")                  \
-                                                                               \
-  product(bool, UseMPSS, true,                                                 \
-          "Use Multiple Page Size Support (Solaris 9 Only)")                   \
-                                                                               \
   product(bool, UseExtendedFileIO, true,                                       \
           "Enable workaround for limitations of stdio FILE structure")
diff -r 16b10327b00d -r 90d6c221d4e5 src/os/solaris/vm/osThread_solaris.cpp
--- a/src/os/solaris/vm/osThread_solaris.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/os/solaris/vm/osThread_solaris.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,10 +41,6 @@
   _thread_id                         = 0;
   sigemptyset(&_caller_sigmask);
 
-  _current_callback                  = NULL;
-  _current_callback_lock = VM_Version::supports_compare_and_exchange() ? NULL
-                      : new Mutex(Mutex::suspend_resume, "Callback_lock", true);
-
   _saved_interrupt_thread_state      = _thread_new;
   _vm_created_thread                 = false;
 }
@@ -52,172 +48,6 @@
 void OSThread::pd_destroy() {
 }
 
-// Synchronous interrupt support
-//
-// _current_callback == NULL          no pending callback
-//                   == 1             callback_in_progress
-//                   == other value   pointer to the pending callback
-//
-
-// CAS on v8 is implemented by using a global atomic_memory_operation_lock,
-// which is shared by other atomic functions. It is OK for normal uses, but
-// dangerous if used after some thread is suspended or if used in signal
-// handlers. Instead here we use a special per-thread lock to synchronize
-// updating _current_callback if we are running on v8. Note in general trying
-// to grab locks after a thread is suspended is not safe, but it is safe for
-// updating _current_callback, because synchronous interrupt callbacks are
-// currently only used in:
-// 1. GetThreadPC_Callback - used by WatcherThread to profile VM thread
-// There is no overlap between the callbacks, which means we won't try to
-// grab a thread's sync lock after the thread has been suspended while holding
-// the same lock.
-
-// used after a thread is suspended
-static intptr_t compare_and_exchange_current_callback (
-       intptr_t callback, intptr_t *addr, intptr_t compare_value, Mutex *sync) {
-  if (VM_Version::supports_compare_and_exchange()) {
-    return Atomic::cmpxchg_ptr(callback, addr, compare_value);
-  } else {
-    MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
-    if (*addr == compare_value) {
-      *addr = callback;
-      return compare_value;
-    } else {
-      return callback;
-    }
-  }
-}
-
-// used in signal handler
-static intptr_t exchange_current_callback(intptr_t callback, intptr_t *addr, Mutex *sync) {
-  if (VM_Version::supports_compare_and_exchange()) {
-    return Atomic::xchg_ptr(callback, addr);
-  } else {
-    MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
-    intptr_t cb = *addr;
-    *addr = callback;
-    return cb;
-  }
-}
-// one interrupt at a time. spin if _current_callback != NULL
-int OSThread::set_interrupt_callback(Sync_Interrupt_Callback * cb) {
-  int count = 0;
-  while (compare_and_exchange_current_callback(
-         (intptr_t)cb, (intptr_t *)&_current_callback, (intptr_t)NULL, _current_callback_lock) != NULL) {
-    while (_current_callback != NULL) {
-      count++;
-#ifdef ASSERT
-      if ((WarnOnStalledSpinLock > 0) &&
-          (count % WarnOnStalledSpinLock == 0)) {
-        warning("_current_callback seems to be stalled: %p", _current_callback);
-      }
-#endif
-      os::yield_all(count);
-    }
-  }
-  return 0;
-}
-
-// reset _current_callback, spin if _current_callback is callback_in_progress
-void OSThread::remove_interrupt_callback(Sync_Interrupt_Callback * cb) {
-  int count = 0;
-  while (compare_and_exchange_current_callback(
-         (intptr_t)NULL, (intptr_t *)&_current_callback, (intptr_t)cb, _current_callback_lock) != (intptr_t)cb) {
-#ifdef ASSERT
-    intptr_t p = (intptr_t)_current_callback;
-    assert(p == (intptr_t)callback_in_progress ||
-           p == (intptr_t)cb, "wrong _current_callback value");
-#endif
-    while (_current_callback != cb) {
-      count++;
-#ifdef ASSERT
-      if ((WarnOnStalledSpinLock > 0) &&
-          (count % WarnOnStalledSpinLock == 0)) {
-        warning("_current_callback seems to be stalled: %p", _current_callback);
-      }
-#endif
-      os::yield_all(count);
-    }
-  }
-}
-
-void OSThread::do_interrupt_callbacks_at_interrupt(InterruptArguments *args) {
-  Sync_Interrupt_Callback * cb;
-  cb = (Sync_Interrupt_Callback *)exchange_current_callback(
-        (intptr_t)callback_in_progress, (intptr_t *)&_current_callback, _current_callback_lock);
-
-  if (cb == NULL) {
-    // signal is delivered too late (thread is masking interrupt signal??).
-    // there is nothing we need to do because requesting thread has given up.
-  } else if ((intptr_t)cb == (intptr_t)callback_in_progress) {
-    fatal("invalid _current_callback state");
-  } else {
-    assert(cb->target()->osthread() == this, "wrong target");
-    cb->execute(args);
-    cb->leave_callback();             // notify the requester
-  }
-
-  // restore original _current_callback value
-  intptr_t p;
-  p = exchange_current_callback((intptr_t)cb, (intptr_t *)&_current_callback, _current_callback_lock);
-  assert(p == (intptr_t)callback_in_progress, "just checking");
-}
-
-// Called by the requesting thread to send a signal to target thread and
-// execute "this" callback from the signal handler.
-int OSThread::Sync_Interrupt_Callback::interrupt(Thread * target, int timeout) {
-  // Let signals to the vm_thread go even if the Threads_lock is not acquired
-  assert(Threads_lock->owned_by_self() || (target == VMThread::vm_thread()),
-         "must have threads lock to call this");
-
-  OSThread * osthread = target->osthread();
-
-  // may block if target thread already has a pending callback
-  osthread->set_interrupt_callback(this);
-
-  _target = target;
-
-  int rslt = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
-  assert(rslt == 0, "thr_kill != 0");
-
-  bool status = false;
-  jlong t1 = os::javaTimeMillis();
-  { // don't use safepoint check because we might be the watcher thread.
-    MutexLockerEx ml(_sync, Mutex::_no_safepoint_check_flag);
-    while (!is_done()) {
-      status = _sync->wait(Mutex::_no_safepoint_check_flag, timeout);
-
-      // status == true if timed out
-      if (status) break;
-
-      // update timeout
-      jlong t2 = os::javaTimeMillis();
-      timeout -= t2 - t1;
-      t1 = t2;
-    }
-  }
-
-  // reset current_callback
-  osthread->remove_interrupt_callback(this);
-
-  return status;
-}
-
-void OSThread::Sync_Interrupt_Callback::leave_callback() {
-  if (!_sync->owned_by_self()) {
-    // notify requesting thread
-    MutexLockerEx ml(_sync, Mutex::_no_safepoint_check_flag);
-    _is_done = true;
-    _sync->notify_all();
-  } else {
-    // Current thread is interrupted while it is holding the _sync lock, trying
-    // to grab it again will deadlock. The requester will timeout anyway,
-    // so just return.
-    _is_done = true;
-  }
-}
-
 // copied from synchronizer.cpp
 
 void OSThread::handle_spinlock_contention(int tries) {
@@ -229,3 +59,7 @@
     os::yield();          // Yield to threads of same or higher priority
   }
 }
+
+void OSThread::SR_handler(Thread* thread, ucontext_t* uc) {
+  os::Solaris::SR_handler(thread, uc);
+}
diff -r 16b10327b00d -r 90d6c221d4e5 src/os/solaris/vm/osThread_solaris.hpp
--- a/src/os/solaris/vm/osThread_solaris.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/os/solaris/vm/osThread_solaris.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -72,61 +72,15 @@
 // ***************************************************************
 
  public:
-
-  class InterruptArguments : StackObj {
-  private:
-    Thread*     _thread;   // the thread to signal was dispatched to
-    ucontext_t* _ucontext; // the machine context at the time of the signal
-
-  public:
-    InterruptArguments(Thread* thread, ucontext_t* ucontext) {
-      _thread   = thread;
-      _ucontext = ucontext;
-    }
-
-    Thread* thread() const       { return _thread;   }
-    ucontext_t* ucontext() const { return _ucontext; }
-  };
-
-  // There are currently no asynchronous callbacks - and we'd better not
-  // support them in the future either, as they need to be deallocated from
-  // the interrupt handler, which is not safe; they also require locks to
-  // protect the callback queue.
-
-  class Sync_Interrupt_Callback : private StackObj {
-  protected:
-    volatile bool _is_done;
-    Monitor*      _sync;
-    Thread*       _target;
-  public:
-    Sync_Interrupt_Callback(Monitor * sync) {
-      _is_done = false;  _target = NULL;  _sync = sync;
-    }
-
-    bool is_done() const           { return _is_done; }
-    Thread* target() const         { return _target;  }
-
-    int interrupt(Thread * target, int timeout);
-
-    // override to implement the callback.
-    virtual void execute(InterruptArguments *args) = 0;
-
-    void leave_callback();
-  };
+  os::SuspendResume sr;
 
  private:
-
-  Sync_Interrupt_Callback * volatile _current_callback;
-  enum {
-    callback_in_progress = 1
-  };
-  Mutex * _current_callback_lock;  // only used on v8
+  ucontext_t* _ucontext;
 
  public:
-
-  int set_interrupt_callback    (Sync_Interrupt_Callback * cb);
-  void remove_interrupt_callback(Sync_Interrupt_Callback * cb);
-  void do_interrupt_callbacks_at_interrupt(InterruptArguments *args);
+  ucontext_t* ucontext() const { return _ucontext; }
+  void set_ucontext(ucontext_t* ptr) { _ucontext = ptr; }
+  static void SR_handler(Thread* thread, ucontext_t* uc);
 
 // ***************************************************************
 // java.lang.Thread.interrupt state.
diff -r 16b10327b00d -r 90d6c221d4e5 src/os/solaris/vm/os_share_solaris.hpp
--- a/src/os/solaris/vm/os_share_solaris.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/os/solaris/vm/os_share_solaris.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,28 +27,6 @@
 
 // Defines the interfaces to Solaris operating systems that vary across platforms
 
-// This is a simple callback that just fetches a PC for an interrupted thread.
-// The thread need not be suspended and the fetched PC is just a hint.
-// Returned PC and nPC are not necessarily consecutive.
-// This one is currently used for profiling the VMThread ONLY!
-
-// Must be synchronous
-class GetThreadPC_Callback : public OSThread::Sync_Interrupt_Callback {
- private:
-  ExtendedPC _addr;
-
- public:
-
-  GetThreadPC_Callback (Monitor *sync) :
-    OSThread::Sync_Interrupt_Callback(sync) { }
-  ExtendedPC addr() const { return _addr; }
-
-  void set_addr(ExtendedPC addr) { _addr = addr; }
-
-  void execute(OSThread::InterruptArguments *args);
-};
-
 // misc
 extern "C" {
   void signalHandler(int, siginfo_t*, void*);
diff -r 16b10327b00d -r 90d6c221d4e5 src/os/solaris/vm/os_solaris.cpp
--- a/src/os/solaris/vm/os_solaris.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/os/solaris/vm/os_solaris.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -115,45 +115,6 @@
 // for timer info max values which include all bits
 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 
-#ifdef _GNU_SOURCE
-// See bug #6514594
-extern "C" int madvise(caddr_t, size_t, int);
-extern "C" int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg,
-                       int attr, int mask);
-#endif //_GNU_SOURCE
-
-/*
-  MPSS Changes Start.
-  The JVM binary needs to be built and run on pre-Solaris 9
-  systems, but the constants needed by MPSS are only in Solaris 9
-  header files.  They are textually replicated here to allow
-  building on earlier systems.  Once building on Solaris 8 is
-  no longer a requirement, these #defines can be replaced by ordinary
-  system .h inclusion.
-
-  In earlier versions of the JDK and Solaris, we used ISM for large pages.
-  But ISM requires shared memory to achieve this and thus has many caveats.
-  MPSS is fully transparent and is a cleaner way to get large pages.
-  Although we still require keeping ISM for backward compatibility as well as
-  giving the opportunity to use large pages on older systems it is
-  recommended that MPSS be used for Solaris 9 and above.
-
-*/
-
-#ifndef MC_HAT_ADVISE
-
-struct memcntl_mha {
-  uint_t          mha_cmd;      /* command(s) */
-  uint_t          mha_flags;
-  size_t          mha_pagesize;
-};
-#define MC_HAT_ADVISE   7       /* advise hat map size */
-#define MHA_MAPSIZE_VA  0x1     /* set preferred page size */
-#define MAP_ALIGN       0x200   /* addr specifies alignment */
-
-#endif
-// MPSS Changes End.
-
 // Here are some liblgrp types from sys/lgrp_user.h to be able to
 // compile on older systems without this header file.
 
@@ -172,32 +133,6 @@
 # define LGRP_RSRC_MEM      1       /* memory resources */
 #endif
 
-// Some more macros from sys/mman.h that are not present in Solaris 8.
-
-#ifndef MAX_MEMINFO_CNT
-/*
- * info_req request type definitions for meminfo
- * request types starting with MEMINFO_V are used for Virtual addresses
- * and should not be mixed with MEMINFO_PLGRP which is targeted for Physical
- * addresses
- */
-# define MEMINFO_SHIFT           16
-# define MEMINFO_MASK            (0xFF << MEMINFO_SHIFT)
-# define MEMINFO_VPHYSICAL       (0x01 << MEMINFO_SHIFT) /* get physical addr */
-# define MEMINFO_VLGRP           (0x02 << MEMINFO_SHIFT) /* get lgroup */
-# define MEMINFO_VPAGESIZE       (0x03 << MEMINFO_SHIFT) /* size of phys page */
-# define MEMINFO_VREPLCNT        (0x04 << MEMINFO_SHIFT) /* no. of replica */
-# define MEMINFO_VREPL           (0x05 << MEMINFO_SHIFT) /* physical replica */
-# define MEMINFO_VREPL_LGRP      (0x06 << MEMINFO_SHIFT) /* lgrp of replica */
-# define MEMINFO_PLGRP           (0x07 << MEMINFO_SHIFT) /* lgroup for paddr */
-
-/* maximum number of addresses meminfo() can process at a time */
-# define MAX_MEMINFO_CNT 256
-
-/* maximum number of request types */
-# define MAX_MEMINFO_REQ 31
-#endif
-
 // see thr_setprio(3T) for the basis of these numbers
 #define MinimumPriority 0
 #define NormalPriority  64
@@ -240,6 +175,8 @@
 static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
 }
 
+static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
+
 // Thread Local Storage
 // This is common to all Solaris platforms so it is defined here,
 // in this common file.
@@ -1922,12 +1859,13 @@
   Dl_info dlinfo;
 
   if (libjvm_base_addr == NULL) {
-    dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo);
-    libjvm_base_addr = (address)dlinfo.dli_fbase;
+    if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
+      libjvm_base_addr = (address)dlinfo.dli_fbase;
+    }
     assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
   }
 
-  if (dladdr((void *)addr, &dlinfo)) {
+  if (dladdr((void *)addr, &dlinfo) != 0) {
     if (libjvm_base_addr == (address)dlinfo.dli_fbase)
       return true;
   }
@@ -1939,114 +1877,133 @@
 
 bool os::dll_address_to_function_name(address addr, char *buf,
                                       int buflen, int * offset) {
+  // buf is not optional, but offset is optional
+  assert(buf != NULL, "sanity check");
+
   Dl_info dlinfo;
 
   // dladdr1_func was initialized in os::init()
-  if (dladdr1_func){
-      // yes, we have dladdr1
-
-      // Support for dladdr1 is checked at runtime; it may be
-      // available even if the vm is built on a machine that does
-      // not have dladdr1 support.  Make sure there is a value for
-      // RTLD_DL_SYMENT.
-      #ifndef RTLD_DL_SYMENT
-      #define RTLD_DL_SYMENT 1
-      #endif
+  if (dladdr1_func != NULL) {
+    // yes, we have dladdr1
+
+    // Support for dladdr1 is checked at runtime; it may be
+    // available even if the vm is built on a machine that does
+    // not have dladdr1 support.  Make sure there is a value for
+    // RTLD_DL_SYMENT.
+    #ifndef RTLD_DL_SYMENT
+    #define RTLD_DL_SYMENT 1
+    #endif
 #ifdef _LP64
-      Elf64_Sym * info;
+    Elf64_Sym * info;
 #else
-      Elf32_Sym * info;
+    Elf32_Sym * info;
 #endif
-      if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
-                       RTLD_DL_SYMENT)) {
-        if ((char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
-          if (buf != NULL) {
-            if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen))
-              jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
-          }
-          if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
-          return true;
-        }
-      }
-      if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
-        if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
-          buf, buflen, offset, dlinfo.dli_fname)) {
+    if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
+                     RTLD_DL_SYMENT) != 0) {
+      // see if we have a matching symbol that covers our address
+      if (dlinfo.dli_saddr != NULL &&
+          (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
+        if (dlinfo.dli_sname != NULL) {
+          if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
+            jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
+          }
+          if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
           return true;
         }
       }
-      if (buf != NULL) buf[0] = '\0';
-      if (offset != NULL) *offset  = -1;
-      return false;
-  } else {
-      // no, only dladdr is available
-      if (dladdr((void *)addr, &dlinfo)) {
-        if (buf != NULL) {
-          if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen))
-            jio_snprintf(buf, buflen, dlinfo.dli_sname);
-        }
-        if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
-        return true;
-      } else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
+      // no matching symbol so try for just file info
+      if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
-          buf, buflen, offset, dlinfo.dli_fname)) {
+                            buf, buflen, offset, dlinfo.dli_fname)) {
           return true;
         }
       }
-      if (buf != NULL) buf[0] = '\0';
-      if (offset != NULL) *offset  = -1;
-      return false;
-  }
+    }
+    buf[0] = '\0';
+    if (offset != NULL) *offset = -1;
+    return false;
+  }
+
+  // no, only dladdr is available
+  if (dladdr((void *)addr, &dlinfo) != 0) {
+    // see if we have a matching symbol
+    if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
+      if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
+        jio_snprintf(buf, buflen, dlinfo.dli_sname);
+      }
+      if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
+      return true;
+    }
+    // no matching symbol so try for just file info
+    if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
+      if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
+                          buf, buflen, offset, dlinfo.dli_fname)) {
+        return true;
+      }
+    }
+  }
+  buf[0] = '\0';
+  if (offset != NULL) *offset = -1;
+  return false;
 }
 
 bool os::dll_address_to_library_name(address addr, char* buf,
                                      int buflen, int* offset) {
+  // buf is not optional, but offset is optional
+  assert(buf != NULL, "sanity check");
+
   Dl_info dlinfo;
 
-  if (dladdr((void*)addr, &dlinfo)){
-     if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
-     if (offset) *offset = addr - (address)dlinfo.dli_fbase;
-     return true;
-  } else {
-     if (buf) buf[0] = '\0';
-     if (offset) *offset = -1;
-     return false;
-  }
+  if (dladdr((void*)addr, &dlinfo) != 0) {
+    if (dlinfo.dli_fname != NULL) {
+      jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
+    }
+    if (dlinfo.dli_fbase != NULL && offset != NULL) {
+      *offset = addr - (address)dlinfo.dli_fbase;
+    }
+    return true;
+  }
+
+  buf[0] = '\0';
+  if (offset) *offset = -1;
+  return false;
 }
 
 // Prints
the names and full paths of all opened dynamic libraries
// for current process
void os::print_dll_info(outputStream * st) {
-    Dl_info dli;
-    void *handle;
-    Link_map *map;
-    Link_map *p;
-
-    st->print_cr("Dynamic libraries:"); st->flush();
-
-    if (!dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli)) {
-        st->print_cr("Error: Cannot print dynamic libraries.");
-        return;
-    }
-    handle = dlopen(dli.dli_fname, RTLD_LAZY);
-    if (handle == NULL) {
-        st->print_cr("Error: Cannot print dynamic libraries.");
-        return;
-    }
-    dlinfo(handle, RTLD_DI_LINKMAP, &map);
-    if (map == NULL) {
-        st->print_cr("Error: Cannot print dynamic libraries.");
-        return;
-    }
-
-    while (map->l_prev != NULL)
-        map = map->l_prev;
-
-    while (map != NULL) {
-        st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
-        map = map->l_next;
-    }
-
-    dlclose(handle);
+  Dl_info dli;
+  void *handle;
+  Link_map *map;
+  Link_map *p;
+
+  st->print_cr("Dynamic libraries:"); st->flush();
+
+  if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
+      dli.dli_fname == NULL) {
+    st->print_cr("Error: Cannot print dynamic libraries.");
+    return;
+  }
+  handle = dlopen(dli.dli_fname, RTLD_LAZY);
+  if (handle == NULL) {
+    st->print_cr("Error: Cannot print dynamic libraries.");
+    return;
+  }
+  dlinfo(handle, RTLD_DI_LINKMAP, &map);
+  if (map == NULL) {
+    st->print_cr("Error: Cannot print dynamic libraries.");
+    return;
+  }
+
+  while (map->l_prev != NULL)
+    map = map->l_prev;
+
+  while (map != NULL) {
+    st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
+    map = map->l_next;
+  }
+
+  dlclose(handle);
 }
 
 // Loads .dll/.so and
@@ -2473,7 +2430,12 @@
   Dl_info dlinfo;
   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
   assert(ret != 0, "cannot locate libjvm");
-  realpath((char *)dlinfo.dli_fname, buf);
+  if (ret != 0 && dlinfo.dli_fname != NULL) {
+    realpath((char *)dlinfo.dli_fname, buf);
+  } else {
+    buf[0] = '\0';
+    return;
+  }
 
   if (Arguments::created_by_gamma_launcher()) {
     // Support for the gamma launcher.  Typical value for buf is
@@ -2580,6 +2542,57 @@
   return CAST_FROM_FN_PTR(void*, UserHandler);
 }
 
+class Semaphore : public StackObj {
+  public:
+    Semaphore();
+    ~Semaphore();
+    void signal();
+    void wait();
+    bool trywait();
+    bool timedwait(unsigned int sec, int nsec);
+  private:
+    sema_t _semaphore;
+};
+
+
+Semaphore::Semaphore() {
+  sema_init(&_semaphore, 0, NULL, NULL);
+}
+
+Semaphore::~Semaphore() {
+  sema_destroy(&_semaphore);
+}
+
+void Semaphore::signal() {
+  sema_post(&_semaphore);
+}
+
+void Semaphore::wait() {
+  sema_wait(&_semaphore);
+}
+
+bool Semaphore::trywait() {
+  return sema_trywait(&_semaphore) == 0;
+}
+
+bool Semaphore::timedwait(unsigned int sec, int nsec) {
+  struct timespec ts;
+  unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
+
+  while (1) {
+    int result = sema_timedwait(&_semaphore, &ts);
+    if (result == 0) {
+      return true;
+    } else if (errno == EINTR) {
+      continue;
+    } else if (errno == ETIME) {
+      return false;
+    } else {
+      return false;
+    }
+  }
+}
+
 extern "C" {
   typedef void (*sa_handler_t)(int);
   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
@@ -2731,7 +2744,42 @@
   return page_size;
 }
 
-bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
+static bool recoverable_mmap_error(int err) {
+  // See if the error is one we can let the caller handle. This
+  // list of errno values comes from the Solaris mmap(2) man page.
+  switch (err) {
+  case EBADF:
+  case EINVAL:
+  case ENOTSUP:
+    // let the caller deal with these errors
+    return true;
+
+  default:
+    // Any remaining errors on this OS can cause our reserved mapping
+    // to be lost. That can cause confusion where different data
+    // structures think they have the same memory mapped. The worst
+    // scenario is if both the VM and a library think they have the
+    // same memory mapped.
+    return false;
+  }
+}
+
+static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
+                                    int err) {
+  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
+          ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
+          strerror(err), err);
+}
+
+static void warn_fail_commit_memory(char* addr, size_t bytes,
+                                    size_t alignment_hint, bool exec,
+                                    int err) {
+  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
+          ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
+          alignment_hint, exec, strerror(err), err);
+}
+
+int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
   size_t size = bytes;
   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
@@ -2739,15 +2787,39 @@
     if (UseNUMAInterleaving) {
       numa_make_global(addr, bytes);
     }
-    return true;
-  }
-  return false;
-}
-
-bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
-                          bool exec) {
-  if (commit_memory(addr, bytes, exec)) {
-    if (UseMPSS && alignment_hint > (size_t)vm_page_size()) {
+    return 0;
+  }
+
+  int err = errno;  // save errno from mmap() call in mmap_chunk()
+
+  if (!recoverable_mmap_error(err)) {
+    warn_fail_commit_memory(addr, bytes, exec, err);
+    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
+  }
+
+  return err;
+}
+
+bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
+  return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
+}
+
+void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
+                                  const char* mesg) {
+  assert(mesg != NULL, "mesg must be specified");
+  int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
+  if (err != 0) {
+    // the caller wants all commit errors to exit with the specified mesg:
+    warn_fail_commit_memory(addr, bytes, exec, err);
+    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
+  }
+}
+
+int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
+                                    size_t alignment_hint, bool exec) {
+  int err = Solaris::commit_memory_impl(addr, bytes, exec);
+  if (err == 0) {
+    if (UseLargePages && (alignment_hint > (size_t)vm_page_size())) {
+      // If the large page size has been set and the VM
+      // is using large pages, use the large page size
+      // if it is smaller than the alignment hint. This is
@@ -2766,11 +2838,27 @@
       page_size = alignment_hint;
     }
    // Since this is a hint, ignore any failures.
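The hunk above splits committing into a fallible path and a fail-fast path: os::commit_memory() reports recoverable mmap errors (EBADF, EINVAL, ENOTSUP) back to the caller, while os::commit_memory_or_exit() turns any failure into a VM exit carrying the caller's message. A minimal caller-side sketch, illustrative only and assuming the patch's os:: API and the ExecMem constant (the mesg string is made up for illustration):

    size_t len = 64 * 1024;
    char* base = os::reserve_memory(len);            // address space only
    if (base != NULL && !os::commit_memory(base, len, !ExecMem)) {
      // recoverable mmap failure (EBADF/EINVAL/ENOTSUP): degrade gracefully,
      // e.g. shrink the request and retry
    }
    // when failure cannot be tolerated, let the VM exit with the message:
    os::commit_memory_or_exit(base, len, !ExecMem, "committing sampling buffer");
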
- (void)Solaris::set_mpss_range(addr, bytes, page_size); + (void)Solaris::setup_large_pages(addr, bytes, page_size); } - return true; - } - return false; + } + return err; +} + +bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint, + bool exec) { + return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0; +} + +void os::pd_commit_memory_or_exit(char* addr, size_t bytes, + size_t alignment_hint, bool exec, + const char* mesg) { + assert(mesg != NULL, "mesg must be specified"); + int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec); + if (err != 0) { + // the caller wants all commit errors to exit with the specified mesg: + warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err); + vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg); + } } // Uncommit the pages in a specified region. @@ -2782,7 +2870,7 @@ } bool os::pd_create_stack_guard_pages(char* addr, size_t size) { - return os::commit_memory(addr, size); + return os::commit_memory(addr, size, !ExecMem); } bool os::remove_stack_guard_pages(char* addr, size_t size) { @@ -2793,8 +2881,8 @@ void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned."); assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned."); - if (UseLargePages && UseMPSS) { - Solaris::set_mpss_range(addr, bytes, alignment_hint); + if (UseLargePages) { + Solaris::setup_large_pages(addr, bytes, alignment_hint); } } @@ -3193,47 +3281,8 @@ } // Large page support - -// UseLargePages is the master flag to enable/disable large page memory. -// UseMPSS and UseISM are supported for compatibility reasons. Their combined -// effects can be described in the following table: -// -// UseLargePages UseMPSS UseISM -// false * * => UseLargePages is the master switch, turning -// it off will turn off both UseMPSS and -// UseISM. VM will not use large page memory -// regardless the settings of UseMPSS/UseISM. -// true false false => Unless future Solaris provides other -// mechanism to use large page memory, this -// combination is equivalent to -UseLargePages, -// VM will not use large page memory -// true true false => JVM will use MPSS for large page memory. -// This is the default behavior. -// true false true => JVM will use ISM for large page memory. -// true true true => JVM will use ISM if it is available. -// Otherwise, JVM will fall back to MPSS. -// Becaues ISM is now available on all -// supported Solaris versions, this combination -// is equivalent to +UseISM -UseMPSS. - static size_t _large_page_size = 0; -bool os::Solaris::ism_sanity_check(bool warn, size_t * page_size) { - // x86 uses either 2M or 4M page, depending on whether PAE (Physical Address - // Extensions) mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. Sparc - // can support multiple page sizes. - - // Don't bother to probe page size because getpagesizes() comes with MPSS. - // ISM is only recommended on old Solaris where there is no MPSS support. - // Simply choose a conservative value as default. - *page_size = LargePageSizeInBytes ? LargePageSizeInBytes : - SPARC_ONLY(4 * M) IA32_ONLY(4 * M) AMD64_ONLY(2 * M) - ARM_ONLY(2 * M); - - // ISM is available on all supported Solaris versions - return true; -} - // Insertion sort for small arrays (descending order). 
static void insertion_sort_descending(size_t* array, int len) { for (int i = 0; i < len; i++) { @@ -3246,7 +3295,7 @@ } } -bool os::Solaris::mpss_sanity_check(bool warn, size_t * page_size) { +bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) { const unsigned int usable_count = VM_Version::page_size_count(); if (usable_count == 1) { return false; @@ -3312,41 +3361,24 @@ } void os::large_page_init() { - if (!UseLargePages) { - UseISM = false; - UseMPSS = false; - return; - } - - // print a warning if any large page related flag is specified on command line - bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) || - !FLAG_IS_DEFAULT(UseISM) || - !FLAG_IS_DEFAULT(UseMPSS) || - !FLAG_IS_DEFAULT(LargePageSizeInBytes); - UseISM = UseISM && - Solaris::ism_sanity_check(warn_on_failure, &_large_page_size); - if (UseISM) { - // ISM disables MPSS to be compatible with old JDK behavior - UseMPSS = false; - _page_sizes[0] = _large_page_size; - _page_sizes[1] = vm_page_size(); - } - - UseMPSS = UseMPSS && - Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size); - - UseLargePages = UseISM || UseMPSS; -} - -bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) { + if (UseLargePages) { + // print a warning if any large page related flag is specified on command line + bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) || + !FLAG_IS_DEFAULT(LargePageSizeInBytes); + + UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size); + } +} + +bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) { // Signal to OS that we want large pages for addresses // from addr, addr + bytes struct memcntl_mha mpss_struct; mpss_struct.mha_cmd = MHA_MAPSIZE_VA; mpss_struct.mha_pagesize = align; mpss_struct.mha_flags = 0; - if (memcntl(start, bytes, MC_HAT_ADVISE, - (caddr_t) &mpss_struct, 0, 0) < 0) { + // Upon successful completion, memcntl() returns 0 + if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) { debug_only(warning("Attempt to use MPSS failed.")); return false; } @@ -3354,73 +3386,13 @@ } char* os::reserve_memory_special(size_t size, char* addr, bool exec) { - // "exec" is passed in but not used. Creating the shared image for - // the code cache doesn't have an SHM_X executable permission to check. - assert(UseLargePages && UseISM, "only for ISM large pages"); - - char* retAddr = NULL; - int shmid; - key_t ismKey; - - bool warn_on_failure = UseISM && - (!FLAG_IS_DEFAULT(UseLargePages) || - !FLAG_IS_DEFAULT(UseISM) || - !FLAG_IS_DEFAULT(LargePageSizeInBytes) - ); - char msg[128]; - - ismKey = IPC_PRIVATE; - - // Create a large shared memory region to attach to based on size. - // Currently, size is the total size of the heap - shmid = shmget(ismKey, size, SHM_R | SHM_W | IPC_CREAT); - if (shmid == -1){ - if (warn_on_failure) { - jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno); - warning(msg); - } - return NULL; - } - - // Attach to the region - retAddr = (char *) shmat(shmid, 0, SHM_SHARE_MMU | SHM_R | SHM_W); - int err = errno; - - // Remove shmid. If shmat() is successful, the actual shared memory segment - // will be deleted when it's detached by shmdt() or when the process - // terminates. If shmat() is not successful this will remove the shared - // segment immediately. 
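setup_large_pages() above is purely advisory: it asks the HAT to prefer a given page size for a virtual address range, and failures are ignored by design. A standalone sketch of the same memcntl(2) call, assuming Solaris <sys/mman.h>:

    #include <sys/types.h>
    #include <sys/mman.h>

    // Advise the kernel to back [start, start+bytes) with `pagesize` pages.
    static bool advise_large_pages(caddr_t start, size_t bytes, size_t pagesize) {
      struct memcntl_mha mha;
      mha.mha_cmd      = MHA_MAPSIZE_VA;  // set preferred page size by VA range
      mha.mha_flags    = 0;
      mha.mha_pagesize = pagesize;        // must be a size the hardware supports
      // memcntl() returns 0 on success; failure is harmless -- it is a hint.
      return memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t)&mha, 0, 0) == 0;
    }
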
- shmctl(shmid, IPC_RMID, NULL); - - if (retAddr == (char *) -1) { - if (warn_on_failure) { - jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err); - warning(msg); - } - return NULL; - } - if ((retAddr != NULL) && UseNUMAInterleaving) { - numa_make_global(retAddr, size); - } - - // The memory is committed - address pc = CALLER_PC; - MemTracker::record_virtual_memory_reserve((address)retAddr, size, pc); - MemTracker::record_virtual_memory_commit((address)retAddr, size, pc); - - return retAddr; + fatal("os::reserve_memory_special should not be called on Solaris."); + return NULL; } bool os::release_memory_special(char* base, size_t bytes) { - // detaching the SHM segment will also delete it, see reserve_memory_special() - int rslt = shmdt(base); - if (rslt == 0) { - MemTracker::record_virtual_memory_uncommit((address)base, bytes); - MemTracker::record_virtual_memory_release((address)base, bytes); - return true; - } else { - return false; - } + fatal("os::release_memory_special should not be called on Solaris."); + return false; } size_t os::large_page_size() { @@ -3430,11 +3402,11 @@ // MPSS allows application to commit large page memory on demand; with ISM // the entire memory region must be allocated as shared memory. bool os::can_commit_large_page_memory() { - return UseISM ? false : true; + return true; } bool os::can_execute_large_page_memory() { - return UseISM ? false : true; + return true; } static int os_sleep(jlong millis, bool interruptible) { @@ -3708,28 +3680,6 @@ static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4 static int java_MaxPriority_to_os_priority = 0; // Saved mapping -// Call the version of priocntl suitable for all supported versions -// of Solaris. We need to call through this wrapper so that we can -// build on Solaris 9 and run on Solaris 8, 9 and 10. -// -// This code should be removed if we ever stop supporting Solaris 8 -// and earlier releases. - -static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg); -typedef long (*priocntl_type)(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg); -static priocntl_type priocntl_ptr = priocntl_stub; - -// Stub to set the value of the real pointer, and then call the real -// function. - -static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg) { - // Try Solaris 8- name only. - priocntl_type tmp = (priocntl_type)dlsym(RTLD_DEFAULT, "__priocntl"); - guarantee(tmp != NULL, "priocntl function not found."); - priocntl_ptr = tmp; - return (*priocntl_ptr)(PC_VERSION, idtype, id, cmd, arg); -} - // lwp_priocntl_init // @@ -3737,9 +3687,7 @@ // // Return errno or 0 if OK. 
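With Solaris 8 support gone, the dlsym()-based __priocntl stub removed above is no longer needed and the code below calls libc's priocntl(2) directly. A hedged sketch of the recurring PC_GETCID query, assuming <sys/priocntl.h> on a Solaris 10 or later libc:

    #include <string.h>
    #include <errno.h>
    #include <sys/priocntl.h>

    // Look up the scheduling class id for a named class ("TS", "IA", "RT", "FX").
    static int sched_class_id(const char* name, id_t* cid) {
      pcinfo_t info;
      memset(&info, 0, sizeof(info));
      strcpy(info.pc_clname, name);
      if (priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&info) < 0) {
        return errno;               // class not configured on this system
      }
      *cid = info.pc_cid;
      return 0;
    }
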
// -static -int lwp_priocntl_init () -{ +static int lwp_priocntl_init () { int rslt; pcinfo_t ClassInfo; pcparms_t ParmInfo; @@ -3779,7 +3727,7 @@ strcpy(ClassInfo.pc_clname, "TS"); ClassInfo.pc_cid = -1; - rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); + rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); if (rslt < 0) return errno; assert(ClassInfo.pc_cid != -1, "cid for TS class is -1"); tsLimits.schedPolicy = ClassInfo.pc_cid; @@ -3788,7 +3736,7 @@ strcpy(ClassInfo.pc_clname, "IA"); ClassInfo.pc_cid = -1; - rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); + rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); if (rslt < 0) return errno; assert(ClassInfo.pc_cid != -1, "cid for IA class is -1"); iaLimits.schedPolicy = ClassInfo.pc_cid; @@ -3797,7 +3745,7 @@ strcpy(ClassInfo.pc_clname, "RT"); ClassInfo.pc_cid = -1; - rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); + rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); if (rslt < 0) return errno; assert(ClassInfo.pc_cid != -1, "cid for RT class is -1"); rtLimits.schedPolicy = ClassInfo.pc_cid; @@ -3806,7 +3754,7 @@ strcpy(ClassInfo.pc_clname, "FX"); ClassInfo.pc_cid = -1; - rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); + rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); if (rslt < 0) return errno; assert(ClassInfo.pc_cid != -1, "cid for FX class is -1"); fxLimits.schedPolicy = ClassInfo.pc_cid; @@ -3817,7 +3765,7 @@ // This will normally be IA, TS or, rarely, FX or RT. memset(&ParmInfo, 0, sizeof(ParmInfo)); ParmInfo.pc_cid = PC_CLNULL; - rslt = (*priocntl_ptr) (PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo); + rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo); if (rslt < 0) return errno; myClass = ParmInfo.pc_cid; @@ -3825,7 +3773,7 @@ // about the class. 
ClassInfo.pc_cid = myClass; ClassInfo.pc_clname[0] = 0; - rslt = (*priocntl_ptr) (PC_VERSION, (idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo); + rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo); if (rslt < 0) return errno; if (ThreadPriorityVerbose) { @@ -3834,7 +3782,7 @@ memset(&ParmInfo, 0, sizeof(pcparms_t)); ParmInfo.pc_cid = PC_CLNULL; - rslt = (*priocntl_ptr)(PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo); + rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo); if (rslt < 0) return errno; if (ParmInfo.pc_cid == rtLimits.schedPolicy) { @@ -3938,7 +3886,7 @@ memset(&ParmInfo, 0, sizeof(pcparms_t)); ParmInfo.pc_cid = PC_CLNULL; - rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo); + rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo); if (rslt < 0) return errno; int cur_class = ParmInfo.pc_cid; @@ -4006,7 +3954,7 @@ return EINVAL; // no clue, punt } - rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo); + rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo); if (ThreadPriorityVerbose && rslt) { tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno); } @@ -4025,7 +3973,7 @@ memset(&ReadBack, 0, sizeof(pcparms_t)); ReadBack.pc_cid = PC_CLNULL; - rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack); + rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack); assert(rslt >= 0, "priocntl failed"); Actual = Expected = 0xBAD; assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match"); @@ -4164,6 +4112,68 @@ schedctl_start(schedctl_init()); } +static void resume_clear_context(OSThread *osthread) { + osthread->set_ucontext(NULL); +} + +static void suspend_save_context(OSThread *osthread, ucontext_t* context) { + osthread->set_ucontext(context); +} + +static Semaphore sr_semaphore; + +void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) { + // Save and restore errno to avoid confusing native code with EINTR + // after sigsuspend. 
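Saving and restoring errno, as SR_handler() does below, is the standard idiom for any signal handler that may run between a failing system call and the code that inspects errno. A minimal sketch of the idiom in isolation:

    #include <errno.h>

    extern "C" void example_handler(int sig) {
      int old_errno = errno;  // libc calls inside the handler may clobber errno
      // ... handler work (state transitions, semaphore posts, sigsuspend) ...
      errno = old_errno;      // interrupted code still sees its own errno
    }
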
+ int old_errno = errno; + + OSThread* osthread = thread->osthread(); + assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread"); + + os::SuspendResume::State current = osthread->sr.state(); + if (current == os::SuspendResume::SR_SUSPEND_REQUEST) { + suspend_save_context(osthread, uc); + + // attempt to switch the state, we assume we had a SUSPEND_REQUEST + os::SuspendResume::State state = osthread->sr.suspended(); + if (state == os::SuspendResume::SR_SUSPENDED) { + sigset_t suspend_set; // signals for sigsuspend() + + // get current set of blocked signals and unblock resume signal + thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set); + sigdelset(&suspend_set, os::Solaris::SIGasync()); + + sr_semaphore.signal(); + // wait here until we are resumed + while (1) { + sigsuspend(&suspend_set); + + os::SuspendResume::State result = osthread->sr.running(); + if (result == os::SuspendResume::SR_RUNNING) { + sr_semaphore.signal(); + break; + } + } + + } else if (state == os::SuspendResume::SR_RUNNING) { + // request was cancelled, continue + } else { + ShouldNotReachHere(); + } + + resume_clear_context(osthread); + } else if (current == os::SuspendResume::SR_RUNNING) { + // request was cancelled, continue + } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) { + // ignore + } else { + // ignore + } + + errno = old_errno; +} + + void os::interrupt(Thread* thread) { assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer"); @@ -4247,6 +4257,116 @@ return buf[0] == 'y' || buf[0] == 'Y'; } +static int sr_notify(OSThread* osthread) { + int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync()); + assert_status(status == 0, status, "thr_kill"); + return status; +} + +// "Randomly" selected value for how long we want to spin +// before bailing out on suspending a thread, also how often +// we send a signal to a thread we want to resume +static const int RANDOMLY_LARGE_INTEGER = 1000000; +static const int RANDOMLY_LARGE_INTEGER2 = 100; + +static bool do_suspend(OSThread* osthread) { + assert(osthread->sr.is_running(), "thread should be running"); + assert(!sr_semaphore.trywait(), "semaphore has invalid state"); + + // mark as suspended and send signal + if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) { + // failed to switch, state wasn't running? 
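The wait in the handler above relies on sigsuspend() atomically installing a mask with only the resume signal unblocked. A simplified sketch, where resume_sig stands in for os::Solaris::SIGasync(); the VM itself reads the per-LWP blocked set with thr_sigsetmask():

    #include <signal.h>

    static void wait_until_resumed(int resume_sig) {
      sigset_t wait_mask;
      sigprocmask(SIG_BLOCK, NULL, &wait_mask);  // copy the current blocked set
      sigdelset(&wait_mask, resume_sig);         // unblock only the resume signal
      sigsuspend(&wait_mask);                    // atomically swap mask and wait
    }
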
+ ShouldNotReachHere(); + return false; + } + + if (sr_notify(osthread) != 0) { + ShouldNotReachHere(); + } + + // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED + while (true) { + if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) { + break; + } else { + // timeout + os::SuspendResume::State cancelled = osthread->sr.cancel_suspend(); + if (cancelled == os::SuspendResume::SR_RUNNING) { + return false; + } else if (cancelled == os::SuspendResume::SR_SUSPENDED) { + // make sure that we consume the signal on the semaphore as well + sr_semaphore.wait(); + break; + } else { + ShouldNotReachHere(); + return false; + } + } + } + + guarantee(osthread->sr.is_suspended(), "Must be suspended"); + return true; +} + +static void do_resume(OSThread* osthread) { + assert(osthread->sr.is_suspended(), "thread should be suspended"); + assert(!sr_semaphore.trywait(), "invalid semaphore state"); + + if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) { + // failed to switch to WAKEUP_REQUEST + ShouldNotReachHere(); + return; + } + + while (true) { + if (sr_notify(osthread) == 0) { + if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) { + if (osthread->sr.is_running()) { + return; + } + } + } else { + ShouldNotReachHere(); + } + } + + guarantee(osthread->sr.is_running(), "Must be running!"); +} + +void os::SuspendedThreadTask::internal_do_task() { + if (do_suspend(_thread->osthread())) { + SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext()); + do_task(context); + do_resume(_thread->osthread()); + } +} + +class PcFetcher : public os::SuspendedThreadTask { +public: + PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {} + ExtendedPC result(); +protected: + void do_task(const os::SuspendedThreadTaskContext& context); +private: + ExtendedPC _epc; +}; + +ExtendedPC PcFetcher::result() { + guarantee(is_done(), "task is not done yet."); + return _epc; +} + +void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) { + Thread* thread = context.thread(); + OSThread* osthread = thread->osthread(); + if (osthread->ucontext() != NULL) { + _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext()); + } else { + // NULL context is unexpected, double-check this is the VMThread + guarantee(thread->is_VM_thread(), "can only be called for VMThread"); + } +} + // A lightweight implementation that does not suspend the target thread and // thus returns only a hint. Used for profiling only! 
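PcFetcher bundles the suspend/sample/resume sequence behind the new SuspendedThreadTask interface, which is why os::get_thread_pc() below shrinks to three lines. Hypothetical usage, with target_thread standing in for any profiled Thread*:

    PcFetcher fetcher(target_thread);
    fetcher.run();                      // suspend -> do_task(context) -> resume
    ExtendedPC pc = fetcher.result();   // only valid once the task is_done()
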
ExtendedPC os::get_thread_pc(Thread* thread) { @@ -4254,21 +4374,9 @@ assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock"); // For now, is only used to profile the VM Thread assert(thread->is_VM_thread(), "Can only be called for VMThread"); - ExtendedPC epc; - - GetThreadPC_Callback cb(ProfileVM_lock); - OSThread *osthread = thread->osthread(); - const int time_to_wait = 400; // 400ms wait for initial response - int status = cb.interrupt(thread, time_to_wait); - - if (cb.is_done() ) { - epc = cb.addr(); - } else { - DEBUG_ONLY(tty->print_cr("Failed to get pc for thread: %d got %d status", - osthread->thread_id(), status);); - // epc is already NULL - } - return epc; + PcFetcher fetcher(thread); + fetcher.run(); + return fetcher.result(); } @@ -4957,11 +5065,6 @@ return _getisax(array, n); } -// Symbol doesn't exist in Solaris 8 pset.h -#ifndef PS_MYID -#define PS_MYID -3 -#endif - // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem); typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem); static pset_getloadavg_type pset_getloadavg_ptr = NULL; @@ -5131,20 +5234,6 @@ UseNUMA = false; } } - // ISM is not compatible with the NUMA allocator - it always allocates - // pages round-robin across the lgroups. - if (UseNUMA && UseLargePages && UseISM) { - if (!FLAG_IS_DEFAULT(UseNUMA)) { - if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseISM)) { - UseLargePages = false; - } else { - warning("UseNUMA is not compatible with ISM large pages, disabling NUMA allocator"); - UseNUMA = false; - } - } else { - UseNUMA = false; - } - } if (!UseNUMA && ForceNUMA) { UseNUMA = true; } @@ -5790,24 +5879,20 @@ bool os::find(address addr, outputStream* st) { Dl_info dlinfo; memset(&dlinfo, 0, sizeof(dlinfo)); - if (dladdr(addr, &dlinfo)) { -#ifdef _LP64 - st->print("0x%016lx: ", addr); -#else - st->print("0x%08x: ", addr); -#endif - if (dlinfo.dli_sname != NULL) + if (dladdr(addr, &dlinfo) != 0) { + st->print(PTR_FORMAT ": ", addr); + if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) { st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr); - else if (dlinfo.dli_fname) + } else if (dlinfo.dli_fbase != NULL) st->print("", addr-(intptr_t)dlinfo.dli_fbase); else st->print(""); - if (dlinfo.dli_fname) st->print(" in %s", dlinfo.dli_fname); -#ifdef _LP64 - if (dlinfo.dli_fbase) st->print(" at 0x%016lx", dlinfo.dli_fbase); -#else - if (dlinfo.dli_fbase) st->print(" at 0x%08x", dlinfo.dli_fbase); -#endif + if (dlinfo.dli_fname != NULL) { + st->print(" in %s", dlinfo.dli_fname); + } + if (dlinfo.dli_fbase != NULL) { + st->print(" at " PTR_FORMAT, dlinfo.dli_fbase); + } st->cr(); if (Verbose) { @@ -5818,7 +5903,7 @@ if (!lowest) lowest = (address) dlinfo.dli_fbase; if (begin < lowest) begin = lowest; Dl_info dlinfo2; - if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr + if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) end = (address) dlinfo2.dli_saddr; Disassembler::decode(begin, end, st); @@ -6391,11 +6476,11 @@ } int os::close(int fd) { - RESTARTABLE_RETURN_INT(::close(fd)); + return ::close(fd); } int os::socket_close(int fd) { - RESTARTABLE_RETURN_INT(::close(fd)); + return ::close(fd); } int os::recv(int fd, char* buf, size_t nBytes, uint flags) { diff -r 16b10327b00d -r 90d6c221d4e5 src/os/solaris/vm/os_solaris.hpp --- a/src/os/solaris/vm/os_solaris.hpp Tue Jul 16 10:55:48 2013 -0400 +++ 
b/src/os/solaris/vm/os_solaris.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -106,8 +106,8 @@ static meminfo_func_t _meminfo; - // Large Page Support--mpss. - static bool set_mpss_range(caddr_t start, size_t bytes, size_t align); + // Large Page Support + static bool setup_large_pages(caddr_t start, size_t bytes, size_t align); static void init_thread_fpu_state(void); @@ -127,7 +127,6 @@ static void set_SIGinterrupt(int newsig) { _SIGinterrupt = newsig; } static void set_SIGasync(int newsig) { _SIGasync = newsig; } - public: // Large Page Support--ISM. static bool largepage_range(char* addr, size_t size); @@ -145,6 +144,7 @@ static intptr_t* ucontext_get_sp(ucontext_t* uc); // ucontext_get_fp() is only used by Solaris X86 (see note below) static intptr_t* ucontext_get_fp(ucontext_t* uc); + static address ucontext_get_pc(ucontext_t* uc); // For Analyzer Forte AsyncGetCallTrace profiling support: // Parameter ret_fp is only used by Solaris X86. @@ -157,6 +157,8 @@ static void hotspot_sigmask(Thread* thread); + // SR_handler + static void SR_handler(Thread* thread, ucontext_t* uc); protected: // Solaris-specific interface goes here static julong available_memory(); @@ -166,10 +168,12 @@ static int _dev_zero_fd; static int get_dev_zero_fd() { return _dev_zero_fd; } static void set_dev_zero_fd(int fd) { _dev_zero_fd = fd; } + static int commit_memory_impl(char* addr, size_t bytes, bool exec); + static int commit_memory_impl(char* addr, size_t bytes, + size_t alignment_hint, bool exec); static char* mmap_chunk(char *addr, size_t size, int flags, int prot); static char* anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed); static bool mpss_sanity_check(bool warn, size_t * page_size); - static bool ism_sanity_check (bool warn, size_t * page_size); // Workaround for 4352906. thr_stksegment sometimes returns // a bad value for the primordial thread's stack base when diff -r 16b10327b00d -r 90d6c221d4e5 src/os/solaris/vm/os_solaris.inline.hpp --- a/src/os/solaris/vm/os_solaris.inline.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os/solaris/vm/os_solaris.inline.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -89,7 +89,7 @@ inline struct dirent* os::readdir(DIR* dirp, dirent* dbuf) { assert(dirp != NULL, "just checking"); -#if defined(_LP64) || defined(_GNU_SOURCE) +#if defined(_LP64) || defined(_GNU_SOURCE) || _FILE_OFFSET_BITS==64 dirent* p; int status; @@ -98,9 +98,9 @@ return NULL; } else return p; -#else // defined(_LP64) || defined(_GNU_SOURCE) +#else // defined(_LP64) || defined(_GNU_SOURCE) || _FILE_OFFSET_BITS==64 return ::readdir_r(dirp, dbuf); -#endif // defined(_LP64) || defined(_GNU_SOURCE) +#endif // defined(_LP64) || defined(_GNU_SOURCE) || _FILE_OFFSET_BITS==64 } inline int os::closedir(DIR *dirp) { diff -r 16b10327b00d -r 90d6c221d4e5 src/os/solaris/vm/perfMemory_solaris.cpp --- a/src/os/solaris/vm/perfMemory_solaris.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os/solaris/vm/perfMemory_solaris.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -62,7 +62,7 @@ } // commit memory - if (!os::commit_memory(mapAddress, size)) { + if (!os::commit_memory(mapAddress, size, !ExecMem)) { if (PrintMiscellaneous && Verbose) { warning("Could not commit PerfData memory\n"); } @@ -122,7 +122,7 @@ addr += result; } - RESTARTABLE(::close(fd), result); + result = ::close(fd); if (PrintMiscellaneous && Verbose) { if (result == OS_ERR) { warning("Could not close %s: %s\n", destfile, strerror(errno)); @@ -437,7 +437,7 @@ addr+=result; } - RESTARTABLE(::close(fd), result); + ::close(fd); // get the user name for the effective user id of the process char* user_name = get_user_name(psinfo.pr_euid); @@ -669,7 +669,7 @@ if (PrintMiscellaneous && Verbose) { warning("could not set shared memory file size: %s\n", strerror(errno)); } - RESTARTABLE(::close(fd), result); + ::close(fd); return -1; } @@ -749,9 +749,7 @@ mapAddress = (char*)::mmap((char*)0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); - // attempt to close the file - restart it if it was interrupted, - // but ignore other failures - RESTARTABLE(::close(fd), result); + result = ::close(fd); assert(result != OS_ERR, "could not close file"); if (mapAddress == MAP_FAILED) { @@ -770,8 +768,7 @@ (void)::memset((void*) mapAddress, 0, size); // it does not go through os api, the operation has to record from here - MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC); - MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal); + MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC); return mapAddress; } @@ -922,9 +919,7 @@ mapAddress = (char*)::mmap((char*)0, size, mmap_prot, MAP_SHARED, fd, 0); - // attempt to close the file - restart if it gets interrupted, - // but ignore other failures - RESTARTABLE(::close(fd), result); + result = ::close(fd); assert(result != OS_ERR, "could not close file"); if (mapAddress == MAP_FAILED) { @@ -936,8 +931,7 @@ } // it does not go through os api, the operation has to record from here - MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC); - MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal); + MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC); *addr = mapAddress; *sizep = size; diff -r 16b10327b00d -r 90d6c221d4e5 src/os/windows/vm/os_windows.cpp --- a/src/os/windows/vm/os_windows.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os/windows/vm/os_windows.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1420,34 +1420,40 @@ bool os::dll_address_to_library_name(address addr, char* buf, int buflen, int* offset) { + // buf is not optional, but offset is optional + assert(buf != NULL, "sanity check"); + // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always // return the full path to the DLL file, sometimes it returns path // to the corresponding PDB file (debug info); sometimes it only // returns partial path, which makes life painful. 
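A note on the RESTARTABLE(::close(fd)) removals in the Solaris files above: retrying close() after EINTR is unsafe, which is why the patch now calls it exactly once. A sketch of the hazard:

    int result = ::close(fd);
    if (result == -1 && errno == EINTR) {
      // POSIX leaves the descriptor state unspecified at this point.
      // Retrying could close an fd another thread just received from open(),
      // so the call is made exactly once and EINTR is ignored.
    }
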
- struct _modinfo mi; - mi.addr = addr; - mi.full_path = buf; - mi.buflen = buflen; - int pid = os::current_process_id(); - if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) { - // buf already contains path name - if (offset) *offset = addr - mi.base_addr; - return true; - } else { - if (buf) buf[0] = '\0'; - if (offset) *offset = -1; - return false; - } + struct _modinfo mi; + mi.addr = addr; + mi.full_path = buf; + mi.buflen = buflen; + int pid = os::current_process_id(); + if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) { + // buf already contains path name + if (offset) *offset = addr - mi.base_addr; + return true; + } + + buf[0] = '\0'; + if (offset) *offset = -1; + return false; } bool os::dll_address_to_function_name(address addr, char *buf, int buflen, int *offset) { + // buf is not optional, but offset is optional + assert(buf != NULL, "sanity check"); + if (Decoder::decode(addr, buf, buflen, offset)) { return true; } if (offset != NULL) *offset = -1; - if (buf != NULL) buf[0] = '\0'; + buf[0] = '\0'; return false; } @@ -2317,6 +2323,11 @@ #endif Thread* t = ThreadLocalStorage::get_thread_slow(); // slow & steady + // Handle SafeFetch32 and SafeFetchN exceptions. + if (StubRoutines::is_safefetch_fault(pc)) { + return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc)); + } + #ifndef _WIN64 // Execution protection violation - win32 running on AMD64 only // Handled first to avoid misdiagnosis as a "normal" access violation; @@ -2524,7 +2535,7 @@ addr = (address)((uintptr_t)addr & (~((uintptr_t)os::vm_page_size() - (uintptr_t)1))); os::commit_memory((char *)addr, thread->stack_base() - addr, - false ); + !ExecMem); return EXCEPTION_CONTINUE_EXECUTION; } else @@ -2689,6 +2700,19 @@ } #endif +#ifndef PRODUCT +void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) { + // Install a win32 structured exception handler around the test + // function call so the VM can generate an error dump if needed. + __try { + (*funcPtr)(); + } __except(topLevelExceptionFilter( + (_EXCEPTION_POINTERS*)_exception_info())) { + // Nothing to do. + } +} +#endif + // Virtual Memory int os::vm_page_size() { return os::win32::vm_page_size(); } @@ -2875,7 +2899,7 @@ PAGE_READWRITE); // If reservation failed, return NULL if (p_buf == NULL) return NULL; - MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC); + MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, mtNone, CALLER_PC); os::release_memory(p_buf, bytes + chunk_size); // we still need to round up to a page boundary (in case we are using large pages) @@ -2941,7 +2965,7 @@ // need to create a dummy 'reserve' record to match // the release. MemTracker::record_virtual_memory_reserve((address)p_buf, - bytes_to_release, CALLER_PC); + bytes_to_release, mtNone, CALLER_PC); os::release_memory(p_buf, bytes_to_release); } #ifdef ASSERT @@ -2961,9 +2985,10 @@ // Although the memory is allocated individually, it is returned as one. // NMT records it as one block. 
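call_test_func_with_wrapper() above uses MSVC structured exception handling so that a fault inside an internal VM test reaches the usual filter and produces an error dump instead of silently killing the process. A minimal sketch of the same shape (MSVC-specific, run_guarded() is a hypothetical name):

    static void run_guarded(void (*fn)(void)) {
      __try {
        (*fn)();
      } __except (topLevelExceptionFilter(
                      (_EXCEPTION_POINTERS*)_exception_info())) {
        // the filter has already produced the error dump; nothing more to do
      }
    }
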
address pc = CALLER_PC; - MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, pc); if ((flags & MEM_COMMIT) != 0) { - MemTracker::record_virtual_memory_commit((address)p_buf, bytes, pc); + MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, mtNone, pc); + } else { + MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, mtNone, pc); } // made it this far, success @@ -3154,8 +3179,7 @@ char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot); if (res != NULL) { address pc = CALLER_PC; - MemTracker::record_virtual_memory_reserve((address)res, bytes, pc); - MemTracker::record_virtual_memory_commit((address)res, bytes, pc); + MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc); } return res; @@ -3164,14 +3188,21 @@ bool os::release_memory_special(char* base, size_t bytes) { assert(base != NULL, "Sanity check"); - // Memory allocated via reserve_memory_special() is committed - MemTracker::record_virtual_memory_uncommit((address)base, bytes); return release_memory(base, bytes); } void os::print_statistics() { } +static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) { + int err = os::get_last_error(); + char buf[256]; + size_t buf_len = os::lasterror(buf, sizeof(buf)); + warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT + ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes, + exec, buf_len != 0 ? buf : "", err); +} + bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) { if (bytes == 0) { // Don't bother the OS with noops. @@ -3186,11 +3217,17 @@ // is always within a reserve covered by a single VirtualAlloc // in that case we can just do a single commit for the requested size if (!UseNUMAInterleaving) { - if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) return false; + if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) { + NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) + return false; + } if (exec) { DWORD oldprot; // Windows doc says to use VirtualProtect to get execute permissions - if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) return false; + if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) { + NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) + return false; + } } return true; } else { @@ -3205,12 +3242,20 @@ MEMORY_BASIC_INFORMATION alloc_info; VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info)); size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); - if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, PAGE_READWRITE) == NULL) + if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, + PAGE_READWRITE) == NULL) { + NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, + exec);) return false; + } if (exec) { DWORD oldprot; - if (!VirtualProtect(next_alloc_addr, bytes_to_rq, PAGE_EXECUTE_READWRITE, &oldprot)) + if (!VirtualProtect(next_alloc_addr, bytes_to_rq, + PAGE_EXECUTE_READWRITE, &oldprot)) { + NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, + exec);) return false; + } } bytes_remaining -= bytes_to_rq; next_alloc_addr += bytes_to_rq; @@ -3222,7 +3267,24 @@ bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) { - return commit_memory(addr, size, exec); + // alignment_hint is ignored on this OS + return pd_commit_memory(addr, size, exec); +} + +void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec, + const char* mesg) { + assert(mesg != NULL, "mesg 
must be specified"); + if (!pd_commit_memory(addr, size, exec)) { + warn_fail_commit_memory(addr, size, exec); + vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg); + } +} + +void os::pd_commit_memory_or_exit(char* addr, size_t size, + size_t alignment_hint, bool exec, + const char* mesg) { + // alignment_hint is ignored on this OS + pd_commit_memory_or_exit(addr, size, exec, mesg); } bool os::pd_uncommit_memory(char* addr, size_t bytes) { @@ -3240,7 +3302,7 @@ } bool os::pd_create_stack_guard_pages(char* addr, size_t size) { - return os::commit_memory(addr, size); + return os::commit_memory(addr, size, !ExecMem); } bool os::remove_stack_guard_pages(char* addr, size_t size) { @@ -3264,8 +3326,9 @@ // Strange enough, but on Win32 one can change protection only for committed // memory, not a big deal anyway, as bytes less or equal than 64K - if (!is_committed && !commit_memory(addr, bytes, prot == MEM_PROT_RWX)) { - fatal("cannot commit protection page"); + if (!is_committed) { + commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX, + "cannot commit protection page"); } // One cannot use os::guard_memory() here, as on Win32 guard page // have different (one-shot) semantics, from MSDN on PAGE_GUARD: @@ -5048,6 +5111,71 @@ return ::setsockopt(fd, level, optname, optval, optlen); } +// WINDOWS CONTEXT Flags for THREAD_SAMPLING +#if defined(IA32) +# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS) +#elif defined (AMD64) +# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT) +#endif + +// returns true if thread could be suspended, +// false otherwise +static bool do_suspend(HANDLE* h) { + if (h != NULL) { + if (SuspendThread(*h) != ~0) { + return true; + } + } + return false; +} + +// resume the thread +// calling resume on an active thread is a no-op +static void do_resume(HANDLE* h) { + if (h != NULL) { + ResumeThread(*h); + } +} + +// retrieve a suspend/resume context capable handle +// from the tid. Caller validates handle return value. +void get_thread_handle_for_extended_context(HANDLE* h, OSThread::thread_id_t tid) { + if (h != NULL) { + *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid); + } +} + +// +// Thread sampling implementation +// +void os::SuspendedThreadTask::internal_do_task() { + CONTEXT ctxt; + HANDLE h = NULL; + + // get context capable handle for thread + get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id()); + + // sanity + if (h == NULL || h == INVALID_HANDLE_VALUE) { + return; + } + + // suspend the thread + if (do_suspend(&h)) { + ctxt.ContextFlags = sampling_context_flags; + // get thread context + GetThreadContext(h, &ctxt); + SuspendedThreadTaskContext context(_thread, &ctxt); + // pass context to Thread Sampling impl + do_task(context); + // resume thread + do_resume(&h); + } + + // close handle + CloseHandle(h); +} + // Kernel32 API typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void); diff -r 16b10327b00d -r 90d6c221d4e5 src/os/windows/vm/os_windows.hpp --- a/src/os/windows/vm/os_windows.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os/windows/vm/os_windows.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -94,6 +94,10 @@ static address fast_jni_accessor_wrapper(BasicType); #endif +#ifndef PRODUCT + static void call_test_func_with_wrapper(void (*funcPtr)(void)); +#endif + // filter function to ignore faults on serializations page static LONG WINAPI serialize_fault_filter(struct _EXCEPTION_POINTERS* e); }; diff -r 16b10327b00d -r 90d6c221d4e5 src/os/windows/vm/os_windows.inline.hpp --- a/src/os/windows/vm/os_windows.inline.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os/windows/vm/os_windows.inline.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -106,4 +106,10 @@ inline int os::close(int fd) { return ::close(fd); } + +#ifndef PRODUCT + #define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) \ + os::win32::call_test_func_with_wrapper(f) +#endif + #endif // OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/os/windows/vm/perfMemory_windows.cpp --- a/src/os/windows/vm/perfMemory_windows.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os/windows/vm/perfMemory_windows.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -58,7 +58,7 @@ } // commit memory - if (!os::commit_memory(mapAddress, size)) { + if (!os::commit_memory(mapAddress, size, !ExecMem)) { if (PrintMiscellaneous && Verbose) { warning("Could not commit PerfData memory\n"); } @@ -1498,8 +1498,7 @@ (void)memset(mapAddress, '\0', size); // it does not go through os api, the operation has to record from here - MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC); - MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal); + MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC); return (char*) mapAddress; } @@ -1681,8 +1680,7 @@ } // it does not go through os api, the operation has to record from here - MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC); - MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal); + MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC); *addrp = (char*)mapAddress; @@ -1836,9 +1834,10 @@ return; } + MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); remove_file_mapping(addr); // it does not go through os api, the operation has to record from here - MemTracker::record_virtual_memory_release((address)addr, bytes); + tkr.record((address)addr, bytes); } char* PerfMemory::backing_store_filename() { diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/bsd_x86/vm/bsd_x86_32.s --- a/src/os_cpu/bsd_x86/vm/bsd_x86_32.s Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/bsd_x86/vm/bsd_x86_32.s Tue Jul 16 12:20:08 2013 -0400 @@ -63,24 +63,6 @@ popl %eax ret - .globl SYMBOL(SafeFetch32), SYMBOL(Fetch32PFI), SYMBOL(Fetch32Resume) - .globl SYMBOL(SafeFetchN) - ## TODO: avoid exposing Fetch32PFI and Fetch32Resume. - ## Instead, the signal handler would call a new SafeFetchTriage(FaultingEIP) - ## routine to vet the address. If the address is the faulting LD then - ## SafeFetchTriage() would return the resume-at EIP, otherwise null. 
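Here begins the removal of every hand-written SafeFetch32/SafeFetchN assembly stub; the replacement is runtime-generated stub code, so (as the os_*_x86.cpp hunks below show) each fault handler only needs a pc check. The common handler-side pattern, sketched with a hypothetical set_resume_pc() standing in for the per-OS ucontext field assignment (uc->context_pc, gregs[REG_PC], ...):

    if (StubRoutines::is_safefetch_fault(pc)) {
      set_resume_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
      return 1;  // fault handled; execution resumes past the faulting load
    }
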
- ELF_TYPE(SafeFetch32,@function) - .p2align 4,,15 -SYMBOL(SafeFetch32): -SYMBOL(SafeFetchN): - movl 0x8(%esp), %eax - movl 0x4(%esp), %ecx -SYMBOL(Fetch32PFI): - movl (%ecx), %eax -SYMBOL(Fetch32Resume): - ret - - .globl SYMBOL(SpinPause) ELF_TYPE(SpinPause,@function) .p2align 4,,15 diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/bsd_x86/vm/bsd_x86_64.s --- a/src/os_cpu/bsd_x86/vm/bsd_x86_64.s Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/bsd_x86/vm/bsd_x86_64.s Tue Jul 16 12:20:08 2013 -0400 @@ -46,28 +46,6 @@ .text - .globl SYMBOL(SafeFetch32), SYMBOL(Fetch32PFI), SYMBOL(Fetch32Resume) - .p2align 4,,15 - ELF_TYPE(SafeFetch32,@function) - // Prototype: int SafeFetch32 (int * Adr, int ErrValue) -SYMBOL(SafeFetch32): - movl %esi, %eax -SYMBOL(Fetch32PFI): - movl (%rdi), %eax -SYMBOL(Fetch32Resume): - ret - - .globl SYMBOL(SafeFetchN), SYMBOL(FetchNPFI), SYMBOL(FetchNResume) - .p2align 4,,15 - ELF_TYPE(SafeFetchN,@function) - // Prototype: intptr_t SafeFetchN (intptr_t * Adr, intptr_t ErrValue) -SYMBOL(SafeFetchN): - movq %rsi, %rax -SYMBOL(FetchNPFI): - movq (%rdi), %rax -SYMBOL(FetchNResume): - ret - .globl SYMBOL(SpinPause) .p2align 4,,15 ELF_TYPE(SpinPause,@function) diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp --- a/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -385,13 +385,6 @@ trap_page_fault = 0xE }; -extern "C" void Fetch32PFI () ; -extern "C" void Fetch32Resume () ; -#ifdef AMD64 -extern "C" void FetchNPFI () ; -extern "C" void FetchNResume () ; -#endif // AMD64 - extern "C" JNIEXPORT int JVM_handle_bsd_signal(int sig, siginfo_t* info, @@ -454,16 +447,10 @@ if (info != NULL && uc != NULL && thread != NULL) { pc = (address) os::Bsd::ucontext_get_pc(uc); - if (pc == (address) Fetch32PFI) { - uc->context_pc = intptr_t(Fetch32Resume) ; - return 1 ; + if (StubRoutines::is_safefetch_fault(pc)) { + uc->context_pc = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc)); + return 1; } -#ifdef AMD64 - if (pc == (address) FetchNPFI) { - uc->context_pc = intptr_t (FetchNResume) ; - return 1 ; - } -#endif // AMD64 // Handle ALL stack overflow variations here if (sig == SIGSEGV || sig == SIGBUS) { diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp --- a/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,10 +30,16 @@ // currently interrupted by SIGPROF bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) { + assert(Thread::current() == this, "caller must be current thread"); + return pd_get_top_frame(fr_addr, ucontext, isInJava); +} - assert(Thread::current() == this, "caller must be current thread"); +bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) { + return pd_get_top_frame(fr_addr, ucontext, isInJava); +} + +bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) { assert(this->is_Java_thread(), "must be JavaThread"); - JavaThread* jt = (JavaThread *)this; // If we have a last_Java_frame, then we should use it even if diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/bsd_x86/vm/thread_bsd_x86.hpp --- a/src/os_cpu/bsd_x86/vm/thread_bsd_x86.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/bsd_x86/vm/thread_bsd_x86.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,6 +61,13 @@ bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava); + bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, + bool isInJava); + +private: + bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava); +public: + // These routines are only used on cpu architectures that // have separate register stacks (Itanium). static bool register_stack_overflow() { return false; } diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/linux_sparc/vm/assembler_linux_sparc.cpp --- a/src/os_cpu/linux_sparc/vm/assembler_linux_sparc.cpp Tue Jul 16 10:55:48 2013 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,47 +0,0 @@ -/* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#include "precompiled.hpp" -#include "asm/macroAssembler.hpp" -#include "runtime/os.hpp" -#include "runtime/threadLocalStorage.hpp" - -#include - -void MacroAssembler::read_ccr_trap(Register ccr_save) { - // No implementation - breakpoint_trap(); -} - -void MacroAssembler::write_ccr_trap(Register ccr_save, Register scratch1, Register scratch2) { - // No implementation - breakpoint_trap(); -} - -void MacroAssembler::flush_windows_trap() { trap(SP_TRAP_FWIN); } -void MacroAssembler::clean_windows_trap() { trap(SP_TRAP_CWIN); } - -// Use software breakpoint trap until we figure out how to do this on Linux -void MacroAssembler::get_psr_trap() { trap(SP_TRAP_SBPT); } -void MacroAssembler::set_psr_trap() { trap(SP_TRAP_SBPT); } diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp --- a/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -169,7 +169,6 @@ : "memory"); return rv; #else - assert(VM_Version::v9_instructions_work(), "cas only supported on v9"); volatile jlong_accessor evl, cvl, rv; evl.long_value = exchange_value; cvl.long_value = compare_value; diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/linux_sparc/vm/linux_sparc.s --- a/src/os_cpu/linux_sparc/vm/linux_sparc.s Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/linux_sparc/vm/linux_sparc.s Tue Jul 16 12:20:08 2013 -0400 @@ -21,42 +21,6 @@ # questions. # - # Prototype: int SafeFetch32 (int * adr, int ErrValue) - # The "ld" at Fetch32 is potentially faulting instruction. - # If the instruction traps the trap handler will arrange - # for control to resume at Fetch32Resume. - # By convention with the trap handler we ensure there is a non-CTI - # instruction in the trap shadow. 
- - - .globl SafeFetch32, Fetch32PFI, Fetch32Resume - .globl SafeFetchN - .align 32 - .type SafeFetch32,@function -SafeFetch32: - mov %o0, %g1 - mov %o1, %o0 -Fetch32PFI: - # <-- Potentially faulting instruction - ld [%g1], %o0 -Fetch32Resume: - nop - retl - nop - - .globl SafeFetchN, FetchNPFI, FetchNResume - .type SafeFetchN,@function - .align 32 -SafeFetchN: - mov %o0, %g1 - mov %o1, %o0 -FetchNPFI: - ldn [%g1], %o0 -FetchNResume: - nop - retl - nop - # Possibilities: # -- membar # -- CAS (SP + BIAS, G0, G0) diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp --- a/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -366,18 +366,9 @@ // Utility functions -extern "C" void Fetch32PFI(); -extern "C" void Fetch32Resume(); -extern "C" void FetchNPFI(); -extern "C" void FetchNResume(); - inline static bool checkPrefetch(sigcontext* uc, address pc) { - if (pc == (address) Fetch32PFI) { - set_cont_address(uc, address(Fetch32Resume)); - return true; - } - if (pc == (address) FetchNPFI) { - set_cont_address(uc, address(FetchNResume)); + if (StubRoutines::is_safefetch_fault(pc)) { + set_cont_address(uc, address(StubRoutines::continuation_for_safefetch_fault(pc))); return true; } return false; diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/linux_x86/vm/linux_x86_32.s --- a/src/os_cpu/linux_x86/vm/linux_x86_32.s Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/linux_x86/vm/linux_x86_32.s Tue Jul 16 12:20:08 2013 -0400 @@ -42,24 +42,6 @@ .text - .globl SafeFetch32, Fetch32PFI, Fetch32Resume - .globl SafeFetchN - ## TODO: avoid exposing Fetch32PFI and Fetch32Resume. - ## Instead, the signal handler would call a new SafeFetchTriage(FaultingEIP) - ## routine to vet the address. If the address is the faulting LD then - ## SafeFetchTriage() would return the resume-at EIP, otherwise null. 
- .type SafeFetch32,@function - .p2align 4,,15 -SafeFetch32: -SafeFetchN: - movl 0x8(%esp), %eax - movl 0x4(%esp), %ecx -Fetch32PFI: - movl (%ecx), %eax -Fetch32Resume: - ret - - .globl SpinPause .type SpinPause,@function .p2align 4,,15 diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/linux_x86/vm/linux_x86_64.s --- a/src/os_cpu/linux_x86/vm/linux_x86_64.s Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/linux_x86/vm/linux_x86_64.s Tue Jul 16 12:20:08 2013 -0400 @@ -38,28 +38,6 @@ .text - .globl SafeFetch32, Fetch32PFI, Fetch32Resume - .align 16 - .type SafeFetch32,@function - // Prototype: int SafeFetch32 (int * Adr, int ErrValue) -SafeFetch32: - movl %esi, %eax -Fetch32PFI: - movl (%rdi), %eax -Fetch32Resume: - ret - - .globl SafeFetchN, FetchNPFI, FetchNResume - .align 16 - .type SafeFetchN,@function - // Prototype: intptr_t SafeFetchN (intptr_t * Adr, intptr_t ErrValue) -SafeFetchN: - movq %rsi, %rax -FetchNPFI: - movq (%rdi), %rax -FetchNResume: - ret - .globl SpinPause .align 16 .type SpinPause,@function diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/linux_x86/vm/os_linux_x86.cpp --- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -209,13 +209,6 @@ trap_page_fault = 0xE }; -extern "C" void Fetch32PFI () ; -extern "C" void Fetch32Resume () ; -#ifdef AMD64 -extern "C" void FetchNPFI () ; -extern "C" void FetchNResume () ; -#endif // AMD64 - extern "C" JNIEXPORT int JVM_handle_linux_signal(int sig, siginfo_t* info, @@ -278,14 +271,18 @@ if (info != NULL && uc != NULL && thread != NULL) { pc = (address) os::Linux::ucontext_get_pc(uc); - if (pc == (address) Fetch32PFI) { - uc->uc_mcontext.gregs[REG_PC] = intptr_t(Fetch32Resume) ; - return 1 ; + if (StubRoutines::is_safefetch_fault(pc)) { + uc->uc_mcontext.gregs[REG_PC] = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc)); + return 1; } -#ifdef AMD64 - if (pc == (address) FetchNPFI) { - uc->uc_mcontext.gregs[REG_PC] = intptr_t (FetchNResume) ; - return 1 ; + +#ifndef AMD64 + // Halt if SI_KERNEL before more crashes get misdiagnosed as Java bugs + // This can happen in any running code (currently more frequently in + // interpreter code but has been seen in compiled code) + if (sig == SIGSEGV && info->si_addr == 0 && info->si_code == SI_KERNEL) { + fatal("An irrecoverable SI_KERNEL SIGSEGV has occurred due " + "to unstable signal handling in this distribution."); } #endif // AMD64 diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/linux_x86/vm/thread_linux_x86.cpp --- a/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -32,8 +32,15 @@ void* ucontext, bool isInJava) { assert(Thread::current() == this, "caller must be current thread"); + return pd_get_top_frame(fr_addr, ucontext, isInJava); +} + +bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) { + return pd_get_top_frame(fr_addr, ucontext, isInJava); +} + +bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) { assert(this->is_Java_thread(), "must be JavaThread"); - JavaThread* jt = (JavaThread *)this; // If we have a last_Java_frame, then we should use it even if diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/linux_x86/vm/thread_linux_x86.hpp --- a/src/os_cpu/linux_x86/vm/thread_linux_x86.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/linux_x86/vm/thread_linux_x86.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,6 +61,11 @@ bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava); + bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava); +private: + bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava); +public: + // These routines are only used on cpu architectures that // have separate register stacks (Itanium). static bool register_stack_overflow() { return false; } diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/solaris_sparc/vm/assembler_solaris_sparc.cpp --- a/src/os_cpu/solaris_sparc/vm/assembler_solaris_sparc.cpp Tue Jul 16 10:55:48 2013 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,61 +0,0 @@ -/* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "asm/macroAssembler.inline.hpp" -#include "runtime/os.hpp" -#include "runtime/threadLocalStorage.hpp" - -#include // For trap numbers -#include // For V8 compatibility - -void MacroAssembler::read_ccr_trap(Register ccr_save) { - // Execute a trap to get the PSR, mask and shift - // to get the condition codes. 
- get_psr_trap(); - nop(); - set(PSR_ICC, ccr_save); - and3(O0, ccr_save, ccr_save); - srl(ccr_save, PSR_ICC_SHIFT, ccr_save); -} - -void MacroAssembler::write_ccr_trap(Register ccr_save, Register scratch1, Register scratch2) { - // Execute a trap to get the PSR, shift back - // the condition codes, mask the condition codes - // back into and PSR and trap to write back the - // PSR. - sll(ccr_save, PSR_ICC_SHIFT, scratch2); - get_psr_trap(); - nop(); - set(~PSR_ICC, scratch1); - and3(O0, scratch1, O0); - or3(O0, scratch2, O0); - set_psr_trap(); - nop(); -} - -void MacroAssembler::flush_windows_trap() { trap(ST_FLUSH_WINDOWS); } -void MacroAssembler::clean_windows_trap() { trap(ST_CLEAN_WINDOWS); } -void MacroAssembler::get_psr_trap() { trap(ST_GETPSR); } -void MacroAssembler::set_psr_trap() { trap(ST_SETPSR); } diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp --- a/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -60,21 +60,10 @@ #else -extern "C" void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst); extern "C" void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst); inline void Atomic_move_long(volatile jlong* src, volatile jlong* dst) { -#ifdef COMPILER2 - // Compiler2 does not support v8, it is used only for v9. _Atomic_move_long_v9(src, dst); -#else - // The branch is cheaper then emulated LDD. - if (VM_Version::v9_instructions_work()) { - _Atomic_move_long_v9(src, dst); - } else { - _Atomic_move_long_v8(src, dst); - } -#endif } inline jlong Atomic::load(volatile jlong* src) { @@ -209,7 +198,6 @@ : "memory"); return rv; #else //_LP64 - assert(VM_Version::v9_instructions_work(), "cas only supported on v9"); volatile jlong_accessor evl, cvl, rv; evl.long_value = exchange_value; cvl.long_value = compare_value; @@ -318,7 +306,6 @@ // Return 64 bit value in %o0 return _Atomic_cas64((intptr_t)exchange_value, (intptr_t *)dest, (intptr_t)compare_value); #else // _LP64 - assert (VM_Version::v9_instructions_work(), "only supported on v9"); // Return 64 bit value in %o0,%o1 by hand return _Atomic_casl(exchange_value, dest, compare_value); #endif // _LP64 diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp --- a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -194,6 +194,11 @@ return NULL; } +address os::Solaris::ucontext_get_pc(ucontext_t *uc) { + return (address) uc->uc_mcontext.gregs[REG_PC]; +} + + // For Forte Analyzer AsyncGetCallTrace profiling support - thread // is currently interrupted by SIGPROF. // @@ -265,22 +270,6 @@ } } - -void GetThreadPC_Callback::execute(OSThread::InterruptArguments *args) { - Thread* thread = args->thread(); - ucontext_t* uc = args->ucontext(); - intptr_t* sp; - - assert(ProfileVM && thread->is_VM_thread(), "just checking"); - - // Skip the mcontext corruption verification. 
If if occasionally - // things get corrupt, it is ok for profiling - we will just get an unresolved - // function name - ExtendedPC new_addr((address)uc->uc_mcontext.gregs[REG_PC]); - _addr = new_addr; -} - - static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) { char lwpstatusfile[PROCFILE_LENGTH]; int lwpfd, err; @@ -314,11 +303,6 @@ #endif } -extern "C" void Fetch32PFI () ; -extern "C" void Fetch32Resume () ; -extern "C" void FetchNPFI () ; -extern "C" void FetchNResume () ; - extern "C" JNIEXPORT int JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrecognized) { @@ -358,13 +342,8 @@ guarantee(sig != os::Solaris::SIGinterrupt(), "Can not chain VM interrupt signal, try -XX:+UseAltSigs"); if (sig == os::Solaris::SIGasync()) { - if (thread) { - OSThread::InterruptArguments args(thread, uc); - thread->osthread()->do_interrupt_callbacks_at_interrupt(&args); - return true; - } else if (vmthread) { - OSThread::InterruptArguments args(vmthread, uc); - vmthread->osthread()->do_interrupt_callbacks_at_interrupt(&args); + if (thread || vmthread) { + OSThread::SR_handler(t, uc); return true; } else if (os::Solaris::chained_handler(sig, info, ucVoid)) { return true; @@ -395,17 +374,10 @@ npc = (address) uc->uc_mcontext.gregs[REG_nPC]; // SafeFetch() support - // Implemented with either a fixed set of addresses such - // as Fetch32*, or with Thread._OnTrap. - if (uc->uc_mcontext.gregs[REG_PC] == intptr_t(Fetch32PFI)) { - uc->uc_mcontext.gregs [REG_PC] = intptr_t(Fetch32Resume) ; - uc->uc_mcontext.gregs [REG_nPC] = intptr_t(Fetch32Resume) + 4 ; - return true ; - } - if (uc->uc_mcontext.gregs[REG_PC] == intptr_t(FetchNPFI)) { - uc->uc_mcontext.gregs [REG_PC] = intptr_t(FetchNResume) ; - uc->uc_mcontext.gregs [REG_nPC] = intptr_t(FetchNResume) + 4 ; - return true ; + if (StubRoutines::is_safefetch_fault(pc)) { + uc->uc_mcontext.gregs[REG_PC] = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc)); + uc->uc_mcontext.gregs[REG_nPC] = uc->uc_mcontext.gregs[REG_PC] + 4; + return 1; } // Handle ALL stack overflow variations here diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/solaris_sparc/vm/solaris_sparc.il --- a/src/os_cpu/solaris_sparc/vm/solaris_sparc.il Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/solaris_sparc/vm/solaris_sparc.il Tue Jul 16 12:20:08 2013 -0400 @@ -152,23 +152,6 @@ .nonvolatile .end - // Support for jlong Atomic::load and Atomic::store on v8. - // - // void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst) - // - // Arguments: - // src: O0 - // dest: O1 - // - // Overwrites O2 and O3 - - .inline _Atomic_move_long_v8,2 - .volatile - ldd [%o0], %o2 - std %o2, [%o1] - .nonvolatile - .end - // Support for jlong Atomic::load and Atomic::store on v9. // // void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst) diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/solaris_sparc/vm/solaris_sparc.s --- a/src/os_cpu/solaris_sparc/vm/solaris_sparc.s Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/solaris_sparc/vm/solaris_sparc.s Tue Jul 16 12:20:08 2013 -0400 @@ -21,47 +21,6 @@ !! questions. !! - !! Prototype: int SafeFetch32 (int * adr, int ErrValue) - !! The "ld" at Fetch32 is potentially faulting instruction. - !! If the instruction traps the trap handler will arrange - !! for control to resume at Fetch32Resume. - !! By convention with the trap handler we ensure there is a non-CTI - !! instruction in the trap shadow. - !! - !! 
The reader might be tempted to move this service to .il. - !! Don't. Sun's CC back-end reads and optimize code emitted - !! by the .il "call", in some cases optimizing the code, completely eliding it, - !! or by moving the code from the "call site". - - !! ASM better know we may use G6 for our own purposes - .register %g6, #ignore - - .globl SafeFetch32 - .align 32 - .global Fetch32PFI, Fetch32Resume -SafeFetch32: - mov %o0, %g1 - mov %o1, %o0 -Fetch32PFI: - ld [%g1], %o0 !! <-- Potentially faulting instruction -Fetch32Resume: - nop - retl - nop - - .globl SafeFetchN - .align 32 - .globl FetchNPFI, FetchNResume -SafeFetchN: - mov %o0, %g1 - mov %o1, %o0 -FetchNPFI: - ldn [%g1], %o0 -FetchNResume: - nop - retl - nop - !! Possibilities: !! -- membar !! -- CAS (SP + BIAS, G0, G0) diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp --- a/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,11 +36,21 @@ void* ucontext, bool isInJava) { assert(Thread::current() == this, "caller must be current thread"); + return pd_get_top_frame(fr_addr, ucontext, isInJava, true); +} + +bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) { + // get ucontext somehow + return pd_get_top_frame(fr_addr, ucontext, isInJava, false); +} + +bool JavaThread::pd_get_top_frame(frame* fr_addr, + void* ucontext, bool isInJava, bool makeWalkable) { assert(this->is_Java_thread(), "must be JavaThread"); JavaThread* jt = (JavaThread *)this; - if (!isInJava) { + if (!isInJava && makeWalkable) { // make_walkable flushes register windows and grabs last_Java_pc // which can not be done if the ucontext sp matches last_Java_sp // stack walking utilities assume last_Java_pc set if marked flushed diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.hpp --- a/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -93,6 +93,11 @@ bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava); + bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava); +private: + bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava, bool makeWalkable); +public: + // These routines are only used on cpu architectures that // have separate register stacks (Itanium). 
static bool register_stack_overflow() { return false; } diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp --- a/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -183,6 +183,10 @@ return (intptr_t*)uc->uc_mcontext.gregs[REG_FP]; } +address os::Solaris::ucontext_get_pc(ucontext_t *uc) { + return (address) uc->uc_mcontext.gregs[REG_PC]; +} + // For Forte Analyzer AsyncGetCallTrace profiling support - thread // is currently interrupted by SIGPROF. // @@ -252,22 +256,6 @@ } } -// This is a simple callback that just fetches a PC for an interrupted thread. -// The thread need not be suspended and the fetched PC is just a hint. -// This one is currently used for profiling the VMThread ONLY! - -// Must be synchronous -void GetThreadPC_Callback::execute(OSThread::InterruptArguments *args) { - Thread* thread = args->thread(); - ucontext_t* uc = args->ucontext(); - intptr_t* sp; - - assert(ProfileVM && thread->is_VM_thread(), "just checking"); - - ExtendedPC new_addr((address)uc->uc_mcontext.gregs[REG_PC]); - _addr = new_addr; -} - static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) { char lwpstatusfile[PROCFILE_LENGTH]; int lwpfd, err; @@ -364,13 +352,6 @@ } -extern "C" void Fetch32PFI () ; -extern "C" void Fetch32Resume () ; -#ifdef AMD64 -extern "C" void FetchNPFI () ; -extern "C" void FetchNResume () ; -#endif // AMD64 - extern "C" JNIEXPORT int JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrecognized) { @@ -419,14 +400,8 @@ guarantee(sig != os::Solaris::SIGinterrupt(), "Can not chain VM interrupt signal, try -XX:+UseAltSigs"); if (sig == os::Solaris::SIGasync()) { - if(thread){ - OSThread::InterruptArguments args(thread, uc); - thread->osthread()->do_interrupt_callbacks_at_interrupt(&args); - return true; - } - else if(vmthread){ - OSThread::InterruptArguments args(vmthread, uc); - vmthread->osthread()->do_interrupt_callbacks_at_interrupt(&args); + if(thread || vmthread){ + OSThread::SR_handler(t, uc); return true; } else if (os::Solaris::chained_handler(sig, info, ucVoid)) { return true; @@ -454,17 +429,10 @@ // factor me: getPCfromContext pc = (address) uc->uc_mcontext.gregs[REG_PC]; - // SafeFetch32() support - if (pc == (address) Fetch32PFI) { - uc->uc_mcontext.gregs[REG_PC] = intptr_t(Fetch32Resume) ; - return true ; + if (StubRoutines::is_safefetch_fault(pc)) { + uc->uc_mcontext.gregs[REG_PC] = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc)); + return true; } -#ifdef AMD64 - if (pc == (address) FetchNPFI) { - uc->uc_mcontext.gregs [REG_PC] = intptr_t(FetchNResume) ; - return true ; - } -#endif // AMD64 // Handle ALL stack overflow variations here if (sig == SIGSEGV && info->si_code == SEGV_ACCERR) { diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/solaris_x86/vm/solaris_x86_32.s --- a/src/os_cpu/solaris_x86/vm/solaris_x86_32.s Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/solaris_x86/vm/solaris_x86_32.s Tue Jul 16 12:20:08 2013 -0400 @@ -54,20 +54,6 @@ popl %eax ret - .align 16 - .globl SafeFetch32 - .globl SafeFetchN - 
.globl Fetch32PFI, Fetch32Resume -SafeFetch32: -SafeFetchN: - movl 0x8(%esp), %eax - movl 0x4(%esp), %ecx -Fetch32PFI: - movl (%ecx), %eax -Fetch32Resume: - ret - - .align 16 .globl SpinPause SpinPause: diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/solaris_x86/vm/solaris_x86_64.s --- a/src/os_cpu/solaris_x86/vm/solaris_x86_64.s Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/solaris_x86/vm/solaris_x86_64.s Tue Jul 16 12:20:08 2013 -0400 @@ -21,54 +21,34 @@ / questions. / - .globl fs_load - .globl fs_thread + .globl fs_load + .globl fs_thread // NOTE WELL! The _Copy functions are called directly - // from server-compiler-generated code via CallLeafNoFP, - // which means that they *must* either not use floating - // point or use it in the same manner as does the server - // compiler. + // from server-compiler-generated code via CallLeafNoFP, + // which means that they *must* either not use floating + // point or use it in the same manner as does the server + // compiler. .globl _Copy_arrayof_conjoint_bytes .globl _Copy_conjoint_jshorts_atomic - .globl _Copy_arrayof_conjoint_jshorts + .globl _Copy_arrayof_conjoint_jshorts .globl _Copy_conjoint_jints_atomic .globl _Copy_arrayof_conjoint_jints - .globl _Copy_conjoint_jlongs_atomic + .globl _Copy_conjoint_jlongs_atomic .globl _Copy_arrayof_conjoint_jlongs - .section .text,"ax" + .section .text,"ax" / Fast thread accessors, used by threadLS_solaris_amd64.cpp - .align 16 + .align 16 fs_load: - movq %fs:(%rdi),%rax - ret - - .align 16 -fs_thread: - movq %fs:0x0,%rax - ret - - .globl SafeFetch32, Fetch32PFI, Fetch32Resume - .align 16 - // Prototype: int SafeFetch32 (int * Adr, int ErrValue) -SafeFetch32: - movl %esi, %eax -Fetch32PFI: - movl (%rdi), %eax -Fetch32Resume: + movq %fs:(%rdi),%rax ret - .globl SafeFetchN, FetchNPFI, FetchNResume - .align 16 - // Prototype: intptr_t SafeFetchN (intptr_t * Adr, intptr_t ErrValue) -SafeFetchN: - movq %rsi, %rax -FetchNPFI: - movq (%rdi), %rax -FetchNResume: + .align 16 +fs_thread: + movq %fs:0x0,%rax ret .globl SpinPause @@ -78,7 +58,7 @@ nop movq $1, %rax ret - + / Support for void Copy::arrayof_conjoint_bytes(void* from, / void* to, @@ -340,7 +320,7 @@ addq $4,%rdx jg 1b ret - + / Support for void Copy::arrayof_conjoint_jlongs(jlong* from, / jlong* to, / size_t count) diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp --- a/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,8 +30,17 @@ // currently interrupted by SIGPROF bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) { + assert(Thread::current() == this, "caller must be current thread"); + return pd_get_top_frame(fr_addr, ucontext, isInJava); +} - assert(Thread::current() == this, "caller must be current thread"); +bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, + void* ucontext, bool isInJava) { + return pd_get_top_frame(fr_addr, ucontext, isInJava); +} + +bool JavaThread::pd_get_top_frame(frame* fr_addr, + void* ucontext, bool isInJava) { assert(this->is_Java_thread(), "must be JavaThread"); JavaThread* jt = (JavaThread *)this; diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/solaris_x86/vm/thread_solaris_x86.hpp --- a/src/os_cpu/solaris_x86/vm/thread_solaris_x86.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/solaris_x86/vm/thread_solaris_x86.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -54,6 +54,12 @@ bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava); + bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, + bool isInJava); +private: + bool pd_get_top_frame(frame* fr_addr, void* ucontext, + bool isInJava); +public: // These routines are only used on cpu architectures that // have separate register stacks (Itanium). diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/windows_x86/vm/os_windows_x86.cpp --- a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -518,24 +518,6 @@ st->cr(); } -extern "C" int SafeFetch32 (int * adr, int Err) { - int rv = Err ; - _try { - rv = *((volatile int *) adr) ; - } __except(EXCEPTION_EXECUTE_HANDLER) { - } - return rv ; -} - -extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t Err) { - intptr_t rv = Err ; - _try { - rv = *((volatile intptr_t *) adr) ; - } __except(EXCEPTION_EXECUTE_HANDLER) { - } - return rv ; -} - extern "C" int SpinPause () { #ifdef AMD64 return 0 ; diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/windows_x86/vm/thread_windows_x86.cpp --- a/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
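The same refactor repeats across the thread_*.cpp files in this change: the existing signal-handler entry point and the new pd_get_top_frame_for_profiling both funnel into one private pd_get_top_frame worker. On SPARC the worker takes an extra makeWalkable flag because, per the comment in thread_solaris_sparc.cpp, make_walkable flushes register windows, which the profiling caller must avoid. A reduction of that shape with stand-in types only:

#include <cstdio>

struct Frame { const char* where; };

class ThreadSketch {
 public:
  // signal-handler path: runs on the current thread, may touch the stack
  bool top_frame_for_signal_handler(Frame* fr, bool in_java) {
    return get_top_frame(fr, in_java, true /* may make stack walkable */);
  }
  // profiling path: may observe another thread, must stay hands-off
  bool top_frame_for_profiling(Frame* fr, bool in_java) {
    return get_top_frame(fr, in_java, false);
  }
 private:
  bool get_top_frame(Frame* fr, bool in_java, bool make_walkable) {
    if (!in_java && make_walkable) {
      // the real SPARC code flushes register windows here
    }
    fr->where = in_java ? "Java frame" : "VM/native frame";
    return true;
  }
};

int main() {
  ThreadSketch t;
  Frame f;
  t.top_frame_for_profiling(&f, true);
  printf("%s\n", f.where);
  return 0;
}
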
* * This code is free software; you can redistribute it and/or modify it @@ -32,6 +32,15 @@ void* ucontext, bool isInJava) { assert(Thread::current() == this, "caller must be current thread"); + return pd_get_top_frame(fr_addr, ucontext, isInJava); +} + +bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) { + return pd_get_top_frame(fr_addr, ucontext, isInJava); +} + +bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) { + assert(this->is_Java_thread(), "must be JavaThread"); JavaThread* jt = (JavaThread *)this; @@ -87,4 +96,3 @@ } void JavaThread::cache_global_variables() { } - diff -r 16b10327b00d -r 90d6c221d4e5 src/os_cpu/windows_x86/vm/thread_windows_x86.hpp --- a/src/os_cpu/windows_x86/vm/thread_windows_x86.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/os_cpu/windows_x86/vm/thread_windows_x86.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -58,6 +58,12 @@ bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava); + bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava); + +private: + bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava); + + public: // These routines are only used on cpu architectures that // have separate register stacks (Itanium). static bool register_stack_overflow() { return false; } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/tools/ProjectCreator/BuildConfig.java --- a/src/share/tools/ProjectCreator/BuildConfig.java Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/tools/ProjectCreator/BuildConfig.java Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -152,7 +152,7 @@ sysDefines.add("_WINDOWS"); sysDefines.add("HOTSPOT_BUILD_USER=\\\""+System.getProperty("user.name")+"\\\""); sysDefines.add("HOTSPOT_BUILD_TARGET=\\\""+get("Build")+"\\\""); - sysDefines.add("INCLUDE_TRACE"); + sysDefines.add("INCLUDE_TRACE=1"); sysDefines.add("_JNI_IMPLEMENTATION_"); if (vars.get("PlatformName").equals("Win32")) { sysDefines.add("HOTSPOT_LIB_ARCH=\\\"i386\\\""); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/adlc/forms.hpp --- a/src/share/vm/adlc/forms.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/adlc/forms.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -146,7 +146,7 @@ // Public Methods Form(int formType=0, int line=0) : _next(NULL), _linenum(line), _ftype(formType) { }; - ~Form() {}; + virtual ~Form() {}; virtual bool ideal_only() const { assert(0,"Check of ideal status on non-instruction/operand form.\n"); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/adlc/formssel.cpp --- a/src/share/vm/adlc/formssel.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/adlc/formssel.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -235,6 +235,9 @@ return false; } +bool InstructForm::is_ideal_negD() const { + return (_matrule && _matrule->_rChild && strcmp(_matrule->_rChild->_opType, "NegD") == 0); +} // Return 'true' if this instruction matches an ideal 'Copy*' node int InstructForm::is_ideal_copy() const { @@ -533,6 +536,12 @@ if( data_type != Form::none ) rematerialize = true; + // Ugly: until a better fix is implemented, disable rematerialization for + // negD nodes because they are proved to be problematic. + if (is_ideal_negD()) { + return false; + } + // Constants if( _components.count() == 1 && _components[0]->is(Component::USE_DEF) ) rematerialize = true; diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/adlc/formssel.hpp --- a/src/share/vm/adlc/formssel.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/adlc/formssel.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -147,6 +147,7 @@ virtual int is_empty_encoding() const; // _size=0 and/or _insencode empty virtual int is_tls_instruction() const; // tlsLoadP rule or ideal ThreadLocal virtual int is_ideal_copy() const; // node matches ideal 'Copy*' + virtual bool is_ideal_negD() const; // node matches ideal 'NegD' virtual bool is_ideal_if() const; // node matches ideal 'If' virtual bool is_ideal_fastlock() const; // node matches 'FastLock' virtual bool is_ideal_membar() const; // node matches ideal 'MemBarXXX' diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/c1/c1_GraphBuilder.cpp --- a/src/share/vm/c1/c1_GraphBuilder.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
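One small but important fix rides along in forms.hpp above: ~Form() becomes virtual. Form is a polymorphic base (note the virtual members that follow it), and deleting a derived object through a base pointer with a non-virtual destructor is undefined behavior -- in practice the derived destructor is silently skipped. A minimal illustration with stand-in types:

#include <cstdio>

struct Base { ~Base() { puts("~Base"); } };              // non-virtual: broken
struct Derived : Base { ~Derived() { puts("~Derived"); } };

struct VBase { virtual ~VBase() { puts("~VBase"); } };   // virtual: correct
struct VDerived : VBase { ~VDerived() { puts("~VDerived"); } };

int main() {
  Base* b = new Derived;
  delete b;              // undefined behavior; typically ~Derived never runs
  VBase* v = new VDerived;
  delete v;              // prints ~VDerived, then ~VBase
  return 0;
}
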
* * This code is free software; you can redistribute it and/or modify it @@ -3461,6 +3461,14 @@ preserves_state = true; break; + case vmIntrinsics::_updateCRC32: + case vmIntrinsics::_updateBytesCRC32: + case vmIntrinsics::_updateByteBufferCRC32: + if (!UseCRC32Intrinsics) return false; + cantrap = false; + preserves_state = true; + break; + case vmIntrinsics::_loadFence : case vmIntrinsics::_storeFence: case vmIntrinsics::_fullFence : diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/c1/c1_IR.cpp --- a/src/share/vm/c1/c1_IR.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/c1/c1_IR.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -506,7 +506,7 @@ _loop_map(0, 0), // initialized later with correct size _compilation(c) { - TRACE_LINEAR_SCAN(2, "***** computing linear-scan block order"); + TRACE_LINEAR_SCAN(2, tty->print_cr("***** computing linear-scan block order")); init_visited(); count_edges(start_block, NULL); @@ -683,7 +683,7 @@ } void ComputeLinearScanOrder::assign_loop_depth(BlockBegin* start_block) { - TRACE_LINEAR_SCAN(3, "----- computing loop-depth and weight"); + TRACE_LINEAR_SCAN(3, tty->print_cr("----- computing loop-depth and weight")); init_visited(); assert(_work_list.is_empty(), "work list must be empty before processing"); @@ -868,7 +868,7 @@ } void ComputeLinearScanOrder::compute_order(BlockBegin* start_block) { - TRACE_LINEAR_SCAN(3, "----- computing final block order"); + TRACE_LINEAR_SCAN(3, tty->print_cr("----- computing final block order")); // the start block is always the first block in the linear scan order _linear_scan_order = new BlockList(_num_blocks); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/c1/c1_LIR.cpp --- a/src/share/vm/c1/c1_LIR.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/c1/c1_LIR.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -201,23 +201,24 @@ #ifdef ASSERT if (!is_pointer() && !is_illegal()) { + OprKind kindfield = kind_field(); // Factored out because of compiler bug, see 8002160 switch (as_BasicType(type_field())) { case T_LONG: - assert((kind_field() == cpu_register || kind_field() == stack_value) && + assert((kindfield == cpu_register || kindfield == stack_value) && size_field() == double_size, "must match"); break; case T_FLOAT: // FP return values can be also in CPU registers on ARM and PPC (softfp ABI) - assert((kind_field() == fpu_register || kind_field() == stack_value - ARM_ONLY(|| kind_field() == cpu_register) - PPC_ONLY(|| kind_field() == cpu_register) ) && + assert((kindfield == fpu_register || kindfield == stack_value + ARM_ONLY(|| kindfield == cpu_register) + PPC_ONLY(|| kindfield == cpu_register) ) && size_field() == single_size, "must match"); break; case T_DOUBLE: // FP return values can be also in CPU registers on ARM and PPC (softfp ABI) - assert((kind_field() == fpu_register || kind_field() == stack_value - ARM_ONLY(|| kind_field() == cpu_register) - PPC_ONLY(|| kind_field() == cpu_register) ) && + assert((kindfield == fpu_register || kindfield == stack_value + ARM_ONLY(|| kindfield == cpu_register) + PPC_ONLY(|| kindfield == cpu_register) ) && size_field() == double_size, "must match"); break; case T_BOOLEAN: @@ -229,7 +230,7 @@ case T_OBJECT: case T_METADATA: case T_ARRAY: - assert((kind_field() == cpu_register || kind_field() == stack_value) && + assert((kindfield == cpu_register || kindfield == stack_value) && size_field() == single_size, "must match"); break; @@ -429,6 +430,11 @@ _stub = new ArrayCopyStub(this); } +LIR_OpUpdateCRC32::LIR_OpUpdateCRC32(LIR_Opr crc, LIR_Opr val, LIR_Opr res) + : LIR_Op(lir_updatecrc32, res, 
NULL) + , _crc(crc) + , _val(val) { +} //-------------------verify-------------------------- @@ -875,6 +881,20 @@ } +// LIR_OpUpdateCRC32 + case lir_updatecrc32: { + assert(op->as_OpUpdateCRC32() != NULL, "must be"); + LIR_OpUpdateCRC32* opUp = (LIR_OpUpdateCRC32*)op; + + assert(opUp->_crc->is_valid(), "used"); do_input(opUp->_crc); do_temp(opUp->_crc); + assert(opUp->_val->is_valid(), "used"); do_input(opUp->_val); do_temp(opUp->_val); + assert(opUp->_result->is_valid(), "used"); do_output(opUp->_result); + assert(opUp->_info == NULL, "no info for LIR_OpUpdateCRC32"); + + break; + } + + // LIR_OpLock case lir_lock: case lir_unlock: { @@ -1055,6 +1075,10 @@ masm->emit_code_stub(stub()); } +void LIR_OpUpdateCRC32::emit_code(LIR_Assembler* masm) { + masm->emit_updatecrc32(this); +} + void LIR_Op0::emit_code(LIR_Assembler* masm) { masm->emit_op0(this); } @@ -1762,6 +1786,8 @@ case lir_dynamic_call: s = "dynamic"; break; // LIR_OpArrayCopy case lir_arraycopy: s = "arraycopy"; break; + // LIR_OpUpdateCRC32 + case lir_updatecrc32: s = "updatecrc32"; break; // LIR_OpLock case lir_lock: s = "lock"; break; case lir_unlock: s = "unlock"; break; @@ -1814,6 +1840,13 @@ tmp()->print(out); out->print(" "); } +// LIR_OpUpdateCRC32 +void LIR_OpUpdateCRC32::print_instr(outputStream* out) const { + crc()->print(out); out->print(" "); + val()->print(out); out->print(" "); + result_opr()->print(out); out->print(" "); +} + // LIR_OpCompareAndSwap void LIR_OpCompareAndSwap::print_instr(outputStream* out) const { addr()->print(out); out->print(" "); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/c1/c1_LIR.hpp --- a/src/share/vm/c1/c1_LIR.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/c1/c1_LIR.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -877,6 +877,7 @@ class LIR_OpJavaCall; class LIR_OpRTCall; class LIR_OpArrayCopy; +class LIR_OpUpdateCRC32; class LIR_OpLock; class LIR_OpTypeCheck; class LIR_OpCompareAndSwap; @@ -982,6 +983,9 @@ , begin_opArrayCopy , lir_arraycopy , end_opArrayCopy + , begin_opUpdateCRC32 + , lir_updatecrc32 + , end_opUpdateCRC32 , begin_opLock , lir_lock , lir_unlock @@ -1137,6 +1141,7 @@ virtual LIR_Op2* as_Op2() { return NULL; } virtual LIR_Op3* as_Op3() { return NULL; } virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; } + virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return NULL; } virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; } virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; } virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; } @@ -1293,6 +1298,25 @@ void print_instr(outputStream* out) const PRODUCT_RETURN; }; +// LIR_OpUpdateCRC32 +class LIR_OpUpdateCRC32: public LIR_Op { + friend class LIR_OpVisitState; + +private: + LIR_Opr _crc; + LIR_Opr _val; + +public: + + LIR_OpUpdateCRC32(LIR_Opr crc, LIR_Opr val, LIR_Opr res); + + LIR_Opr crc() const { return _crc; } + LIR_Opr val() const { return _val; } + + virtual void emit_code(LIR_Assembler* masm); + virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return this; } + void print_instr(outputStream* out) const PRODUCT_RETURN; +}; // -------------------------------------------------- // LIR_Op0 @@ -2212,6 +2236,8 @@ void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); } + void update_crc32(LIR_Opr crc, LIR_Opr val, LIR_Opr res) { append(new LIR_OpUpdateCRC32(crc, val, res)); } + void fpop_raw() { append(new LIR_Op0(lir_fpop_raw)); } void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/c1/c1_LIRAssembler.hpp --- a/src/share/vm/c1/c1_LIRAssembler.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/c1/c1_LIRAssembler.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
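The lir_updatecrc32 opcode and LIR_OpUpdateCRC32 node above give C1 a first-class representation for the java.util.zip.CRC32 intrinsics enabled in c1_GraphBuilder.cpp under UseCRC32Intrinsics, emitted through the new emit_updatecrc32 hook. Whatever fast path StubRoutines::updateBytesCRC32 takes, the value it must produce is the standard CRC-32. A self-contained check of that contract, using zlib purely as an illustration (zlib is not something this patch touches):

#include <zlib.h>
#include <cstdio>

int main() {
  const char* data = "hello";
  uLong crc = crc32(0L, Z_NULL, 0);           // canonical initial value
  crc = crc32(crc, (const Bytef*) data, 5);   // incremental update, like
                                              // CRC32.update(byte[]) in Java
  printf("%08lx\n", (unsigned long) crc);     // 3610a686 for "hello"
  return 0;
}
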
* * This code is free software; you can redistribute it and/or modify it @@ -195,6 +195,7 @@ void emit_opBranch(LIR_OpBranch* op); void emit_opLabel(LIR_OpLabel* op); void emit_arraycopy(LIR_OpArrayCopy* op); + void emit_updatecrc32(LIR_OpUpdateCRC32* op); void emit_opConvert(LIR_OpConvert* op); void emit_alloc_obj(LIR_OpAllocObj* op); void emit_alloc_array(LIR_OpAllocArray* op); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/c1/c1_LIRGenerator.cpp --- a/src/share/vm/c1/c1_LIRGenerator.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -2994,6 +2994,12 @@ do_Reference_get(x); break; + case vmIntrinsics::_updateCRC32: + case vmIntrinsics::_updateBytesCRC32: + case vmIntrinsics::_updateByteBufferCRC32: + do_update_CRC32(x); + break; + default: ShouldNotReachHere(); break; } } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/c1/c1_LIRGenerator.hpp --- a/src/share/vm/c1/c1_LIRGenerator.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/c1/c1_LIRGenerator.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -247,6 +247,7 @@ void do_NIOCheckIndex(Intrinsic* x); void do_FPIntrinsics(Intrinsic* x); void do_Reference_get(Intrinsic* x); + void do_update_CRC32(Intrinsic* x); void do_UnsafePrefetch(UnsafePrefetch* x, bool is_store); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/c1/c1_Runtime1.cpp --- a/src/share/vm/c1/c1_Runtime1.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/c1/c1_Runtime1.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -299,6 +299,7 @@ #ifdef TRACE_HAVE_INTRINSICS FUNCTION_CASE(entry, TRACE_TIME_METHOD); #endif + FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32()); #undef FUNCTION_CASE diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/ci/ciObjectFactory.cpp --- a/src/share/vm/ci/ciObjectFactory.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/ci/ciObjectFactory.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -265,8 +265,6 @@ ciMetadata* ciObjectFactory::get_metadata(Metadata* key) { ASSERT_IN_VM; - assert(key == NULL || key->is_metadata(), "must be"); - #ifdef ASSERT if (CIObjectFactoryVerify) { Metadata* last = NULL; diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/ci/ciUtilities.hpp --- a/src/share/vm/ci/ciUtilities.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/ci/ciUtilities.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -96,7 +96,7 @@ CLEAR_PENDING_EXCEPTION; \ return (result); \ } \ - (0 + (void)(0 #define KILL_COMPILE_ON_ANY \ THREAD); \ @@ -104,7 +104,7 @@ fatal("unhandled ci exception"); \ CLEAR_PENDING_EXCEPTION; \ } \ -(0 +(void)(0 inline const char* bool_to_str(bool b) { diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/classfile/classFileParser.cpp --- a/src/share/vm/classfile/classFileParser.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/classfile/classFileParser.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -39,6 +39,7 @@ #include "memory/gcLocker.hpp" #include "memory/metadataFactory.hpp" #include "memory/oopFactory.hpp" +#include "memory/referenceType.hpp" #include "memory/universe.inline.hpp" #include "oops/constantPool.hpp" #include "oops/fieldStreams.hpp" diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/classfile/classLoaderData.cpp --- a/src/share/vm/classfile/classLoaderData.cpp Tue Jul 16 10:55:48 
2013 -0400 +++ b/src/share/vm/classfile/classLoaderData.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -64,6 +64,11 @@ #include "utilities/growableArray.hpp" #include "utilities/ostream.hpp" +#if INCLUDE_TRACE + #include "trace/tracing.hpp" +#endif + + ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL; ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) : @@ -120,6 +125,12 @@ } } +void ClassLoaderData::classes_do(void f(Klass * const)) { + for (Klass* k = _klasses; k != NULL; k = k->next_link()) { + f(k); + } +} + void ClassLoaderData::classes_do(void f(InstanceKlass*)) { for (Klass* k = _klasses; k != NULL; k = k->next_link()) { if (k->oop_is_instance()) { @@ -583,6 +594,19 @@ } } +void ClassLoaderDataGraph::classes_do(void f(Klass* const)) { + for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) { + cld->classes_do(f); + } +} + +void ClassLoaderDataGraph::classes_unloading_do(void f(Klass* const)) { + assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!"); + for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) { + cld->classes_do(f); + } +} + GrowableArray* ClassLoaderDataGraph::new_clds() { assert(_head == NULL || _saved_head != NULL, "remember_new_clds(true) not called?"); @@ -687,6 +711,11 @@ dead->set_next(_unloading); _unloading = dead; } + + if (seen_dead_loader) { + post_class_unload_events(); + } + return seen_dead_loader; } @@ -702,6 +731,20 @@ Metaspace::purge(); } +void ClassLoaderDataGraph::post_class_unload_events(void) { +#if INCLUDE_TRACE + assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!"); + if (Tracing::enabled()) { + if (Tracing::is_event_enabled(TraceClassUnloadEvent)) { + assert(_unloading != NULL, "need class loader data unload list!"); + _class_unload_time = Tracing::time(); + classes_unloading_do(&class_unload_event); + } + Tracing::on_unloading_classes(); + } +#endif +} + // CDS support // Global metaspaces for writing information to the shared archive. When @@ -769,3 +812,21 @@ class_loader()->print_value_on(out); } } + +#if INCLUDE_TRACE + +TracingTime ClassLoaderDataGraph::_class_unload_time; + +void ClassLoaderDataGraph::class_unload_event(Klass* const k) { + + // post class unload event + EventClassUnload event(UNTIMED); + event.set_endtime(_class_unload_time); + event.set_unloadedClass(k); + oop defining_class_loader = k->class_loader(); + event.set_definingClassLoader(defining_class_loader != NULL ? + defining_class_loader->klass() : (Klass*)NULL); + event.commit(); +} + +#endif /* INCLUDE_TRACE */ diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/classfile/classLoaderData.hpp --- a/src/share/vm/classfile/classLoaderData.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/classfile/classLoaderData.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -32,6 +32,10 @@ #include "runtime/mutex.hpp" #include "utilities/growableArray.hpp" +#if INCLUDE_TRACE +# include "trace/traceTime.hpp" +#endif + // // A class loader represents a linkset. 
Conceptually, a linkset identifies // the complete transitive closure of resolved links that a dynamic linker can @@ -49,6 +53,7 @@ class JNIMethodBlock; class JNIHandleBlock; class Metadebug; + // GC root for walking class loader data created class ClassLoaderDataGraph : public AllStatic { @@ -63,6 +68,7 @@ static ClassLoaderData* _saved_head; static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS); + static void post_class_unload_events(void); public: static ClassLoaderData* find_or_create(Handle class_loader, TRAPS); static void purge(); @@ -71,6 +77,8 @@ static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim); static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim); static void classes_do(KlassClosure* klass_closure); + static void classes_do(void f(Klass* const)); + static void classes_unloading_do(void f(Klass* const)); static bool do_unloading(BoolObjectClosure* is_alive); // CMS support. @@ -86,6 +94,12 @@ static bool contains(address x); static bool contains_loader_data(ClassLoaderData* loader_data); #endif + +#if INCLUDE_TRACE + private: + static TracingTime _class_unload_time; + static void class_unload_event(Klass* const k); +#endif }; // ClassLoaderData class @@ -171,7 +185,7 @@ void unload(); bool keep_alive() const { return _keep_alive; } bool is_alive(BoolObjectClosure* is_alive_closure) const; - + void classes_do(void f(Klass*)); void classes_do(void f(InstanceKlass*)); // Deallocate free list during class unloading. diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/classfile/defaultMethods.cpp --- a/src/share/vm/classfile/defaultMethods.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/classfile/defaultMethods.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -318,17 +318,17 @@ } }; + // A method family contains a set of all methods that implement a single -// language-level method. Because of erasure, these methods may have different -// signatures. As members of the set are collected while walking over the +// erased method. As members of the set are collected while walking over the // hierarchy, they are tagged with a qualification state. The qualification // state for an erased method is set to disqualified if there exists a path // from the root of hierarchy to the method that contains an interleaving -// language-equivalent method defined in an interface. +// erased method defined in an interface. 
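The tagging protocol this comment describes is implemented further down by StatefulMethodFamily::record_method_and_dq_further and StateRestorer: a member is recorded under the walk's current qualification state, everything found deeper along the same path is then disqualified, and the old state is restored when the walk backs out. A reduction of that pattern, with stand-in names throughout:

#include <cstdio>
#include <vector>

enum QualifiedState { QUALIFIED, DISQUALIFIED };

struct Family {
  std::vector<int> qualified, disqualified;
  QualifiedState state;
  Family() : state(QUALIFIED) {}

  void record(int method_id) {
    if (state == QUALIFIED) qualified.push_back(method_id);
    else                    disqualified.push_back(method_id);
  }
};

// Stand-in for StateRestorer: saves the state, disqualifies everything
// found deeper along this path, restores the old state when destroyed.
struct ScopedDisqualify {
  Family& family;
  QualifiedState saved;
  ScopedDisqualify(Family& f) : family(f), saved(f.state) {
    f.state = DISQUALIFIED;
  }
  ~ScopedDisqualify() { family.state = saved; }
};

int main() {
  Family f;
  f.record(1);                  // seen directly: qualified
  {
    ScopedDisqualify mark(f);   // descend past a declaring interface
    f.record(2);                // found deeper on this path: disqualified
  }
  f.record(3);                  // back out, sibling branch: qualified again
  printf("%d qualified, %d disqualified\n",
         (int) f.qualified.size(), (int) f.disqualified.size());
  return 0;
}
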
+ class MethodFamily : public ResourceObj { private: - generic::MethodDescriptor* _descriptor; // language-level description GrowableArray > _members; ResourceHashtable _member_index; @@ -358,15 +358,8 @@ public: - MethodFamily(generic::MethodDescriptor* canonical_desc) - : _descriptor(canonical_desc), _selected_target(NULL), - _exception_message(NULL) {} - - generic::MethodDescriptor* descriptor() const { return _descriptor; } - - bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) { - return descriptor()->covariant_match(md, ctx); - } + MethodFamily() + : _selected_target(NULL), _exception_message(NULL) {} void set_target_if_empty(Method* m) { if (_selected_target == NULL && !m->is_overpass()) { @@ -441,16 +434,10 @@ } #ifndef PRODUCT - void print_on(outputStream* str) const { - print_on(str, 0); - } - - void print_on(outputStream* str, int indent) const { + void print_sig_on(outputStream* str, Symbol* signature, int indent) const { streamIndentor si(str, indent * 2); - generic::Context ctx(NULL); // empty, as _descriptor already canonicalized - TempNewSymbol family = descriptor()->reify_signature(&ctx, Thread::current()); - str->indent().print_cr("Logical Method %s:", family->as_C_string()); + str->indent().print_cr("Logical Method %s:", signature->as_C_string()); streamIndentor si2(str); for (int i = 0; i < _members.length(); ++i) { @@ -516,36 +503,92 @@ return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL); } +// A generic method family contains a set of all methods that implement a single +// language-level method. Because of erasure, these methods may have different +// signatures. As members of the set are collected while walking over the +// hierarchy, they are tagged with a qualification state. The qualification +// state for an erased method is set to disqualified if there exists a path +// from the root of hierarchy to the method that contains an interleaving +// language-equivalent method defined in an interface. +class GenericMethodFamily : public MethodFamily { + private: + + generic::MethodDescriptor* _descriptor; // language-level description + + public: + + GenericMethodFamily(generic::MethodDescriptor* canonical_desc) + : _descriptor(canonical_desc) {} + + generic::MethodDescriptor* descriptor() const { return _descriptor; } + + bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) { + return descriptor()->covariant_match(md, ctx); + } + +#ifndef PRODUCT + Symbol* get_generic_sig() const { + + generic::Context ctx(NULL); // empty, as _descriptor already canonicalized + TempNewSymbol sig = descriptor()->reify_signature(&ctx, Thread::current()); + return sig; + } +#endif // ndef PRODUCT +}; + class StateRestorer; -// StatefulMethodFamily is a wrapper around MethodFamily that maintains the +// StatefulMethodFamily is a wrapper around a MethodFamily that maintains the // qualification state during hierarchy visitation, and applies that state -// when adding members to the MethodFamily. 
+// when adding members to the MethodFamily class StatefulMethodFamily : public ResourceObj { friend class StateRestorer; private: - MethodFamily* _method; QualifiedState _qualification_state; void set_qualification_state(QualifiedState state) { _qualification_state = state; } + protected: + MethodFamily* _method_family; + public: - StatefulMethodFamily(generic::MethodDescriptor* md, generic::Context* ctx) { - _method = new MethodFamily(md->canonicalize(ctx)); - _qualification_state = QUALIFIED; + StatefulMethodFamily() { + _method_family = new MethodFamily(); + _qualification_state = QUALIFIED; + } + + StatefulMethodFamily(MethodFamily* mf) { + _method_family = mf; + _qualification_state = QUALIFIED; } - void set_target_if_empty(Method* m) { _method->set_target_if_empty(m); } + void set_target_if_empty(Method* m) { _method_family->set_target_if_empty(m); } + + MethodFamily* get_method_family() { return _method_family; } + + StateRestorer* record_method_and_dq_further(Method* mo); +}; + - MethodFamily* get_method_family() { return _method; } +// StatefulGenericMethodFamily is a wrapper around GenericMethodFamily that maintains the +// qualification state during hierarchy visitation, and applies that state +// when adding members to the GenericMethodFamily. +class StatefulGenericMethodFamily : public StatefulMethodFamily { + + public: + StatefulGenericMethodFamily(generic::MethodDescriptor* md, generic::Context* ctx) + : StatefulMethodFamily(new GenericMethodFamily(md->canonicalize(ctx))) { + + } + GenericMethodFamily* get_method_family() { + return (GenericMethodFamily*)_method_family; + } bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) { - return _method->descriptor_matches(md, ctx); + return get_method_family()->descriptor_matches(md, ctx); } - - StateRestorer* record_method_and_dq_further(Method* mo); }; class StateRestorer : public PseudoScopeMark { @@ -563,9 +606,9 @@ StateRestorer* StatefulMethodFamily::record_method_and_dq_further(Method* mo) { StateRestorer* mark = new StateRestorer(this, _qualification_state); if (_qualification_state == QUALIFIED) { - _method->record_qualified_method(mo); + _method_family->record_qualified_method(mo); } else { - _method->record_disqualified_method(mo); + _method_family->record_disqualified_method(mo); } // Everything found "above"??? 
this method in the hierarchy walk is set to // disqualified @@ -573,15 +616,15 @@ return mark; } -class StatefulMethodFamilies : public ResourceObj { +class StatefulGenericMethodFamilies : public ResourceObj { private: - GrowableArray _methods; + GrowableArray _methods; public: - StatefulMethodFamily* find_matching( + StatefulGenericMethodFamily* find_matching( generic::MethodDescriptor* md, generic::Context* ctx) { for (int i = 0; i < _methods.length(); ++i) { - StatefulMethodFamily* existing = _methods.at(i); + StatefulGenericMethodFamily* existing = _methods.at(i); if (existing->descriptor_matches(md, ctx)) { return existing; } @@ -589,17 +632,17 @@ return NULL; } - StatefulMethodFamily* find_matching_or_create( + StatefulGenericMethodFamily* find_matching_or_create( generic::MethodDescriptor* md, generic::Context* ctx) { - StatefulMethodFamily* method = find_matching(md, ctx); + StatefulGenericMethodFamily* method = find_matching(md, ctx); if (method == NULL) { - method = new StatefulMethodFamily(md, ctx); + method = new StatefulGenericMethodFamily(md, ctx); _methods.append(method); } return method; } - void extract_families_into(GrowableArray* array) { + void extract_families_into(GrowableArray* array) { for (int i = 0; i < _methods.length(); ++i) { array->append(_methods.at(i)->get_method_family()); } @@ -683,26 +726,79 @@ return slots; } +// Iterates over the superinterface type hierarchy looking for all methods +// with a specific erased signature. +class FindMethodsByErasedSig : public HierarchyVisitor { + private: + // Context data + Symbol* _method_name; + Symbol* _method_signature; + StatefulMethodFamily* _family; + + public: + FindMethodsByErasedSig(Symbol* name, Symbol* signature) : + _method_name(name), _method_signature(signature), + _family(NULL) {} + + void get_discovered_family(MethodFamily** family) { + if (_family != NULL) { + *family = _family->get_method_family(); + } else { + *family = NULL; + } + } + + void* new_node_data(InstanceKlass* cls) { return new PseudoScope(); } + void free_node_data(void* node_data) { + PseudoScope::cast(node_data)->destroy(); + } + + // Find all methods on this hierarchy that match this + // method's erased (name, signature) + bool visit() { + PseudoScope* scope = PseudoScope::cast(current_data()); + InstanceKlass* iklass = current_class(); + + Method* m = iklass->find_method(_method_name, _method_signature); + if (m != NULL) { + if (_family == NULL) { + _family = new StatefulMethodFamily(); + } + + if (iklass->is_interface()) { + StateRestorer* restorer = _family->record_method_and_dq_further(m); + scope->add_mark(restorer); + } else { + // This is the rule that methods in classes "win" (bad word) over + // methods in interfaces. This works because of single inheritance + _family->set_target_if_empty(m); + } + } + return true; + } + +}; + // Iterates over the type hierarchy looking for all methods with a specific // method name. The result of this is a set of method families each of // which is populated with a set of methods that implement the same // language-level signature. 
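The split above separates exact-erased-signature matching (FindMethodsByErasedSig feeding a plain MethodFamily) from the language-level matching kept in the Generic* classes. The erased walk buckets methods by their exact (name, signature) pair, so every implementation of one erased method lands in one family. A minimal sketch of that bucketing, with strings standing in for Symbol*/Method*:

#include <cstdio>
#include <map>
#include <string>
#include <utility>
#include <vector>

// (name, erased signature) -> all methods sharing that exact erased shape
typedef std::pair<std::string, std::string> Key;
typedef std::map<Key, std::vector<std::string> > Families;

int main() {
  Families families;

  // After erasure, List<String> m() and List<Integer> m() collide:
  families[Key("m", "()Ljava/util/List;")].push_back("I1.m -> List<String>");
  families[Key("m", "()Ljava/util/List;")].push_back("I2.m -> List<Integer>");
  families[Key("m", "()I")].push_back("I3.m -> int");

  for (Families::const_iterator it = families.begin();
       it != families.end(); ++it) {
    printf("%s %s : %d member(s)\n", it->first.first.c_str(),
           it->first.second.c_str(), (int) it->second.size());
  }
  return 0;
}
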
-class FindMethodsByName : public HierarchyVisitor { +class FindMethodsByGenericSig : public HierarchyVisitor { private: // Context data Thread* THREAD; generic::DescriptorCache* _cache; Symbol* _method_name; generic::Context* _ctx; - StatefulMethodFamilies _families; + StatefulGenericMethodFamilies _families; public: - FindMethodsByName(generic::DescriptorCache* cache, Symbol* name, + FindMethodsByGenericSig(generic::DescriptorCache* cache, Symbol* name, generic::Context* ctx, Thread* thread) : _cache(cache), _method_name(name), _ctx(ctx), THREAD(thread) {} - void get_discovered_families(GrowableArray* methods) { + void get_discovered_families(GrowableArray* methods) { _families.extract_families_into(methods); } @@ -733,7 +829,7 @@ // Find all methods on this hierarchy that match this method // (name, signature). This class collects other families of this // method name. - StatefulMethodFamily* family = + StatefulGenericMethodFamily* family = _families.find_matching_or_create(md, _ctx); if (klass->is_interface()) { @@ -752,8 +848,8 @@ }; #ifndef PRODUCT -static void print_families( - GrowableArray* methods, Symbol* match) { +static void print_generic_families( + GrowableArray* methods, Symbol* match) { streamIndentor si(tty, 4); if (methods->length() == 0) { tty->indent(); @@ -761,21 +857,86 @@ } for (int i = 0; i < methods->length(); ++i) { tty->indent(); - MethodFamily* lm = methods->at(i); + GenericMethodFamily* lm = methods->at(i); if (lm->contains_signature(match)) { tty->print_cr(""); } else { tty->print_cr(""); } - lm->print_on(tty, 1); + lm->print_sig_on(tty, lm->get_generic_sig(), 1); } } #endif // ndef PRODUCT +static void create_overpasses( + GrowableArray* slots, InstanceKlass* klass, TRAPS); + +static void generate_generic_defaults( + InstanceKlass* klass, GrowableArray* empty_slots, + EmptyVtableSlot* slot, int current_slot_index, TRAPS) { + + if (slot->is_bound()) { +#ifndef PRODUCT + if (TraceDefaultMethods) { + streamIndentor si(tty, 4); + tty->indent().print_cr("Already bound to logical method:"); + GenericMethodFamily* lm = (GenericMethodFamily*)(slot->get_binding()); + lm->print_sig_on(tty, lm->get_generic_sig(), 1); + } +#endif // ndef PRODUCT + return; // covered by previous processing + } + + generic::DescriptorCache cache; + + generic::Context ctx(&cache); + FindMethodsByGenericSig visitor(&cache, slot->name(), &ctx, CHECK); + visitor.run(klass); + + GrowableArray discovered_families; + visitor.get_discovered_families(&discovered_families); + +#ifndef PRODUCT + if (TraceDefaultMethods) { + print_generic_families(&discovered_families, slot->signature()); + } +#endif // ndef PRODUCT + + // Find and populate any other slots that match the discovered families + for (int j = current_slot_index; j < empty_slots->length(); ++j) { + EmptyVtableSlot* open_slot = empty_slots->at(j); + + if (slot->name() == open_slot->name()) { + for (int k = 0; k < discovered_families.length(); ++k) { + GenericMethodFamily* lm = discovered_families.at(k); + + if (lm->contains_signature(open_slot->signature())) { + lm->determine_target(klass, CHECK); + open_slot->bind_family(lm); + } + } + } + } +} + +static void generate_erased_defaults( + InstanceKlass* klass, GrowableArray* empty_slots, + EmptyVtableSlot* slot, TRAPS) { + + // sets up a set of methods with the same exact erased signature + FindMethodsByErasedSig visitor(slot->name(), slot->signature()); + visitor.run(klass); + + MethodFamily* family; + visitor.get_discovered_family(&family); + if (family != NULL) { + 
family->determine_target(klass, CHECK); + slot->bind_family(family); + } +} + static void merge_in_new_methods(InstanceKlass* klass, GrowableArray<Method*>* new_methods, TRAPS); -static void create_overpasses( - GrowableArray<EmptyVtableSlot*>* slots, InstanceKlass* klass, TRAPS); // This is the guts of the default methods implementation. This is called just // after the classfile has been parsed if some ancestor has default methods. @@ -807,8 +968,6 @@ // whatever scope it's in. ResourceMark rm(THREAD); - generic::DescriptorCache cache; - // Keep entire hierarchy alive for the duration of the computation KeepAliveRegistrar keepAlive(THREAD); KeepAliveVisitor loadKeepAlive(&keepAlive); @@ -837,47 +996,13 @@ tty->print_cr(""); } #endif // ndef PRODUCT - if (slot->is_bound()) { -#ifndef PRODUCT - if (TraceDefaultMethods) { - streamIndentor si(tty, 4); - tty->indent().print_cr("Already bound to logical method:"); - slot->get_binding()->print_on(tty, 1); - } -#endif // ndef PRODUCT - continue; // covered by previous processing + + if (ParseGenericDefaults) { + generate_generic_defaults(klass, empty_slots, slot, i, CHECK); + } else { + generate_erased_defaults(klass, empty_slots, slot, CHECK); } - - generic::Context ctx(&cache); - FindMethodsByName visitor(&cache, slot->name(), &ctx, CHECK); - visitor.run(klass); - - GrowableArray<MethodFamily*> discovered_families; - visitor.get_discovered_families(&discovered_families); - -#ifndef PRODUCT - if (TraceDefaultMethods) { - print_families(&discovered_families, slot->signature()); - } -#endif // ndef PRODUCT - - // Find and populate any other slots that match the discovered families - for (int j = i; j < empty_slots->length(); ++j) { - EmptyVtableSlot* open_slot = empty_slots->at(j); - - if (slot->name() == open_slot->name()) { - for (int k = 0; k < discovered_families.length(); ++k) { - MethodFamily* lm = discovered_families.at(k); - - if (lm->contains_signature(open_slot->signature())) { - lm->determine_target(klass, CHECK); - open_slot->bind_family(lm); - } - } - } - } - } - + } #ifndef PRODUCT if (TraceDefaultMethods) { tty->print_cr("Creating overpasses..."); @@ -893,7 +1018,6 @@ #endif // ndef PRODUCT } - /** * Generic analysis was used upon interface '_target' and found a unique * default method candidate with generic signature '_method_desc'. This @@ -912,16 +1036,84 @@ * the selected method along that path. */ class ShadowChecker : public HierarchyVisitor<ShadowChecker> { - private: - generic::DescriptorCache* _cache; + protected: Thread* THREAD; InstanceKlass* _target; Symbol* _method_name; InstanceKlass* _method_holder; + bool _found_shadow; + + + public: + + ShadowChecker(Thread* thread, Symbol* name, InstanceKlass* holder, + InstanceKlass* target) + : THREAD(thread), _method_name(name), _method_holder(holder), + _target(target), _found_shadow(false) {} + + void* new_node_data(InstanceKlass* cls) { return NULL; } + void free_node_data(void* data) { return; } + + bool visit() { + InstanceKlass* ik = current_class(); + if (ik == _target && current_depth() == 1) { + return false; // This was the specified super -- no need to search it + } + if (ik == _method_holder || ik == _target) { + // We found a path that should be examined to see if it shadows _method + if (path_has_shadow()) { + _found_shadow = true; + cancel_iteration(); + } + return false; // no need to continue up hierarchy + } + return true; + } + + virtual bool path_has_shadow() = 0; + bool found_shadow() { return _found_shadow; } +}; + +// Used for Invokespecial.
+// Invokespecial is allowed to invoke a concrete interface method +// and can be used to disambiguate among qualified candidates, +// which are methods in immediate superinterfaces, +// but may not be used to invoke a candidate that would be shadowed +// from the perspective of the caller. +// Invokespecial is also used in the overpass generation today. +// We re-run the ShadowChecker because we can't distinguish this case, +// but it should return the same answer, since the overpass target +// is now the invokespecial caller. +class ErasedShadowChecker : public ShadowChecker { + private: + bool path_has_shadow() { + + for (int i = current_depth() - 1; i > 0; --i) { + InstanceKlass* ik = class_at_depth(i); + + if (ik->is_interface()) { + int end; + int start = ik->find_method_by_name(_method_name, &end); + if (start != -1) { + return true; + } + } + } + return false; + } + public: + + ErasedShadowChecker(Thread* thread, Symbol* name, InstanceKlass* holder, + InstanceKlass* target) + : ShadowChecker(thread, name, holder, target) {} +}; + +class GenericShadowChecker : public ShadowChecker { + private: + generic::DescriptorCache* _cache; generic::MethodDescriptor* _method_desc; - bool _found_shadow; bool path_has_shadow() { generic::Context ctx(_cache); @@ -950,104 +1142,42 @@ public: - ShadowChecker(generic::DescriptorCache* cache, Thread* thread, + GenericShadowChecker(generic::DescriptorCache* cache, Thread* thread, Symbol* name, InstanceKlass* holder, generic::MethodDescriptor* desc, InstanceKlass* target) - : _cache(cache), THREAD(thread), _method_name(name), _method_holder(holder), - _method_desc(desc), _target(target), _found_shadow(false) {} - - void* new_node_data(InstanceKlass* cls) { return NULL; } - void free_node_data(void* data) { return; } - - bool visit() { - InstanceKlass* ik = current_class(); - if (ik == _target && current_depth() == 1) { - return false; // This was the specified super -- no need to search it - } - if (ik == _method_holder || ik == _target) { - // We found a path that should be examined to see if it shadows _method - if (path_has_shadow()) { - _found_shadow = true; - cancel_iteration(); - } - return false; // no need to continue up hierarchy - } - return true; - } - - bool found_shadow() { return _found_shadow; } + : ShadowChecker(thread, name, holder, target) { + _cache = cache; + _method_desc = desc; + } }; -// This is called during linktime when we find an invokespecial call that -// refers to a direct superinterface. It indicates that we should find the -// default method in the hierarchy of that superinterface, and if that method -// would have been a candidate from the point of view of 'this' class, then we -// return that method.
-Method* DefaultMethods::find_super_default( - Klass* cls, Klass* super, Symbol* method_name, Symbol* sig, TRAPS) { + - ResourceMark rm(THREAD); - - assert(cls != NULL && super != NULL, "Need real classes"); - - InstanceKlass* current_class = InstanceKlass::cast(cls); - InstanceKlass* direction = InstanceKlass::cast(super); +// Find the unique qualified candidate from the perspective of the super_class +// which is the resolved_klass, which must be an immediate superinterface +// of klass +Method* find_erased_super_default(InstanceKlass* current_class, InstanceKlass* super_class, Symbol* method_name, Symbol* sig, TRAPS) { - // Keep entire hierarchy alive for the duration of the computation - KeepAliveRegistrar keepAlive(THREAD); - KeepAliveVisitor loadKeepAlive(&keepAlive); - loadKeepAlive.run(current_class); + FindMethodsByErasedSig visitor(method_name, sig); + visitor.run(super_class); // find candidates from resolved_klass -#ifndef PRODUCT - if (TraceDefaultMethods) { - tty->print_cr("Finding super default method %s.%s%s from %s", - direction->name()->as_C_string(), - method_name->as_C_string(), sig->as_C_string(), - current_class->name()->as_C_string()); - } -#endif // ndef PRODUCT + MethodFamily* family; + visitor.get_discovered_family(&family); - if (!direction->is_interface()) { - // We should not be here - return NULL; + if (family != NULL) { + family->determine_target(current_class, CHECK_NULL); // get target from current_class } - generic::DescriptorCache cache; - generic::Context ctx(&cache); - - // Prime the initial generic context for current -> direction - ctx.apply_type_arguments(current_class, direction, CHECK_NULL); - - FindMethodsByName visitor(&cache, method_name, &ctx, CHECK_NULL); - visitor.run(direction); - - GrowableArray<MethodFamily*> families; - visitor.get_discovered_families(&families); - -#ifndef PRODUCT - if (TraceDefaultMethods) { - print_families(&families, sig); - } -#endif // ndef PRODUCT - - MethodFamily* selected_family = NULL; - - for (int i = 0; i < families.length(); ++i) { - MethodFamily* lm = families.at(i); - if (lm->contains_signature(sig)) { - lm->determine_target(current_class, CHECK_NULL); - selected_family = lm; - } - } - - if (selected_family->has_target()) { - Method* target = selected_family->get_selected_target(); + if (family->has_target()) { + Method* target = family->get_selected_target(); InstanceKlass* holder = InstanceKlass::cast(target->method_holder()); // Verify that the identified method is valid from the context of - // the current class - ShadowChecker checker(&cache, THREAD, target->name(), - holder, selected_family->descriptor(), direction); + // the current class, which is the caller class for invokespecial + // link resolution, i.e. ensure that it is not shadowed. + // You can use invokespecial to disambiguate interface methods, but + // you cannot use it to skip over an interface method that would shadow it.
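The shadowing rule in the comment above amounts to a short walk over the inheritance path between the caller and the candidate's holder. A standalone sketch, with Type as an illustrative stand-in for HotSpot's hierarchy machinery:

    #include <string>
    #include <vector>

    // A path runs from the caller (index 0) to the candidate's holder
    // (last index). Any interface strictly between the two that declares a
    // method of the same name shadows the candidate, so the invokespecial
    // must be rejected.
    struct Type {
      bool is_interface;
      std::vector<std::string> method_names;

      bool declares(const std::string& name) const {
        for (const std::string& m : method_names) {
          if (m == name) return true;
        }
        return false;
      }
    };

    bool path_has_shadow(const std::vector<Type>& path,
                         const std::string& method_name) {
      for (size_t i = 1; i + 1 < path.size(); ++i) {  // endpoints exempt
        if (path[i].is_interface && path[i].declares(method_name)) {
          return true;
        }
      }
      return false;
    }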
+ ErasedShadowChecker checker(THREAD, target->name(), holder, super_class); checker.run(current_class); if (checker.found_shadow()) { @@ -1061,20 +1191,143 @@ } else { #ifndef PRODUCT if (TraceDefaultMethods) { - tty->print(" Returning "); - print_method(tty, target, true); - tty->print_cr(""); + family->print_sig_on(tty, target->signature(), 1); } #endif // ndef PRODUCT return target; } } else { + assert(family->throws_exception(), "must have target or throw"); + THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(), + family->get_exception_message()->as_C_string(), NULL); + } +} + +// super_class is assumed to be the direct super of current_class +Method* find_generic_super_default( InstanceKlass* current_class, + InstanceKlass* super_class, + Symbol* method_name, Symbol* sig, TRAPS) { + generic::DescriptorCache cache; + generic::Context ctx(&cache); + + // Prime the initial generic context for current -> super_class + ctx.apply_type_arguments(current_class, super_class, CHECK_NULL); + + FindMethodsByGenericSig visitor(&cache, method_name, &ctx, CHECK_NULL); + visitor.run(super_class); + + GrowableArray<GenericMethodFamily*> families; + visitor.get_discovered_families(&families); + +#ifndef PRODUCT + if (TraceDefaultMethods) { + print_generic_families(&families, sig); + } +#endif // ndef PRODUCT + + GenericMethodFamily* selected_family = NULL; + + for (int i = 0; i < families.length(); ++i) { + GenericMethodFamily* lm = families.at(i); + if (lm->contains_signature(sig)) { + lm->determine_target(current_class, CHECK_NULL); + selected_family = lm; + } + } + + if (selected_family->has_target()) { + Method* target = selected_family->get_selected_target(); + InstanceKlass* holder = InstanceKlass::cast(target->method_holder()); + + // Verify that the identified method is valid from the context of + // the current class + GenericShadowChecker checker(&cache, THREAD, target->name(), + holder, selected_family->descriptor(), super_class); + checker.run(current_class); + + if (checker.found_shadow()) { +#ifndef PRODUCT + if (TraceDefaultMethods) { + tty->print_cr(" Only candidate found was shadowed."); + } +#endif // ndef PRODUCT + THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(), + "Accessible default method not found", NULL); + } else { + return target; + } + } else { assert(selected_family->throws_exception(), "must have target or throw"); THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(), selected_family->get_exception_message()->as_C_string(), NULL); } } +// This is called during linktime when we find an invokespecial call that +// refers to a direct superinterface. It indicates that we should find the +// default method in the hierarchy of that superinterface, and if that method +// would have been a candidate from the point of view of 'this' class, then we +// return that method.
+// This logic assumes that the super is a direct superclass of the caller +Method* DefaultMethods::find_super_default( + Klass* cls, Klass* super, Symbol* method_name, Symbol* sig, TRAPS) { + + ResourceMark rm(THREAD); + + assert(cls != NULL && super != NULL, "Need real classes"); + + InstanceKlass* current_class = InstanceKlass::cast(cls); + InstanceKlass* super_class = InstanceKlass::cast(super); + + // Keep entire hierarchy alive for the duration of the computation + KeepAliveRegistrar keepAlive(THREAD); + KeepAliveVisitor loadKeepAlive(&keepAlive); + loadKeepAlive.run(current_class); // get hierarchy from current class + +#ifndef PRODUCT + if (TraceDefaultMethods) { + tty->print_cr("Finding super default method %s.%s%s from %s", + super_class->name()->as_C_string(), + method_name->as_C_string(), sig->as_C_string(), + current_class->name()->as_C_string()); + } +#endif // ndef PRODUCT + + assert(super_class->is_interface(), "only call for default methods"); + + Method* target = NULL; + if (ParseGenericDefaults) { + target = find_generic_super_default(current_class, super_class, + method_name, sig, CHECK_NULL); + } else { + target = find_erased_super_default(current_class, super_class, + method_name, sig, CHECK_NULL); + } + +#ifndef PRODUCT + if (target != NULL) { + if (TraceDefaultMethods) { + tty->print(" Returning "); + print_method(tty, target, true); + tty->print_cr(""); + } + } +#endif // ndef PRODUCT + return target; +} + +#ifndef PRODUCT +// Return true if broad type is a covariant return of narrow type +static bool covariant_return_type(BasicType narrow, BasicType broad) { + if (narrow == broad) { + return true; + } + if (broad == T_OBJECT) { + return true; + } + return false; +} +#endif // ndef PRODUCT static int assemble_redirect( BytecodeConstantPool* cp, BytecodeBuffer* buffer, @@ -1103,7 +1356,7 @@ out.next(); } assert(out.at_return_type(), "Parameter counts do not match"); - assert(in.type() == out.type(), "Return types are not compatible"); + assert(covariant_return_type(out.type(), in.type()), "Return types are not compatible"); if (parameter_count == 1 && (in.type() == T_LONG || in.type() == T_DOUBLE)) { ++parameter_count; // need room for return value @@ -1144,10 +1397,15 @@ Symbol* sig, AccessFlags flags, int max_stack, int params, ConstMethod::MethodType mt, TRAPS) { - address code_start = static_cast<address>
(bytecodes->adr_at(0)); - int code_length = bytecodes->length(); + address code_start = 0; + int code_length = 0; InlineTableSizes sizes; + if (bytecodes != NULL && bytecodes->length() > 0) { + code_start = static_cast<address>
(bytecodes->adr_at(0)); + code_length = bytecodes->length(); + } + Method* m = Method::allocate(cp->pool_holder()->class_loader_data(), code_length, flags, &sizes, mt, CHECK_NULL); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/classfile/dictionary.cpp --- a/src/share/vm/classfile/dictionary.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/classfile/dictionary.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -555,7 +555,7 @@ loader_data->class_loader() == NULL || loader_data->class_loader()->is_instance(), "checking type of class_loader"); - e->verify(); + e->verify(/*check_dictionary*/false); probe->verify_protection_domain_set(); element_count++; } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/classfile/genericSignatures.cpp --- a/src/share/vm/classfile/genericSignatures.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/classfile/genericSignatures.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -124,7 +124,7 @@ fatal(STREAM->parse_error()); \ } \ return NULL; \ - } 0 + } (void)0 #define READ() STREAM->read(); CHECK_FOR_PARSE_ERROR() #define PEEK() STREAM->peek(); CHECK_FOR_PARSE_ERROR() @@ -133,7 +133,7 @@ #define EXPECTED(c, ch) STREAM->assert_char(c, ch); CHECK_FOR_PARSE_ERROR() #define EXPECT_END() STREAM->expect_end(); CHECK_FOR_PARSE_ERROR() -#define CHECK_STREAM STREAM); CHECK_FOR_PARSE_ERROR(); (0 +#define CHECK_STREAM STREAM); CHECK_FOR_PARSE_ERROR(); ((void)0 #ifndef PRODUCT void Identifier::print_on(outputStream* str) const { diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/classfile/javaClasses.cpp --- a/src/share/vm/classfile/javaClasses.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/classfile/javaClasses.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -961,7 +961,7 @@ // Read thread status value from threadStatus field in java.lang.Thread java class. java_lang_Thread::ThreadStatus java_lang_Thread::get_thread_status(oop java_thread) { - assert(Thread::current()->is_VM_thread() || + assert(Thread::current()->is_Watcher_thread() || Thread::current()->is_VM_thread() || JavaThread::current()->thread_state() == _thread_in_vm, "Java Thread is not running in vm"); // The threadStatus is only present starting in 1.5 @@ -2825,6 +2825,7 @@ int java_security_AccessControlContext::_context_offset = 0; int java_security_AccessControlContext::_privilegedContext_offset = 0; int java_security_AccessControlContext::_isPrivileged_offset = 0; +int java_security_AccessControlContext::_isAuthorized_offset = -1; void java_security_AccessControlContext::compute_offsets() { assert(_isPrivileged_offset == 0, "offsets should be initialized only once"); @@ -2845,8 +2846,19 @@ fatal("Invalid layout of java.security.AccessControlContext"); } _isPrivileged_offset = fd.offset(); -} - + + // The offset may not be present for bootstrapping with older JDK. 
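That bootstrapping caveat is why _isAuthorized_offset starts at -1 rather than 0, and why the lookup that follows is conditional. The pattern in miniature, with Obj and the offset table as hypothetical stand-ins for HotSpot's oops and field lookup:

    #include <cstdint>
    #include <string>
    #include <unordered_map>

    // Optional-field pattern: probe once at bootstrap, keep -1 as the
    // "field absent" sentinel, and guard every accessor.
    struct Obj {
      std::unordered_map<int, uint8_t> raw;  // field offset -> stored byte
      bool bool_field(int off) const { return raw.at(off) != 0; }
    };

    static std::unordered_map<std::string, int> field_offsets;  // class layout
    static int isAuthorized_offset = -1;

    void compute_optional_offsets() {
      auto it = field_offsets.find("isAuthorized");
      if (it != field_offsets.end()) {  // absent on an older JDK
        isAuthorized_offset = it->second;
      }
    }

    bool is_authorized(const Obj& ctx) {
      // The patch asserts the offset is set before use; a more defensive
      // variant could treat "absent" as "not authorized" instead.
      return isAuthorized_offset != -1 && ctx.bool_field(isAuthorized_offset);
    }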
+ if (ik->find_local_field(vmSymbols::isAuthorized_name(), vmSymbols::bool_signature(), &fd)) { + _isAuthorized_offset = fd.offset(); + } +} + + +bool java_security_AccessControlContext::is_authorized(Handle context) { + assert(context.not_null() && context->klass() == SystemDictionary::AccessControlContext_klass(), "Invalid type"); + assert(_isAuthorized_offset != -1, "should be set"); + return context->bool_field(_isAuthorized_offset) != 0; +} oop java_security_AccessControlContext::create(objArrayHandle context, bool isPrivileged, Handle privileged_context, TRAPS) { assert(_isPrivileged_offset != 0, "offsets should have been initialized"); @@ -2858,6 +2870,10 @@ result->obj_field_put(_context_offset, context()); result->obj_field_put(_privilegedContext_offset, privileged_context()); result->bool_field_put(_isPrivileged_offset, isPrivileged); + // whitelist AccessControlContexts created by the JVM if present + if (_isAuthorized_offset != -1) { + result->bool_field_put(_isAuthorized_offset, true); + } return result; } @@ -2967,6 +2983,15 @@ } +bool java_lang_System::has_security_manager() { + InstanceKlass* ik = InstanceKlass::cast(SystemDictionary::System_klass()); + address addr = ik->static_field_addr(static_security_offset); + if (UseCompressedOops) { + return oopDesc::load_decode_heap_oop((narrowOop *)addr) != NULL; + } else { + return oopDesc::load_decode_heap_oop((oop*)addr) != NULL; + } +} int java_lang_Class::_klass_offset; int java_lang_Class::_array_klass_offset; @@ -3030,6 +3055,7 @@ int java_lang_System::static_in_offset; int java_lang_System::static_out_offset; int java_lang_System::static_err_offset; +int java_lang_System::static_security_offset; int java_lang_StackTraceElement::declaringClass_offset; int java_lang_StackTraceElement::methodName_offset; int java_lang_StackTraceElement::fileName_offset; @@ -3155,6 +3181,7 @@ java_lang_System::static_in_offset = java_lang_System::hc_static_in_offset * x; java_lang_System::static_out_offset = java_lang_System::hc_static_out_offset * x; java_lang_System::static_err_offset = java_lang_System::hc_static_err_offset * x; + java_lang_System::static_security_offset = java_lang_System::hc_static_security_offset * x; // java_lang_StackTraceElement java_lang_StackTraceElement::declaringClass_offset = java_lang_StackTraceElement::hc_declaringClass_offset * x + header; @@ -3354,6 +3381,7 @@ CHECK_STATIC_OFFSET("java/lang/System", java_lang_System, in, "Ljava/io/InputStream;"); CHECK_STATIC_OFFSET("java/lang/System", java_lang_System, out, "Ljava/io/PrintStream;"); CHECK_STATIC_OFFSET("java/lang/System", java_lang_System, err, "Ljava/io/PrintStream;"); + CHECK_STATIC_OFFSET("java/lang/System", java_lang_System, security, "Ljava/lang/SecurityManager;"); // java.lang.StackTraceElement diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/classfile/javaClasses.hpp --- a/src/share/vm/classfile/javaClasses.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/classfile/javaClasses.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -234,6 +234,7 @@ static GrowableArray* _fixup_mirror_list; static void set_init_lock(oop java_class, oop init_lock); + static void set_protection_domain(oop java_class, oop protection_domain); public: static void compute_offsets(); @@ -272,7 +273,6 @@ // Support for embedded per-class oops static oop protection_domain(oop java_class); - static void set_protection_domain(oop java_class, oop protection_domain); static oop init_lock(oop java_class); static objArrayOop signers(oop java_class); static void set_signers(oop java_class, 
objArrayOop signers); @@ -1167,11 +1167,14 @@ static int _context_offset; static int _privilegedContext_offset; static int _isPrivileged_offset; + static int _isAuthorized_offset; static void compute_offsets(); public: static oop create(objArrayHandle context, bool isPrivileged, Handle privileged_context, TRAPS); + static bool is_authorized(Handle context); + // Debugging/initialization friend class JavaClasses; }; @@ -1231,18 +1234,22 @@ enum { hc_static_in_offset = 0, hc_static_out_offset = 1, - hc_static_err_offset = 2 + hc_static_err_offset = 2, + hc_static_security_offset = 3 }; static int static_in_offset; static int static_out_offset; static int static_err_offset; + static int static_security_offset; public: static int in_offset_in_bytes(); static int out_offset_in_bytes(); static int err_offset_in_bytes(); + static bool has_security_manager(); + // Debugging friend class JavaClasses; }; diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/classfile/symbolTable.cpp --- a/src/share/vm/classfile/symbolTable.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/classfile/symbolTable.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -598,6 +598,8 @@ bool StringTable::_needs_rehashing = false; +volatile int StringTable::_parallel_claimed_idx = 0; + // Pick hashing algorithm unsigned int StringTable::hash_string(const jchar* s, int len) { return use_alternate_hashcode() ? AltHashing::murmur3_32(seed(), s, len) : @@ -761,8 +763,18 @@ } } -void StringTable::oops_do(OopClosure* f) { - for (int i = 0; i < the_table()->table_size(); ++i) { +void StringTable::buckets_do(OopClosure* f, int start_idx, int end_idx) { + const int limit = the_table()->table_size(); + + assert(0 <= start_idx && start_idx <= limit, + err_msg("start_idx (" INT32_FORMAT ") oob?", start_idx)); + assert(0 <= end_idx && end_idx <= limit, + err_msg("end_idx (" INT32_FORMAT ") oob?", end_idx)); + assert(start_idx <= end_idx, + err_msg("Ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT, + start_idx, end_idx)); + + for (int i = start_idx; i < end_idx; i += 1) { HashtableEntry* entry = the_table()->bucket(i); while (entry != NULL) { assert(!entry->is_shared(), "CDS not used for the StringTable"); @@ -774,6 +786,27 @@ } } +void StringTable::oops_do(OopClosure* f) { + buckets_do(f, 0, the_table()->table_size()); +} + +void StringTable::possibly_parallel_oops_do(OopClosure* f) { + const int ClaimChunkSize = 32; + const int limit = the_table()->table_size(); + + for (;;) { + // Grab next set of buckets to scan + int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize; + if (start_idx >= limit) { + // End of table + break; + } + + int end_idx = MIN2(limit, start_idx + ClaimChunkSize); + buckets_do(f, start_idx, end_idx); + } +} + void StringTable::verify() { for (int i = 0; i < the_table()->table_size(); ++i) { HashtableEntry* p = the_table()->bucket(i); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/classfile/symbolTable.hpp --- a/src/share/vm/classfile/symbolTable.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/classfile/symbolTable.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
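The claiming loop in possibly_parallel_oops_do above is a compact lock-free work-distribution idiom: one shared index, advanced atomically, hands each GC worker an exclusive chunk of buckets. A self-contained sketch, with std::atomic standing in for HotSpot's Atomic::add:

    #include <algorithm>
    #include <atomic>

    // Each fetch_add claims an exclusive [start, end) slice of buckets, so
    // every bucket is scanned exactly once however many workers join in.
    static std::atomic<int> parallel_claimed_idx{0};

    void possibly_parallel_scan(int table_size, void (*do_bucket)(int)) {
      const int ClaimChunkSize = 32;  // same chunk size the patch uses
      for (;;) {
        int start = parallel_claimed_idx.fetch_add(ClaimChunkSize);
        if (start >= table_size) {
          break;  // table fully claimed
        }
        int end = std::min(table_size, start + ClaimChunkSize);
        for (int i = start; i < end; ++i) {
          do_bucket(i);
        }
      }
    }

Since the index only ever grows during a scan, clear_parallel_claimed_index must reset it before each new one, exactly as the header below provides for.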
* * This code is free software; you can redistribute it and/or modify it @@ -246,12 +246,19 @@ // Set if one bucket is out of balance due to hash algorithm deficiency static bool _needs_rehashing; + // Claimed high water mark for parallel chunked scanning + static volatile int _parallel_claimed_idx; + static oop intern(Handle string_or_null, jchar* chars, int length, TRAPS); oop basic_add(int index, Handle string_or_null, jchar* name, int len, unsigned int hashValue, TRAPS); oop lookup(int index, jchar* chars, int length, unsigned int hashValue); + // Apply the given oop closure to the entries in the buckets + // in the range [start_idx, end_idx). + static void buckets_do(OopClosure* f, int start_idx, int end_idx); + StringTable() : Hashtable<oop, mtSymbol>((int)StringTableSize, sizeof (HashtableEntry<oop, mtSymbol>)) {} @@ -277,9 +284,12 @@ unlink_or_oops_do(cl, NULL); } - // Invoke "f->do_oop" on the locations of all oops in the table. + // Serially invoke "f->do_oop" on the locations of all oops in the table. static void oops_do(OopClosure* f); + // Possibly parallel version of the above + static void possibly_parallel_oops_do(OopClosure* f); + // Hashing algorithm, used as the hash value used by the // StringTable for bucket selection and comparison (stored in the // HashtableEntry structures). This is used in the String.intern() method. @@ -315,5 +325,8 @@ // Rehash the symbol table if it gets out of balance static void rehash_table(); static bool needs_rehashing() { return _needs_rehashing; } + + // Parallel chunked scanning + static void clear_parallel_claimed_index() { _parallel_claimed_idx = 0; } }; #endif // SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/classfile/systemDictionary.cpp --- a/src/share/vm/classfile/systemDictionary.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/classfile/systemDictionary.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -56,6 +56,11 @@ #include "services/classLoadingService.hpp" #include "services/threadService.hpp" +#if INCLUDE_TRACE + #include "trace/tracing.hpp" + #include "trace/traceMacros.hpp" +#endif + Dictionary* SystemDictionary::_dictionary = NULL; PlaceholderTable* SystemDictionary::_placeholders = NULL; @@ -586,10 +591,15 @@ } -Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle class_loader, Handle protection_domain, TRAPS) { +Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, + Handle class_loader, + Handle protection_domain, + TRAPS) { assert(name != NULL && !FieldType::is_array(name) && !FieldType::is_obj(name), "invalid class name"); + TracingTime class_load_start_time = Tracing::time(); + // UseNewReflection // Fix for 4474172; see evaluation for more details class_loader = Handle(THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader())); @@ -804,8 +814,9 @@ // during compilations.
MutexLocker mu(Compile_lock, THREAD); update_dictionary(d_index, d_hash, p_index, p_hash, - k, class_loader, THREAD); + k, class_loader, THREAD); } + if (JvmtiExport::should_post_class_load()) { Thread *thread = THREAD; assert(thread->is_Java_thread(), "thread->is_Java_thread()"); @@ -861,8 +872,8 @@ // This brackets the SystemDictionary updates for both defining // and initiating loaders MutexLocker mu(SystemDictionary_lock, THREAD); - placeholders()->find_and_remove(p_index, p_hash, name, loader_data, PlaceholderTable::LOAD_INSTANCE, THREAD); - SystemDictionary_lock->notify_all(); + placeholders()->find_and_remove(p_index, p_hash, name, loader_data, PlaceholderTable::LOAD_INSTANCE, THREAD); + SystemDictionary_lock->notify_all(); } } @@ -870,6 +881,8 @@ return NULL; } + post_class_load_event(class_load_start_time, k, class_loader); + #ifdef ASSERT { ClassLoaderData* loader_data = k->class_loader_data(); @@ -993,6 +1006,8 @@ TRAPS) { TempNewSymbol parsed_name = NULL; + TracingTime class_load_start_time = Tracing::time(); + ClassLoaderData* loader_data; if (host_klass.not_null()) { // Create a new CLD for anonymous class, that uses the same class loader @@ -1048,6 +1063,8 @@ assert(THREAD->is_Java_thread(), "thread->is_Java_thread()"); JvmtiExport::post_class_load((JavaThread *) THREAD, k()); } + + post_class_load_event(class_load_start_time, k, class_loader); } assert(host_klass.not_null() || cp_patches == NULL, "cp_patches only found with host_klass"); @@ -1435,6 +1452,7 @@ JvmtiExport::post_class_load((JavaThread *) THREAD, k()); } + } // Support parallel classloading @@ -1678,6 +1696,7 @@ } return newsize; } + // Assumes classes in the SystemDictionary are only unloaded at a safepoint // Note: anonymous classes are not in the SD. bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) { @@ -2024,12 +2043,6 @@ } } - // Assign a classid if one has not already been assigned. The - // counter does not need to be atomically incremented since this - // is only done while holding the SystemDictionary_lock. - // All loaded classes get a unique ID. - TRACE_INIT_ID(k); - // Make a new system dictionary entry. Klass* sd_check = find_class(d_index, d_hash, name, loader_data); if (sd_check == NULL) { @@ -2612,6 +2625,27 @@ "Loaded klasses should be in SystemDictionary"); } +// utility function for class load event +void SystemDictionary::post_class_load_event(TracingTime start_time, + instanceKlassHandle k, + Handle initiating_loader) { +#if INCLUDE_TRACE + EventClassLoad event(UNTIMED); + if (event.should_commit()) { + event.set_endtime(Tracing::time()); + event.set_starttime(start_time); + event.set_loadedClass(k()); + oop defining_class_loader = k->class_loader(); + event.set_definingClassLoader(defining_class_loader != NULL ? + defining_class_loader->klass() : (Klass*)NULL); + oop class_loader = initiating_loader.is_null() ? (oop)NULL : initiating_loader(); + event.set_initiatingClassLoader(class_loader != NULL ? 
+ class_loader->klass() : (Klass*)NULL); + event.commit(); + } +#endif /* INCLUDE_TRACE */ +} + #ifndef PRODUCT // statistics code diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/classfile/systemDictionary.hpp --- a/src/share/vm/classfile/systemDictionary.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/classfile/systemDictionary.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -31,9 +31,11 @@ #include "oops/symbol.hpp" #include "runtime/java.hpp" #include "runtime/reflectionUtils.hpp" +#include "trace/traceTime.hpp" #include "utilities/hashtable.hpp" #include "utilities/hashtable.inline.hpp" + // The system dictionary stores all loaded classes and maps: // // [class name,class loader] -> class i.e. [Symbol*,oop] -> Klass* @@ -636,6 +638,9 @@ // Setup link to hierarchy static void add_to_hierarchy(instanceKlassHandle k, TRAPS); + // event based tracing + static void post_class_load_event(TracingTime start_time, instanceKlassHandle k, + Handle initiating_loader); // We pass in the hashtable index so we can calculate it outside of // the SystemDictionary_lock. diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/classfile/verifier.hpp --- a/src/share/vm/classfile/verifier.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/classfile/verifier.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -86,9 +86,9 @@ // These macros are used similarly to CHECK macros but also check // the status of the verifier and return if that has an error. #define CHECK_VERIFY(verifier) \ - CHECK); if ((verifier)->has_error()) return; (0 + CHECK); if ((verifier)->has_error()) return; ((void)0 #define CHECK_VERIFY_(verifier, result) \ - CHECK_(result)); if ((verifier)->has_error()) return (result); (0 + CHECK_(result)); if ((verifier)->has_error()) return (result); ((void)0 class TypeOrigin VALUE_OBJ_CLASS_SPEC { private: diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/classfile/vmSymbols.hpp --- a/src/share/vm/classfile/vmSymbols.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/classfile/vmSymbols.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -94,6 +94,7 @@ template(java_lang_SecurityManager, "java/lang/SecurityManager") \ template(java_security_AccessControlContext, "java/security/AccessControlContext") \ template(java_security_ProtectionDomain, "java/security/ProtectionDomain") \ + template(impliesCreateAccessControlContext_name, "impliesCreateAccessControlContext") \ template(java_io_OutputStream, "java/io/OutputStream") \ template(java_io_Reader, "java/io/Reader") \ template(java_io_BufferedReader, "java/io/BufferedReader") \ @@ -346,6 +347,7 @@ template(contextClassLoader_name, "contextClassLoader") \ template(inheritedAccessControlContext_name, "inheritedAccessControlContext") \ template(isPrivileged_name, "isPrivileged") \ + template(isAuthorized_name, "isAuthorized") \ template(getClassContext_name, "getClassContext") \ template(wait_name, "wait") \ template(checkPackageAccess_name, "checkPackageAccess") \ @@ -769,6 +771,17 @@ do_name( decrypt_name, "decrypt") \ do_signature(byteArray_int_int_byteArray_int_signature, "([BII[BI)V") \ \ + /* support for java.util.zip */ \ + do_class(java_util_zip_CRC32, "java/util/zip/CRC32") \ + do_intrinsic(_updateCRC32, java_util_zip_CRC32, update_name, int2_int_signature, F_SN) \ + do_name( update_name, "update") \ + do_intrinsic(_updateBytesCRC32, java_util_zip_CRC32, updateBytes_name, updateBytes_signature, F_SN) \ + do_name( updateBytes_name, "updateBytes") \ + do_signature(updateBytes_signature, "(I[BII)I") \ + do_intrinsic(_updateByteBufferCRC32, java_util_zip_CRC32, 
updateByteBuffer_name, updateByteBuffer_signature, F_SN) \ + do_name( updateByteBuffer_name, "updateByteBuffer") \ + do_signature(updateByteBuffer_signature, "(IJII)I") \ + \ /* support for sun.misc.Unsafe */ \ do_class(sun_misc_Unsafe, "sun/misc/Unsafe") \ \ diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/code/codeCache.cpp --- a/src/share/vm/code/codeCache.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/code/codeCache.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,6 +45,7 @@ #include "runtime/java.hpp" #include "runtime/mutexLocker.hpp" #include "services/memoryService.hpp" +#include "trace/tracing.hpp" #include "utilities/xmlstream.hpp" // Helper class for printing in CodeCache @@ -114,7 +115,6 @@ } }; - // CodeCache implementation CodeHeap * CodeCache::_heap = new CodeHeap(); @@ -126,6 +126,7 @@ nmethod* CodeCache::_scavenge_root_nmethods = NULL; nmethod* CodeCache::_saved_nmethods = NULL; +int CodeCache::_codemem_full_count = 0; CodeBlob* CodeCache::first() { assert_locked_or_safepoint(CodeCache_lock); @@ -829,6 +830,22 @@ } } +void CodeCache::report_codemem_full() { + _codemem_full_count++; + EventCodeCacheFull event; + if (event.should_commit()) { + event.set_startAddress((u8)low_bound()); + event.set_commitedTopAddress((u8)high()); + event.set_reservedTopAddress((u8)high_bound()); + event.set_entryCount(nof_blobs()); + event.set_methodCount(nof_nmethods()); + event.set_adaptorCount(nof_adapters()); + event.set_unallocatedCapacity(unallocated_capacity()/K); + event.set_fullCount(_codemem_full_count); + event.commit(); + } +} + //------------------------------------------------------------------------------------------------ // Non-product version diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/code/codeCache.hpp --- a/src/share/vm/code/codeCache.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/code/codeCache.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
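report_codemem_full above follows the same event shape this changeset uses for class loading (EventClassLoad) and compilation (EventCompilation): construct the event, bail out via should_commit when tracing is off, then fill in fields and commit. A schematic version, with StubEvent as a hypothetical stand-in for the generated trace event classes:

    #include <cstdint>

    // Construct / should_commit / fill / commit. The early should_commit
    // test keeps the field work off the common path when nobody listens.
    struct StubEvent {
      uint64_t unallocated_kb = 0;
      int full_count = 0;

      static bool tracing_enabled;  // stands in for recording state
      bool should_commit() const { return tracing_enabled; }
      void commit() { /* hand the filled-in record to the trace backend */ }
    };
    bool StubEvent::tracing_enabled = false;

    void report_cache_full(uint64_t unallocated_bytes, int count) {
      StubEvent event;
      if (event.should_commit()) {
        event.unallocated_kb = unallocated_bytes / 1024;
        event.full_count = count;
        event.commit();
      }
    }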
* * This code is free software; you can redistribute it and/or modify it @@ -64,11 +64,15 @@ static void mark_scavenge_root_nmethods() PRODUCT_RETURN; static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN; + static int _codemem_full_count; + public: // Initialization static void initialize(); + static void report_codemem_full(); + // Allocation/administration static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled @@ -155,6 +159,7 @@ // The full limits of the codeCache static address low_bound() { return (address) _heap->low_boundary(); } static address high_bound() { return (address) _heap->high_boundary(); } + static address high() { return (address) _heap->high(); } // Profiling static address first_address(); // first address used for CodeBlobs @@ -186,6 +191,8 @@ // tells how many nmethods have dependencies static int number_of_nmethods_with_dependencies(); + + static int get_codemem_full_count() { return _codemem_full_count; } }; #endif // SHARE_VM_CODE_CODECACHE_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/code/debugInfo.hpp --- a/src/share/vm/code/debugInfo.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/code/debugInfo.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -274,7 +274,7 @@ Method* read_method() { Method* o = (Method*)(code()->metadata_at(read_int())); assert(o == NULL || - o->is_metadata(), "meta data only"); + o->is_metaspace_object(), "meta data only"); return o; } ScopeValue* read_object_value(); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/code/dependencies.cpp --- a/src/share/vm/code/dependencies.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/code/dependencies.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -655,8 +655,8 @@ } else { o = _deps->oop_recorder()->metadata_at(i); } - assert(o == NULL || o->is_metadata(), - err_msg("Should be perm " PTR_FORMAT, o)); + assert(o == NULL || o->is_metaspace_object(), + err_msg("Should be metadata " PTR_FORMAT, o)); return o; } @@ -989,7 +989,7 @@ assert(changes.involves_context(context_type), "irrelevant dependency"); Klass* new_type = changes.new_type(); - count_find_witness_calls(); + (void)count_find_witness_calls(); NOT_PRODUCT(deps_find_witness_singles++); // Current thread must be in VM (not native mode, as in CI): diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/code/nmethod.cpp --- a/src/share/vm/code/nmethod.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/code/nmethod.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1081,11 +1081,6 @@ metadata_Relocation* reloc = iter.metadata_reloc(); reloc->fix_metadata_relocation(); } - - // There must not be any interfering patches or breakpoints. 
- assert(!(iter.type() == relocInfo::breakpoint_type - && iter.breakpoint_reloc()->active()), - "no active breakpoint"); } } @@ -2615,7 +2610,8 @@ relocation_begin()-1+ip[1]); for (; ip < index_end; ip++) tty->print_cr(" (%d ?)", ip[0]); - tty->print_cr(" @" INTPTR_FORMAT ": index_size=%d", ip, *ip++); + tty->print_cr(" @" INTPTR_FORMAT ": index_size=%d", ip, *ip); + ip++; tty->print_cr("reloc_end @" INTPTR_FORMAT ":", ip); } } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/code/relocInfo.cpp --- a/src/share/vm/code/relocInfo.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/code/relocInfo.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -338,31 +338,6 @@ _limit = limit; } - -void PatchingRelocIterator:: prepass() { - // turn breakpoints off during patching - _init_state = (*this); // save cursor - while (next()) { - if (type() == relocInfo::breakpoint_type) { - breakpoint_reloc()->set_active(false); - } - } - (RelocIterator&)(*this) = _init_state; // reset cursor for client -} - - -void PatchingRelocIterator:: postpass() { - // turn breakpoints back on after patching - (RelocIterator&)(*this) = _init_state; // reset cursor again - while (next()) { - if (type() == relocInfo::breakpoint_type) { - breakpoint_Relocation* bpt = breakpoint_reloc(); - bpt->set_active(bpt->enabled()); - } - } -} - - // All the strange bit-encodings are in here. // The idea is to encode relocation data which are small integers // very efficiently (a single extra halfword). Larger chunks of @@ -704,51 +679,6 @@ _target = address_from_scaled_offset(offset, base); } - -void breakpoint_Relocation::pack_data_to(CodeSection* dest) { - short* p = (short*) dest->locs_end(); - address point = dest->locs_point(); - - *p++ = _bits; - - assert(_target != NULL, "sanity"); - - if (internal()) normalize_address(_target, dest); - - jint target_bits = - (jint)( internal() ? scaled_offset (_target, point) - : runtime_address_to_index(_target) ); - if (settable()) { - // save space for set_target later - p = add_jint(p, target_bits); - } else { - p = add_var_int(p, target_bits); - } - - for (int i = 0; i < instrlen(); i++) { - // put placeholder words until bytes can be saved - p = add_short(p, (short)0x7777); - } - - dest->set_locs_end((relocInfo*) p); -} - - -void breakpoint_Relocation::unpack_data() { - _bits = live_bits(); - - int targetlen = datalen() - 1 - instrlen(); - jint target_bits = 0; - if (targetlen == 0) target_bits = 0; - else if (targetlen == 1) target_bits = *(data()+1); - else if (targetlen == 2) target_bits = relocInfo::jint_from_data(data()+1); - else { ShouldNotReachHere(); } - - _target = internal() ? 
address_from_scaled_offset(target_bits, addr()) - : index_to_runtime_address (target_bits); -} - - //// miscellaneous methods oop* oop_Relocation::oop_addr() { int n = _oop_index; @@ -933,81 +863,6 @@ return target; } - -breakpoint_Relocation::breakpoint_Relocation(int kind, address target, bool internal) { - bool active = false; - bool enabled = (kind == initialization); - bool removable = (kind != safepoint); - bool settable = (target == NULL); - - int bits = kind; - if (enabled) bits |= enabled_state; - if (internal) bits |= internal_attr; - if (removable) bits |= removable_attr; - if (settable) bits |= settable_attr; - - _bits = bits | high_bit; - _target = target; - - assert(this->kind() == kind, "kind encoded"); - assert(this->enabled() == enabled, "enabled encoded"); - assert(this->active() == active, "active encoded"); - assert(this->internal() == internal, "internal encoded"); - assert(this->removable() == removable, "removable encoded"); - assert(this->settable() == settable, "settable encoded"); -} - - -address breakpoint_Relocation::target() const { - return _target; -} - - -void breakpoint_Relocation::set_target(address x) { - assert(settable(), "must be settable"); - jint target_bits = - (jint)(internal() ? scaled_offset (x, addr()) - : runtime_address_to_index(x)); - short* p = &live_bits() + 1; - p = add_jint(p, target_bits); - assert(p == instrs(), "new target must fit"); - _target = x; -} - - -void breakpoint_Relocation::set_enabled(bool b) { - if (enabled() == b) return; - - if (b) { - set_bits(bits() | enabled_state); - } else { - set_active(false); // remove the actual breakpoint insn, if any - set_bits(bits() & ~enabled_state); - } -} - - -void breakpoint_Relocation::set_active(bool b) { - assert(!b || enabled(), "cannot activate a disabled breakpoint"); - - if (active() == b) return; - - // %%% should probably seize a lock here (might not be the right lock) - //MutexLockerEx ml_patch(Patching_lock, true); - //if (active() == b) return; // recheck state after locking - - if (b) { - set_bits(bits() | active_state); - if (instrlen() == 0) - fatal("breakpoints in original code must be undoable"); - pd_swap_in_breakpoint (addr(), instrs(), instrlen()); - } else { - set_bits(bits() & ~active_state); - pd_swap_out_breakpoint(addr(), instrs(), instrlen()); - } -} - - //--------------------------------------------------------------------------------- // Non-product code diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/code/relocInfo.hpp --- a/src/share/vm/code/relocInfo.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/code/relocInfo.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -49,9 +49,6 @@ // RelocIterator // A StackObj which iterates over the relocations associated with // a range of code addresses. Can be used to operate a copy of code. -// PatchingRelocIterator -// Specialized subtype of RelocIterator which removes breakpoints -// temporarily during iteration, then restores them. // BoundRelocation // An _internal_ type shared by packers and unpackers of relocations. // It pastes together a RelocationHolder with some pointers into @@ -204,15 +201,6 @@ // immediate field must not straddle a unit of memory coherence. // //%note reloc_3 // -// relocInfo::breakpoint_type -- a conditional breakpoint in the code -// Value: none -// Instruction types: any whatsoever -// Data: [b [T]t i...] -// The b is a bit-packed word representing the breakpoint's attributes. -// The t is a target address which the breakpoint calls (when it is enabled). -// The i... 
is a place to store one or two instruction words overwritten -// by a trap, so that the breakpoint may be subsequently removed. -// // relocInfo::static_stub_type -- an extra stub for each static_call_type // Value: none // Instruction types: a virtual call: { set_oop; jump; } @@ -271,8 +259,8 @@ section_word_type = 9, // internal, but a cross-section reference poll_type = 10, // polling instruction for safepoints poll_return_type = 11, // polling instruction for safepoints at return - breakpoint_type = 12, // an initialization barrier or safepoint - metadata_type = 13, // metadata that used to be oops + metadata_type = 12, // metadata that used to be oops + yet_unused_type_1 = 13, // Still unused yet_unused_type_2 = 14, // Still unused data_prefix_tag = 15, // tag for a prefix (carries data arguments) type_mask = 15 // A mask which selects only the above values @@ -312,7 +300,6 @@ visitor(internal_word) \ visitor(poll) \ visitor(poll_return) \ - visitor(breakpoint) \ visitor(section_word) \ @@ -454,7 +441,7 @@ public: enum { // Conservatively large estimate of maximum length (in shorts) - // of any relocation record (probably breakpoints are largest). + // of any relocation record. // Extended format is length prefix, data words, and tag/offset suffix. length_limit = 1 + 1 + (3*BytesPerWord/BytesPerShort) + 1, have_format = format_width > 0 @@ -571,8 +558,6 @@ void initialize(nmethod* nm, address begin, address limit); - friend class PatchingRelocIterator; - // make an uninitialized one, for PatchingRelocIterator: RelocIterator() { initialize_misc(); } public: @@ -779,9 +764,6 @@ void pd_verify_data_value (address x, intptr_t off) { pd_set_data_value(x, off, true); } address pd_call_destination (address orig_addr = NULL); void pd_set_call_destination (address x); - void pd_swap_in_breakpoint (address x, short* instrs, int instrlen); - void pd_swap_out_breakpoint (address x, short* instrs, int instrlen); - static int pd_breakpoint_size (); // this extracts the address of an address in the code stream instead of the reloc data address* pd_address_in_code (); @@ -1302,87 +1284,6 @@ void fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest); }; - -class breakpoint_Relocation : public Relocation { - relocInfo::relocType type() { return relocInfo::breakpoint_type; } - - enum { - // attributes which affect the interpretation of the data: - removable_attr = 0x0010, // buffer [i...] allows for undoing the trap - internal_attr = 0x0020, // the target is an internal addr (local stub) - settable_attr = 0x0040, // the target is settable - - // states which can change over time: - enabled_state = 0x0100, // breakpoint must be active in running code - active_state = 0x0200, // breakpoint instruction actually in code - - kind_mask = 0x000F, // mask for extracting kind - high_bit = 0x4000 // extra bit which is always set - }; - - public: - enum { - // kinds: - initialization = 1, - safepoint = 2 - }; - - // If target is NULL, 32 bits are reserved for a later set_target(). - static RelocationHolder spec(int kind, address target = NULL, bool internal_target = false) { - RelocationHolder rh = newHolder(); - new(rh) breakpoint_Relocation(kind, target, internal_target); - return rh; - } - - private: - // We require every bits value to NOT to fit into relocInfo::datalen_width, - // because we are going to actually store state in the reloc, and so - // cannot allow it to be compressed (and hence copied by the iterator). 
- - short _bits; // bit-encoded kind, attrs, & state - address _target; - - breakpoint_Relocation(int kind, address target, bool internal_target); - - friend class RelocIterator; - breakpoint_Relocation() { } - - short bits() const { return _bits; } - short& live_bits() const { return data()[0]; } - short* instrs() const { return data() + datalen() - instrlen(); } - int instrlen() const { return removable() ? pd_breakpoint_size() : 0; } - - void set_bits(short x) { - assert(live_bits() == _bits, "must be the only mutator of reloc info"); - live_bits() = _bits = x; - } - - public: - address target() const; - void set_target(address x); - - int kind() const { return bits() & kind_mask; } - bool enabled() const { return (bits() & enabled_state) != 0; } - bool active() const { return (bits() & active_state) != 0; } - bool internal() const { return (bits() & internal_attr) != 0; } - bool removable() const { return (bits() & removable_attr) != 0; } - bool settable() const { return (bits() & settable_attr) != 0; } - - void set_enabled(bool b); // to activate, you must also say set_active - void set_active(bool b); // actually inserts bpt (must be enabled 1st) - - // data is packed as 16 bits, followed by the target (1 or 2 words), followed - // if necessary by empty storage for saving away original instruction bytes. - void pack_data_to(CodeSection* dest); - void unpack_data(); - - // during certain operations, breakpoints must be out of the way: - void fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) { - assert(!active(), "cannot perform relocation on enabled breakpoints"); - } -}; - - // We know all the xxx_Relocation classes, so now we can define these: #define EACH_CASE(name) \ inline name##_Relocation* RelocIterator::name##_reloc() { \ @@ -1401,25 +1302,4 @@ initialize(nm, begin, limit); } -// if you are going to patch code, you should use this subclass of -// RelocIterator -class PatchingRelocIterator : public RelocIterator { - private: - RelocIterator _init_state; - - void prepass(); // deactivates all breakpoints - void postpass(); // reactivates all enabled breakpoints - - // do not copy these puppies; it would have unpredictable side effects - // these are private and have no bodies defined because they should not be called - PatchingRelocIterator(const RelocIterator&); - void operator=(const RelocIterator&); - - public: - PatchingRelocIterator(nmethod* nm, address begin = NULL, address limit = NULL) - : RelocIterator(nm, begin, limit) { prepass(); } - - ~PatchingRelocIterator() { postpass(); } -}; - #endif // SHARE_VM_CODE_RELOCINFO_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/compiler/compileBroker.cpp --- a/src/share/vm/compiler/compileBroker.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/compiler/compileBroker.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -43,6 +43,7 @@ #include "runtime/os.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/sweeper.hpp" +#include "trace/tracing.hpp" #include "utilities/dtrace.hpp" #include "utilities/events.hpp" #ifdef COMPILER1 @@ -179,9 +180,11 @@ int CompileBroker::_sum_nmethod_size = 0; int CompileBroker::_sum_nmethod_code_size = 0; -CompileQueue* CompileBroker::_c2_method_queue = NULL; -CompileQueue* CompileBroker::_c1_method_queue = NULL; -CompileTask* CompileBroker::_task_free_list = NULL; +long CompileBroker::_peak_compilation_time = 0; + +CompileQueue* CompileBroker::_c2_method_queue = NULL; +CompileQueue* CompileBroker::_c1_method_queue = NULL; +CompileTask* CompileBroker::_task_free_list = NULL; GrowableArray* CompileBroker::_method_threads = NULL; @@ -1795,6 +1798,7 @@ ciMethod* target = ci_env.get_method_from_handle(target_handle); TraceTime t1("compilation", &time); + EventCompilation event; AbstractCompiler *comp = compiler(task_level); if (comp == NULL) { @@ -1836,6 +1840,16 @@ } // simulate crash during compilation assert(task->compile_id() != CICrashAt, "just as planned"); + if (event.should_commit()) { + event.set_method(target->get_Method()); + event.set_compileID(compile_id); + event.set_compileLevel(task->comp_level()); + event.set_succeded(task->is_success()); + event.set_isOsr(is_osr); + event.set_codeSize((task->code() == NULL) ? 0 : task->code()->total_size()); + event.set_inlinedBytes(task->num_inlined_bytecodes()); + event.commit(); + } } pop_jni_handle_block(); @@ -1916,6 +1930,10 @@ } warning("CodeCache is full. Compiler has been disabled."); warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize="); + + CodeCache::report_codemem_full(); + + #ifndef PRODUCT if (CompileTheWorld || ExitOnFullCodeCache) { codecache_print(/* detailed= */ true); @@ -2073,8 +2091,10 @@ // java.lang.management.CompilationMBean _perf_total_compilation->inc(time.ticks()); + _t_total_compilation.add(time); + _peak_compilation_time = time.milliseconds() > _peak_compilation_time ? time.milliseconds() : _peak_compilation_time; + if (CITime) { - _t_total_compilation.add(time); if (is_osr) { _t_osr_compilation.add(time); _sum_osr_bytes_compiled += method->code_size() + task->num_inlined_bytecodes(); @@ -2172,7 +2192,6 @@ tty->print_cr(" nmethod total size : %6d bytes", CompileBroker::_sum_nmethod_size); } - // Debugging output for failure void CompileBroker::print_last_compile() { if ( _last_compile_level != CompLevel_none && diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/compiler/compileBroker.hpp --- a/src/share/vm/compiler/compileBroker.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/compiler/compileBroker.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
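The statistics change just above moves total-compilation-time accounting out of the CITime guard and adds a running peak, so the new getters further down always return live data. A minimal sketch of that bookkeeping, assuming (as the patch's plain read-modify-write implies) that updates are serialized externally:

    #include <algorithm>

    // Totals accumulate; the peak only ratchets upward. No atomics here,
    // matching the patch's unsynchronized compare-and-store.
    struct CompileTimes {
      long total_ms = 0;
      long peak_ms = 0;

      void record(long elapsed_ms) {
        total_ms += elapsed_ms;
        peak_ms = std::max(peak_ms, elapsed_ms);
      }
    };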
* * This code is free software; you can redistribute it and/or modify it @@ -299,17 +299,17 @@ static elapsedTimer _t_osr_compilation; static elapsedTimer _t_standard_compilation; + static int _total_compile_count; static int _total_bailout_count; static int _total_invalidated_count; - static int _total_compile_count; static int _total_native_compile_count; static int _total_osr_compile_count; static int _total_standard_compile_count; - static int _sum_osr_bytes_compiled; static int _sum_standard_bytes_compiled; static int _sum_nmethod_size; static int _sum_nmethod_code_size; + static long _peak_compilation_time; static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS); static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count); @@ -421,6 +421,19 @@ // compiler name for debugging static const char* compiler_name(int comp_level); + + static int get_total_compile_count() { return _total_compile_count; } + static int get_total_bailout_count() { return _total_bailout_count; } + static int get_total_invalidated_count() { return _total_invalidated_count; } + static int get_total_native_compile_count() { return _total_native_compile_count; } + static int get_total_osr_compile_count() { return _total_osr_compile_count; } + static int get_total_standard_compile_count() { return _total_standard_compile_count; } + static int get_sum_osr_bytes_compiled() { return _sum_osr_bytes_compiled; } + static int get_sum_standard_bytes_compiled() { return _sum_standard_bytes_compiled; } + static int get_sum_nmethod_size() { return _sum_nmethod_size;} + static int get_sum_nmethod_code_size() { return _sum_nmethod_code_size; } + static long get_peak_compilation_time() { return _peak_compilation_time; } + static long get_total_compilation_time() { return _t_total_compilation.milliseconds(); } }; #endif // SHARE_VM_COMPILER_COMPILEBROKER_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -2017,12 +2017,6 @@ ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN) - -void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) { - // ugghh... how would one do this efficiently for a non-contiguous space? 
- guarantee(false, "NYI"); -} - bool CompactibleFreeListSpace::linearAllocationWouldFail() const { return _smallLinearAllocBlock._word_size == 0; } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -396,7 +396,6 @@ // iteration support for promotion void save_marks(); bool no_allocs_since_save_marks(); - void object_iterate_since_last_GC(ObjectClosure* cl); // iteration support for sweeping void save_sweep_limit() { diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -37,8 +37,12 @@ #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp" #include "gc_implementation/parNew/parNewGeneration.hpp" #include "gc_implementation/shared/collectorCounters.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_implementation/shared/isGCActiveMark.hpp" #include "gc_interface/collectedHeap.inline.hpp" +#include "memory/allocation.hpp" #include "memory/cardTableRS.hpp" #include "memory/collectorPolicy.hpp" #include "memory/gcLocker.inline.hpp" @@ -60,7 +64,8 @@ // statics CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL; -bool CMSCollector::_full_gc_requested = false; +bool CMSCollector::_full_gc_requested = false; +GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc; ////////////////////////////////////////////////////////////////// // In support of CMS/VM thread synchronization @@ -591,7 +596,10 @@ _concurrent_cycles_since_last_unload(0), _roots_scanning_options(0), _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding), - _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding) + _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding), + _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()), + _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()), + _cms_start_registered(false) { if (ExplicitGCInvokesConcurrentAndUnloadsClasses) { ExplicitGCInvokesConcurrent = true; @@ -1676,18 +1684,38 @@ _full_gcs_since_conc_gc++; } -void CMSCollector::request_full_gc(unsigned int full_gc_count) { +void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) { GenCollectedHeap* gch = GenCollectedHeap::heap(); unsigned int gc_count = gch->total_full_collections(); if (gc_count == full_gc_count) { MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag); _full_gc_requested = true; + _full_gc_cause = cause; CGC_lock->notify(); // nudge CMS thread } else { assert(gc_count > full_gc_count, "Error: causal loop"); } } +bool CMSCollector::is_external_interruption() { + GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause(); + return GCCause::is_user_requested_gc(cause) || + GCCause::is_serviceability_requested_gc(cause); +} + +void CMSCollector::report_concurrent_mode_interruption() { + if (is_external_interruption()) { + if (PrintGCDetails) { + gclog_or_tty->print(" (concurrent mode interrupted)"); + } + } else { + 
if (PrintGCDetails) { + gclog_or_tty->print(" (concurrent mode failure)"); + } + _gc_tracer_cm->report_concurrent_mode_failure(); + } +} + // The foreground and background collectors need to coordinate in order // to make sure that they do not mutually interfere with CMS collections. @@ -1845,14 +1873,8 @@ } ) - if (PrintGCDetails && first_state > Idling) { - GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause(); - if (GCCause::is_user_requested_gc(cause) || - GCCause::is_serviceability_requested_gc(cause)) { - gclog_or_tty->print(" (concurrent mode interrupted)"); - } else { - gclog_or_tty->print(" (concurrent mode failure)"); - } + if (first_state > Idling) { + report_concurrent_mode_interruption(); } set_did_compact(should_compact); @@ -1868,6 +1890,10 @@ // Reference objects are active. ref_processor()->clean_up_discovered_references(); + if (first_state > Idling) { + save_heap_summary(); + } + do_compaction_work(clear_all_soft_refs); // Has the GC time limit been exceeded? @@ -1971,7 +1997,14 @@ // a mark-sweep-compact. void CMSCollector::do_compaction_work(bool clear_all_soft_refs) { GenCollectedHeap* gch = GenCollectedHeap::heap(); - TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty); + + STWGCTimer* gc_timer = GenMarkSweep::gc_timer(); + gc_timer->register_gc_start(os::elapsed_counter()); + + SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer(); + gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start()); + + GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL); if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) { gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d " "collections passed to foreground collector", _full_gcs_since_conc_gc); @@ -2062,6 +2095,10 @@ size_policy()->msc_collection_end(gch->gc_cause()); } + gc_timer->register_gc_end(os::elapsed_counter()); + + gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions()); + // For a mark-sweep-compact, compute_new_size() will be called // in the heap's do_collection() method. } @@ -2093,7 +2130,7 @@ // required. _collectorState = FinalMarking; } - collect_in_foreground(clear_all_soft_refs); + collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause()); // For a mark-sweep, compute_new_size() will be called // in the heap's do_collection() method. @@ -2153,7 +2190,7 @@ // one "collect" method between the background collector and the foreground // collector but the if-then-else required made it cleaner to have // separate methods. -void CMSCollector::collect_in_background(bool clear_all_soft_refs) { +void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) { assert(Thread::current()->is_ConcurrentGC_thread(), "A CMS asynchronous collection is only allowed on a CMS thread."); @@ -2172,6 +2209,7 @@ } else { assert(_collectorState == Idling, "Should be idling before start."); _collectorState = InitialMarking; + register_gc_start(cause); // Reset the expansion cause, now that we are about to begin // a new cycle. clear_expansion_cause(); @@ -2184,6 +2222,7 @@ // ensuing concurrent GC cycle. update_should_unload_classes(); _full_gc_requested = false; // acks all outstanding full gc requests + _full_gc_cause = GCCause::_no_gc; // Signal that we are about to start a collection gch->increment_total_full_collections(); // ... 
starting a collection cycle _collection_count_start = gch->total_full_collections(); @@ -2263,7 +2302,6 @@ { ReleaseForegroundGC x(this); stats().record_cms_begin(); - VM_CMS_Initial_Mark initial_mark_op(this); VMThread::execute(&initial_mark_op); } @@ -2343,6 +2381,7 @@ CMSTokenSync z(true); // not strictly needed. if (_collectorState == Resizing) { compute_new_size(); + save_heap_summary(); _collectorState = Resetting; } else { assert(_collectorState == Idling, "The state should only change" @@ -2401,7 +2440,39 @@ } } -void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) { +void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) { + if (!_cms_start_registered) { + register_gc_start(cause); + } +} + +void CMSCollector::register_gc_start(GCCause::Cause cause) { + _cms_start_registered = true; + _gc_timer_cm->register_gc_start(os::elapsed_counter()); + _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start()); +} + +void CMSCollector::register_gc_end() { + if (_cms_start_registered) { + report_heap_summary(GCWhen::AfterGC); + + _gc_timer_cm->register_gc_end(os::elapsed_counter()); + _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions()); + _cms_start_registered = false; + } +} + +void CMSCollector::save_heap_summary() { + GenCollectedHeap* gch = GenCollectedHeap::heap(); + _last_heap_summary = gch->create_heap_summary(); + _last_metaspace_summary = gch->create_metaspace_summary(); +} + +void CMSCollector::report_heap_summary(GCWhen::Type when) { + _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary, _last_metaspace_summary); +} + +void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) { assert(_foregroundGCIsActive && !_foregroundGCShouldWait, "Foreground collector should be waiting, not executing"); assert(Thread::current()->is_VM_thread(), "A foreground collection" @@ -2409,8 +2480,8 @@ assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(), "VM thread should have CMS token"); - NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose, - true, gclog_or_tty);) + NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose, + true, NULL);) if (UseAdaptiveSizePolicy) { size_policy()->ms_collection_begin(); } @@ -2434,6 +2505,7 @@ } switch (_collectorState) { case InitialMarking: + register_foreground_gc_start(cause); init_mark_was_synchronous = true; // fact to be exploited in re-mark checkpointRootsInitial(false); assert(_collectorState == Marking, "Collector state should have changed" @@ -2482,6 +2554,7 @@ GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { Universe::verify("Verify before reset: "); } + save_heap_summary(); reset(false); assert(_collectorState == Idling, "Collector state should " "have changed"); @@ -3057,26 +3130,6 @@ ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN) void -ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk) -{ - // Not currently implemented; need to do the following. -- ysr. - // dld -- I think that is used for some sort of allocation profiler. So it - // really means the objects allocated by the mutator since the last - // GC. We could potentially implement this cheaply by recording only - // the direct allocations in a side data structure. 
- // - // I think we probably ought not to be required to support these - // iterations at any arbitrary point; I think there ought to be some - // call to enable/disable allocation profiling in a generation/space, - // and the iterator ought to return the objects allocated in the - // gen/space since the enable call, or the last iterator call (which - // will probably be at a GC.) That way, for gens like CM&S that would - // require some extra data structure to support this, we only pay the - // cost when it's in use... - cmsSpace()->object_iterate_since_last_GC(blk); -} - -void ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) { cl->set_generation(this); younger_refs_in_space_iterate(_cmsSpace, cl); @@ -3504,6 +3557,9 @@ check_correct_thread_executing(); TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause()); + save_heap_summary(); + report_heap_summary(GCWhen::BeforeGC); + ReferenceProcessor* rp = ref_processor(); SpecializationStats::clear(); assert(_restart_addr == NULL, "Control point invariant"); @@ -3549,8 +3605,8 @@ // CMS collection cycle. setup_cms_unloading_and_verification_state(); - NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork", - PrintGCDetails && Verbose, true, gclog_or_tty);) + NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork", + PrintGCDetails && Verbose, true, _gc_timer_cm);) if (UseAdaptiveSizePolicy) { size_policy()->checkpoint_roots_initial_begin(); } @@ -4542,8 +4598,10 @@ // The code in this method may need further // tweaking for better performance and some restructuring // for cleaner interfaces. + GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases rp->preclean_discovered_references( - rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl); + rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl, + gc_timer); } if (clean_survivor) { // preclean the active survivor space(s) @@ -4885,8 +4943,8 @@ // Temporarily set flag to false, GCH->do_collection will // expect it to be false and set to true FlagSetting fl(gch->_is_gc_active, false); - NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark", - PrintGCDetails && Verbose, true, gclog_or_tty);) + NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark", + PrintGCDetails && Verbose, true, _gc_timer_cm);) int level = _cmsGen->level() - 1; if (level >= 0) { gch->do_collection(true, // full (i.e. force, see below) @@ -4915,7 +4973,7 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs, bool init_mark_was_synchronous) { - NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);) + NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);) assert(haveFreelistLocks(), "must have free list locks"); assert_lock_strong(bitMapLock()); @@ -4966,11 +5024,11 @@ // the most recent young generation GC, minus those cleaned up by the // concurrent precleaning. 
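Note how register_gc_start() and register_gc_end() above hang together: a CMS cycle start can be reported from either the background path or the foreground path (via register_foreground_gc_start), so the _cms_start_registered latch keeps start/end reporting paired and idempotent. A compilable sketch of that latch, with printf standing in for the tracer calls:

    #include <cstdio>

    class CycleReporter {
      bool _start_registered;
    public:
      CycleReporter() : _start_registered(false) {}

      // Safe to call from both the background and the foreground path.
      void register_start(const char* cause) {
        if (!_start_registered) {
          _start_registered = true;
          std::printf("cycle start (%s)\n", cause);  // report_gc_start stand-in
        }
      }

      // Fires exactly once per registered start, then re-arms the latch.
      void register_end() {
        if (_start_registered) {
          std::printf("cycle end\n");                // report_gc_end stand-in
          _start_registered = false;
        }
      }
    };

    int main() {
      CycleReporter r;
      r.register_start("System.gc");   // foreground path
      r.register_start("concurrent");  // background path: ignored, already started
      r.register_end();
      r.register_end();                // no-op: nothing outstanding
      return 0;
    }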
if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) { - TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty); + GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm); do_remark_parallel(); } else { - TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, - gclog_or_tty); + GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, + _gc_timer_cm); do_remark_non_parallel(); } } @@ -4983,7 +5041,7 @@ verify_overflow_empty(); { - NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);) + NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);) refProcessingWork(asynch, clear_all_soft_refs); } verify_work_stacks_empty(); @@ -5044,6 +5102,8 @@ verify_after_remark(); } + _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure); + // Change under the freelistLocks. _collectorState = Sweeping; // Call isAllClear() under bitMapLock @@ -5697,7 +5757,7 @@ NULL, // space is set further below &_markBitMap, &_markStack, &mrias_cl); { - TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty); + GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm); // Iterate over the dirty cards, setting the corresponding bits in the // mod union table. { @@ -5734,7 +5794,7 @@ Universe::verify(); } { - TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty); + GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm); verify_work_stacks_empty(); @@ -5756,7 +5816,7 @@ } { - TraceTime t("visit unhandled CLDs", PrintGCDetails, false, gclog_or_tty); + GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm); verify_work_stacks_empty(); @@ -5775,7 +5835,7 @@ } { - TraceTime t("dirty klass scan", PrintGCDetails, false, gclog_or_tty); + GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm); verify_work_stacks_empty(); @@ -5977,7 +6037,9 @@ _span, &_markBitMap, &_markStack, &cmsKeepAliveClosure, false /* !preclean */); { - TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty); + GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm); + + ReferenceProcessorStats stats; if (rp->processing_is_mt()) { // Set the degree of MT here. If the discovery is done MT, there // may have been a different number of threads doing the discovery @@ -5996,16 +6058,20 @@ } rp->set_active_mt_degree(active_workers); CMSRefProcTaskExecutor task_executor(*this); - rp->process_discovered_references(&_is_alive_closure, + stats = rp->process_discovered_references(&_is_alive_closure, &cmsKeepAliveClosure, &cmsDrainMarkingStackClosure, - &task_executor); + &task_executor, + _gc_timer_cm); } else { - rp->process_discovered_references(&_is_alive_closure, + stats = rp->process_discovered_references(&_is_alive_closure, &cmsKeepAliveClosure, &cmsDrainMarkingStackClosure, - NULL); - } + NULL, + _gc_timer_cm); + } + _gc_tracer_cm->report_gc_reference_stats(stats); + } // This is the point where the entire marking should have completed. @@ -6013,7 +6079,7 @@ if (should_unload_classes()) { { - TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty); + GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm); // Unload classes and purge the SystemDictionary. 
bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure); @@ -6026,7 +6092,7 @@ } { - TraceTime t("scrub symbol table", PrintGCDetails, false, gclog_or_tty); + GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm); // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); } @@ -6035,7 +6101,7 @@ // CMS doesn't use the StringTable as hard roots when class unloading is turned off. // Need to check if we really scanned the StringTable. if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) { - TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty); + GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm); // Delete entries for dead interned strings. StringTable::unlink(&_is_alive_closure); } @@ -6380,12 +6446,14 @@ _cmsGen->rotate_debug_collection_type(); } ) + + register_gc_end(); } void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) { gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - TraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty); + GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL); TraceCollectorStats tcs(counters()); switch (op) { diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -25,8 +25,10 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP +#include "gc_implementation/shared/gcHeapSummary.hpp" #include "gc_implementation/shared/gSpaceCounters.hpp" #include "gc_implementation/shared/gcStats.hpp" +#include "gc_implementation/shared/gcWhen.hpp" #include "gc_implementation/shared/generationCounters.hpp" #include "memory/freeBlockDictionary.hpp" #include "memory/generation.hpp" @@ -53,6 +55,8 @@ class CMSAdaptiveSizePolicy; class CMSConcMarkingTask; class CMSGCAdaptivePolicyCounters; +class CMSTracer; +class ConcurrentGCTimer; class ConcurrentMarkSweepGeneration; class ConcurrentMarkSweepPolicy; class ConcurrentMarkSweepThread; @@ -61,6 +65,7 @@ class PromotionInfo; class ScanMarkedObjectsAgainCarefullyClosure; class TenuredGeneration; +class SerialOldTracer; // A generic CMS bit map. It's the basis for both the CMS marking bit map // as well as for the mod union table (in each case only a subset of the @@ -567,8 +572,9 @@ bool _completed_initialization; // In support of ExplicitGCInvokesConcurrent - static bool _full_gc_requested; - unsigned int _collection_count_start; + static bool _full_gc_requested; + static GCCause::Cause _full_gc_cause; + unsigned int _collection_count_start; // Should we unload classes this concurrent cycle? 
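Each TraceTime-to-GCTraceTime substitution in this change has the same shape: the scoped object still prints under the existing Print* flags, but additionally feeds the measured phase into a GC timer when one is supplied (NULL is passed where no timer applies). A self-contained sketch of that RAII shape, with SketchTimer as an assumed stand-in for the patch's timer types:

    #include <chrono>
    #include <cstdio>

    struct SketchTimer {               // assumed stand-in for the GC timer types
      void add_phase(const char* name, double ms) {
        std::printf("  phase %-24s %8.3f ms\n", name, ms);
      }
    };

    class ScopedPhase {
      const char* _name;
      SketchTimer* _timer;             // may be NULL, mirroring GCTraceTime(..., NULL)
      std::chrono::steady_clock::time_point _start;
    public:
      ScopedPhase(const char* name, SketchTimer* timer)
        : _name(name), _timer(timer), _start(std::chrono::steady_clock::now()) {}
      ~ScopedPhase() {
        using namespace std::chrono;
        double ms = duration<double, std::milli>(steady_clock::now() - _start).count();
        if (_timer != NULL) {
          _timer->add_phase(_name, ms);  // phase reaches the timer only if given
        }
      }
    };

    int main() {
      SketchTimer timer;
      { ScopedPhase p("weak refs processing", &timer); /* ... work ... */ }
      { ScopedPhase p("scrub symbol table", NULL); }   // timed scope, no timer
      return 0;
    }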
bool _should_unload_classes; @@ -609,6 +615,20 @@ AdaptivePaddedAverage _inter_sweep_estimate; AdaptivePaddedAverage _intra_sweep_estimate; + CMSTracer* _gc_tracer_cm; + ConcurrentGCTimer* _gc_timer_cm; + + bool _cms_start_registered; + + GCHeapSummary _last_heap_summary; + MetaspaceSummary _last_metaspace_summary; + + void register_foreground_gc_start(GCCause::Cause cause); + void register_gc_start(GCCause::Cause cause); + void register_gc_end(); + void save_heap_summary(); + void report_heap_summary(GCWhen::Type when); + protected: ConcurrentMarkSweepGeneration* _cmsGen; // old gen (CMS) MemRegion _span; // span covering above two @@ -827,6 +847,10 @@ void do_mark_sweep_work(bool clear_all_soft_refs, CollectorState first_state, bool should_start_over); + // Work methods for reporting concurrent mode interruption or failure + bool is_external_interruption(); + void report_concurrent_mode_interruption(); + // If the backgrould GC is active, acquire control from the background // GC and do the collection. void acquire_control_and_collect(bool full, bool clear_all_soft_refs); @@ -876,11 +900,11 @@ bool clear_all_soft_refs, size_t size, bool tlab); - void collect_in_background(bool clear_all_soft_refs); - void collect_in_foreground(bool clear_all_soft_refs); + void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause); + void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause); // In support of ExplicitGCInvokesConcurrent - static void request_full_gc(unsigned int full_gc_count); + static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause); // Should we unload classes in a particular concurrent cycle? bool should_unload_classes() const { return _should_unload_classes; @@ -1249,7 +1273,6 @@ // Iteration support and related enquiries void save_marks(); bool no_allocs_since_save_marks(); - void object_iterate_since_last_GC(ObjectClosure* cl); void younger_refs_iterate(OopsInGenClosure* cl); // Iteration support specific to CMS generations diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -140,7 +140,9 @@ while (!_should_terminate) { sleepBeforeNextCycle(); if (_should_terminate) break; - _collector->collect_in_background(false); // !clear_all_soft_refs + GCCause::Cause cause = _collector->_full_gc_requested ? + _collector->_full_gc_cause : GCCause::_cms_concurrent_mark; + _collector->collect_in_background(false, cause); } assert(_should_terminate, "just checking"); // Check that the state of any protocol for synchronization diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,9 +26,12 @@ #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp" #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp" #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_implementation/shared/isGCActiveMark.hpp" #include "memory/gcLocker.inline.hpp" #include "runtime/interfaceSupport.hpp" +#include "runtime/os.hpp" #include "utilities/dtrace.hpp" @@ -60,6 +63,7 @@ void VM_CMS_Operation::verify_before_gc() { if (VerifyBeforeGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { + GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm); HandleMark hm; FreelistLocker x(_collector); MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag); @@ -71,6 +75,7 @@ void VM_CMS_Operation::verify_after_gc() { if (VerifyAfterGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { + GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm); HandleMark hm; FreelistLocker x(_collector); MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag); @@ -140,6 +145,8 @@ ); #endif /* USDT2 */ + _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark", os::elapsed_counter()); + GenCollectedHeap* gch = GenCollectedHeap::heap(); GCCauseSetter gccs(gch, GCCause::_cms_initial_mark); @@ -149,6 +156,9 @@ _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, gch->gc_cause()); VM_CMS_Operation::verify_after_gc(); + + _collector->_gc_timer_cm->register_gc_pause_end(os::elapsed_counter()); + #ifndef USDT2 HS_DTRACE_PROBE(hs_private, cms__initmark__end); #else /* USDT2 */ @@ -172,6 +182,8 @@ ); #endif /* USDT2 */ + _collector->_gc_timer_cm->register_gc_pause_start("Final Mark", os::elapsed_counter()); + GenCollectedHeap* gch = GenCollectedHeap::heap(); GCCauseSetter gccs(gch, GCCause::_cms_final_remark); @@ -181,6 +193,10 @@ _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, gch->gc_cause()); VM_CMS_Operation::verify_after_gc(); + + _collector->save_heap_summary(); + _collector->_gc_timer_cm->register_gc_pause_end(os::elapsed_counter()); + #ifndef USDT2 HS_DTRACE_PROBE(hs_private, cms__remark__end); #else /* USDT2 */ @@ -225,7 +241,7 @@ // In case CMS thread was in icms_wait(), wake it up. CMSCollector::start_icms(); // Nudge the CMS thread to start a concurrent collection. 
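The request_full_gc() change above makes the GC cause travel with the request: the requester stores the cause next to the _full_gc_requested flag under CGC_lock and notifies, and the CMS thread reads both before starting the cycle. A small sketch of that handshake using standard threading primitives (all names here are illustrative, not the VM's):

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    std::mutex              cgc_lock;
    std::condition_variable cgc_cv;
    bool full_gc_requested = false;
    int  full_gc_cause     = 0;        // an int stands in for GCCause::Cause

    void request_full_gc(int cause) {
      std::lock_guard<std::mutex> g(cgc_lock);
      full_gc_requested = true;
      full_gc_cause     = cause;       // the cause now travels with the request
      cgc_cv.notify_one();             // nudge the collector thread
    }

    void collector_thread() {
      std::unique_lock<std::mutex> g(cgc_lock);
      cgc_cv.wait(g, [] { return full_gc_requested; });
      std::printf("collect_in_background(cause=%d)\n", full_gc_cause);
      full_gc_requested = false;
    }

    int main() {
      std::thread t(collector_thread);
      request_full_gc(7);              // e.g. an explicit-GC cause
      t.join();
      return 0;
    }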
- CMSCollector::request_full_gc(_full_gc_count_before); + CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause); } else { assert(_full_gc_count_before < gch->total_full_collections(), "Error"); FullGCCount_lock->notify_all(); // Inform the Java thread its work is done diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/g1/concurrentMark.cpp --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -36,6 +36,9 @@ #include "gc_implementation/g1/heapRegionRemSet.hpp" #include "gc_implementation/g1/heapRegionSeq.inline.hpp" #include "gc_implementation/shared/vmGCOperations.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" #include "memory/genOopClosures.inline.hpp" #include "memory/referencePolicy.hpp" #include "memory/resourceArea.hpp" @@ -1342,6 +1345,9 @@ _remark_times.add((now - start) * 1000.0); g1p->record_concurrent_mark_remark_end(); + + G1CMIsAliveClosure is_alive(g1h); + g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive); } // Base class of the closures that finalize and verify the @@ -2129,6 +2135,7 @@ } g1h->verify_region_sets_optional(); + g1h->trace_heap_after_concurrent_cycle(); } void ConcurrentMark::completeCleanup() { @@ -2439,7 +2446,7 @@ if (G1Log::finer()) { gclog_or_tty->put(' '); } - TraceTime t("GC ref-proc", G1Log::finer(), false, gclog_or_tty); + GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm()); ReferenceProcessor* rp = g1h->ref_processor_cm(); @@ -2491,10 +2498,13 @@ rp->set_active_mt_degree(active_workers); // Process the weak references. - rp->process_discovered_references(&g1_is_alive, - &g1_keep_alive, - &g1_drain_mark_stack, - executor); + const ReferenceProcessorStats& stats = + rp->process_discovered_references(&g1_is_alive, + &g1_keep_alive, + &g1_drain_mark_stack, + executor, + g1h->gc_timer_cm()); + g1h->gc_tracer_cm()->report_gc_reference_stats(stats); // The do_oop work routines of the keep_alive and drain_marking_stack // oop closures will set the has_overflown flag if we overflow the @@ -3227,6 +3237,9 @@ satb_mq_set.set_active_all_threads( false, /* new active value */ satb_mq_set.is_active() /* expected_active */); + + _g1h->trace_heap_after_concurrent_cycle(); + _g1h->register_concurrent_cycle_end(); } static void print_ms_time_info(const char* prefix, const char* name, diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/g1/concurrentMark.hpp --- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -569,8 +569,6 @@ void clear_has_overflown() { _has_overflown = false; } bool restart_for_overflow() { return _restart_for_overflow; } - bool has_aborted() { return _has_aborted; } - // Methods to enter the two overflow sync barriers void enter_first_sync_barrier(uint worker_id); void enter_second_sync_barrier(uint worker_id); @@ -821,6 +819,8 @@ // Called to abort the marking cycle after a Full GC takes palce. void abort(); + bool has_aborted() { return _has_aborted; } + // This prints the global/local fingers. It is used for debugging. 
NOT_PRODUCT(void print_finger();) diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp --- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -93,7 +93,6 @@ ResourceMark rm; HandleMark hm; double cycle_start = os::elapsedVTime(); - char verbose_str[128]; // We have to ensure that we finish scanning the root regions // before the next GC takes place. To ensure this we have to @@ -155,8 +154,7 @@ } CMCheckpointRootsFinalClosure final_cl(_cm); - sprintf(verbose_str, "GC remark"); - VM_CGC_Operation op(&final_cl, verbose_str, true /* needs_pll */); + VM_CGC_Operation op(&final_cl, "GC remark", true /* needs_pll */); VMThread::execute(&op); } if (cm()->restart_for_overflow()) { @@ -187,8 +185,7 @@ } CMCleanUp cl_cl(_cm); - sprintf(verbose_str, "GC cleanup"); - VM_CGC_Operation op(&cl_cl, verbose_str, false /* needs_pll */); + VM_CGC_Operation op(&cl_cl, "GC cleanup", false /* needs_pll */); VMThread::execute(&op); } else { // We don't want to update the marking status if a GC pause @@ -292,6 +289,7 @@ // called System.gc() with +ExplicitGCInvokesConcurrent). _sts.join(); g1h->increment_old_marking_cycles_completed(true /* concurrent */); + g1h->register_concurrent_cycle_end(); _sts.leave(); } assert(_should_terminate, "just checking"); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/g1/evacuationInfo.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/g1/evacuationInfo.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_EVACUATIONINFO_HPP +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_EVACUATIONINFO_HPP + +#include "memory/allocation.hpp" + +class EvacuationInfo : public StackObj { + uint _collectionset_regions; + uint _allocation_regions; + size_t _collectionset_used_before; + size_t _collectionset_used_after; + size_t _alloc_regions_used_before; + size_t _bytes_copied; + uint _regions_freed; + +public: + EvacuationInfo() : _collectionset_regions(0), _allocation_regions(0), _collectionset_used_before(0), + _collectionset_used_after(0), _alloc_regions_used_before(0), + _bytes_copied(0), _regions_freed(0) { } + + void set_collectionset_regions(uint collectionset_regions) { + _collectionset_regions = collectionset_regions; + } + + void set_allocation_regions(uint allocation_regions) { + _allocation_regions = allocation_regions; + } + + void set_collectionset_used_before(size_t used) { + _collectionset_used_before = used; + } + + void increment_collectionset_used_after(size_t used) { + _collectionset_used_after += used; + } + + void set_alloc_regions_used_before(size_t used) { + _alloc_regions_used_before = used; + } + + void set_bytes_copied(size_t copied) { + _bytes_copied = copied; + } + + void set_regions_freed(uint freed) { + _regions_freed += freed; + } + + uint collectionset_regions() { return _collectionset_regions; } + uint allocation_regions() { return _allocation_regions; } + size_t collectionset_used_before() { return _collectionset_used_before; } + size_t collectionset_used_after() { return _collectionset_used_after; } + size_t alloc_regions_used_before() { return _alloc_regions_used_before; } + size_t bytes_copied() { return _bytes_copied; } + uint regions_freed() { return _regions_freed; } +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_EVACUATIONINFO_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/g1/g1CardCounts.cpp --- a/src/share/vm/gc_implementation/g1/g1CardCounts.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/g1/g1CardCounts.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -152,12 +152,9 @@ if (card_num < _committed_max_card_num) { count = (uint) _card_counts[card_num]; if (count < G1ConcRSHotCardLimit) { - _card_counts[card_num] += 1; + _card_counts[card_num] = + (jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit)); } - assert(_card_counts[card_num] <= G1ConcRSHotCardLimit, - err_msg("Refinement count overflow? 
" - "new count: "UINT32_FORMAT, - (uint) _card_counts[card_num])); } } return count; diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -38,10 +38,15 @@ #include "gc_implementation/g1/g1MarkSweep.hpp" #include "gc_implementation/g1/g1OopClosures.inline.hpp" #include "gc_implementation/g1/g1RemSet.inline.hpp" +#include "gc_implementation/g1/g1YCTypes.hpp" #include "gc_implementation/g1/heapRegion.inline.hpp" #include "gc_implementation/g1/heapRegionRemSet.hpp" #include "gc_implementation/g1/heapRegionSeq.inline.hpp" #include "gc_implementation/g1/vm_operations_g1.hpp" +#include "gc_implementation/shared/gcHeapSummary.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_implementation/shared/isGCActiveMark.hpp" #include "memory/gcLocker.inline.hpp" #include "memory/genOopClosures.inline.hpp" @@ -49,7 +54,6 @@ #include "memory/referenceProcessor.hpp" #include "oops/oop.inline.hpp" #include "oops/oop.pcgc.inline.hpp" -#include "runtime/aprofiler.hpp" #include "runtime/vmThread.hpp" size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0; @@ -76,7 +80,7 @@ // The number of GC workers is passed to heap_region_par_iterate_chunked(). // It does use run_task() which sets _n_workers in the task. // G1ParTask executes g1_process_strong_roots() -> -// SharedHeap::process_strong_roots() which calls eventuall to +// SharedHeap::process_strong_roots() which calls eventually to // CardTableModRefBS::par_non_clean_card_iterate_work() which uses // SequentialSubTasksDone. SharedHeap::process_strong_roots() also // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap). @@ -457,7 +461,7 @@ #endif // Returns true if the reference points to an object that -// can move in an incremental collecction. +// can move in an incremental collection. bool G1CollectedHeap::is_scavengable(const void* p) { G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectorPolicy* g1p = g1h->g1_policy(); @@ -548,7 +552,7 @@ return res; } - // Wait here until we get notifed either when (a) there are no + // Wait here until we get notified either when (a) there are no // more free regions coming or (b) some regions have been moved on // the secondary_free_list. SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag); @@ -623,7 +627,7 @@ uint first = G1_NULL_HRS_INDEX; if (num_regions == 1) { // Only one region to allocate, no need to go through the slower - // path. The caller will attempt the expasion if this fails, so + // path. The caller will attempt the expansion if this fails, so // let's not try to expand here too. HeapRegion* hr = new_region(word_size, false /* do_expand */); if (hr != NULL) { @@ -688,7 +692,7 @@ // the first region. HeapWord* new_obj = first_hr->bottom(); // This will be the new end of the first region in the series that - // should also match the end of the last region in the seriers. + // should also match the end of the last region in the series. HeapWord* new_end = new_obj + word_size_sum; // This will be the new top of the first region that will reflect // this allocation. 
@@ -863,7 +867,7 @@ bool* gc_overhead_limit_was_exceeded) { assert_heap_not_locked_and_not_at_safepoint(); - // Loop until the allocation is satisified, or unsatisfied after GC. + // Loop until the allocation is satisfied, or unsatisfied after GC. for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) { unsigned int gc_count_before; @@ -1003,7 +1007,7 @@ (*gclocker_retry_count_ret) += 1; } - // We can reach here if we were unsuccessul in scheduling a + // We can reach here if we were unsuccessful in scheduling a // collection (because another thread beat us to it) or if we were // stalled due to the GC locker. In either can we should retry the // allocation attempt in case another thread successfully @@ -1128,7 +1132,7 @@ (*gclocker_retry_count_ret) += 1; } - // We can reach here if we were unsuccessul in scheduling a + // We can reach here if we were unsuccessful in scheduling a // collection (because another thread beat us to it) or if we were // stalled due to the GC locker. In either can we should retry the // allocation attempt in case another thread successfully @@ -1298,10 +1302,17 @@ return false; } + STWGCTimer* gc_timer = G1MarkSweep::gc_timer(); + gc_timer->register_gc_start(os::elapsed_counter()); + + SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer(); + gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start()); + SvcGCMarker sgcm(SvcGCMarker::FULL); ResourceMark rm; print_heap_before_gc(); + trace_heap_before_gc(gc_tracer); size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes(); @@ -1322,7 +1333,7 @@ TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); { - TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty); + GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL); TraceCollectorStats tcs(g1mm()->full_collection_counters()); TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); @@ -1351,7 +1362,7 @@ verify_before_gc(); - pre_full_gc_dump(); + pre_full_gc_dump(gc_timer); COMPILER2_PRESENT(DerivedPointerTable::clear()); @@ -1433,7 +1444,7 @@ reset_gc_time_stamp(); // Since everything potentially moved, we will clear all remembered - // sets, and clear all cards. Later we will rebuild remebered + // sets, and clear all cards. Later we will rebuild remembered // sets. We will also reset the GC time stamps of the regions. clear_rsets_post_compaction(); check_gc_time_stamps(); @@ -1553,8 +1564,12 @@ } print_heap_after_gc(); - - post_full_gc_dump(); + trace_heap_after_gc(gc_tracer); + + post_full_gc_dump(gc_timer); + + gc_timer->register_gc_end(os::elapsed_counter()); + gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions()); } return true; @@ -1919,7 +1934,7 @@ _ref_processor_stw(NULL), _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), _bot_shared(NULL), - _evac_failure_scan_stack(NULL) , + _evac_failure_scan_stack(NULL), _mark_in_progress(false), _cg1r(NULL), _summary_bytes_used(0), _g1mm(NULL), @@ -1939,12 +1954,18 @@ _surviving_young_words(NULL), _old_marking_cycles_started(0), _old_marking_cycles_completed(0), + _concurrent_cycle_started(false), _in_cset_fast_test(NULL), _in_cset_fast_test_base(NULL), _dirty_cards_region_list(NULL), _worker_cset_start_region(NULL), - _worker_cset_start_region_time_stamp(NULL) { - _g1h = this; // To catch bugs. 
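The timer/tracer plumbing added in this file brackets every collection the same way: register_gc_start()/report_gc_start() before any GC work, heap tracing before and after, and register_gc_end()/report_gc_end() once the pause is complete. A trimmed sketch of that bracket, with assumed SketchGCTimer/SketchGCTracer types in place of the real ones:

    #include <chrono>
    #include <cstdio>

    struct SketchGCTimer {             // assumed stand-in for the STW GC timer
      long start_ticks, end_ticks;
      static long now() {
        using namespace std::chrono;
        return (long)duration_cast<microseconds>(
                 steady_clock::now().time_since_epoch()).count();
      }
      void register_gc_start() { start_ticks = now(); }
      void register_gc_end()   { end_ticks   = now(); }
    };

    struct SketchGCTracer {            // assumed stand-in for the GC tracer
      void report_gc_start(const char* cause, long t) {
        std::printf("GC start: %s @%ld\n", cause, t);
      }
      void report_gc_end(long t) { std::printf("GC end   @%ld\n", t); }
    };

    int main() {
      SketchGCTimer  timer;
      SketchGCTracer tracer;

      timer.register_gc_start();                          // before any GC work
      tracer.report_gc_start("Full GC", timer.start_ticks);

      /* ... the collection itself would run here ... */

      timer.register_gc_end();                            // after all GC work
      tracer.report_gc_end(timer.end_ticks);
      return 0;
    }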
+ _worker_cset_start_region_time_stamp(NULL), + _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()), + _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()), + _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()), + _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) { + + _g1h = this; if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { vm_exit_during_initialization("Failed necessary allocation."); } @@ -1959,13 +1980,14 @@ _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC); _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC); + _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC); for (int i = 0; i < n_queues; i++) { RefToScanQueue* q = new RefToScanQueue(); q->initialize(); _task_queues->register_queue(i, q); - } - + ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo(); + } clear_cset_start_regions(); // Initialize the G1EvacuationFailureALot counters and flags. @@ -2025,7 +2047,7 @@ HeapRegion::GrainBytes); // It is important to do this in a way such that concurrent readers can't - // temporarily think somethings in the heap. (I've actually seen this + // temporarily think something is in the heap. (I've actually seen this // happen in asserts: DLD.) _reserved.set_word_size(0); _reserved.set_start((HeapWord*)heap_rs.base()); @@ -2462,7 +2484,7 @@ // We need to clear the "in_progress" flag in the CM thread before // we wake up any waiters (especially when ExplicitInvokesConcurrent // is set) so that if a waiter requests another System.gc() it doesn't - // incorrectly see that a marking cyle is still in progress. + // incorrectly see that a marking cycle is still in progress. if (concurrent) { _cmThread->clear_in_progress(); } @@ -2474,6 +2496,49 @@ FullGCCount_lock->notify_all(); } +void G1CollectedHeap::register_concurrent_cycle_start(jlong start_time) { + _concurrent_cycle_started = true; + _gc_timer_cm->register_gc_start(start_time); + + _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start()); + trace_heap_before_gc(_gc_tracer_cm); +} + +void G1CollectedHeap::register_concurrent_cycle_end() { + if (_concurrent_cycle_started) { + _gc_timer_cm->register_gc_end(os::elapsed_counter()); + + if (_cm->has_aborted()) { + _gc_tracer_cm->report_concurrent_mode_failure(); + } + _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions()); + + _concurrent_cycle_started = false; + } +} + +void G1CollectedHeap::trace_heap_after_concurrent_cycle() { + if (_concurrent_cycle_started) { + trace_heap_after_gc(_gc_tracer_cm); + } +} + +G1YCType G1CollectedHeap::yc_type() { + bool is_young = g1_policy()->gcs_are_young(); + bool is_initial_mark = g1_policy()->during_initial_mark_pause(); + bool is_during_mark = mark_in_progress(); + + if (is_initial_mark) { + return InitialMark; + } else if (is_during_mark) { + return DuringMark; + } else if (is_young) { + return Normal; + } else { + return Mixed; + } +} + void G1CollectedHeap::collect(GCCause::Cause cause) { assert_heap_not_locked(); @@ -2599,11 +2664,6 @@ heap_region_iterate(&blk); } -void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { - // FIXME: is this right? - guarantee(false, "object_iterate_since_last_GC not supported by G1 heap"); -} - // Calls a SpaceClosure on a HeapRegion. class SpaceClosureRegionClosure: public HeapRegionClosure { @@ -2676,13 +2736,13 @@ break; } - // Noone should have claimed it directly. 
We can given + // No one should have claimed it directly. We can given // that we claimed its "starts humongous" region. assert(chr->claim_value() != claim_value, "sanity"); assert(chr->humongous_start_region() == r, "sanity"); if (chr->claimHeapRegion(claim_value)) { - // we should always be able to claim it; noone else should + // we should always be able to claim it; no one else should // be trying to claim this region bool res2 = cl->doHeapRegion(chr); @@ -2976,7 +3036,7 @@ // the min TLAB size. // Also, this value can be at most the humongous object threshold, - // since we can't allow tlabs to grow big enough to accomodate + // since we can't allow tlabs to grow big enough to accommodate // humongous objects. HeapRegion* hr = _mutator_alloc_region.get(); @@ -3532,8 +3592,6 @@ void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { // always_do_update_barrier = false; assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); - // Call allocation profiler - AllocationProfiler::iterate_since_last_gc(); // Fill TLAB's and such ensure_parsability(true); } @@ -3743,10 +3801,15 @@ return false; } + _gc_timer_stw->register_gc_start(os::elapsed_counter()); + + _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start()); + SvcGCMarker sgcm(SvcGCMarker::MINOR); ResourceMark rm; print_heap_before_gc(); + trace_heap_before_gc(_gc_tracer_stw); HRSPhaseSetter x(HRSPhaseEvacuation); verify_region_sets_optional(); @@ -3771,11 +3834,17 @@ // Inner scope for scope based logging, timers, and stats collection { + EvacuationInfo evacuation_info; + if (g1_policy()->during_initial_mark_pause()) { // We are about to start a marking cycle, so we increment the // full collection counter. increment_old_marking_cycles_started(); + register_concurrent_cycle_start(_gc_timer_stw->gc_start()); } + + _gc_tracer_stw->report_yc_type(yc_type()); + TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ? @@ -3885,7 +3954,7 @@ g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); #endif // YOUNG_LIST_VERBOSE - g1_policy()->finalize_cset(target_pause_time_ms); + g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info); _cm->note_start_of_gc(); // We should not verify the per-thread SATB buffers given that @@ -3921,10 +3990,10 @@ setup_surviving_young_words(); // Initialize the GC alloc regions. - init_gc_alloc_regions(); + init_gc_alloc_regions(evacuation_info); // Actually do the work... 
- evacuate_collection_set(); + evacuate_collection_set(evacuation_info); // We do this to mainly verify the per-thread SATB buffers // (which have been filtered by now) since we didn't verify @@ -3936,7 +4005,7 @@ true /* verify_thread_buffers */, true /* verify_fingers */); - free_collection_set(g1_policy()->collection_set()); + free_collection_set(g1_policy()->collection_set(), evacuation_info); g1_policy()->clear_collection_set(); cleanup_surviving_young_words(); @@ -3964,13 +4033,19 @@ #endif // YOUNG_LIST_VERBOSE g1_policy()->record_survivor_regions(_young_list->survivor_length(), - _young_list->first_survivor_region(), - _young_list->last_survivor_region()); + _young_list->first_survivor_region(), + _young_list->last_survivor_region()); _young_list->reset_auxilary_lists(); if (evacuation_failed()) { _summary_bytes_used = recalculate_used(); + uint n_queues = MAX2((int)ParallelGCThreads, 1); + for (uint i = 0; i < n_queues; i++) { + if (_evacuation_failed_info_array[i].has_failed()) { + _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]); + } + } } else { // The "used" of the the collection set have already been subtracted // when they were freed. Add in the bytes evacuated. @@ -4013,7 +4088,7 @@ } } - // We redo the verificaiton but now wrt to the new CSet which + // We redo the verification but now wrt to the new CSet which // has just got initialized after the previous CSet was freed. _cm->verify_no_cset_oops(true /* verify_stacks */, true /* verify_enqueued_buffers */, @@ -4026,7 +4101,7 @@ // investigate this in CR 7178365. double sample_end_time_sec = os::elapsedTime(); double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS; - g1_policy()->record_collection_pause_end(pause_time_ms); + g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info); MemoryService::track_memory_usage(); @@ -4093,14 +4168,19 @@ TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); print_heap_after_gc(); + trace_heap_after_gc(_gc_tracer_stw); // We must call G1MonitoringSupport::update_sizes() in the same scoping level // as an active TraceMemoryManagerStats object (i.e. before the destructor for the // TraceMemoryManagerStats is called) so that the G1 memory pools are updated // before any GC notifications are raised. g1mm()->update_sizes(); - } - + + _gc_tracer_stw->report_evacuation_info(&evacuation_info); + _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold()); + _gc_timer_stw->register_gc_end(os::elapsed_counter()); + _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions()); + } // It should now be safe to tell the concurrent mark thread to start // without its logging output interfering with the logging output // that came from the pause. @@ -4152,7 +4232,7 @@ assert(_mutator_alloc_region.get() == NULL, "post-condition"); } -void G1CollectedHeap::init_gc_alloc_regions() { +void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) { assert_at_safepoint(true /* should_be_vm_thread */); _survivor_gc_alloc_region.init(); @@ -4167,7 +4247,7 @@ // a cleanup and it should be on the free list now), or // d) it's humongous (this means that it was emptied // during a cleanup and was added to the free list, but - // has been subseqently used to allocate a humongous + // has been subsequently used to allocate a humongous // object that may be less than the region size). 
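EvacuationInfo, introduced in the new evacuationInfo.hpp above, is a plain stack-allocated accumulator threaded by reference through the pause: CSet finalization, alloc-region setup, evacuation, and CSet freeing each deposit their numbers, and the tracer reports the whole record once at the end. A reduced sketch of that flow (fields and values abbreviated from the real class):

    #include <cstddef>
    #include <cstdio>

    // Trimmed-down analogue of the patch's EvacuationInfo accumulator.
    struct EvacInfoSketch {
      unsigned regions_in_cset = 0;
      size_t   used_before     = 0;
      size_t   used_after      = 0;
      unsigned regions_freed   = 0;
    };

    // Each pause phase gets the accumulator by reference and fills its part.
    void finalize_cset(EvacInfoSketch& info) {
      info.regions_in_cset = 12;
      info.used_before     = 12u * 1024 * 1024;
    }
    void evacuate(EvacInfoSketch& info)  { info.used_after = 3u * 1024 * 1024; }
    void free_cset(EvacInfoSketch& info) { info.regions_freed = 9; }

    int main() {
      EvacInfoSketch info;             // one instance per pause, on the stack
      finalize_cset(info);
      evacuate(info);
      free_cset(info);
      std::printf("cset=%u regions, %zu -> %zu bytes, freed=%u\n",
                  info.regions_in_cset, info.used_before,
                  info.used_after, info.regions_freed);
      return 0;
    }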
if (retained_region != NULL && !retained_region->in_collection_set() && @@ -4184,10 +4264,13 @@ retained_region->note_start_of_copying(during_im); _old_gc_alloc_region.set(retained_region); _hr_printer.reuse(retained_region); - } -} - -void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers) { + evacuation_info.set_alloc_regions_used_before(retained_region->used()); + } +} + +void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) { + evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() + + _old_gc_alloc_region.count()); _survivor_gc_alloc_region.release(); // If we have an old GC alloc region to release, we'll save it in // _retained_old_gc_alloc_region. If we don't @@ -4270,7 +4353,7 @@ } oop -G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, +G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state, oop old) { assert(obj_in_cs(old), err_msg("obj: "PTR_FORMAT" should still be in the CSet", @@ -4279,7 +4362,12 @@ oop forward_ptr = old->forward_to_atomic(old); if (forward_ptr == NULL) { // Forward-to-self succeeded. - + assert(_par_scan_state != NULL, "par scan state"); + OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); + uint queue_num = _par_scan_state->queue_num(); + + _evacuation_failed = true; + _evacuation_failed_info_array[queue_num].register_copy_failure(old->size()); if (_evac_failure_closure != cl) { MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); assert(!_drain_in_progress, @@ -4310,8 +4398,6 @@ } void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { - set_evacuation_failed(true); - preserve_mark_if_necessary(old, m); HeapRegion* r = heap_region_containing(old); @@ -4561,8 +4647,7 @@ if (obj_ptr == NULL) { // This will either forward-to-self, or detect that someone else has // installed a forwarding pointer. - OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); - return _g1->handle_evacuation_failure_par(cl, old); + return _g1->handle_evacuation_failure_par(_par_scan_state, old); } oop obj = oop(obj_ptr); @@ -5166,7 +5251,7 @@ // will be copied, the reference field set to point to the // new location, and the RSet updated. Otherwise we need to // use the the non-heap or metadata closures directly to copy - // the refernt object and update the pointer, while avoiding + // the referent object and update the pointer, while avoiding // updating the RSet. if (_g1h->is_in_g1_reserved(p)) { @@ -5334,7 +5419,7 @@ } }; -// Driver routine for parallel reference enqueing. +// Driver routine for parallel reference enqueueing. // Creates an instance of the ref enqueueing gang // task and has the worker threads execute it. @@ -5463,7 +5548,7 @@ // processor would have seen that the reference object had already // been 'discovered' and would have skipped discovering the reference, // but would not have treated the reference object as a regular oop. - // As a reult the copy closure would not have been applied to the + // As a result the copy closure would not have been applied to the // referent object. // // We need to explicitly copy these referent objects - the references @@ -5539,21 +5624,28 @@ // Setup the soft refs policy... rp->setup_policy(false); + ReferenceProcessorStats stats; if (!rp->processing_is_mt()) { // Serial reference processing... 
- rp->process_discovered_references(&is_alive, - &keep_alive, - &drain_queue, - NULL); + stats = rp->process_discovered_references(&is_alive, + &keep_alive, + &drain_queue, + NULL, + _gc_timer_stw); } else { // Parallel reference processing assert(rp->num_q() == no_of_gc_workers, "sanity"); assert(no_of_gc_workers <= rp->max_num_q(), "sanity"); G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers); - rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor); - } - + stats = rp->process_discovered_references(&is_alive, + &keep_alive, + &drain_queue, + &par_task_executor, + _gc_timer_stw); + } + + _gc_tracer_stw->report_gc_reference_stats(stats); // We have completed copying any necessary live referent objects // (that were not copied during the actual pause) so we can // retire any active alloc buffers @@ -5577,7 +5669,7 @@ // Serial reference processing... rp->enqueue_discovered_references(); } else { - // Parallel reference enqueuing + // Parallel reference enqueueing assert(no_of_gc_workers == workers()->active_workers(), "Need to reset active workers"); @@ -5594,15 +5686,15 @@ // FIXME // CM's reference processing also cleans up the string and symbol tables. // Should we do that here also? We could, but it is a serial operation - // and could signicantly increase the pause time. + // and could significantly increase the pause time. double ref_enq_time = os::elapsedTime() - ref_enq_start; g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0); } -void G1CollectedHeap::evacuate_collection_set() { +void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) { _expand_heap_after_alloc_failure = true; - set_evacuation_failed(false); + _evacuation_failed = false; // Should G1EvacuationFailureALot be in effect for this GC? NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();) @@ -5691,7 +5783,7 @@ JNIHandles::weak_oops_do(&is_alive, &keep_alive); } - release_gc_alloc_regions(n_workers); + release_gc_alloc_regions(n_workers, evacuation_info); g1_rem_set()->cleanup_after_oops_into_collection_set_do(); // Reset and re-enable the hot card cache. @@ -5714,7 +5806,7 @@ // Enqueue any remaining references remaining on the STW // reference processor's discovered lists. We need to do // this after the card table is cleaned (and verified) as - // the act of enqueuing entries on to the pending list + // the act of enqueueing entries on to the pending list // will log these updates (and dirty their associated // cards). We need these updates logged to update any // RSets. @@ -5942,7 +6034,7 @@ g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0); } -void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { +void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info) { size_t pre_used = 0; FreeRegionList local_free_list("Local List for CSet Freeing"); @@ -6028,10 +6120,12 @@ cur->set_evacuation_failed(false); // The region is now considered to be old. 
       _old_set.add(cur);
+      evacuation_info.increment_collectionset_used_after(cur->used());
     }
     cur = next;
   }
 
+  evacuation_info.set_regions_freed(local_free_list.length());
   policy->record_max_rs_lengths(rs_lengths);
   policy->cset_regions_freed();
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -26,10 +26,12 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
 
 #include "gc_implementation/g1/concurrentMark.hpp"
+#include "gc_implementation/g1/evacuationInfo.hpp"
 #include "gc_implementation/g1/g1AllocRegion.hpp"
 #include "gc_implementation/g1/g1HRPrinter.hpp"
+#include "gc_implementation/g1/g1MonitoringSupport.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
-#include "gc_implementation/g1/g1MonitoringSupport.hpp"
+#include "gc_implementation/g1/g1YCTypes.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
 #include "gc_implementation/shared/hSpaceCounters.hpp"
@@ -61,7 +63,12 @@
 class ConcurrentMark;
 class ConcurrentMarkThread;
 class ConcurrentG1Refine;
+class ConcurrentGCTimer;
 class GenerationCounters;
+class STWGCTimer;
+class G1NewTracer;
+class G1OldTracer;
+class EvacuationFailedInfo;
 
 typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
@@ -160,7 +167,7 @@
 // An instance is embedded into the G1CH and used as the
 // (optional) _is_alive_non_header closure in the STW
 // reference processor. It is also extensively used during
-// refence processing during STW evacuation pauses.
+// reference processing during STW evacuation pauses.
 class G1STWIsAliveClosure: public BoolObjectClosure {
   G1CollectedHeap* _g1;
 public:
@@ -323,10 +330,10 @@
   void release_mutator_alloc_region();
 
   // It initializes the GC alloc regions at the start of a GC.
-  void init_gc_alloc_regions();
+  void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
 
   // It releases the GC alloc regions at the end of a GC.
-  void release_gc_alloc_regions(uint no_of_gc_workers);
+  void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
 
   // It does any cleanup that needs to be done on the GC alloc regions
   // before a Full GC.
@@ -389,6 +396,8 @@
   // concurrent cycles) we have completed.
   volatile unsigned int _old_marking_cycles_completed;
 
+  bool _concurrent_cycle_started;
+
   // This is a non-product method that is helpful for testing. It is
   // called at the end of a GC and artificially expands the heap by
   // allocating a number of dead regions. This way we can induce very
@@ -734,6 +743,12 @@
     return _old_marking_cycles_completed;
   }
 
+  void register_concurrent_cycle_start(jlong start_time);
+  void register_concurrent_cycle_end();
+  void trace_heap_after_concurrent_cycle();
+
+  G1YCType yc_type();
+
   G1HRPrinter* hr_printer() { return &_hr_printer; }
 
 protected:
@@ -769,7 +784,7 @@
   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 
   // Actually do the work of evacuating the collection set.
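
EvacuationInfo, from the newly included evacuationInfo.hpp, is a per-pause accumulator that the pause fills in and the tracer reports afterwards. Its definition is not part of these hunks; the sketch below is inferred purely from the setters the patch calls, so the real header may differ:

    // Inferred shape -- one field per setter used in this changeset.
    class EvacuationInfo : public StackObj {
      uint   _collectionset_regions;     // set_collectionset_regions()
      uint   _allocation_regions;        // set_allocation_regions()
      size_t _collectionset_used_before; // set_collectionset_used_before()
      size_t _collectionset_used_after;  // increment_collectionset_used_after()
      size_t _alloc_regions_used_before; // set_alloc_regions_used_before()
      size_t _bytes_copied;              // set_bytes_copied()
      uint   _regions_freed;             // set_regions_freed()
     public:
      void increment_collectionset_used_after(size_t used) {
        _collectionset_used_after += used; // called once per surviving CSet region
      }
      // ... plus plain setters/getters for the remaining fields.
    };
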
-  void evacuate_collection_set();
+  void evacuate_collection_set(EvacuationInfo& evacuation_info);
 
   // The g1 remembered set of the heap.
   G1RemSet* _g1_rem_set;
@@ -794,7 +809,7 @@
 
   // After a collection pause, make the regions in the CS into free
   // regions.
-  void free_collection_set(HeapRegion* cs_head);
+  void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info);
 
   // Abandon the current collection set without recording policy
   // statistics or updating free lists.
@@ -863,9 +878,7 @@
   // True iff a evacuation has failed in the current collection.
   bool _evacuation_failed;
 
-  // Set the attribute indicating whether evacuation has failed in the
-  // current collection.
-  void set_evacuation_failed(bool b) { _evacuation_failed = b; }
+  EvacuationFailedInfo* _evacuation_failed_info_array;
 
   // Failed evacuations cause some logical from-space objects to have
   // forwarding pointers to themselves. Reset them.
@@ -907,7 +920,7 @@
   void finalize_for_evac_failure();
 
   // An attempt to evacuate "obj" has failed; take necessary steps.
-  oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
+  oop handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state, oop obj);
   void handle_evacuation_failure_common(oop obj, markOop m);
 
 #ifndef PRODUCT
@@ -939,13 +952,13 @@
   inline bool evacuation_should_fail();
 
   // Reset the G1EvacuationFailureALot counters.  Should be called at
-  // the end of an evacuation pause in which an evacuation failure ocurred.
+  // the end of an evacuation pause in which an evacuation failure occurred.
   inline void reset_evacuation_should_fail();
 #endif // !PRODUCT
 
   // ("Weak") Reference processing support.
   //
-  // G1 has 2 instances of the referece processor class. One
+  // G1 has 2 instances of the reference processor class. One
   // (_ref_processor_cm) handles reference object discovery
   // and subsequent processing during concurrent marking cycles.
   //
@@ -995,6 +1008,12 @@
   // The (stw) reference processor...
   ReferenceProcessor* _ref_processor_stw;
 
+  STWGCTimer* _gc_timer_stw;
+  ConcurrentGCTimer* _gc_timer_cm;
+
+  G1OldTracer* _gc_tracer_cm;
+  G1NewTracer* _gc_tracer_stw;
+
   // During reference object discovery, the _is_alive_non_header
   // closure (if non-null) is applied to the referent object to
   // determine whether the referent is live. If so then the
@@ -1140,9 +1159,12 @@
   // The STW reference processor....
   ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
 
-  // The Concurent Marking reference processor...
+  // The Concurrent Marking reference processor...
   ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
 
+  ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
+  G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }
+
   virtual size_t capacity() const;
   virtual size_t used() const;
   // This should be called when we're not holding the heap lock. The
@@ -1200,7 +1222,7 @@
 
   // verify_region_sets_optional() is planted in the code for
   // list verification in non-product builds (and it can be enabled in
-  // product builds by definning HEAP_REGION_SET_FORCE_VERIFY to be 1).
+  // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
#if HEAP_REGION_SET_FORCE_VERIFY
   void verify_region_sets_optional() {
     verify_region_sets();
@@ -1266,7 +1288,7 @@
   // The same as above but assume that the caller holds the Heap_lock.
   void collect_locked(GCCause::Cause cause);
 
-  // True iff a evacuation has failed in the most-recent collection.
+  // True iff an evacuation has failed in the most-recent collection.
   bool evacuation_failed() { return _evacuation_failed; }
 
   // It will free a region if it has allocated objects in it that are
@@ -1338,11 +1360,6 @@
     object_iterate(cl);
   }
 
-  // Iterate over all objects allocated since the last collection, calling
-  // "cl.do_object" on each. The heap must have been initialized properly
-  // to support this function, or else this call will fail.
-  virtual void object_iterate_since_last_GC(ObjectClosure* cl);
-
   // Iterate over all spaces in use in the heap, in ascending address order.
   virtual void space_iterate(SpaceClosure* cl);
 
@@ -1554,6 +1571,7 @@
 
   // Override; it uses the "prev" marking information
   virtual void verify(bool silent);
+  virtual void print_on(outputStream* st) const;
   virtual void print_extended_on(outputStream* st) const;
   virtual void print_on_error(outputStream* st) const;
 
@@ -1839,7 +1857,7 @@
   G1ParScanHeapEvacClosure*     _evac_cl;
   G1ParScanPartialArrayClosure* _partial_scan_cl;
 
-  int _hash_seed;
+  int  _hash_seed;
   uint _queue_num;
 
   size_t _term_attempts;
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -909,7 +909,7 @@
 // Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001
 
-void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
+void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
   double end_time_sec = os::elapsedTime();
   assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
          "otherwise, the subtraction below does not make sense");
@@ -941,6 +941,9 @@
   _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
                           end_time_sec, false);
 
+  evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
+  evacuation_info.set_bytes_copied(_bytes_copied_during_gc);
+
   if (update_stats) {
     _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
     // this is where we update the allocation rate of the application
@@ -1896,7 +1899,7 @@
 }
 
-void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
+void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) {
   double young_start_time_sec = os::elapsedTime();
 
   YoungList* young_list = _g1->young_list();
@@ -2102,6 +2105,7 @@
   double non_young_end_time_sec = os::elapsedTime();
   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
 
+  evacuation_info.set_collectionset_regions(cset_region_length());
 }
 
 void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -671,7 +671,7 @@
 
   // Record the start and end of an evacuation pause.
   void record_collection_pause_start(double start_time_sec);
-  void record_collection_pause_end(double pause_time_ms);
+  void record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info);
 
   // Record the start and end of a full collection.
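
record_collection_pause_end() taking the EvacuationInfo by reference means the policy contributes the byte counts while the heap code above contributes the region counts. A plausible end-of-pause sequence under that assumption (the actual caller, do_collection_pause_at_safepoint(), is not shown in this patch, and report_evacuation_info() is an assumed tracer hook):

    EvacuationInfo evacuation_info;                       // one per pause
    g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
    evacuate_collection_set(evacuation_info);             // alloc-region usage
    free_collection_set(g1_policy()->collection_set(), evacuation_info);
    g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info);
    _gc_tracer_stw->report_evacuation_info(&evacuation_info); // assumed hook
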
   void record_full_collection_start();
@@ -720,7 +720,7 @@
   // Choose a new collection set.  Marks the chosen regions as being
   // "in_collection_set", and links them together.  The head and number of
   // the collection set are available via access methods.
-  void finalize_cset(double target_pause_time_ms);
+  void finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info);
 
   // The head of the list (via "next_in_collection_set()") representing the
   // current collection set.
@@ -879,6 +879,7 @@
   ageTable _survivors_age_table;
 
 public:
+  uint tenuring_threshold() const { return _tenuring_threshold; }
 
   inline GCAllocPurpose
     evacuation_destination(HeapRegion* src_region, uint age, size_t word_sz) {
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -38,7 +38,7 @@
   NOT_PRODUCT(static const T _uninitialized;)
 
   // We are caching the sum and average to only have to calculate them once.
-  // This is not done in an MT-safe way. It is intetened to allow single
+  // This is not done in an MT-safe way. It is intended to allow single
   // threaded code to call sum() and average() multiple times in any order
   // without having to worry about the cost.
   bool _has_new_data;
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,10 @@
 #include "code/icBuffer.hpp"
 #include "gc_implementation/g1/g1Log.hpp"
 #include "gc_implementation/g1/g1MarkSweep.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
 #include "memory/gcLocker.hpp"
 #include "memory/genCollectedHeap.hpp"
 #include "memory/modRefBarrierSet.hpp"
@@ -39,7 +43,6 @@
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
-#include "runtime/aprofiler.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/fprofiler.hpp"
 #include "runtime/synchronizer.hpp"
@@ -119,7 +122,7 @@
 void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                     bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  TraceTime tm("phase 1", G1Log::fine() && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer());
   GenMarkSweep::trace(" 1");
 
   SharedHeap* sh = SharedHeap::heap();
@@ -139,10 +142,13 @@
   assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Sanity");
 
   rp->setup_policy(clear_all_softrefs);
-  rp->process_discovered_references(&GenMarkSweep::is_alive,
-                                    &GenMarkSweep::keep_alive,
-                                    &GenMarkSweep::follow_stack_closure,
-                                    NULL);
+  const ReferenceProcessorStats& stats =
+    rp->process_discovered_references(&GenMarkSweep::is_alive,
+                                      &GenMarkSweep::keep_alive,
+                                      &GenMarkSweep::follow_stack_closure,
+                                      NULL,
+                                      gc_timer());
+  gc_tracer()->report_gc_reference_stats(stats);
 
   // This is the point where the entire marking should have completed.
@@ -185,6 +191,8 @@
       gclog_or_tty->print_cr("]");
     }
   }
+
+  gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive);
 }
 
 class G1PrepareCompactClosure: public HeapRegionClosure {
@@ -257,7 +265,7 @@
 
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
-  TraceTime tm("phase 2", G1Log::fine() && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer());
   GenMarkSweep::trace("2");
 
   // find the first region
@@ -294,7 +302,7 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   // Adjust the pointers to reflect the new locations
-  TraceTime tm("phase 3", G1Log::fine() && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer());
   GenMarkSweep::trace("3");
 
   SharedHeap* sh = SharedHeap::heap();
@@ -353,7 +361,7 @@
   // to use a higher index (saved from phase2) when verifying perm_gen.
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
-  TraceTime tm("phase 4", G1Log::fine() && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer());
   GenMarkSweep::trace("4");
 
   G1SpaceCompactClosure blk;
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/g1/g1MarkSweep.hpp
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,9 @@
   static void invoke_at_safepoint(ReferenceProcessor* rp,
                                   bool clear_all_softrefs);
 
+  static STWGCTimer* gc_timer() { return GenMarkSweep::_gc_timer; }
+  static SerialOldTracer* gc_tracer() { return GenMarkSweep::_gc_tracer; }
+
 private:
 
   // Mark live objects
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp
--- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -224,6 +224,7 @@
   // Monitoring support used by
   //   MemoryService
   //   jstat counters
+  //   Tracing
 
   size_t overall_reserved()           { return _overall_reserved;     }
   size_t overall_committed()          { return _overall_committed;    }
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -47,7 +47,7 @@
     JavaThread* jt = (JavaThread*)thr;
     jt->satb_mark_queue().enqueue(pre_val);
   } else {
-    MutexLocker x(Shared_SATB_Q_lock);
+    MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
     JavaThread::satb_mark_queue_set().shared_satb_queue()->enqueue(pre_val);
   }
 }
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/g1/g1YCTypes.hpp
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1YCTypes.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1YCTYPES_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1YCTYPES_HPP
+
+#include "utilities/debug.hpp"
+
+enum G1YCType {
+  Normal,
+  InitialMark,
+  DuringMark,
+  Mixed,
+  G1YCTypeEndSentinel
+};
+
+class G1YCTypeHelper {
+ public:
+  static const char* to_string(G1YCType type) {
+    switch(type) {
+      case Normal: return "Normal";
+      case InitialMark: return "Initial Mark";
+      case DuringMark: return "During Mark";
+      case Mixed: return "Mixed";
+      default: ShouldNotReachHere(); return NULL;
+    }
+  }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1YCTYPES_HPP
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/g1/heapRegion.cpp
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -798,7 +798,7 @@
     if (!g1->is_obj_dead_cond(obj, this, vo)) {
       if (obj->is_oop()) {
         Klass* klass = obj->klass();
-        if (!klass->is_metadata()) {
+        if (!klass->is_metaspace_object()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not metadata", klass, obj);
          *failures = true;
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,8 @@
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 #include "gc_implementation/g1/g1Log.hpp"
 #include "gc_implementation/g1/vm_operations_g1.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_implementation/shared/isGCActiveMark.hpp"
 #include "gc_implementation/g1/vm_operations_g1.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -227,7 +229,7 @@
 void VM_CGC_Operation::doit() {
   gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
   TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
-  TraceTime t(_printGCMessage, G1Log::fine(), true, gclog_or_tty);
+  GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm());
   SharedHeap* sh = SharedHeap::heap();
   // This could go away if CollectedHeap gave access to _gc_is_active...
   if (sh != NULL) {
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,11 @@
 #include "gc_implementation/shared/adaptiveSizePolicy.hpp"
 #include "gc_implementation/shared/ageTable.hpp"
 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
+#include "gc_implementation/shared/copyFailedInfo.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
 #include "memory/defNewGeneration.inline.hpp"
 #include "memory/genCollectedHeap.hpp"
@@ -75,7 +80,6 @@
                         work_queue_set_, &term_),
   _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
   _keep_alive_closure(&_scan_weak_ref_closure),
-  _promotion_failure_size(0),
  _strong_roots_time(0.0), _term_time(0.0)
 {
#if TASKQUEUE_STATS
@@ -279,13 +283,10 @@
   }
 }
 
-void ParScanThreadState::print_and_clear_promotion_failure_size() {
-  if (_promotion_failure_size != 0) {
-    if (PrintPromotionFailure) {
-      gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
-                          _thread_num, _promotion_failure_size);
-    }
-    _promotion_failure_size = 0;
+void ParScanThreadState::print_promotion_failure_size() {
+  if (_promotion_failed_info.has_failed() && PrintPromotionFailure) {
+    gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
+                        _thread_num, _promotion_failed_info.first_size());
   }
 }
 
@@ -305,6 +306,7 @@
 
   inline ParScanThreadState& thread_state(int i);
 
+  void trace_promotion_failed(YoungGCTracer& gc_tracer);
   void reset(int active_workers, bool promotion_failed);
   void flush();
 
@@ -353,13 +355,21 @@
   return ((ParScanThreadState*)_data)[i];
 }
 
+void ParScanThreadStateSet::trace_promotion_failed(YoungGCTracer& gc_tracer) {
+  for (int i = 0; i < length(); ++i) {
+    if (thread_state(i).promotion_failed()) {
+      gc_tracer.report_promotion_failed(thread_state(i).promotion_failed_info());
+      thread_state(i).promotion_failed_info().reset();
+    }
+  }
+}
 
 void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
 {
   _term.reset_for_reuse(active_threads);
   if (promotion_failed) {
     for (int i = 0; i < length(); ++i) {
-      thread_state(i).print_and_clear_promotion_failure_size();
+      thread_state(i).print_promotion_failure_size();
     }
   }
 }
@@ -583,14 +593,6 @@
   gch->set_n_termination(active_workers);
 }
 
-// The "i" passed to this method is the part of the work for
-// this thread.  It is not the worker ID.  The "i" is derived
-// from _started_workers which is incremented in internal_note_start()
-// called in GangWorker loop() and which is called under the
-// which is called under the protection of the gang monitor and is
-// called after a task is started.  So "i" is based on
-// first-come-first-served.
-
 void ParNewGenTask::work(uint worker_id) {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   // Since this is being done in a separate thread, need new resource
@@ -876,16 +878,45 @@
 }
 
+// A Generation that does parallel young-gen collection.
+
 bool ParNewGeneration::_avoid_promotion_undo = false;
 
-// A Generation that does parallel young-gen collection.
+void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
+  assert(_promo_failure_scan_stack.is_empty(), "post condition");
+  _promo_failure_scan_stack.clear(true); // Clear cached segments.
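
PromotionFailedInfo, from the new copyFailedInfo.hpp include, replaces the single _promotion_failure_size counter: each worker now accumulates its own failure record, and has_failed()/reset() let the thread-state set be drained once per GC, as trace_promotion_failed() above does. A sketch of the accumulator inferred only from the calls in this file (the real class may carry more, such as a thread id):

    // Inferred from register_copy_failure()/has_failed()/first_size()/reset().
    class PromotionFailedInfo {
      size_t _first_size;  // size of the first object that failed to promote
      size_t _total_size;  // sum of all failed-copy sizes this GC
      uint   _count;       // number of failed copies this GC
     public:
      void register_copy_failure(size_t size) {
        if (_count == 0) _first_size = size;
        _total_size += size;
        _count++;
      }
      bool   has_failed() const { return _count > 0; }
      size_t first_size() const { return _first_size; }
      void   reset()            { _first_size = _total_size = 0; _count = 0; }
    };
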
+
+  remove_forwarding_pointers();
+  if (PrintGCDetails) {
+    gclog_or_tty->print(" (promotion failed)");
+  }
+  // All the spaces are in play for mark-sweep.
+  swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
+  from()->set_next_compaction_space(to());
+  gch->set_incremental_collection_failed();
+  // Inform the next generation that a promotion failure occurred.
+  _next_gen->promotion_failure_occurred();
+
+  // Trace promotion failure in the parallel GC threads
+  thread_state_set.trace_promotion_failed(gc_tracer);
+  // Single threaded code may have reported promotion failure to the global state
+  if (_promotion_failed_info.has_failed()) {
+    gc_tracer.report_promotion_failed(_promotion_failed_info);
+  }
+  // Reset the PromotionFailureALot counters.
+  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
+}
 
 void ParNewGeneration::collect(bool   full,
                                bool   clear_all_soft_refs,
                                size_t size,
                                bool   is_tlab) {
   assert(full || size > 0, "otherwise we don't want to collect");
 
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+
+  _gc_timer->register_gc_start(os::elapsed_counter());
+
   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
     "not a CMS generational heap");
   AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
@@ -906,7 +937,7 @@
     set_avoid_promotion_undo(true);
   }
 
-  // If the next generation is too full to accomodate worst-case promotion
+  // If the next generation is too full to accommodate worst-case promotion
   // from this generation, pass on collection; let the next generation
   // do it.
   if (!collection_attempt_is_safe()) {
@@ -915,6 +946,10 @@
   }
   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 
+  ParNewTracer gc_tracer;
+  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
+  gch->trace_heap_before_gc(&gc_tracer);
+
   init_assuming_no_promotion_failure();
 
   if (UseAdaptiveSizePolicy) {
@@ -922,7 +957,7 @@
     size_policy->minor_collection_begin();
   }
 
-  TraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty);
+  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
   // Capture heap used before collection (for printing).
   size_t gch_prev_used = gch->used();
 
@@ -975,17 +1010,21 @@
   rp->setup_policy(clear_all_soft_refs);
   // Can the mt_degree be set later (at run_task() time would be best)?
   rp->set_active_mt_degree(active_workers);
+  ReferenceProcessorStats stats;
   if (rp->processing_is_mt()) {
     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
-    rp->process_discovered_references(&is_alive, &keep_alive,
-                                      &evacuate_followers, &task_executor);
+    stats = rp->process_discovered_references(&is_alive, &keep_alive,
+                                              &evacuate_followers, &task_executor,
+                                              _gc_timer);
   } else {
     thread_state_set.flush();
    gch->set_par_threads(0);  // 0 ==> non-parallel.
    gch->save_marks();
-    rp->process_discovered_references(&is_alive, &keep_alive,
-                                      &evacuate_followers, NULL);
+    stats = rp->process_discovered_references(&is_alive, &keep_alive,
+                                              &evacuate_followers, NULL,
+                                              _gc_timer);
   }
+  gc_tracer.report_gc_reference_stats(stats);
   if (!promotion_failed()) {
     // Swap the survivor spaces.
     eden()->clear(SpaceDecorator::Mangle);
@@ -1010,22 +1049,7 @@
 
     adjust_desired_tenuring_threshold();
   } else {
-    assert(_promo_failure_scan_stack.is_empty(), "post condition");
-    _promo_failure_scan_stack.clear(true); // Clear cached segments.
-
-    remove_forwarding_pointers();
-    if (PrintGCDetails) {
-      gclog_or_tty->print(" (promotion failed)");
-    }
-    // All the spaces are in play for mark-sweep.
-    swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
-    from()->set_next_compaction_space(to());
-    gch->set_incremental_collection_failed();
-    // Inform the next generation that a promotion failure occurred.
-    _next_gen->promotion_failure_occurred();
-
-    // Reset the PromotionFailureALot counters.
-    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
+    handle_promotion_failed(gch, thread_state_set, gc_tracer);
   }
   // set new iteration safe limit for the survivor spaces
   from()->set_concurrent_iteration_safe_limit(from()->top());
@@ -1065,6 +1089,13 @@
     rp->enqueue_discovered_references(NULL);
   }
   rp->verify_no_references_recorded();
+
+  gch->trace_heap_after_gc(&gc_tracer);
+  gc_tracer.report_tenuring_threshold(tenuring_threshold());
+
+  _gc_timer->register_gc_end(os::elapsed_counter());
+
+  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
 }
 
 static int sum;
@@ -1174,8 +1205,7 @@
     new_obj = old;
 
     preserve_mark_if_necessary(old, m);
-    // Log the size of the maiden promotion failure
-    par_scan_state->log_promotion_failure(sz);
+    par_scan_state->register_promotion_failure(sz);
   }
 
   old->forward_to(new_obj);
@@ -1300,8 +1330,7 @@
       failed_to_promote = true;
 
       preserve_mark_if_necessary(old, m);
-      // Log the size of the maiden promotion failure
-      par_scan_state->log_promotion_failure(sz);
+      par_scan_state->register_promotion_failure(sz);
     }
   } else {
     // Is in to-space; do copying ourselves.
@@ -1599,8 +1628,7 @@
 }
#undef BUSY
 
-void ParNewGeneration::ref_processor_init()
-{
+void ParNewGeneration::ref_processor_init() {
   if (_ref_processor == NULL) {
     // Allocate and initialize a reference processor
     _ref_processor =
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -25,7 +25,9 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
 
+#include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
+#include "gc_implementation/shared/copyFailedInfo.hpp"
 #include "memory/defNewGeneration.hpp"
 #include "utilities/taskqueue.hpp"
 
@@ -105,7 +107,7 @@
#endif // TASKQUEUE_STATS
 
   // Stats for promotion failure
-  size_t _promotion_failure_size;
+  PromotionFailedInfo _promotion_failed_info;
 
   // Timing numbers.
   double _start;
@@ -180,13 +182,16 @@
   void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);
 
   // Promotion failure stats
-  size_t promotion_failure_size() { return promotion_failure_size(); }
-  void log_promotion_failure(size_t sz) {
-    if (_promotion_failure_size == 0) {
-      _promotion_failure_size = sz;
-    }
+  void register_promotion_failure(size_t sz) {
+    _promotion_failed_info.register_copy_failure(sz);
   }
-  void print_and_clear_promotion_failure_size();
+  PromotionFailedInfo& promotion_failed_info() {
+    return _promotion_failed_info;
+  }
+  bool promotion_failed() {
+    return _promotion_failed_info.has_failed();
+  }
+  void print_promotion_failure_size();
 
#if TASKQUEUE_STATS
   TaskQueueStats & taskqueue_stats() const { return _work_queue->stats; }
@@ -337,6 +342,8 @@
   // word being overwritten with a self-forwarding-pointer.
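
Taken together, the ParNew hunks bracket every young collection with a tracer lifecycle. The required ordering, condensed from the changes above (error paths and the collection itself elided):

    // Condensed from ParNewGeneration::collect(); not a literal excerpt.
    _gc_timer->register_gc_start(os::elapsed_counter());
    ParNewTracer gc_tracer;
    gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
    gch->trace_heap_before_gc(&gc_tracer);      // "before" heap summary
    // ... evacuate; reference stats and promotion failures are
    // reported against gc_tracer along the way ...
    gch->trace_heap_after_gc(&gc_tracer);       // "after" heap summary
    gc_tracer.report_tenuring_threshold(tenuring_threshold());
    _gc_timer->register_gc_end(os::elapsed_counter());
    gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
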
   void preserve_mark_if_necessary(oop obj, markOop m);
 
+  void handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer);
+
 protected:
 
   bool _survivor_overflow;
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -70,6 +70,17 @@
   _virtual_space = vs;
 }
 
+void ASPSOldGen::initialize_work(const char* perf_data_name, int level) {
+
+  PSOldGen::initialize_work(perf_data_name, level);
+
+  // The old gen can grow to gen_size_limit().  _reserve reflects only
+  // the current maximum that can be committed.
+  assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");
+
+  initialize_performance_counters(perf_data_name, level);
+}
+
 void ASPSOldGen::reset_after_change() {
   _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                         (HeapWord*)virtual_space()->high_boundary());
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.hpp
--- a/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -50,6 +50,8 @@
   size_t max_gen_size()                 { return _reserved.byte_size(); }
   void set_gen_size_limit(size_t v)     { _gen_size_limit = v; }
 
+  virtual void initialize_work(const char* perf_data_name, int level);
+
   // After a shrink or expand reset the generation
   void reset_after_change();
 
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -565,11 +565,9 @@
     if(new_start_aligned < new_end_for_commit) {
       MemRegion new_committed =
         MemRegion(new_start_aligned, new_end_for_commit);
-      if (!os::commit_memory((char*)new_committed.start(),
-                             new_committed.byte_size())) {
-        vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR,
-                              "card table expansion");
-      }
+      os::commit_memory_or_exit((char*)new_committed.start(),
+                                new_committed.byte_size(), !ExecMem,
+                                "card table expansion");
     }
     result = true;
   } else if (new_start_aligned > cur_committed.start()) {
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,8 @@
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 #include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcWhen.hpp"
 #include "memory/gcLocker.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
@@ -642,6 +644,29 @@
   ensure_parsability(false);  // no need to retire TLABs for verification
 }
 
+PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
+  PSOldGen* old = old_gen();
+  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
+  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
+  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());
+
+  PSYoungGen* young = young_gen();
+  VirtualSpaceSummary young_summary(young->reserved().start(),
+    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());
+
+  MutableSpace* eden = young_gen()->eden_space();
+  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());
+
+  MutableSpace* from = young_gen()->from_space();
+  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());
+
+  MutableSpace* to = young_gen()->to_space();
+  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());
+
+  VirtualSpaceSummary heap_summary = create_heap_space_summary();
+  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
+}
+
 void ParallelScavengeHeap::print_on(outputStream* st) const {
   young_gen()->print_on(st);
   old_gen()->print_on(st);
@@ -706,6 +731,12 @@
   }
 }
 
+void ParallelScavengeHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
+  const PSHeapSummary& heap_summary = create_ps_heap_summary();
+  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
+  gc_tracer->report_gc_heap_summary(when, heap_summary, metaspace_summary);
+}
+
 ParallelScavengeHeap* ParallelScavengeHeap::heap() {
   assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
   assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -30,14 +30,18 @@
 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
 #include "gc_implementation/shared/gcPolicyCounters.hpp"
+#include "gc_implementation/shared/gcWhen.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "utilities/ostream.hpp"
 
 class AdjoiningGenerations;
+class CollectorPolicy;
+class GCHeapSummary;
 class GCTaskManager;
-class PSAdaptiveSizePolicy;
 class GenerationSizer;
 class CollectorPolicy;
+class PSAdaptiveSizePolicy;
+class PSHeapSummary;
 
 class ParallelScavengeHeap : public CollectedHeap {
   friend class VMStructs;
@@ -65,6 +69,8 @@
 
   static GCTaskManager* _gc_task_manager;      // The task manager.
 
+  void trace_heap(GCWhen::Type when, GCTracer* tracer);
+
 protected:
   static inline size_t total_invocations();
   HeapWord* allocate_new_tlab(size_t size);
@@ -219,6 +225,7 @@
   jlong millis_since_last_gc();
 
   void prepare_for_verify();
+  PSHeapSummary create_ps_heap_summary();
   virtual void print_on(outputStream* st) const;
   virtual void print_on_error(outputStream* st) const;
   virtual void print_gc_threads_on(outputStream* st) const;
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -27,6 +27,8 @@
 #include "code/codeCache.hpp"
 #include "gc_implementation/parallelScavenge/pcTasks.hpp"
 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_interface/collectedHeap.hpp"
 #include "memory/universe.hpp"
 #include "oops/objArrayKlass.inline.hpp"
@@ -48,8 +50,8 @@
 
   ResourceMark rm;
 
-  NOT_PRODUCT(TraceTime tm("ThreadRootsMarkingTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+  NOT_PRODUCT(GCTraceTime tm("ThreadRootsMarkingTask",
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
 
@@ -77,8 +79,8 @@
 void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
   assert(Universe::heap()->is_gc_active(), "called outside gc");
 
-  NOT_PRODUCT(TraceTime tm("MarkFromRootsTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+  NOT_PRODUCT(GCTraceTime tm("MarkFromRootsTask",
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
@@ -148,8 +150,8 @@
 {
   assert(Universe::heap()->is_gc_active(), "called outside gc");
 
-  NOT_PRODUCT(TraceTime tm("RefProcTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+  NOT_PRODUCT(GCTraceTime tm("RefProcTask",
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
@@ -204,8 +206,8 @@
 void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
   assert(Universe::heap()->is_gc_active(), "called outside gc");
 
-  NOT_PRODUCT(TraceTime tm("StealMarkingTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+  NOT_PRODUCT(GCTraceTime tm("StealMarkingTask",
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
 
   ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
@@ -237,8 +239,8 @@
 void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
   assert(Universe::heap()->is_gc_active(), "called outside gc");
 
-  NOT_PRODUCT(TraceTime tm("StealRegionCompactionTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+  NOT_PRODUCT(GCTraceTime tm("StealRegionCompactionTask",
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
@@ -304,8 +306,8 @@
 
 void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
 
-  NOT_PRODUCT(TraceTime tm("UpdateDensePrefixTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+  NOT_PRODUCT(GCTraceTime tm("UpdateDensePrefixTask",
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
 
@@ -319,8 +321,8 @@
 void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
   assert(Universe::heap()->is_gc_active(), "called outside gc");
 
-  NOT_PRODUCT(TraceTime tm("DrainStacksCompactionTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+  NOT_PRODUCT(GCTraceTime tm("DrainStacksCompactionTask",
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -1250,14 +1250,13 @@
                   avg_promoted()->deviation());
     }
 
-    gclog_or_tty->print( "  avg_promoted_padded_avg: %f"
+    gclog_or_tty->print_cr( "  avg_promoted_padded_avg: %f"
                 "  avg_pretenured_padded_avg: %f"
                 "  tenuring_thresh: %d"
                 "  target_size: " SIZE_FORMAT,
                 avg_promoted()->padded_average(),
                 _avg_pretenured->padded_average(),
                 tenuring_threshold, target_size);
-    tty->cr();
   }
 
   set_survivor_size(target_size);
@@ -1279,7 +1278,7 @@
     avg_promoted()->sample(promoted + _avg_pretenured->padded_average());
 
   if (PrintAdaptiveSizePolicy) {
-    gclog_or_tty->print(
+    gclog_or_tty->print_cr(
                   "AdaptiveSizePolicy::update_averages:"
                   "  survived: " SIZE_FORMAT
                   "  promoted: " SIZE_FORMAT
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -34,6 +34,10 @@
 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_implementation/shared/isGCActiveMark.hpp"
 #include "gc_implementation/shared/markSweep.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
@@ -108,8 +112,12 @@
   }
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   GCCause::Cause gc_cause = heap->gc_cause();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+
+  _gc_timer->register_gc_start(os::elapsed_counter());
+  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
+
   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
 
   // The scope of casr should end after code that can change
@@ -131,6 +139,7 @@
   AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
 
   heap->print_heap_before_gc();
+  heap->trace_heap_before_gc(_gc_tracer);
 
   // Fill in TLABs
   heap->accumulate_statistics_all_tlabs();
@@ -147,7 +156,7 @@
     old_gen->verify_object_start_array();
   }
 
-  heap->pre_full_gc_dump();
+  heap->pre_full_gc_dump(_gc_timer);
 
   // Filled in below to track the state of the young gen after the collection.
   bool eden_empty;
@@ -159,7 +168,7 @@
 
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    TraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
+    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
     TraceCollectorStats tcs(counters());
     TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
 
@@ -374,13 +383,18 @@
   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
 
   heap->print_heap_after_gc();
+  heap->trace_heap_after_gc(_gc_tracer);
 
-  heap->post_full_gc_dump();
+  heap->post_full_gc_dump(_gc_timer);
 
#ifdef TRACESPINNING
   ParallelTaskTerminator::print_termination_counts();
#endif
 
+  _gc_timer->register_gc_end(os::elapsed_counter());
+
+  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
+
   return true;
 }
 
@@ -498,7 +512,7 @@
 
 void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer);
   trace(" 1");
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@@ -531,8 +545,10 @@
   // Process reference objects found during marking
   {
     ref_processor()->setup_policy(clear_all_softrefs);
-    ref_processor()->process_discovered_references(
-      is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
+    const ReferenceProcessorStats& stats =
+      ref_processor()->process_discovered_references(
+        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
+    gc_tracer()->report_gc_reference_stats(stats);
   }
 
   // This is the point where the entire marking should have completed.
@@ -552,11 +568,12 @@
 
   // Clean up unreferenced symbols in symbol table.
   SymbolTable::unlink();
+
+  _gc_tracer->report_object_count_after_gc(is_alive_closure());
 }
 
 void PSMarkSweep::mark_sweep_phase2() {
-  TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer);
   trace("2");
 
   // Now all live objects are marked, compute the new object addresses.
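
GCTraceTime is the timer-aware replacement for TraceTime used throughout this changeset: it keeps the old GC-log printing and, when handed a GCTimer, also records its scope as a named phase. A minimal sketch of the four-argument form seen above; where NULL is passed instead of a timer (the top-level "Full GC" scopes), the apparent intent is that only the logging half is wanted, since the whole collection is already bracketed by register_gc_start()/register_gc_end():

    void PSMarkSweep::mark_sweep_phase2() {
      // (title, doit, print_cr, timer): logs "phase 2" timing as TraceTime did
      // and, because a timer is supplied, records a "phase 2" phase with
      // start/end timestamps in _gc_timer's time partitions.
      GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer);
      // ... phase body; the phase ends when tm is destroyed ...
    }
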
@@ -586,7 +603,7 @@
 
 void PSMarkSweep::mark_sweep_phase3() {
   // Adjust the pointers to reflect the new locations
-  TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer);
   trace("3");
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@@ -629,7 +646,7 @@
 
 void PSMarkSweep::mark_sweep_phase4() {
   EventMark m("4 compact heap");
-  TraceTime tm("phase 4", PrintGCDetails && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer);
   trace("4");
 
   // All pointers are now adjusted, move objects accordingly
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -110,7 +110,7 @@
   virtual void initialize(ReservedSpace rs, size_t alignment,
                           const char* perf_data_name, int level);
   void initialize_virtual_space(ReservedSpace rs, size_t alignment);
-  void initialize_work(const char* perf_data_name, int level);
+  virtual void initialize_work(const char* perf_data_name, int level);
   virtual void initialize_performance_counters(const char* perf_data_name, int level);
 
   MemRegion reserved() const { return _reserved; }
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -39,6 +39,10 @@
 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_implementation/shared/isGCActiveMark.hpp"
 #include "gc_interface/gcCause.hpp"
 #include "memory/gcLocker.inline.hpp"
@@ -799,6 +803,8 @@
 }
#endif  // #ifdef ASSERT
 
+STWGCTimer          PSParallelCompact::_gc_timer;
+ParallelOldTracer   PSParallelCompact::_gc_tracer;
 elapsedTimer        PSParallelCompact::_accumulated_time;
 unsigned int        PSParallelCompact::_total_invocations = 0;
 unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
@@ -972,7 +978,7 @@
   // at each young gen gc.  Do the update unconditionally (even though a
   // promotion failure does not swap spaces) because an unknown number of minor
   // collections will have swapped the spaces an unknown number of times.
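
The create_ps_heap_summary()/trace_heap() pair added to ParallelScavengeHeap earlier in this changeset follows the same before/after shape. A usage sketch, assuming the shared CollectedHeap code (not shown in this patch) drives the hook on both sides of a collection via the GCWhen::Type parameter:

    // Assumed driver; only trace_heap() itself is defined in this patch.
    heap->trace_heap(GCWhen::BeforeGC, &_gc_tracer); // PSHeapSummary + MetaspaceSummary
    // ... collect ...
    heap->trace_heap(GCWhen::AfterGC, &_gc_tracer);
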
- TraceTime tm("pre compact", print_phases(), true, gclog_or_tty); + GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer); ParallelScavengeHeap* heap = gc_heap(); _space_info[from_space_id].set_space(heap->young_gen()->from_space()); _space_info[to_space_id].set_space(heap->young_gen()->to_space()); @@ -989,6 +995,7 @@ _total_invocations++; heap->print_heap_before_gc(); + heap->trace_heap_before_gc(&_gc_tracer); // Fill in TLABs heap->accumulate_statistics_all_tlabs(); @@ -1014,7 +1021,7 @@ void PSParallelCompact::post_compact() { - TraceTime tm("post compact", print_phases(), true, gclog_or_tty); + GCTraceTime tm("post compact", print_phases(), true, &_gc_timer); for (unsigned int id = old_space_id; id < last_space_id; ++id) { // Clear the marking bitmap, summary data and split info. @@ -1840,7 +1847,7 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm, bool maximum_compaction) { - TraceTime tm("summary phase", print_phases(), true, gclog_or_tty); + GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer); // trace("2"); #ifdef ASSERT @@ -1998,11 +2005,15 @@ return false; } + ParallelScavengeHeap* heap = gc_heap(); + + _gc_timer.register_gc_start(os::elapsed_counter()); + _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start()); + TimeStamp marking_start; TimeStamp compaction_start; TimeStamp collection_exit; - ParallelScavengeHeap* heap = gc_heap(); GCCause::Cause gc_cause = heap->gc_cause(); PSYoungGen* young_gen = heap->young_gen(); PSOldGen* old_gen = heap->old_gen(); @@ -2018,7 +2029,7 @@ heap->record_gen_tops_before_GC(); } - heap->pre_full_gc_dump(); + heap->pre_full_gc_dump(&_gc_timer); _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes; @@ -2045,7 +2056,7 @@ gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - TraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty); + GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL); TraceCollectorStats tcs(counters()); TraceMemoryManagerStats tms(true /* Full GC */,gc_cause); @@ -2065,7 +2076,7 @@ bool marked_for_unloading = false; marking_start.update(); - marking_phase(vmthread_cm, maximum_heap_compaction); + marking_phase(vmthread_cm, maximum_heap_compaction, &_gc_tracer); bool max_on_system_gc = UseMaximumCompactionOnSystemGC && gc_cause == GCCause::_java_lang_system_gc; @@ -2218,6 +2229,8 @@ collection_exit.update(); heap->print_heap_after_gc(); + heap->trace_heap_after_gc(&_gc_tracer); + if (PrintGCTaskTimeStamps) { gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT, @@ -2226,12 +2239,17 @@ gc_task_manager()->print_task_time_stamps(); } - heap->post_full_gc_dump(); + heap->post_full_gc_dump(&_gc_timer); #ifdef TRACESPINNING ParallelTaskTerminator::print_termination_counts(); #endif + _gc_timer.register_gc_end(os::elapsed_counter()); + + _gc_tracer.report_dense_prefix(dense_prefix(old_space_id)); + _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions()); + return true; } @@ -2330,9 +2348,10 @@ } void PSParallelCompact::marking_phase(ParCompactionManager* cm, - bool maximum_heap_compaction) { + bool maximum_heap_compaction, + ParallelOldTracer *gc_tracer) { // Recursively traverse all live objects and mark them - TraceTime tm("marking phase", print_phases(), true, gclog_or_tty); + GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer); ParallelScavengeHeap* heap = gc_heap(); uint parallel_gc_threads = 
heap->gc_task_manager()->workers(); @@ -2347,7 +2366,8 @@ ClassLoaderDataGraph::clear_claimed_marks(); { - TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty); + GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer); + ParallelScavengeHeap::ParStrongRootsScope psrs; GCTaskQueue* q = GCTaskQueue::create(); @@ -2375,19 +2395,24 @@ // Process reference objects found during marking { - TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty); + GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer); + + ReferenceProcessorStats stats; if (ref_processor()->processing_is_mt()) { RefProcTaskExecutor task_executor; - ref_processor()->process_discovered_references( + stats = ref_processor()->process_discovered_references( is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, - &task_executor); + &task_executor, &_gc_timer); } else { - ref_processor()->process_discovered_references( - is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL); + stats = ref_processor()->process_discovered_references( + is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL, + &_gc_timer); } + + gc_tracer->report_gc_reference_stats(stats); } - TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty); + GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer); // This is the point where the entire marking should have completed. assert(cm->marking_stacks_empty(), "Marking should have completed"); @@ -2406,6 +2431,7 @@ // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); + _gc_tracer.report_object_count_after_gc(is_alive_closure()); } void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) { @@ -2446,7 +2472,7 @@ void PSParallelCompact::adjust_roots() { // Adjust the pointers to reflect the new locations - TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty); + GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer); // Need new claim bits when tracing through and adjusting pointers. ClassLoaderDataGraph::clear_claimed_marks(); @@ -2482,7 +2508,7 @@ void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q, uint parallel_gc_threads) { - TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty); + GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer); // Find the threads that are active unsigned int which = 0; @@ -2556,7 +2582,7 @@ void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q, uint parallel_gc_threads) { - TraceTime tm("dense prefix task setup", print_phases(), true, gclog_or_tty); + GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer); ParallelCompactData& sd = PSParallelCompact::summary_data(); @@ -2638,7 +2664,7 @@ GCTaskQueue* q, ParallelTaskTerminator* terminator_ptr, uint parallel_gc_threads) { - TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty); + GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer); // Once a thread has drained it's stack, it should try to steal regions from // other threads. 
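
With _gc_timer and _gc_tracer as static members, PSParallelCompact brackets a full collection the same way the other collectors do, plus one collector-specific datum, the dense prefix. The ordering condensed from the hunks above (not a literal excerpt):

    // Condensed from PSParallelCompact::invoke_no_policy().
    _gc_timer.register_gc_start(os::elapsed_counter());
    _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
    heap->pre_full_gc_dump(&_gc_timer);   // any heap dump/histogram is timed too
    marking_phase(vmthread_cm, maximum_heap_compaction, &_gc_tracer);
    // ... summary phase, compaction, cleanup ...
    heap->post_full_gc_dump(&_gc_timer);
    _gc_timer.register_gc_end(os::elapsed_counter());
    _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
    _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
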
@@ -2686,7 +2712,7 @@ void PSParallelCompact::compact() { // trace("5"); - TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty); + GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); @@ -2703,7 +2729,7 @@ enqueue_region_stealing_tasks(q, &terminator, active_gc_threads); { - TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty); + GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer); gc_task_manager()->execute_and_wait(q); @@ -2717,7 +2743,7 @@ { // Update the deferred objects, if any. Any compaction manager can be used. - TraceTime tm_du("deferred updates", print_phases(), true, gclog_or_tty); + GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer); ParCompactionManager* cm = ParCompactionManager::manager_array(0); for (unsigned int id = old_space_id; id < last_space_id; ++id) { update_deferred_objects(cm, SpaceId(id)); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -46,6 +46,8 @@ class PreGCValues; class MoveAndUpdateClosure; class RefProcTaskExecutor; +class ParallelOldTracer; +class STWGCTimer; // The SplitInfo class holds the information needed to 'split' a source region // so that the live data can be copied to two destination *spaces*. Normally, @@ -972,6 +974,8 @@ friend class RefProcTaskProxy; private: + static STWGCTimer _gc_timer; + static ParallelOldTracer _gc_tracer; static elapsedTimer _accumulated_time; static unsigned int _total_invocations; static unsigned int _maximum_compaction_gc_num; @@ -1015,7 +1019,8 @@ // Mark live objects static void marking_phase(ParCompactionManager* cm, - bool maximum_heap_compaction); + bool maximum_heap_compaction, + ParallelOldTracer *gc_tracer); template <class T> static inline void follow_root(ParCompactionManager* cm, T* p); @@ -1284,6 +1289,8 @@ // Reference Processing static ReferenceProcessor* const ref_processor() { return _ref_processor; } + static STWGCTimer* gc_timer() { return &_gc_timer; } + // Return the SpaceId for the given address. static SpaceId space_id(HeapWord* addr); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "gc_implementation/parallelScavenge/psOldGen.hpp" #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" +#include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/mutableSpace.hpp" #include "memory/memRegion.hpp" #include "oops/oop.inline.hpp" @@ -49,7 +50,7 @@ guarantee(_manager_array != NULL, "Could not initialize promotion manager"); _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads); - guarantee(_stack_array_depth != NULL, "Cound not initialize promotion manager"); + guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager"); // Create and register the PSPromotionManager(s) for the worker threads. for(uint i=0; i<ParallelGCThreads; i++) { @@ -84,13 +85,20 @@ } } -void PSPromotionManager::post_scavenge() { +bool PSPromotionManager::post_scavenge(YoungGCTracer& gc_tracer) { + bool promotion_failure_occurred = false; + TASKQUEUE_STATS_ONLY(if (PrintGCDetails && Verbose) print_stats()); for (uint i = 0; i < ParallelGCThreads + 1; i++) { PSPromotionManager* manager = manager_array(i); assert(manager->claimed_stack_depth()->is_empty(), "should be empty"); + if (manager->_promotion_failed_info.has_failed()) { + gc_tracer.report_promotion_failed(manager->_promotion_failed_info); + promotion_failure_occurred = true; + } manager->flush_labs(); } + return promotion_failure_occurred; } #if TASKQUEUE_STATS @@ -187,6 +195,8 @@ _old_lab.initialize(MemRegion(lab_base, (size_t)0)); _old_gen_is_full = false; + _promotion_failed_info.reset(); + TASKQUEUE_STATS_ONLY(reset_stats()); } @@ -305,6 +315,8 @@ // We won any races, we "own" this object. assert(obj == obj->forwardee(), "Sanity"); + _promotion_failed_info.register_copy_failure(obj->size()); + obj->push_contents(this); // Save the mark if needed diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,8 @@ #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP #include "gc_implementation/parallelScavenge/psPromotionLAB.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/copyFailedInfo.hpp" #include "memory/allocation.hpp" #include "utilities/taskqueue.hpp" @@ -33,7 +35,7 @@ // psPromotionManager is used by a single thread to manage object survival // during a scavenge. The promotion manager contains thread local data only. // -// NOTE! Be carefull when allocating the stacks on cheap. If you are going +// NOTE! Be careful when allocating the stacks on cheap. If you are going // to use a promotion manager in more than one thread, the stacks MUST be // on cheap. This can lead to memory leaks, though, as they are not auto // deallocated. 
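The psPromotionManager changes above replace the old global PSScavenge::_promotion_failed flag with per-manager accounting: every PSPromotionManager now owns a PromotionFailedInfo, each failed copy is registered into it, and post_scavenge() reports any non-empty records through the YoungGCTracer and returns whether a failure occurred. A hedged sketch of the CopyFailedInfo/PromotionFailedInfo accounting semantics follows (the class is defined in copyFailedInfo.hpp later in this changeset); the word sizes are made-up example values, and a current attached Thread is assumed because register_copy_failure() records Thread::current()->osthread().

#include "gc_implementation/shared/copyFailedInfo.hpp"

void promotion_failed_info_example() {
  PromotionFailedInfo info;        // no failures recorded yet
  assert(!info.has_failed(), "fresh info reports no failure");

  info.register_copy_failure(64);  // first failure: first_size and smallest_size both become 64 words
  info.register_copy_failure(16);  // a smaller failure only lowers smallest_size

  assert(info.first_size() == 64, "size of the first failed copy is kept");
  assert(info.smallest_size() == 16, "smallest failed copy is tracked");
  assert(info.total_size() == 80, "sizes accumulate");
  assert(info.failed_count() == 2, "each failure is counted");

  info.reset();                    // PSPromotionManager::reset() clears it between scavenges
  assert(!info.has_failed(), "reset clears the record");
}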
@@ -85,6 +87,8 @@ uint _array_chunk_size; uint _min_array_size_for_chunking; + PromotionFailedInfo _promotion_failed_info; + // Accessors static PSOldGen* old_gen() { return _old_gen; } static MutableSpace* young_space() { return _young_space; } @@ -149,7 +153,7 @@ static void initialize(); static void pre_scavenge(); - static void post_scavenge(); + static bool post_scavenge(YoungGCTracer& gc_tracer); static PSPromotionManager* gc_thread_promotion_manager(int index); static PSPromotionManager* vm_thread_promotion_manager(); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -152,7 +152,7 @@ // This is the promotion failed test, and code handling. // The code belongs here for two reasons. It is slightly - // different thatn the code below, and cannot share the + // different than the code below, and cannot share the // CAS testing code. Keeping the code here also minimizes // the impact on the common case fast path code. diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -34,6 +34,10 @@ #include "gc_implementation/parallelScavenge/psParallelCompact.hpp" #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" #include "gc_implementation/parallelScavenge/psTasks.hpp" +#include "gc_implementation/shared/gcHeapSummary.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_implementation/shared/isGCActiveMark.hpp" #include "gc_implementation/shared/spaceDecorator.hpp" #include "gc_interface/gcCause.hpp" @@ -63,10 +67,11 @@ HeapWord* PSScavenge::_young_generation_boundary = NULL; uintptr_t PSScavenge::_young_generation_boundary_compressed = 0; elapsedTimer PSScavenge::_accumulated_time; +STWGCTimer PSScavenge::_gc_timer; +ParallelScavengeTracer PSScavenge::_gc_tracer; Stack<markOop, mtGC> PSScavenge::_preserved_mark_stack; Stack<oop, mtGC> PSScavenge::_preserved_oop_stack; CollectorCounters* PSScavenge::_counters = NULL; -bool PSScavenge::_promotion_failed = false; // Define before use class PSIsAliveClosure: public BoolObjectClosure { @@ -259,6 +264,8 @@ assert(_preserved_mark_stack.is_empty(), "should be empty"); assert(_preserved_oop_stack.is_empty(), "should be empty"); + _gc_timer.register_gc_start(os::elapsed_counter()); + TimeStamp scavenge_entry; TimeStamp scavenge_midpoint; TimeStamp scavenge_exit; @@ -278,11 +285,14 @@ return false; } + _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start()); + bool promotion_failure_occurred = false; PSYoungGen* young_gen = heap->young_gen(); PSOldGen* old_gen = heap->old_gen(); PSAdaptiveSizePolicy* size_policy = heap->size_policy(); + heap->increment_total_collections(); 
AdaptiveSizePolicyOutput(size_policy, heap->total_collections()); @@ -299,12 +309,12 @@ } heap->print_heap_before_gc(); + heap->trace_heap_before_gc(&_gc_tracer); assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity"); assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity"); size_t prev_used = heap->used(); - assert(promotion_failed() == false, "Sanity"); // Fill in TLABs heap->accumulate_statistics_all_tlabs(); @@ -321,7 +331,7 @@ gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - TraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty); + GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL); TraceCollectorStats tcs(counters()); TraceMemoryManagerStats tms(false /* not full GC */,gc_cause); @@ -387,7 +397,7 @@ // We'll use the promotion manager again later. PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager(); { - // TraceTime("Roots"); + GCTraceTime tm("Scavenge", false, false, &_gc_timer); ParallelScavengeHeap::ParStrongRootsScope psrs; GCTaskQueue* q = GCTaskQueue::create(); @@ -429,36 +439,41 @@ // Process reference objects discovered during scavenge { + GCTraceTime tm("References", false, false, &_gc_timer); + reference_processor()->setup_policy(false); // not always_clear reference_processor()->set_active_mt_degree(active_workers); PSKeepAliveClosure keep_alive(promotion_manager); PSEvacuateFollowersClosure evac_followers(promotion_manager); + ReferenceProcessorStats stats; if (reference_processor()->processing_is_mt()) { PSRefProcTaskExecutor task_executor; - reference_processor()->process_discovered_references( - &_is_alive_closure, &keep_alive, &evac_followers, &task_executor); + stats = reference_processor()->process_discovered_references( + &_is_alive_closure, &keep_alive, &evac_followers, &task_executor, + &_gc_timer); } else { - reference_processor()->process_discovered_references( - &_is_alive_closure, &keep_alive, &evac_followers, NULL); + stats = reference_processor()->process_discovered_references( + &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer); + } + + _gc_tracer.report_gc_reference_stats(stats); + + // Enqueue reference objects discovered during scavenge. + if (reference_processor()->processing_is_mt()) { + PSRefProcTaskExecutor task_executor; + reference_processor()->enqueue_discovered_references(&task_executor); + } else { + reference_processor()->enqueue_discovered_references(NULL); } } - // Enqueue reference objects discovered during scavenge. - if (reference_processor()->processing_is_mt()) { - PSRefProcTaskExecutor task_executor; - reference_processor()->enqueue_discovered_references(&task_executor); - } else { - reference_processor()->enqueue_discovered_references(NULL); - } - + GCTraceTime tm("StringTable", false, false, &_gc_timer); // Unlink any dead interned Strings and process the remaining live ones. PSScavengeRootsClosure root_closure(promotion_manager); StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure); // Finally, flush the promotion_manager's labs, and deallocate its stacks. - PSPromotionManager::post_scavenge(); - - promotion_failure_occurred = promotion_failed(); + promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer); if (promotion_failure_occurred) { clean_up_failed_promotion(); if (PrintGC) { @@ -473,8 +488,6 @@ if (!promotion_failure_occurred) { // Swap the survivor spaces. 
- - young_gen->eden_space()->clear(SpaceDecorator::Mangle); young_gen->from_space()->clear(SpaceDecorator::Mangle); young_gen->swap_spaces(); @@ -612,7 +625,11 @@ NOT_PRODUCT(reference_processor()->verify_no_references_recorded()); - CodeCache::prune_scavenge_root_nmethods(); + { + GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer); + + CodeCache::prune_scavenge_root_nmethods(); + } // Re-verify object start arrays if (VerifyObjectStartArray && @@ -652,6 +669,8 @@ } heap->print_heap_after_gc(); + heap->trace_heap_after_gc(&_gc_tracer); + _gc_tracer.report_tenuring_threshold(tenuring_threshold()); if (ZapUnusedHeapArea) { young_gen->eden_space()->check_mangled_unused_area_complete(); @@ -672,6 +691,11 @@ ParallelTaskTerminator::print_termination_counts(); #endif + + _gc_timer.register_gc_end(os::elapsed_counter()); + + _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions()); + return !promotion_failure_occurred; } @@ -681,7 +705,6 @@ void PSScavenge::clean_up_failed_promotion() { ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); - assert(promotion_failed(), "Sanity"); PSYoungGen* young_gen = heap->young_gen(); @@ -706,7 +729,6 @@ // Clear the preserved mark and oop stack caches. _preserved_mark_stack.clear(true); _preserved_oop_stack.clear(true); - _promotion_failed = false; } // Reset the PromotionFailureALot counters. @@ -717,11 +739,10 @@ // fails. Some markOops will need preservation, some will not. Note // that the entire eden is traversed after a failed promotion, with // all forwarded headers replaced by the default markOop. This means -// it is not neccessary to preserve most markOops. +// it is not necessary to preserve most markOops. void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) { - _promotion_failed = true; if (obj_mark->must_be_preserved_for_promotion_failure(obj)) { - // Should use per-worker private stakcs hetre rather than + // Should use per-worker private stacks here rather than // locking a common pair of stacks. ThreadCritical tc; _preserved_oop_stack.push(obj); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ #include "gc_implementation/parallelScavenge/cardTableExtension.hpp" #include "gc_implementation/parallelScavenge/psVirtualspace.hpp" #include "gc_implementation/shared/collectorCounters.hpp" +#include "gc_implementation/shared/gcTrace.hpp" #include "memory/allocation.hpp" #include "oops/oop.hpp" #include "utilities/stack.hpp" @@ -37,8 +38,10 @@ class OopStack; class ReferenceProcessor; class ParallelScavengeHeap; +class ParallelScavengeTracer; class PSIsAliveClosure; class PSRefProcTaskExecutor; +class STWGCTimer; class PSScavenge: AllStatic { friend class PSIsAliveClosure; @@ -68,6 +71,8 @@ static bool _survivor_overflow; // Overflow this collection static uint _tenuring_threshold; // tenuring threshold for next scavenge static elapsedTimer _accumulated_time; // total time spent on scavenge + static STWGCTimer _gc_timer; // GC time book keeper + static ParallelScavengeTracer _gc_tracer; // GC tracing // The lowest address possible for the young_gen. // This is used to decide if an oop should be scavenged, // cards should be marked, etc. @@ -77,7 +82,6 @@ static Stack<markOop, mtGC> _preserved_mark_stack; // List of marks to be restored after failed promotion static Stack<oop, mtGC> _preserved_oop_stack; // List of oops that need their mark restored. static CollectorCounters* _counters; // collector performance counters - static bool _promotion_failed; static void clean_up_failed_promotion(); @@ -93,7 +97,6 @@ // Accessors static uint tenuring_threshold() { return _tenuring_threshold; } static elapsedTimer* accumulated_time() { return &_accumulated_time; } - static bool promotion_failed() { return _promotion_failed; } static int consecutive_skipped_scavenges() { return _consecutive_skipped_scavenges; } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -101,7 +101,8 @@ } char* const base_addr = committed_high_addr(); - bool result = special() || os::commit_memory(base_addr, bytes, alignment()); + bool result = special() || + os::commit_memory(base_addr, bytes, alignment(), !ExecMem); if (result) { _committed_high_addr += bytes; } @@ -154,7 +155,7 @@ if (tmp_bytes > 0) { char* const commit_base = committed_high_addr(); if (other_space->special() || - os::commit_memory(commit_base, tmp_bytes, alignment())) { + os::commit_memory(commit_base, tmp_bytes, alignment(), !ExecMem)) { // Reduce the reserved region in the other space. 
other_space->set_reserved(other_space->reserved_low_addr() + tmp_bytes, other_space->reserved_high_addr(), @@ -269,7 +270,8 @@ } char* const base_addr = committed_low_addr() - bytes; - bool result = special() || os::commit_memory(base_addr, bytes, alignment()); + bool result = special() || + os::commit_memory(base_addr, bytes, alignment(), !ExecMem); if (result) { _committed_low_addr -= bytes; } @@ -322,7 +324,7 @@ if (tmp_bytes > 0) { char* const commit_base = committed_low_addr() - tmp_bytes; if (other_space->special() || - os::commit_memory(commit_base, tmp_bytes, alignment())) { + os::commit_memory(commit_base, tmp_bytes, alignment(), !ExecMem)) { // Reduce the reserved region in the other space. other_space->set_reserved(other_space->reserved_low_addr(), other_space->reserved_high_addr() - tmp_bytes, diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/shared/copyFailedInfo.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/shared/copyFailedInfo.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_COPYFAILEDINFO_HPP +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_COPYFAILEDINFO_HPP + +#include "runtime/thread.hpp" +#include "utilities/globalDefinitions.hpp" + +class CopyFailedInfo : public CHeapObj<mtGC> { + size_t _first_size; + size_t _smallest_size; + size_t _total_size; + uint _count; + + public: + CopyFailedInfo() : _first_size(0), _smallest_size(0), _total_size(0), _count(0) {} + + virtual void register_copy_failure(size_t size) { + if (_first_size == 0) { + _first_size = size; + _smallest_size = size; + } else if (size < _smallest_size) { + _smallest_size = size; + } + _total_size += size; + _count++; + } + + virtual void reset() { + _first_size = 0; + _smallest_size = 0; + _total_size = 0; + _count = 0; + } + + bool has_failed() const { return _count != 0; } + size_t first_size() const { return _first_size; } + size_t smallest_size() const { return _smallest_size; } + size_t total_size() const { return _total_size; } + uint failed_count() const { return _count; } +}; + +class PromotionFailedInfo : public CopyFailedInfo { + OSThread* _thread; + + public: + PromotionFailedInfo() : CopyFailedInfo(), _thread(NULL) {} + + void register_copy_failure(size_t size) { + CopyFailedInfo::register_copy_failure(size); + if (_thread == NULL) { + _thread = Thread::current()->osthread(); + } else { + assert(_thread == Thread::current()->osthread(), "The PromotionFailedInfo should be thread local."); + } + } + + void reset() { + CopyFailedInfo::reset(); + _thread = NULL; + } + + OSThread* thread() const { return _thread; } +}; + +class EvacuationFailedInfo : public CopyFailedInfo {}; + +#endif /* SHARE_VM_GC_IMPLEMENTATION_SHARED_COPYFAILEDINFO_HPP */ diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/shared/gcHeapSummary.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/shared/gcHeapSummary.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCHEAPSUMMARY_HPP +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCHEAPSUMMARY_HPP + +#include "memory/allocation.hpp" + +class VirtualSpaceSummary : public StackObj { + HeapWord* _start; + HeapWord* _committed_end; + HeapWord* _reserved_end; +public: + VirtualSpaceSummary() : + _start(NULL), _committed_end(NULL), _reserved_end(NULL) { } + VirtualSpaceSummary(HeapWord* start, HeapWord* committed_end, HeapWord* reserved_end) : + _start(start), _committed_end(committed_end), _reserved_end(reserved_end) { } + + HeapWord* start() const { return _start; } + HeapWord* committed_end() const { return _committed_end; } + HeapWord* reserved_end() const { return _reserved_end; } + size_t committed_size() const { return (uintptr_t)_committed_end - (uintptr_t)_start; } + size_t reserved_size() const { return (uintptr_t)_reserved_end - (uintptr_t)_start; } +}; + +class SpaceSummary : public StackObj { + HeapWord* _start; + HeapWord* _end; + size_t _used; +public: + SpaceSummary() : + _start(NULL), _end(NULL), _used(0) { } + SpaceSummary(HeapWord* start, HeapWord* end, size_t used) : + _start(start), _end(end), _used(used) { } + + HeapWord* start() const { return _start; } + HeapWord* end() const { return _end; } + size_t used() const { return _used; } + size_t size() const { return (uintptr_t)_end - (uintptr_t)_start; } +}; + +class MetaspaceSizes : public StackObj { + size_t _capacity; + size_t _used; + size_t _reserved; + + public: + MetaspaceSizes() : _capacity(0), _used(0), _reserved(0) {} + MetaspaceSizes(size_t capacity, size_t used, size_t reserved) : + _capacity(capacity), _used(used), _reserved(reserved) {} + + size_t capacity() const { return _capacity; } + size_t used() const { return _used; } + size_t reserved() const { return _reserved; } +}; + +class GCHeapSummary; +class PSHeapSummary; + +class GCHeapSummaryVisitor { + public: + virtual void visit(const GCHeapSummary* heap_summary) const = 0; + virtual void visit(const PSHeapSummary* heap_summary) const {} +}; + +class GCHeapSummary : public StackObj { + VirtualSpaceSummary _heap; + size_t _used; + + public: + GCHeapSummary() : + _heap(), _used(0) { } + GCHeapSummary(VirtualSpaceSummary& heap_space, size_t used) : + _heap(heap_space), _used(used) { } + + const VirtualSpaceSummary& heap() const { return _heap; } + size_t used() const { return _used; } + + virtual void accept(GCHeapSummaryVisitor* visitor) const { + visitor->visit(this); + } +}; + +class PSHeapSummary : public GCHeapSummary { + VirtualSpaceSummary _old; + SpaceSummary _old_space; + VirtualSpaceSummary _young; + SpaceSummary _eden; + SpaceSummary _from; + SpaceSummary _to; + public: + PSHeapSummary(VirtualSpaceSummary& heap_space, size_t heap_used, VirtualSpaceSummary old, SpaceSummary old_space, VirtualSpaceSummary young, SpaceSummary eden, SpaceSummary from, SpaceSummary to) : + GCHeapSummary(heap_space, heap_used), _old(old), _old_space(old_space), _young(young), _eden(eden), _from(from), _to(to) { } + const VirtualSpaceSummary& old() const { return _old; } + const SpaceSummary& old_space() const { return _old_space; } + const VirtualSpaceSummary& young() const { return _young; } + const SpaceSummary& eden() const { return _eden; } + const SpaceSummary& from() const { return _from; } + const SpaceSummary& to() const { return _to; } + + virtual void accept(GCHeapSummaryVisitor* visitor) const { + visitor->visit(this); + } +}; + +class MetaspaceSummary : public StackObj { + MetaspaceSizes _meta_space; + MetaspaceSizes 
_data_space; + MetaspaceSizes _class_space; + + public: + MetaspaceSummary() : _meta_space(), _data_space(), _class_space() {} + MetaspaceSummary(const MetaspaceSizes& meta_space, const MetaspaceSizes& data_space, const MetaspaceSizes& class_space) : + _meta_space(meta_space), _data_space(data_space), _class_space(class_space) { } + + const MetaspaceSizes& meta_space() const { return _meta_space; } + const MetaspaceSizes& data_space() const { return _data_space; } + const MetaspaceSizes& class_space() const { return _class_space; } +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCHEAPSUMMARY_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/shared/gcTimer.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/shared/gcTimer.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,374 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "utilities/growableArray.hpp" + +void GCTimer::register_gc_start(jlong time) { + _time_partitions.clear(); + _gc_start = time; +} + +void GCTimer::register_gc_end(jlong time) { + assert(!_time_partitions.has_active_phases(), + "We should have ended all started phases, before ending the GC"); + + _gc_end = time; +} + +void GCTimer::register_gc_pause_start(const char* name, jlong time) { + _time_partitions.report_gc_phase_start(name, time); +} + +void GCTimer::register_gc_pause_end(jlong time) { + _time_partitions.report_gc_phase_end(time); +} + +void GCTimer::register_gc_phase_start(const char* name, jlong time) { + _time_partitions.report_gc_phase_start(name, time); +} + +void GCTimer::register_gc_phase_end(jlong time) { + _time_partitions.report_gc_phase_end(time); +} + + +void STWGCTimer::register_gc_start(jlong time) { + GCTimer::register_gc_start(time); + register_gc_pause_start("GC Pause", time); +} + +void STWGCTimer::register_gc_end(jlong time) { + register_gc_pause_end(time); + GCTimer::register_gc_end(time); +} + +void ConcurrentGCTimer::register_gc_pause_start(const char* name, jlong time) { + GCTimer::register_gc_pause_start(name, time); +} + +void ConcurrentGCTimer::register_gc_pause_end(jlong time) { + GCTimer::register_gc_pause_end(time); +} + +void PhasesStack::clear() { + _next_phase_level = 0; +} + +void PhasesStack::push(int phase_index) { + assert(_next_phase_level < PHASE_LEVELS, "Overflow"); + + _phase_indices[_next_phase_level] = phase_index; + + _next_phase_level++; +} + +int PhasesStack::pop() { + assert(_next_phase_level > 0, "Underflow"); + + _next_phase_level--; + + return _phase_indices[_next_phase_level]; +} + +int PhasesStack::count() const { + return _next_phase_level; +} + + +TimePartitions::TimePartitions() { + _phases = new (ResourceObj::C_HEAP, mtGC) GrowableArray<PausePhase>(INITIAL_CAPACITY, true, mtGC); + clear(); +} + +TimePartitions::~TimePartitions() { + delete _phases; + _phases = NULL; +} + +void TimePartitions::clear() { + _phases->clear(); + _active_phases.clear(); + _sum_of_pauses = 0; + _longest_pause = 0; +} + +void TimePartitions::report_gc_phase_start(const char* name, jlong time) { + assert(_phases->length() <= 1000, "Too many recorded phases?"); + + int level = _active_phases.count(); + + PausePhase phase; + phase.set_level(level); + phase.set_name(name); + phase.set_start(time); + + int index = _phases->append(phase); + + _active_phases.push(index); +} + +void TimePartitions::update_statistics(GCPhase* phase) { + // FIXME: This should only be done for pause phases + if (phase->level() == 0) { + jlong pause = phase->end() - phase->start(); + _sum_of_pauses += pause; + _longest_pause = MAX2(pause, _longest_pause); + } +} + +void TimePartitions::report_gc_phase_end(jlong time) { + int phase_index = _active_phases.pop(); + GCPhase* phase = _phases->adr_at(phase_index); + phase->set_end(time); + update_statistics(phase); +} + +int TimePartitions::num_phases() const { + return _phases->length(); +} + +GCPhase* TimePartitions::phase_at(int index) const { + assert(index >= 0, "Out of bounds"); + assert(index < _phases->length(), "Out of bounds"); + + return _phases->adr_at(index); +} + +jlong TimePartitions::sum_of_pauses() { + return _sum_of_pauses; +} + +jlong TimePartitions::longest_pause() { + return _longest_pause; +} + +bool TimePartitions::has_active_phases() { + return _active_phases.count() > 0; +} + +bool TimePartitionPhasesIterator::has_next() { 
return _next < _time_partitions->num_phases(); +} + +GCPhase* TimePartitionPhasesIterator::next() { + assert(has_next(), "Must have phases left"); + return _time_partitions->phase_at(_next++); +} + + +/////////////// Unit tests /////////////// + +#ifndef PRODUCT + +class TimePartitionPhasesIteratorTest { + public: + static void all() { + one_pause(); + two_pauses(); + one_sub_pause_phase(); + many_sub_pause_phases(); + many_sub_pause_phases2(); + max_nested_pause_phases(); + } + + static void validate_pause_phase(GCPhase* phase, int level, const char* name, jlong start, jlong end) { + assert(phase->level() == level, "Incorrect level"); + assert(strcmp(phase->name(), name) == 0, "Incorrect name"); + assert(phase->start() == start, "Incorrect start"); + assert(phase->end() == end, "Incorrect end"); + } + + static void one_pause() { + TimePartitions time_partitions; + time_partitions.report_gc_phase_start("PausePhase", 2); + time_partitions.report_gc_phase_end(8); + + TimePartitionPhasesIterator iter(&time_partitions); + + validate_pause_phase(iter.next(), 0, "PausePhase", 2, 8); + assert(time_partitions.sum_of_pauses() == 8-2, "Incorrect"); + assert(time_partitions.longest_pause() == 8-2, "Incorrect"); + + assert(!iter.has_next(), "Too many elements"); + } + + static void two_pauses() { + TimePartitions time_partitions; + time_partitions.report_gc_phase_start("PausePhase1", 2); + time_partitions.report_gc_phase_end(3); + time_partitions.report_gc_phase_start("PausePhase2", 4); + time_partitions.report_gc_phase_end(6); + + TimePartitionPhasesIterator iter(&time_partitions); + + validate_pause_phase(iter.next(), 0, "PausePhase1", 2, 3); + validate_pause_phase(iter.next(), 0, "PausePhase2", 4, 6); + + assert(time_partitions.sum_of_pauses() == 3, "Incorrect"); + assert(time_partitions.longest_pause() == 2, "Incorrect"); + + assert(!iter.has_next(), "Too many elements"); + } + + static void one_sub_pause_phase() { + TimePartitions time_partitions; + time_partitions.report_gc_phase_start("PausePhase", 2); + time_partitions.report_gc_phase_start("SubPhase", 3); + time_partitions.report_gc_phase_end(4); + time_partitions.report_gc_phase_end(5); + + TimePartitionPhasesIterator iter(&time_partitions); + + validate_pause_phase(iter.next(), 0, "PausePhase", 2, 5); + validate_pause_phase(iter.next(), 1, "SubPhase", 3, 4); + + assert(time_partitions.sum_of_pauses() == 3, "Incorrect"); + assert(time_partitions.longest_pause() == 3, "Incorrect"); + + assert(!iter.has_next(), "Too many elements"); + } + + static void max_nested_pause_phases() { + TimePartitions time_partitions; + time_partitions.report_gc_phase_start("PausePhase", 2); + time_partitions.report_gc_phase_start("SubPhase1", 3); + time_partitions.report_gc_phase_start("SubPhase2", 4); + time_partitions.report_gc_phase_start("SubPhase3", 5); + time_partitions.report_gc_phase_end(6); + time_partitions.report_gc_phase_end(7); + time_partitions.report_gc_phase_end(8); + time_partitions.report_gc_phase_end(9); + + TimePartitionPhasesIterator iter(&time_partitions); + + validate_pause_phase(iter.next(), 0, "PausePhase", 2, 9); + validate_pause_phase(iter.next(), 1, "SubPhase1", 3, 8); + validate_pause_phase(iter.next(), 2, "SubPhase2", 4, 7); + validate_pause_phase(iter.next(), 3, "SubPhase3", 5, 6); + + assert(time_partitions.sum_of_pauses() == 7, "Incorrect"); + assert(time_partitions.longest_pause() == 7, "Incorrect"); + + assert(!iter.has_next(), "Too many elements"); + } + + static void many_sub_pause_phases() { + TimePartitions time_partitions; + 
time_partitions.report_gc_phase_start("PausePhase", 2); + + time_partitions.report_gc_phase_start("SubPhase1", 3); + time_partitions.report_gc_phase_end(4); + time_partitions.report_gc_phase_start("SubPhase2", 5); + time_partitions.report_gc_phase_end(6); + time_partitions.report_gc_phase_start("SubPhase3", 7); + time_partitions.report_gc_phase_end(8); + time_partitions.report_gc_phase_start("SubPhase4", 9); + time_partitions.report_gc_phase_end(10); + + time_partitions.report_gc_phase_end(11); + + TimePartitionPhasesIterator iter(&time_partitions); + + validate_pause_phase(iter.next(), 0, "PausePhase", 2, 11); + validate_pause_phase(iter.next(), 1, "SubPhase1", 3, 4); + validate_pause_phase(iter.next(), 1, "SubPhase2", 5, 6); + validate_pause_phase(iter.next(), 1, "SubPhase3", 7, 8); + validate_pause_phase(iter.next(), 1, "SubPhase4", 9, 10); + + assert(time_partitions.sum_of_pauses() == 9, "Incorrect"); + assert(time_partitions.longest_pause() == 9, "Incorrect"); + + assert(!iter.has_next(), "Too many elements"); + } + + static void many_sub_pause_phases2() { + TimePartitions time_partitions; + time_partitions.report_gc_phase_start("PausePhase", 2); + + time_partitions.report_gc_phase_start("SubPhase1", 3); + time_partitions.report_gc_phase_start("SubPhase11", 4); + time_partitions.report_gc_phase_end(5); + time_partitions.report_gc_phase_start("SubPhase12", 6); + time_partitions.report_gc_phase_end(7); + time_partitions.report_gc_phase_end(8); + time_partitions.report_gc_phase_start("SubPhase2", 9); + time_partitions.report_gc_phase_start("SubPhase21", 10); + time_partitions.report_gc_phase_end(11); + time_partitions.report_gc_phase_start("SubPhase22", 12); + time_partitions.report_gc_phase_end(13); + time_partitions.report_gc_phase_end(14); + time_partitions.report_gc_phase_start("SubPhase3", 15); + time_partitions.report_gc_phase_end(16); + + time_partitions.report_gc_phase_end(17); + + TimePartitionPhasesIterator iter(&time_partitions); + + validate_pause_phase(iter.next(), 0, "PausePhase", 2, 17); + validate_pause_phase(iter.next(), 1, "SubPhase1", 3, 8); + validate_pause_phase(iter.next(), 2, "SubPhase11", 4, 5); + validate_pause_phase(iter.next(), 2, "SubPhase12", 6, 7); + validate_pause_phase(iter.next(), 1, "SubPhase2", 9, 14); + validate_pause_phase(iter.next(), 2, "SubPhase21", 10, 11); + validate_pause_phase(iter.next(), 2, "SubPhase22", 12, 13); + validate_pause_phase(iter.next(), 1, "SubPhase3", 15, 16); + + assert(time_partitions.sum_of_pauses() == 15, "Incorrect"); + assert(time_partitions.longest_pause() == 15, "Incorrect"); + + assert(!iter.has_next(), "Too many elements"); + } +}; + +class GCTimerTest { +public: + static void all() { + gc_start(); + gc_end(); + } + + static void gc_start() { + GCTimer gc_timer; + gc_timer.register_gc_start(1); + + assert(gc_timer.gc_start() == 1, "Incorrect"); + } + + static void gc_end() { + GCTimer gc_timer; + gc_timer.register_gc_start(1); + gc_timer.register_gc_end(2); + + assert(gc_timer.gc_end() == 2, "Incorrect"); + } +}; + +void GCTimerAllTest::all() { + GCTimerTest::all(); + TimePartitionPhasesIteratorTest::all(); +} + +#endif diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/shared/gcTimer.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/shared/gcTimer.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTIMER_HPP +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTIMER_HPP + +#include "memory/allocation.hpp" +#include "prims/jni_md.h" +#include "utilities/macros.hpp" + +class ConcurrentPhase; +class GCPhase; +class PausePhase; + +template <class T> class GrowableArray; + +class PhaseVisitor { + public: + virtual void visit(GCPhase* phase) = 0; + virtual void visit(PausePhase* phase) { visit((GCPhase*)phase); } + virtual void visit(ConcurrentPhase* phase) { visit((GCPhase*)phase); } +}; + +class GCPhase { + const char* _name; + int _level; + jlong _start; + jlong _end; + + public: + void set_name(const char* name) { _name = name; } + const char* name() { return _name; } + + int level() { return _level; } + void set_level(int level) { _level = level; } + + jlong start() { return _start; } + void set_start(jlong time) { _start = time; } + + jlong end() { return _end; } + void set_end(jlong time) { _end = time; } + + virtual void accept(PhaseVisitor* visitor) = 0; +}; + +class PausePhase : public GCPhase { + public: + void accept(PhaseVisitor* visitor) { + visitor->visit(this); + } +}; + +class ConcurrentPhase : public GCPhase { + void accept(PhaseVisitor* visitor) { + visitor->visit(this); + } +}; + +class PhasesStack { + public: + // FIXME: Temporarily set to 5 (used to be 4), since Reference processing needs it. + static const int PHASE_LEVELS = 5; + + private: + int _phase_indices[PHASE_LEVELS]; + int _next_phase_level; + + public: + PhasesStack() { clear(); } + void clear(); + + void push(int phase_index); + int pop(); + int count() const; +}; + +class TimePartitions { + static const int INITIAL_CAPACITY = 10; + + // Currently we only support pause phases. 
+ GrowableArray<PausePhase>* _phases; + PhasesStack _active_phases; + + jlong _sum_of_pauses; + jlong _longest_pause; + + public: + TimePartitions(); + ~TimePartitions(); + void clear(); + + void report_gc_phase_start(const char* name, jlong time); + void report_gc_phase_end(jlong time); + + int num_phases() const; + GCPhase* phase_at(int index) const; + + jlong sum_of_pauses(); + jlong longest_pause(); + + bool has_active_phases(); + private: + void update_statistics(GCPhase* phase); +}; + +class PhasesIterator { + public: + virtual bool has_next() = 0; + virtual GCPhase* next() = 0; +}; + +class GCTimer : public ResourceObj { + NOT_PRODUCT(friend class GCTimerTest;) + protected: + jlong _gc_start; + jlong _gc_end; + TimePartitions _time_partitions; + + public: + virtual void register_gc_start(jlong time); + virtual void register_gc_end(jlong time); + + void register_gc_phase_start(const char* name, jlong time); + void register_gc_phase_end(jlong time); + + jlong gc_start() { return _gc_start; } + jlong gc_end() { return _gc_end; } + + TimePartitions* time_partitions() { return &_time_partitions; } + + long longest_pause(); + long sum_of_pauses(); + + protected: + void register_gc_pause_start(const char* name, jlong time); + void register_gc_pause_end(jlong time); +}; + +class STWGCTimer : public GCTimer { + public: + virtual void register_gc_start(jlong time); + virtual void register_gc_end(jlong time); +}; + +class ConcurrentGCTimer : public GCTimer { + public: + void register_gc_pause_start(const char* name, jlong time); + void register_gc_pause_end(jlong time); +}; + +class TimePartitionPhasesIterator { + TimePartitions* _time_partitions; + int _next; + + public: + TimePartitionPhasesIterator(TimePartitions* time_partitions) : _time_partitions(time_partitions), _next(0) { } + + virtual bool has_next(); + virtual GCPhase* next(); +}; + + +/////////////// Unit tests /////////////// + +#ifndef PRODUCT + +class GCTimerAllTest { + public: + static void all(); +}; + +#endif + +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTIMER_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/shared/gcTrace.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/shared/gcTrace.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "gc_implementation/shared/copyFailedInfo.hpp" +#include "gc_implementation/shared/gcHeapSummary.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/objectCountEventSender.hpp" +#include "memory/heapInspection.hpp" +#include "memory/referenceProcessorStats.hpp" +#include "runtime/os.hpp" +#include "utilities/globalDefinitions.hpp" + +#if INCLUDE_ALL_GCS +#include "gc_implementation/g1/evacuationInfo.hpp" +#endif + +#define assert_unset_gc_id() assert(_shared_gc_info.id() == SharedGCInfo::UNSET_GCID, "GC already started?") +#define assert_set_gc_id() assert(_shared_gc_info.id() != SharedGCInfo::UNSET_GCID, "GC not started?") + +static GCId GCTracer_next_gc_id = 0; +static GCId create_new_gc_id() { + return GCTracer_next_gc_id++; +} + +void GCTracer::report_gc_start_impl(GCCause::Cause cause, jlong timestamp) { + assert_unset_gc_id(); + + GCId gc_id = create_new_gc_id(); + _shared_gc_info.set_id(gc_id); + _shared_gc_info.set_cause(cause); + _shared_gc_info.set_start_timestamp(timestamp); +} + +void GCTracer::report_gc_start(GCCause::Cause cause, jlong timestamp) { + assert_unset_gc_id(); + + report_gc_start_impl(cause, timestamp); +} + +bool GCTracer::has_reported_gc_start() const { + return _shared_gc_info.id() != SharedGCInfo::UNSET_GCID; +} + +void GCTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) { + assert_set_gc_id(); + + _shared_gc_info.set_sum_of_pauses(time_partitions->sum_of_pauses()); + _shared_gc_info.set_longest_pause(time_partitions->longest_pause()); + _shared_gc_info.set_end_timestamp(timestamp); + + send_phase_events(time_partitions); + send_garbage_collection_event(); +} + +void GCTracer::report_gc_end(jlong timestamp, TimePartitions* time_partitions) { + assert_set_gc_id(); + + report_gc_end_impl(timestamp, time_partitions); + + _shared_gc_info.set_id(SharedGCInfo::UNSET_GCID); +} + +void GCTracer::report_gc_reference_stats(const ReferenceProcessorStats& rps) const { + assert_set_gc_id(); + + send_reference_stats_event(REF_SOFT, rps.soft_count()); + send_reference_stats_event(REF_WEAK, rps.weak_count()); + send_reference_stats_event(REF_FINAL, rps.final_count()); + send_reference_stats_event(REF_PHANTOM, rps.phantom_count()); +} + +#if INCLUDE_SERVICES +class ObjectCountEventSenderClosure : public KlassInfoClosure { + const GCId _gc_id; + const double _size_threshold_percentage; + const size_t _total_size_in_words; + const jlong _timestamp; + + public: + ObjectCountEventSenderClosure(GCId gc_id, size_t total_size_in_words, jlong timestamp) : + _gc_id(gc_id), + _size_threshold_percentage(ObjectCountCutOffPercent / 100), + _total_size_in_words(total_size_in_words), + _timestamp(timestamp) + {} + + virtual void do_cinfo(KlassInfoEntry* entry) { + if (should_send_event(entry)) { + ObjectCountEventSender::send(entry, _gc_id, _timestamp); + } + } + + private: + bool should_send_event(const KlassInfoEntry* entry) const { + double percentage_of_heap = ((double) entry->words()) / _total_size_in_words; + return percentage_of_heap >= _size_threshold_percentage; + } +}; + +void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) { + assert_set_gc_id(); + assert(is_alive_cl != NULL, "Must supply function to check liveness"); + + if (ObjectCountEventSender::should_send_event()) { + ResourceMark rm; + + KlassInfoTable cit(false); + if (!cit.allocation_failed()) { + HeapInspection hi(false, false, false, 
NULL); + hi.populate_table(&cit, is_alive_cl); + + jlong timestamp = os::elapsed_counter(); + ObjectCountEventSenderClosure event_sender(_shared_gc_info.id(), cit.size_of_instances_in_words(), timestamp); + cit.iterate(&event_sender); + } + } +} +#endif // INCLUDE_SERVICES + +void GCTracer::report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary, const MetaspaceSummary& meta_space_summary) const { + assert_set_gc_id(); + + send_gc_heap_summary_event(when, heap_summary); + send_meta_space_summary_event(when, meta_space_summary); +} + +void YoungGCTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) { + assert_set_gc_id(); + assert(_tenuring_threshold != UNSET_TENURING_THRESHOLD, "Tenuring threshold has not been reported"); + + GCTracer::report_gc_end_impl(timestamp, time_partitions); + send_young_gc_event(); + + _tenuring_threshold = UNSET_TENURING_THRESHOLD; +} + +void YoungGCTracer::report_promotion_failed(const PromotionFailedInfo& pf_info) { + assert_set_gc_id(); + + send_promotion_failed_event(pf_info); +} + +void YoungGCTracer::report_tenuring_threshold(const uint tenuring_threshold) { + _tenuring_threshold = tenuring_threshold; +} + +void OldGCTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) { + assert_set_gc_id(); + + GCTracer::report_gc_end_impl(timestamp, time_partitions); + send_old_gc_event(); +} + +void ParallelOldTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) { + assert_set_gc_id(); + + OldGCTracer::report_gc_end_impl(timestamp, time_partitions); + send_parallel_old_event(); +} + +void ParallelOldTracer::report_dense_prefix(void* dense_prefix) { + assert_set_gc_id(); + + _parallel_old_gc_info.report_dense_prefix(dense_prefix); +} + +void OldGCTracer::report_concurrent_mode_failure() { + assert_set_gc_id(); + + send_concurrent_mode_failure_event(); +} + +#if INCLUDE_ALL_GCS +void G1NewTracer::report_yc_type(G1YCType type) { + assert_set_gc_id(); + + _g1_young_gc_info.set_type(type); +} + +void G1NewTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) { + assert_set_gc_id(); + + YoungGCTracer::report_gc_end_impl(timestamp, time_partitions); + send_g1_young_gc_event(); +} + +void G1NewTracer::report_evacuation_info(EvacuationInfo* info) { + assert_set_gc_id(); + + send_evacuation_info_event(info); +} + +void G1NewTracer::report_evacuation_failed(EvacuationFailedInfo& ef_info) { + assert_set_gc_id(); + + send_evacuation_failed_event(ef_info); + ef_info.reset(); +} +#endif diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/shared/gcTrace.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/shared/gcTrace.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACE_HPP +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACE_HPP + +#include "gc_interface/gcCause.hpp" +#include "gc_interface/gcName.hpp" +#include "gc_implementation/shared/gcWhen.hpp" +#include "gc_implementation/shared/copyFailedInfo.hpp" +#include "memory/allocation.hpp" +#include "memory/referenceType.hpp" +#if INCLUDE_ALL_GCS +#include "gc_implementation/g1/g1YCTypes.hpp" +#endif +#include "utilities/macros.hpp" + +typedef uint GCId; + +class EvacuationInfo; +class GCHeapSummary; +class MetaspaceSummary; +class PSHeapSummary; +class ReferenceProcessorStats; +class TimePartitions; +class BoolObjectClosure; + +class SharedGCInfo VALUE_OBJ_CLASS_SPEC { + static const jlong UNSET_TIMESTAMP = -1; + + public: + static const GCId UNSET_GCID = (GCId)-1; + + private: + GCId _id; + GCName _name; + GCCause::Cause _cause; + jlong _start_timestamp; + jlong _end_timestamp; + jlong _sum_of_pauses; + jlong _longest_pause; + + public: + SharedGCInfo(GCName name) : _id(UNSET_GCID), _name(name), _cause(GCCause::_last_gc_cause), + _start_timestamp(UNSET_TIMESTAMP), _end_timestamp(UNSET_TIMESTAMP), _sum_of_pauses(0), _longest_pause(0) {} + + void set_id(GCId id) { _id = id; } + GCId id() const { return _id; } + + void set_start_timestamp(jlong timestamp) { _start_timestamp = timestamp; } + jlong start_timestamp() const { return _start_timestamp; } + + void set_end_timestamp(jlong timestamp) { _end_timestamp = timestamp; } + jlong end_timestamp() const { return _end_timestamp; } + + void set_name(GCName name) { _name = name; } + GCName name() const { return _name; } + + void set_cause(GCCause::Cause cause) { _cause = cause; } + GCCause::Cause cause() const { return _cause; } + + void set_sum_of_pauses(jlong duration) { _sum_of_pauses = duration; } + jlong sum_of_pauses() const { return _sum_of_pauses; } + + void set_longest_pause(jlong duration) { _longest_pause = duration; } + jlong longest_pause() const { return _longest_pause; } +}; + +class ParallelOldGCInfo VALUE_OBJ_CLASS_SPEC { + void* _dense_prefix; + public: + ParallelOldGCInfo() : _dense_prefix(NULL) {} + void report_dense_prefix(void* addr) { + _dense_prefix = addr; + } + void* dense_prefix() const { return _dense_prefix; } +}; + +#if INCLUDE_ALL_GCS + +class G1YoungGCInfo VALUE_OBJ_CLASS_SPEC { + G1YCType _type; + public: + G1YoungGCInfo() : _type(G1YCTypeEndSentinel) {} + void set_type(G1YCType type) { + _type = type; + } + G1YCType type() const { return _type; } +}; + +#endif // INCLUDE_ALL_GCS + +class GCTracer : public ResourceObj { + protected: + SharedGCInfo _shared_gc_info; + + public: + void report_gc_start(GCCause::Cause cause, jlong timestamp); + void report_gc_end(jlong timestamp, TimePartitions* time_partitions); + void report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary, const MetaspaceSummary& meta_space_summary) const; + void report_gc_reference_stats(const ReferenceProcessorStats& rp) const; + void report_object_count_after_gc(BoolObjectClosure* object_filter) NOT_SERVICES_RETURN; + bool has_reported_gc_start() const; + + protected: + GCTracer(GCName name) : 
_shared_gc_info(name) {} + virtual void report_gc_start_impl(GCCause::Cause cause, jlong timestamp); + virtual void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions); + + private: + void send_garbage_collection_event() const; + void send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const; + void send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const; + void send_reference_stats_event(ReferenceType type, size_t count) const; + void send_phase_events(TimePartitions* time_partitions) const; +}; + +class YoungGCTracer : public GCTracer { + static const uint UNSET_TENURING_THRESHOLD = (uint) -1; + + uint _tenuring_threshold; + + protected: + YoungGCTracer(GCName name) : GCTracer(name), _tenuring_threshold(UNSET_TENURING_THRESHOLD) {} + virtual void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions); + + public: + void report_promotion_failed(const PromotionFailedInfo& pf_info); + void report_tenuring_threshold(const uint tenuring_threshold); + + private: + void send_young_gc_event() const; + void send_promotion_failed_event(const PromotionFailedInfo& pf_info) const; +}; + +class OldGCTracer : public GCTracer { + protected: + OldGCTracer(GCName name) : GCTracer(name) {} + virtual void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions); + + public: + void report_concurrent_mode_failure(); + + private: + void send_old_gc_event() const; + void send_concurrent_mode_failure_event(); +}; + +class ParallelOldTracer : public OldGCTracer { + ParallelOldGCInfo _parallel_old_gc_info; + + public: + ParallelOldTracer() : OldGCTracer(ParallelOld) {} + void report_dense_prefix(void* dense_prefix); + + protected: + void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions); + + private: + void send_parallel_old_event() const; +}; + +class SerialOldTracer : public OldGCTracer { + public: + SerialOldTracer() : OldGCTracer(SerialOld) {} +}; + +class ParallelScavengeTracer : public YoungGCTracer { + public: + ParallelScavengeTracer() : YoungGCTracer(ParallelScavenge) {} +}; + +class DefNewTracer : public YoungGCTracer { + public: + DefNewTracer() : YoungGCTracer(DefNew) {} +}; + +class ParNewTracer : public YoungGCTracer { + public: + ParNewTracer() : YoungGCTracer(ParNew) {} +}; + +#if INCLUDE_ALL_GCS +class G1NewTracer : public YoungGCTracer { + G1YoungGCInfo _g1_young_gc_info; + + public: + G1NewTracer() : YoungGCTracer(G1New) {} + + void report_yc_type(G1YCType type); + void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions); + void report_evacuation_info(EvacuationInfo* info); + void report_evacuation_failed(EvacuationFailedInfo& ef_info); + + private: + void send_g1_young_gc_event(); + void send_evacuation_info_event(EvacuationInfo* info); + void send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const; +}; +#endif + +class CMSTracer : public OldGCTracer { + public: + CMSTracer() : OldGCTracer(ConcurrentMarkSweep) {} +}; + +class G1OldTracer : public OldGCTracer { + public: + G1OldTracer() : OldGCTracer(G1Old) {} +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACE_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/shared/gcTraceSend.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,297 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/shared/gcHeapSummary.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/gcWhen.hpp" +#include "gc_implementation/shared/copyFailedInfo.hpp" +#include "trace/tracing.hpp" +#include "trace/traceBackend.hpp" +#if INCLUDE_ALL_GCS +#include "gc_implementation/g1/evacuationInfo.hpp" +#include "gc_implementation/g1/g1YCTypes.hpp" +#endif + +// All GC dependencies on the trace framework are contained within this file. + +typedef uintptr_t TraceAddress; + +void GCTracer::send_garbage_collection_event() const { + EventGCGarbageCollection event(UNTIMED); + if (event.should_commit()) { + event.set_gcId(_shared_gc_info.id()); + event.set_name(_shared_gc_info.name()); + event.set_cause((u2) _shared_gc_info.cause()); + event.set_sumOfPauses(_shared_gc_info.sum_of_pauses()); + event.set_longestPause(_shared_gc_info.longest_pause()); + event.set_starttime(_shared_gc_info.start_timestamp()); + event.set_endtime(_shared_gc_info.end_timestamp()); + event.commit(); + } +} + +void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const { + EventGCReferenceStatistics e; + if (e.should_commit()) { + e.set_gcId(_shared_gc_info.id()); + e.set_type((u1)type); + e.set_count(count); + e.commit(); + } +} + +void ParallelOldTracer::send_parallel_old_event() const { + EventGCParallelOld e(UNTIMED); + if (e.should_commit()) { + e.set_gcId(_shared_gc_info.id()); + e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix()); + e.set_starttime(_shared_gc_info.start_timestamp()); + e.set_endtime(_shared_gc_info.end_timestamp()); + e.commit(); + } +} + +void YoungGCTracer::send_young_gc_event() const { + EventGCYoungGarbageCollection e(UNTIMED); + if (e.should_commit()) { + e.set_gcId(_shared_gc_info.id()); + e.set_tenuringThreshold(_tenuring_threshold); + e.set_starttime(_shared_gc_info.start_timestamp()); + e.set_endtime(_shared_gc_info.end_timestamp()); + e.commit(); + } +} + +void OldGCTracer::send_old_gc_event() const { + EventGCOldGarbageCollection e(UNTIMED); + if (e.should_commit()) { + e.set_gcId(_shared_gc_info.id()); + e.set_starttime(_shared_gc_info.start_timestamp()); + e.set_endtime(_shared_gc_info.end_timestamp()); + e.commit(); + } +} + +static TraceStructCopyFailed to_trace_struct(const CopyFailedInfo& cf_info) { + TraceStructCopyFailed failed_info; + failed_info.set_objectCount(cf_info.failed_count()); + failed_info.set_firstSize(cf_info.first_size());
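Every sender in this file follows the same guard-then-commit idiom: construct the event UNTIMED (because the start and end times are filled in from SharedGCInfo rather than sampled at construction), test should_commit() so all field population is skipped when the event is disabled, then commit(). A minimal sketch of the idiom with a hypothetical event and tracer (EventGCFoo, FooTracer and send_foo_event are illustrative, not part of this patch):

  void FooTracer::send_foo_event() const {
    EventGCFoo e(UNTIMED);              // timestamps are supplied manually below
    if (e.should_commit()) {            // event disabled? skip all field setup
      e.set_gcId(_shared_gc_info.id()); // correlate with the enclosing GC
      e.set_starttime(_shared_gc_info.start_timestamp());
      e.set_endtime(_shared_gc_info.end_timestamp());
      e.commit();                       // hand the event to the trace backend
    }
  }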
+ failed_info.set_smallestSize(cf_info.smallest_size()); + failed_info.set_totalSize(cf_info.total_size()); + return failed_info; +} + +void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const { + EventPromotionFailed e; + if (e.should_commit()) { + e.set_gcId(_shared_gc_info.id()); + e.set_data(to_trace_struct(pf_info)); + e.set_thread(pf_info.thread()->thread_id()); + e.commit(); + } +} + +// Common to CMS and G1 +void OldGCTracer::send_concurrent_mode_failure_event() { + EventConcurrentModeFailure e; + if (e.should_commit()) { + e.set_gcId(_shared_gc_info.id()); + e.commit(); + } +} + +#if INCLUDE_ALL_GCS +void G1NewTracer::send_g1_young_gc_event() { + EventGCG1GarbageCollection e(UNTIMED); + if (e.should_commit()) { + e.set_gcId(_shared_gc_info.id()); + e.set_type(_g1_young_gc_info.type()); + e.set_starttime(_shared_gc_info.start_timestamp()); + e.set_endtime(_shared_gc_info.end_timestamp()); + e.commit(); + } +} + +void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) { + EventEvacuationInfo e; + if (e.should_commit()) { + e.set_gcId(_shared_gc_info.id()); + e.set_cSetRegions(info->collectionset_regions()); + e.set_cSetUsedBefore(info->collectionset_used_before()); + e.set_cSetUsedAfter(info->collectionset_used_after()); + e.set_allocationRegions(info->allocation_regions()); + e.set_allocRegionsUsedBefore(info->alloc_regions_used_before()); + e.set_allocRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied()); + e.set_bytesCopied(info->bytes_copied()); + e.set_regionsFreed(info->regions_freed()); + e.commit(); + } +} + +void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const { + EventEvacuationFailed e; + if (e.should_commit()) { + e.set_gcId(_shared_gc_info.id()); + e.set_data(to_trace_struct(ef_info)); + e.commit(); + } +} +#endif + +static TraceStructVirtualSpace to_trace_struct(const VirtualSpaceSummary& summary) { + TraceStructVirtualSpace space; + space.set_start((TraceAddress)summary.start()); + space.set_committedEnd((TraceAddress)summary.committed_end()); + space.set_committedSize(summary.committed_size()); + space.set_reservedEnd((TraceAddress)summary.reserved_end()); + space.set_reservedSize(summary.reserved_size()); + return space; +} + +static TraceStructObjectSpace to_trace_struct(const SpaceSummary& summary) { + TraceStructObjectSpace space; + space.set_start((TraceAddress)summary.start()); + space.set_end((TraceAddress)summary.end()); + space.set_used(summary.used()); + space.set_size(summary.size()); + return space; +} + +class GCHeapSummaryEventSender : public GCHeapSummaryVisitor { + GCId _id; + GCWhen::Type _when; + public: + GCHeapSummaryEventSender(GCId id, GCWhen::Type when) : _id(id), _when(when) {} + + void visit(const GCHeapSummary* heap_summary) const { + const VirtualSpaceSummary& heap_space = heap_summary->heap(); + + EventGCHeapSummary e; + if (e.should_commit()) { + e.set_gcId(_id); + e.set_when((u1)_when); + e.set_heapSpace(to_trace_struct(heap_space)); + e.set_heapUsed(heap_summary->used()); + e.commit(); + } + } + + void visit(const PSHeapSummary* ps_heap_summary) const { + visit((GCHeapSummary*)ps_heap_summary); + + const VirtualSpaceSummary& old_summary = ps_heap_summary->old(); + const SpaceSummary& old_space = ps_heap_summary->old_space(); + const VirtualSpaceSummary& young_summary = ps_heap_summary->young(); + const SpaceSummary& eden_space = ps_heap_summary->eden(); + const SpaceSummary& from_space = ps_heap_summary->from(); + const SpaceSummary& 
to_space = ps_heap_summary->to(); + + EventPSHeapSummary e; + if (e.should_commit()) { + e.set_gcId(_id); + e.set_when((u1)_when); + + e.set_oldSpace(to_trace_struct(ps_heap_summary->old())); + e.set_oldObjectSpace(to_trace_struct(ps_heap_summary->old_space())); + e.set_youngSpace(to_trace_struct(ps_heap_summary->young())); + e.set_edenSpace(to_trace_struct(ps_heap_summary->eden())); + e.set_fromSpace(to_trace_struct(ps_heap_summary->from())); + e.set_toSpace(to_trace_struct(ps_heap_summary->to())); + e.commit(); + } + } +}; + +void GCTracer::send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const { + GCHeapSummaryEventSender visitor(_shared_gc_info.id(), when); + heap_summary.accept(&visitor); +} + +static TraceStructMetaspaceSizes to_trace_struct(const MetaspaceSizes& sizes) { + TraceStructMetaspaceSizes meta_sizes; + + meta_sizes.set_capacity(sizes.capacity()); + meta_sizes.set_used(sizes.used()); + meta_sizes.set_reserved(sizes.reserved()); + + return meta_sizes; +} + +void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const { + EventMetaspaceSummary e; + if (e.should_commit()) { + e.set_gcId(_shared_gc_info.id()); + e.set_when((u1) when); + e.set_metaspace(to_trace_struct(meta_space_summary.meta_space())); + e.set_dataSpace(to_trace_struct(meta_space_summary.data_space())); + e.set_classSpace(to_trace_struct(meta_space_summary.class_space())); + e.commit(); + } +} + +class PhaseSender : public PhaseVisitor { + GCId _gc_id; + public: + PhaseSender(GCId gc_id) : _gc_id(gc_id) {} + + template <typename T> + void send_phase(PausePhase* pause) { + T event(UNTIMED); + if (event.should_commit()) { + event.set_gcId(_gc_id); + event.set_name(pause->name()); + event.set_starttime(pause->start()); + event.set_endtime(pause->end()); + event.commit(); + } + } + + void visit(GCPhase* pause) { ShouldNotReachHere(); } + void visit(ConcurrentPhase* pause) { Unimplemented(); } + void visit(PausePhase* pause) { + assert(PhasesStack::PHASE_LEVELS == 5, "Need more event types"); + + switch (pause->level()) { + case 0: send_phase<EventGCPhasePause>(pause); break; + case 1: send_phase<EventGCPhasePauseLevel1>(pause); break; + case 2: send_phase<EventGCPhasePauseLevel2>(pause); break; + case 3: send_phase<EventGCPhasePauseLevel3>(pause); break; + default: /* Ignore sending this phase */ break; + } + } + +#undef send_phase +}; + +void GCTracer::send_phase_events(TimePartitions* time_partitions) const { + PhaseSender phase_reporter(_shared_gc_info.id()); + + TimePartitionPhasesIterator iter(time_partitions); + while (iter.has_next()) { + GCPhase* phase = iter.next(); + phase->accept(&phase_reporter); + } +} diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/shared/gcTraceTime.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/shared/gcTraceTime.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code).
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" +#include "runtime/globals.hpp" +#include "runtime/os.hpp" +#include "runtime/safepoint.hpp" +#include "runtime/thread.inline.hpp" +#include "runtime/timer.hpp" +#include "utilities/ostream.hpp" + + +GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer) : + _title(title), _doit(doit), _print_cr(print_cr), _timer(timer) { + if (_doit || _timer != NULL) { + _start_counter = os::elapsed_counter(); + } + + if (_timer != NULL) { + assert(SafepointSynchronize::is_at_safepoint(), "Tracing currently only supported at safepoints"); + assert(Thread::current()->is_VM_thread(), "Tracing currently only supported from the VM thread"); + + _timer->register_gc_phase_start(title, _start_counter); + } + + if (_doit) { + if (PrintGCTimeStamps) { + gclog_or_tty->stamp(); + gclog_or_tty->print(": "); + } + gclog_or_tty->print("[%s", title); + gclog_or_tty->flush(); + } +} + +GCTraceTime::~GCTraceTime() { + jlong stop_counter = 0; + + if (_doit || _timer != NULL) { + stop_counter = os::elapsed_counter(); + } + + if (_timer != NULL) { + _timer->register_gc_phase_end(stop_counter); + } + + if (_doit) { + double seconds = TimeHelper::counter_to_seconds(stop_counter - _start_counter); + if (_print_cr) { + gclog_or_tty->print_cr(", %3.7f secs]", seconds); + } else { + gclog_or_tty->print(", %3.7f secs]", seconds); + } + gclog_or_tty->flush(); + } +} diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/shared/gcTraceTime.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/shared/gcTraceTime.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
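GCTraceTime, defined above, is an RAII helper: the constructor prints the opening "[title" (when doit is set) and registers a phase start with the GCTimer, and the destructor prints the closing ", %3.7f secs]" and registers the phase end. Note the asserts: when a timer is passed, this must run at a safepoint on the VM thread. A sketch of the intended use around a collector phase (the phase body and the gc_timer() receiver are assumed, not part of this hunk):

  {
    GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true /* print_cr */, gc_timer());
    mark_live_objects();   // hypothetical phase body
  }   // destructor prints the elapsed time and closes the timer phase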
+ * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP + +#include "prims/jni_md.h" + +class GCTimer; + +class GCTraceTime { + const char* _title; + bool _doit; + bool _print_cr; + GCTimer* _timer; + jlong _start_counter; + + public: + GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer); + ~GCTraceTime(); +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/shared/gcWhen.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/shared/gcWhen.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCWHEN_HPP +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCWHEN_HPP + +#include "memory/allocation.hpp" +#include "utilities/debug.hpp" + +class GCWhen : AllStatic { + public: + enum Type { + BeforeGC, + AfterGC, + GCWhenEndSentinel + }; + + static const char* to_string(GCWhen::Type when) { + switch (when) { + case BeforeGC: return "Before GC"; + case AfterGC: return "After GC"; + default: ShouldNotReachHere(); return NULL; + } + } +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCWHEN_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/shared/markSweep.cpp --- a/src/share/vm/gc_implementation/shared/markSweep.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/shared/markSweep.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -24,6 +24,8 @@ #include "precompiled.hpp" #include "compiler/compileBroker.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/markSweep.inline.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "oops/methodData.hpp" @@ -41,6 +43,8 @@ size_t MarkSweep::_preserved_count_max = 0; PreservedMark* MarkSweep::_preserved_marks = NULL; ReferenceProcessor* MarkSweep::_ref_processor = NULL; +STWGCTimer* MarkSweep::_gc_timer = NULL; +SerialOldTracer* MarkSweep::_gc_tracer = NULL; MarkSweep::FollowRootClosure MarkSweep::follow_root_closure; CodeBlobToOopClosure MarkSweep::follow_code_root_closure(&MarkSweep::follow_root_closure, /*do_marking=*/ true); @@ -173,7 +177,10 @@ void MarkSweep::KeepAliveClosure::do_oop(oop* p) { MarkSweep::KeepAliveClosure::do_oop_work(p); } void MarkSweep::KeepAliveClosure::do_oop(narrowOop* p) { 
MarkSweep::KeepAliveClosure::do_oop_work(p); } -void marksweep_init() { /* empty */ } +void marksweep_init() { + MarkSweep::_gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer(); + MarkSweep::_gc_tracer = new (ResourceObj::C_HEAP, mtGC) SerialOldTracer(); +} #ifndef PRODUCT diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/shared/markSweep.hpp --- a/src/share/vm/gc_implementation/shared/markSweep.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/shared/markSweep.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,8 @@ class ReferenceProcessor; class DataLayout; +class SerialOldTracer; +class STWGCTimer; // MarkSweep takes care of global mark-compact garbage collection for a // GenCollectedHeap using a four-phase pointer forwarding algorithm. All @@ -128,6 +130,9 @@ // Reference processing (used in ...follow_contents) static ReferenceProcessor* _ref_processor; + static STWGCTimer* _gc_timer; + static SerialOldTracer* _gc_tracer; + // Non public closures static KeepAliveClosure keep_alive; @@ -151,6 +156,9 @@ // Reference Processing static ReferenceProcessor* const ref_processor() { return _ref_processor; } + static STWGCTimer* gc_timer() { return _gc_timer; } + static SerialOldTracer* gc_tracer() { return _gc_tracer; } + // Call backs for marking static void mark_object(oop obj); // Mark pointer and follow contents. Empty marking stack afterwards. diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/shared/objectCountEventSender.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + + +#include "precompiled.hpp" +#include "gc_implementation/shared/objectCountEventSender.hpp" +#include "memory/heapInspection.hpp" +#include "trace/tracing.hpp" +#include "utilities/globalDefinitions.hpp" + +#if INCLUDE_SERVICES + +void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, jlong timestamp) { + assert(Tracing::is_event_enabled(EventObjectCountAfterGC::eventId), + "Only call this method if the event is enabled"); + + EventObjectCountAfterGC event(UNTIMED); + event.set_gcId(gc_id); + event.set_class(entry->klass()); + event.set_count(entry->count()); + event.set_totalSize(entry->words() * BytesPerWord); + event.set_endtime(timestamp); + event.commit(); +} + +bool ObjectCountEventSender::should_send_event() { +#if INCLUDE_TRACE + return Tracing::is_event_enabled(EventObjectCountAfterGC::eventId); +#else + return false; +#endif // INCLUDE_TRACE +} + +#endif // INCLUDE_SERVICES diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/shared/objectCountEventSender.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/shared/objectCountEventSender.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_OBJECT_COUNT_EVENT_SENDER_HPP +#define SHARE_VM_OBJECT_COUNT_EVENT_SENDER_HPP + +#include "gc_implementation/shared/gcTrace.hpp" +#include "memory/allocation.hpp" +#include "utilities/macros.hpp" + +#if INCLUDE_SERVICES + +class KlassInfoEntry; + +class ObjectCountEventSender : public AllStatic { + public: + static void send(const KlassInfoEntry* entry, GCId gc_id, jlong timestamp); + static bool should_send_event(); +}; + +#endif // INCLUDE_SERVICES + +#endif // SHARE_VM_OBJECT_COUNT_EVENT_SENDER diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/shared/vmGCOperations.cpp --- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -145,32 +145,37 @@ return false; } +bool VM_GC_HeapInspection::collect() { + if (GC_locker::is_active()) { + return false; + } + Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection); + return true; +} + void VM_GC_HeapInspection::doit() { HandleMark hm; - CollectedHeap* ch = Universe::heap(); - ch->ensure_parsability(false); // must happen, even if collection does - // not happen (e.g. 
due to GC_locker) + Universe::heap()->ensure_parsability(false); // must happen, even if collection does + // not happen (e.g. due to GC_locker) + // or _full_gc being false if (_full_gc) { - // The collection attempt below would be skipped anyway if - // the gc locker is held. The following dump may then be a tad - // misleading to someone expecting only live objects to show - // up in the dump (see CR 6944195). Just issue a suitable warning - // in that case and do not attempt to do a collection. - // The latter is a subtle point, because even a failed attempt - // to GC will, in fact, induce one in the future, which we - // probably want to avoid in this case because the GC that we may - // be about to attempt holds value for us only - // if it happens now and not if it happens in the eventual - // future. - if (GC_locker::is_active()) { + if (!collect()) { + // The collection attempt was skipped because the gc locker is held. + // The following dump may then be a tad misleading to someone expecting + // only live objects to show up in the dump (see CR 6944195). Just issue + // a suitable warning in that case and do not attempt to do a collection. + // The latter is a subtle point, because even a failed attempt + // to GC will, in fact, induce one in the future, which we + // probably want to avoid in this case because the GC that we may + // be about to attempt holds value for us only + // if it happens now and not if it happens in the eventual + // future. warning("GC locker is held; pre-dump GC was skipped"); - } else { - ch->collect_as_vm_thread(GCCause::_heap_inspection); } } HeapInspection inspect(_csv_format, _print_help, _print_class_stats, _columns); - inspect.heap_inspection(_out, _need_prologue /* need_prologue */); + inspect.heap_inspection(_out); } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_implementation/shared/vmGCOperations.hpp --- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -129,21 +129,18 @@ private: outputStream* _out; bool _full_gc; - bool _need_prologue; bool _csv_format; // "comma separated values" format for spreadsheet. bool _print_help; bool _print_class_stats; const char* _columns; public: - VM_GC_HeapInspection(outputStream* out, bool request_full_gc, - bool need_prologue) : + VM_GC_HeapInspection(outputStream* out, bool request_full_gc) : VM_GC_Operation(0 /* total collections, dummy, ignored */, GCCause::_heap_inspection /* GC Cause */, 0 /* total full collections, dummy, ignored */, request_full_gc) { _out = out; _full_gc = request_full_gc; - _need_prologue = need_prologue; _csv_format = false; _print_help = false; _print_class_stats = false; @@ -159,6 +156,8 @@ void set_print_help(bool value) {_print_help = value;} void set_print_class_stats(bool value) {_print_class_stats = value;} void set_columns(const char* value) {_columns = value;} + protected: + bool collect(); }; diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_interface/allocTracer.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_interface/allocTracer.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
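In the vmGCOperations changes above, the GC-locker test moves into the new collect() helper, so doit() now reads as: try to collect, warn if the pre-dump collection had to be skipped, then run the inspection. The operation is still queued the usual way; a minimal sketch of a caller (illustrative, the caller is not part of this patch):

  // Request a class histogram with a preceding full GC.
  VM_GC_HeapInspection op(tty, true /* request_full_gc */);
  VMThread::execute(&op);   // doit() then runs at a safepoint in the VM thread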
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_interface/allocTracer.hpp" +#include "trace/tracing.hpp" +#include "runtime/handles.hpp" +#include "utilities/globalDefinitions.hpp" + +void AllocTracer::send_allocation_outside_tlab_event(KlassHandle klass, size_t alloc_size) { + EventAllocObjectOutsideTLAB event; + if (event.should_commit()) { + event.set_class(klass()); + event.set_allocationSize(alloc_size); + event.commit(); + } +} + +void AllocTracer::send_allocation_in_new_tlab_event(KlassHandle klass, size_t tlab_size, size_t alloc_size) { + EventAllocObjectInNewTLAB event; + if (event.should_commit()) { + event.set_class(klass()); + event.set_allocationSize(alloc_size); + event.set_tlabSize(tlab_size); + event.commit(); + } +} diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_interface/allocTracer.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_interface/allocTracer.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
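The two AllocTracer hooks above take their sizes in bytes, while HotSpot's allocation paths count in HeapWords; as the collectedHeap.inline.hpp hunks below show, callers convert at the call site. The call shapes, lifted from those hunks (klass, size and new_tlab_size come from the surrounding allocation code):

  // size and new_tlab_size are in HeapWords; the events expect bytes.
  AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);
  AllocTracer::send_allocation_outside_tlab_event(klass, size * HeapWordSize);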
+ * + */ + +#ifndef SHARE_VM_GC_INTERFACE_ALLOCTRACER_HPP +#define SHARE_VM_GC_INTERFACE_ALLOCTRACER_HPP + +#include "memory/allocation.hpp" +#include "runtime/handles.hpp" + +class AllocTracer : AllStatic { + public: + static void send_allocation_outside_tlab_event(KlassHandle klass, size_t alloc_size); + static void send_allocation_in_new_tlab_event(KlassHandle klass, size_t tlab_size, size_t alloc_size); +}; + +#endif /* SHARE_VM_GC_INTERFACE_ALLOCTRACER_HPP */ diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_interface/collectedHeap.cpp --- a/src/share/vm/gc_interface/collectedHeap.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_interface/collectedHeap.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,9 +24,15 @@ #include "precompiled.hpp" #include "classfile/systemDictionary.hpp" +#include "gc_implementation/shared/gcHeapSummary.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" +#include "gc_implementation/shared/gcWhen.hpp" #include "gc_implementation/shared/vmGCOperations.hpp" +#include "gc_interface/allocTracer.hpp" #include "gc_interface/collectedHeap.hpp" #include "gc_interface/collectedHeap.inline.hpp" +#include "memory/metaspace.hpp" #include "oops/oop.inline.hpp" #include "oops/instanceMirrorKlass.hpp" #include "runtime/init.hpp" @@ -65,11 +71,71 @@ } } +VirtualSpaceSummary CollectedHeap::create_heap_space_summary() { + size_t capacity_in_words = capacity() / HeapWordSize; + + return VirtualSpaceSummary( + reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end()); +} + +GCHeapSummary CollectedHeap::create_heap_summary() { + VirtualSpaceSummary heap_space = create_heap_space_summary(); + return GCHeapSummary(heap_space, used()); +} + +MetaspaceSummary CollectedHeap::create_metaspace_summary() { + const MetaspaceSizes meta_space( + MetaspaceAux::allocated_capacity_bytes(), + MetaspaceAux::allocated_used_bytes(), + MetaspaceAux::reserved_in_bytes()); + const MetaspaceSizes data_space( + MetaspaceAux::allocated_capacity_bytes(Metaspace::NonClassType), + MetaspaceAux::allocated_used_bytes(Metaspace::NonClassType), + MetaspaceAux::reserved_in_bytes(Metaspace::NonClassType)); + const MetaspaceSizes class_space( + MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType), + MetaspaceAux::allocated_used_bytes(Metaspace::ClassType), + MetaspaceAux::reserved_in_bytes(Metaspace::ClassType)); + + return MetaspaceSummary(meta_space, data_space, class_space); +} + +void CollectedHeap::print_heap_before_gc() { + if (PrintHeapAtGC) { + Universe::print_heap_before_gc(); + } + if (_gc_heap_log != NULL) { + _gc_heap_log->log_heap_before(); + } +} + +void CollectedHeap::print_heap_after_gc() { + if (PrintHeapAtGC) { + Universe::print_heap_after_gc(); + } + if (_gc_heap_log != NULL) { + _gc_heap_log->log_heap_after(); + } +} + +void CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) { + const GCHeapSummary& heap_summary = create_heap_summary(); + const MetaspaceSummary& metaspace_summary = create_metaspace_summary(); + gc_tracer->report_gc_heap_summary(when, heap_summary, metaspace_summary); +} + +void CollectedHeap::trace_heap_before_gc(GCTracer* gc_tracer) { + 
trace_heap(GCWhen::BeforeGC, gc_tracer); +} + +void CollectedHeap::trace_heap_after_gc(GCTracer* gc_tracer) { + trace_heap(GCWhen::AfterGC, gc_tracer); +} + // Memory state functions. CollectedHeap::CollectedHeap() : _n_par_threads(0) - { const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT)); const size_t elements_per_word = HeapWordSize / sizeof(jint); @@ -185,7 +251,7 @@ } #endif -HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) { +HeapWord* CollectedHeap::allocate_from_tlab_slow(KlassHandle klass, Thread* thread, size_t size) { // Retain tlab and allocate object in shared space if // the amount free in the tlab is too large to discard. @@ -209,6 +275,9 @@ if (obj == NULL) { return NULL; } + + AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize); + if (ZeroTLAB) { // ..and clear it. Copy::zero_to_words(obj, new_tlab_size); @@ -458,28 +527,28 @@ } } -void CollectedHeap::pre_full_gc_dump() { +void CollectedHeap::pre_full_gc_dump(GCTimer* timer) { if (HeapDumpBeforeFullGC) { - TraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, gclog_or_tty); + GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer); // We are doing a "major" collection and a heap dump before // major collection has been requested. HeapDumper::dump_heap(); } if (PrintClassHistogramBeforeFullGC) { - TraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, gclog_or_tty); - VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */); + GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer); + VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */); inspector.doit(); } } -void CollectedHeap::post_full_gc_dump() { +void CollectedHeap::post_full_gc_dump(GCTimer* timer) { if (HeapDumpAfterFullGC) { - TraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, gclog_or_tty); + GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer); HeapDumper::dump_heap(); } if (PrintClassHistogramAfterFullGC) { - TraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, gclog_or_tty); - VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */); + GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer); + VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */); inspector.doit(); } } @@ -490,7 +559,7 @@ assert(size >= 0, "int won't convert to size_t"); HeapWord* obj; assert(ScavengeRootsInCode > 0, "must be"); - obj = common_mem_allocate_init(size, CHECK_NULL); + obj = common_mem_allocate_init(real_klass, size, CHECK_NULL); post_allocation_setup_common(klass, obj); assert(Universe::is_bootstrapping() || !((oop)obj)->is_array(), "must not be an array"); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_interface/collectedHeap.hpp --- a/src/share/vm/gc_interface/collectedHeap.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_interface/collectedHeap.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP #include "gc_interface/gcCause.hpp" +#include "gc_implementation/shared/gcWhen.hpp" #include "memory/allocation.hpp" #include "memory/barrierSet.hpp" #include "runtime/handles.hpp" @@ -38,11 +39,16 @@ // class defines the functions that a heap must implement, and contains // infrastructure common to all heaps. +class AdaptiveSizePolicy; class BarrierSet; +class CollectorPolicy; +class GCHeapSummary; +class GCTimer; +class GCTracer; +class MetaspaceSummary; +class Thread; class ThreadClosure; -class AdaptiveSizePolicy; -class Thread; -class CollectorPolicy; +class VirtualSpaceSummary; class GCMessage : public FormatBuffer<1024> { public: @@ -128,16 +134,16 @@ virtual void resize_all_tlabs(); // Allocate from the current thread's TLAB, with broken-out slow path. - inline static HeapWord* allocate_from_tlab(Thread* thread, size_t size); - static HeapWord* allocate_from_tlab_slow(Thread* thread, size_t size); + inline static HeapWord* allocate_from_tlab(KlassHandle klass, Thread* thread, size_t size); + static HeapWord* allocate_from_tlab_slow(KlassHandle klass, Thread* thread, size_t size); // Allocate an uninitialized block of the given size, or returns NULL if // this is impossible. - inline static HeapWord* common_mem_allocate_noinit(size_t size, TRAPS); + inline static HeapWord* common_mem_allocate_noinit(KlassHandle klass, size_t size, TRAPS); // Like allocate_init, but the block returned by a successful allocation // is guaranteed initialized to zeros. - inline static HeapWord* common_mem_allocate_init(size_t size, TRAPS); + inline static HeapWord* common_mem_allocate_init(KlassHandle klass, size_t size, TRAPS); // Helper functions for (VM) allocation. inline static void post_allocation_setup_common(KlassHandle klass, HeapWord* obj); @@ -166,6 +172,8 @@ // Fill with a single object (either an int array or a java.lang.Object). static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true); + virtual void trace_heap(GCWhen::Type when, GCTracer* tracer); + // Verification functions virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size) PRODUCT_RETURN; @@ -202,8 +210,6 @@ MemRegion reserved_region() const { return _reserved; } address base() const { return (address)reserved_region().start(); } - // Future cleanup here. The following functions should specify bytes or - // heapwords as part of their signature. virtual size_t capacity() const = 0; virtual size_t used() const = 0; @@ -550,8 +556,13 @@ virtual void prepare_for_verify() = 0; // Generate any dumps preceding or following a full gc - void pre_full_gc_dump(); - void post_full_gc_dump(); + void pre_full_gc_dump(GCTimer* timer); + void post_full_gc_dump(GCTimer* timer); + + VirtualSpaceSummary create_heap_space_summary(); + GCHeapSummary create_heap_summary(); + + MetaspaceSummary create_metaspace_summary(); // Print heap information on the given outputStream. virtual void print_on(outputStream* st) const = 0; @@ -560,7 +571,7 @@ print_on(tty); } // Print more detailed heap information on the given - // outputStream. The default behaviour is to call print_on(). It is + // outputStream. The default behavior is to call print_on(). It is // up to each subclass to override it and add any additional output // it needs. virtual void print_extended_on(outputStream* st) const { @@ -589,23 +600,11 @@ // Default implementation does nothing. 
virtual void print_tracing_info() const = 0; - // If PrintHeapAtGC is set call the appropriate routi - void print_heap_before_gc() { - if (PrintHeapAtGC) { - Universe::print_heap_before_gc(); - } - if (_gc_heap_log != NULL) { - _gc_heap_log->log_heap_before(); - } - } - void print_heap_after_gc() { - if (PrintHeapAtGC) { - Universe::print_heap_after_gc(); - } - if (_gc_heap_log != NULL) { - _gc_heap_log->log_heap_after(); - } - } + void print_heap_before_gc(); + void print_heap_after_gc(); + + void trace_heap_before_gc(GCTracer* gc_tracer); + void trace_heap_after_gc(GCTracer* gc_tracer); // Heap verification virtual void verify(bool silent, VerifyOption option) = 0; @@ -619,7 +618,7 @@ inline bool promotion_should_fail(); // Reset the PromotionFailureALot counters. Should be called at the end of a - // GC in which promotion failure ocurred. + // GC in which promotion failure occurred. inline void reset_promotion_should_fail(volatile size_t* count); inline void reset_promotion_should_fail(); #endif // #ifndef PRODUCT diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_interface/collectedHeap.inline.hpp --- a/src/share/vm/gc_interface/collectedHeap.inline.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_interface/collectedHeap.inline.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -25,6 +25,7 @@ #ifndef SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP #define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP +#include "gc_interface/allocTracer.hpp" #include "gc_interface/collectedHeap.hpp" #include "memory/threadLocalAllocBuffer.inline.hpp" #include "memory/universe.hpp" @@ -107,7 +108,7 @@ post_allocation_notify(klass, (oop)obj); } -HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, TRAPS) { +HeapWord* CollectedHeap::common_mem_allocate_noinit(KlassHandle klass, size_t size, TRAPS) { // Clear unhandled oops for memory allocation. Memory allocation might // not take out a lock if from tlab, so clear here. @@ -120,7 +121,7 @@ HeapWord* result = NULL; if (UseTLAB) { - result = CollectedHeap::allocate_from_tlab(THREAD, size); + result = allocate_from_tlab(klass, THREAD, size); if (result != NULL) { assert(!HAS_PENDING_EXCEPTION, "Unexpected exception, will result in uninitialized storage"); @@ -136,6 +137,9 @@ assert(!HAS_PENDING_EXCEPTION, "Unexpected exception, will result in uninitialized storage"); THREAD->incr_allocated_bytes(size * HeapWordSize); + + AllocTracer::send_allocation_outside_tlab_event(klass, size * HeapWordSize); + return result; } @@ -165,13 +169,13 @@ } } -HeapWord* CollectedHeap::common_mem_allocate_init(size_t size, TRAPS) { - HeapWord* obj = common_mem_allocate_noinit(size, CHECK_NULL); +HeapWord* CollectedHeap::common_mem_allocate_init(KlassHandle klass, size_t size, TRAPS) { + HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL); init_obj(obj, size); return obj; } -HeapWord* CollectedHeap::allocate_from_tlab(Thread* thread, size_t size) { +HeapWord* CollectedHeap::allocate_from_tlab(KlassHandle klass, Thread* thread, size_t size) { assert(UseTLAB, "should use UseTLAB"); HeapWord* obj = thread->tlab().allocate(size); @@ -179,7 +183,7 @@ return obj; } // Otherwise... 
- return allocate_from_tlab_slow(thread, size); + return allocate_from_tlab_slow(klass, thread, size); } void CollectedHeap::init_obj(HeapWord* obj, size_t size) { @@ -194,7 +198,7 @@ debug_only(check_for_valid_allocation_state()); assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed"); assert(size >= 0, "int won't convert to size_t"); - HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL); + HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL); post_allocation_setup_obj(klass, obj); NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size)); return (oop)obj; @@ -207,7 +211,7 @@ debug_only(check_for_valid_allocation_state()); assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed"); assert(size >= 0, "int won't convert to size_t"); - HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL); + HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL); post_allocation_setup_array(klass, obj, length); NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size)); return (oop)obj; @@ -220,7 +224,7 @@ debug_only(check_for_valid_allocation_state()); assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed"); assert(size >= 0, "int won't convert to size_t"); - HeapWord* obj = common_mem_allocate_noinit(size, CHECK_NULL); + HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL); ((oop)obj)->set_klass_gap(0); post_allocation_setup_array(klass, obj, length); #ifndef PRODUCT diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_interface/gcCause.cpp --- a/src/share/vm/gc_interface/gcCause.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_interface/gcCause.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -72,6 +72,9 @@ case _cms_final_remark: return "CMS Final Remark"; + case _cms_concurrent_mark: + return "CMS Concurrent Mark"; + case _old_generation_expanded_on_last_scavenge: return "Old Generation Expanded On Last Scavenge"; diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_interface/gcCause.hpp --- a/src/share/vm/gc_interface/gcCause.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/gc_interface/gcCause.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -60,6 +60,7 @@ _cms_generation_full, _cms_initial_mark, _cms_final_remark, + _cms_concurrent_mark, _old_generation_expanded_on_last_scavenge, _old_generation_too_full_to_scavenge, diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/gc_interface/gcName.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_interface/gcName.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_INTERFACE_GCNAME_HPP +#define SHARE_VM_GC_INTERFACE_GCNAME_HPP + +#include "utilities/debug.hpp" + +enum GCName { + ParallelOld, + SerialOld, + PSMarkSweep, + ParallelScavenge, + DefNew, + ParNew, + G1New, + ConcurrentMarkSweep, + G1Old, + GCNameEndSentinel +}; + +class GCNameHelper { + public: + static const char* to_string(GCName name) { + switch(name) { + case ParallelOld: return "ParallelOld"; + case SerialOld: return "SerialOld"; + case PSMarkSweep: return "PSMarkSweep"; + case ParallelScavenge: return "ParallelScavenge"; + case DefNew: return "DefNew"; + case ParNew: return "ParNew"; + case G1New: return "G1New"; + case ConcurrentMarkSweep: return "ConcurrentMarkSweep"; + case G1Old: return "G1Old"; + default: ShouldNotReachHere(); return NULL; + } + } +}; + +#endif // SHARE_VM_GC_INTERFACE_GCNAME_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/interpreter/abstractInterpreter.hpp --- a/src/share/vm/interpreter/abstractInterpreter.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/interpreter/abstractInterpreter.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -102,6 +102,9 @@ java_lang_math_pow, // implementation of java.lang.Math.pow (x,y) java_lang_math_exp, // implementation of java.lang.Math.exp (x) java_lang_ref_reference_get, // implementation of java.lang.ref.Reference.get() + java_util_zip_CRC32_update, // implementation of java.util.zip.CRC32.update() + java_util_zip_CRC32_updateBytes, // implementation of java.util.zip.CRC32.updateBytes() + java_util_zip_CRC32_updateByteBuffer, // implementation of java.util.zip.CRC32.updateByteBuffer() number_of_method_entries, invalid = -1 }; diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/interpreter/bytecodeInterpreter.cpp --- a/src/share/vm/interpreter/bytecodeInterpreter.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -481,9 +481,9 @@ // So we have a second version of the assertion which handles the case where EnableInvokeDynamic was // switched off because of the wrong classes. 
if (EnableInvokeDynamic || FLAG_IS_CMDLINE(EnableInvokeDynamic)) { - assert(abs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit"); + assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit"); } else { - const int extra_stack_entries = Method::extra_stack_entries_for_indy; + const int extra_stack_entries = Method::extra_stack_entries_for_jsr292; assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + extra_stack_entries + 1), "bad stack limit"); } @@ -1581,7 +1581,7 @@ #define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \ { \ ARRAY_INTRO(-2); \ - extra; \ + (void)extra; \ SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \ -2); \ UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ @@ -1592,8 +1592,8 @@ { \ ARRAY_INTRO(-2); \ SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \ - extra; \ - UPDATE_PC_AND_CONTINUE(1); \ + (void)extra; \ + UPDATE_PC_AND_CONTINUE(1); \ } CASE(_iaload): @@ -1617,7 +1617,7 @@ #define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra) \ { \ ARRAY_INTRO(-3); \ - extra; \ + (void)extra; \ *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \ UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); \ } @@ -1626,7 +1626,7 @@ #define ARRAY_STOREFROM64(T, T2, stackSrc, extra) \ { \ ARRAY_INTRO(-4); \ - extra; \ + (void)extra; \ *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \ UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4); \ } @@ -2233,7 +2233,7 @@ } Method* method = cache->f1_as_method(); - VERIFY_OOP(method); + if (VerifyOops) method->verify(); if (cache->has_appendix()) { ConstantPool* constants = METHOD->constants(); @@ -2265,8 +2265,7 @@ } Method* method = cache->f1_as_method(); - - VERIFY_OOP(method); + if (VerifyOops) method->verify(); if (cache->has_appendix()) { ConstantPool* constants = METHOD->constants(); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/interpreter/interpreter.cpp --- a/src/share/vm/interpreter/interpreter.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/interpreter/interpreter.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -195,6 +195,17 @@ return kind; } +#ifndef CC_INTERP + if (UseCRC32Intrinsics && m->is_native()) { + // Use optimized stub code for CRC32 native methods. + switch (m->intrinsic_id()) { + case vmIntrinsics::_updateCRC32 : return java_util_zip_CRC32_update; + case vmIntrinsics::_updateBytesCRC32 : return java_util_zip_CRC32_updateBytes; + case vmIntrinsics::_updateByteBufferCRC32 : return java_util_zip_CRC32_updateByteBuffer; + } + } +#endif + // Native method? // Note: This test must come _before_ the test for intrinsic // methods. See also comments below. 
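With the method_kind() addition above, a native java.util.zip.CRC32 method resolves to one of the three specialized entry kinds before the generic native-method test can claim it, but only when UseCRC32Intrinsics is set. A sketch of the effect at dispatch (m is an assumed methodHandle for CRC32.update; entry_for_kind is the existing entry-table accessor in AbstractInterpreter):

  AbstractInterpreter::MethodKind kind = AbstractInterpreter::method_kind(m);
  // kind == AbstractInterpreter::java_util_zip_CRC32_update when intrinsics
  // are enabled; otherwise the generic native kind, as before this patch.
  address entry = AbstractInterpreter::entry_for_kind(kind);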
@@ -297,6 +308,9 @@ case java_lang_math_sqrt : tty->print("java_lang_math_sqrt" ); break; case java_lang_math_log : tty->print("java_lang_math_log" ); break; case java_lang_math_log10 : tty->print("java_lang_math_log10" ); break; + case java_util_zip_CRC32_update : tty->print("java_util_zip_CRC32_update"); break; + case java_util_zip_CRC32_updateBytes : tty->print("java_util_zip_CRC32_updateBytes"); break; + case java_util_zip_CRC32_updateByteBuffer : tty->print("java_util_zip_CRC32_updateByteBuffer"); break; default: if (kind >= method_handle_invoke_FIRST && kind <= method_handle_invoke_LAST) { diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/interpreter/templateInterpreter.cpp --- a/src/share/vm/interpreter/templateInterpreter.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/interpreter/templateInterpreter.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -373,6 +373,12 @@ method_entry(java_lang_math_pow ) method_entry(java_lang_ref_reference_get) + if (UseCRC32Intrinsics) { + method_entry(java_util_zip_CRC32_update) + method_entry(java_util_zip_CRC32_updateBytes) + method_entry(java_util_zip_CRC32_updateByteBuffer) + } + initialize_method_handle_entries(); // all native method kinds (must be one contiguous block) diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/allocation.cpp --- a/src/share/vm/memory/allocation.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/memory/allocation.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -71,13 +71,6 @@ return MetaspaceShared::is_in_shared_space(this); } -bool MetaspaceObj::is_metadata() const { - // GC Verify checks use this in guarantees. - // TODO: either replace them with is_metaspace_object() or remove them. - // is_metaspace_object() is slower than this test. This test doesn't - // seem very useful for metaspace objects anymore though. 
- return !Universe::heap()->is_in_reserved(this); -} bool MetaspaceObj::is_metaspace_object() const { return Metaspace::contains((void*)this); @@ -243,10 +236,11 @@ size_t _num_used; // number of chunks currently checked out const size_t _size; // size of each chunk (must be uniform) - // Our three static pools + // Our four static pools static ChunkPool* _large_pool; static ChunkPool* _medium_pool; static ChunkPool* _small_pool; + static ChunkPool* _tiny_pool; // return first element or null void* get_first() { @@ -263,7 +257,7 @@ ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; } // Allocate a new chunk from the pool (might expand the pool) - _NOINLINE_ void* allocate(size_t bytes) { + _NOINLINE_ void* allocate(size_t bytes, AllocFailType alloc_failmode) { assert(bytes == _size, "bad size"); void* p = NULL; // No VM lock can be taken inside ThreadCritical lock, so os::malloc @@ -273,9 +267,9 @@ p = get_first(); } if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC); - if (p == NULL) + if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) { vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate"); - + } return p; } @@ -326,15 +320,18 @@ static ChunkPool* large_pool() { assert(_large_pool != NULL, "must be initialized"); return _large_pool; } static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; } static ChunkPool* small_pool() { assert(_small_pool != NULL, "must be initialized"); return _small_pool; } + static ChunkPool* tiny_pool() { assert(_tiny_pool != NULL, "must be initialized"); return _tiny_pool; } static void initialize() { _large_pool = new ChunkPool(Chunk::size + Chunk::aligned_overhead_size()); _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size()); _small_pool = new ChunkPool(Chunk::init_size + Chunk::aligned_overhead_size()); + _tiny_pool = new ChunkPool(Chunk::tiny_size + Chunk::aligned_overhead_size()); } static void clean() { enum { BlocksToKeep = 5 }; + _tiny_pool->free_all_but(BlocksToKeep); _small_pool->free_all_but(BlocksToKeep); _medium_pool->free_all_but(BlocksToKeep); _large_pool->free_all_but(BlocksToKeep); @@ -344,6 +341,7 @@ ChunkPool* ChunkPool::_large_pool = NULL; ChunkPool* ChunkPool::_medium_pool = NULL; ChunkPool* ChunkPool::_small_pool = NULL; +ChunkPool* ChunkPool::_tiny_pool = NULL; void chunkpool_init() { ChunkPool::initialize(); @@ -372,7 +370,7 @@ //-------------------------------------------------------------------------------------- // Chunk implementation -void* Chunk::operator new(size_t requested_size, size_t length) { +void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) { // requested_size is equal to sizeof(Chunk) but in order for the arena // allocations to come out aligned as expected the size must be aligned // to expected arena alignment. 
@@ -380,13 +378,15 @@ assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment"); size_t bytes = ARENA_ALIGN(requested_size) + length; switch (length) { - case Chunk::size: return ChunkPool::large_pool()->allocate(bytes); - case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes); - case Chunk::init_size: return ChunkPool::small_pool()->allocate(bytes); + case Chunk::size: return ChunkPool::large_pool()->allocate(bytes, alloc_failmode); + case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode); + case Chunk::init_size: return ChunkPool::small_pool()->allocate(bytes, alloc_failmode); + case Chunk::tiny_size: return ChunkPool::tiny_pool()->allocate(bytes, alloc_failmode); default: { - void *p = os::malloc(bytes, mtChunk, CALLER_PC); - if (p == NULL) + void* p = os::malloc(bytes, mtChunk, CALLER_PC); + if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) { vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new"); + } return p; } } @@ -398,6 +398,7 @@ case Chunk::size: ChunkPool::large_pool()->free(c); break; case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break; case Chunk::init_size: ChunkPool::small_pool()->free(c); break; + case Chunk::tiny_size: ChunkPool::tiny_pool()->free(c); break; default: os::free(c, mtChunk); } } @@ -440,7 +441,7 @@ Arena::Arena(size_t init_size) { size_t round_size = (sizeof (char *)) - 1; init_size = (init_size+round_size) & ~round_size; - _first = _chunk = new (init_size) Chunk(init_size); + _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size); _hwm = _chunk->bottom(); // Save the cached hwm, max _max = _chunk->top(); set_size_in_bytes(init_size); @@ -448,7 +449,7 @@ } Arena::Arena() { - _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size); + _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size); _hwm = _chunk->bottom(); // Save the cached hwm, max _max = _chunk->top(); set_size_in_bytes(Chunk::init_size); @@ -555,12 +556,9 @@ size_t len = MAX2(x, (size_t) Chunk::size); Chunk *k = _chunk; // Get filled-up chunk address - _chunk = new (len) Chunk(len); + _chunk = new (alloc_failmode, len) Chunk(len); if (_chunk == NULL) { - if (alloc_failmode == AllocFailStrategy::EXIT_OOM) { - signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow"); - } return NULL; } if (k) k->set_next(_chunk); // Append new chunk to end of linked list diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/allocation.hpp --- a/src/share/vm/memory/allocation.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/memory/allocation.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -157,7 +157,8 @@ mtJavaHeap = 0x0C00, // Java heap mtClassShared = 0x0D00, // class data sharing mtTest = 0x0E00, // Test type for verifying NMT - mt_number_of_types = 0x000E, // number of memory types (mtDontTrack + mtTracing = 0x0F00, // memory used for Tracing + mt_number_of_types = 0x000F, // number of memory types (mtDontTrack // is not included as validate type) mtDontTrack = 0x0F00, // memory we do not or cannot track mt_masks = 0x7F00, @@ -263,7 +264,6 @@ class MetaspaceObj { public: - bool is_metadata() const; bool is_metaspace_object() const; // more specific test but slower bool is_shared() const; void print_address_on(outputStream* st) const; // nonvirtual address printing @@ -339,7 +339,7 @@ Chunk* _next; // Next Chunk in list const size_t _len; // Size of this Chunk public: - void* operator new(size_t size, size_t length); + 
void* operator new(size_t size, AllocFailType alloc_failmode, size_t length); void operator delete(void* p); Chunk(size_t length); @@ -353,7 +353,8 @@ slack = 20, // suspected sizeof(Chunk) + internal malloc headers #endif - init_size = 1*K - slack, // Size of first chunk + tiny_size = 256 - slack, // Size of first chunk (tiny) + init_size = 1*K - slack, // Size of first chunk (normal aka small) medium_size= 10*K - slack, // Size of medium-sized chunk size = 32*K - slack, // Default size of an Arena chunk (following the first) non_pool_size = init_size + 32 // An initial size which is not one of above @@ -402,10 +403,15 @@ void signal_out_of_memory(size_t request, const char* whence) const; - void check_for_overflow(size_t request, const char* whence) const { + bool check_for_overflow(size_t request, const char* whence, + AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const { if (UINTPTR_MAX - request < (uintptr_t)_hwm) { + if (alloc_failmode == AllocFailStrategy::RETURN_NULL) { + return false; + } signal_out_of_memory(request, whence); } + return true; } public: @@ -429,7 +435,8 @@ assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2"); x = ARENA_ALIGN(x); debug_only(if (UseMallocOnly) return malloc(x);) - check_for_overflow(x, "Arena::Amalloc"); + if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode)) + return NULL; NOT_PRODUCT(inc_bytes_allocated(x);) if (_hwm + x > _max) { return grow(x, alloc_failmode); @@ -443,7 +450,8 @@ void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) { assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" ); debug_only(if (UseMallocOnly) return malloc(x);) - check_for_overflow(x, "Arena::Amalloc_4"); + if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode)) + return NULL; NOT_PRODUCT(inc_bytes_allocated(x);) if (_hwm + x > _max) { return grow(x, alloc_failmode); @@ -464,7 +472,8 @@ size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm; x += delta; #endif - check_for_overflow(x, "Arena::Amalloc_D"); + if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode)) + return NULL; NOT_PRODUCT(inc_bytes_allocated(x);) if (_hwm + x > _max) { return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes. 
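Together with the ChunkPool change, the Amalloc hunks above make arena allocation failure recoverable: check_for_overflow() now reports instead of unconditionally aborting, and each Amalloc variant propagates NULL when RETURN_NULL was requested. A compressed sketch of that control flow, with a bare uintptr_t standing in for the arena's _hwm bump pointer (illustrative only, not the HotSpot signature):

    #include <cstddef>
    #include <cstdint>

    // Report overflow instead of failing fatally, so the caller can
    // return NULL to its own caller when that was requested.
    bool check_for_overflow(uintptr_t hwm, size_t request, bool return_null) {
        if (UINTPTR_MAX - request < hwm) {  // hwm + request would wrap
            if (return_null) {
                return false;               // caller returns NULL
            }
            // the fatal path (vm_exit_out_of_memory in HotSpot) goes here
        }
        return true;
    }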
@@ -634,8 +643,15 @@ #define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\ (type*) resource_allocate_bytes(thread, (size) * sizeof(type)) +#define NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(thread, type, size)\ + (type*) resource_allocate_bytes(thread, (size) * sizeof(type), AllocFailStrategy::RETURN_NULL) + #define REALLOC_RESOURCE_ARRAY(type, old, old_size, new_size)\ - (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type) ) + (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type)) + +#define REALLOC_RESOURCE_ARRAY_RETURN_NULL(type, old, old_size, new_size)\ + (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type),\ + (new_size) * sizeof(type), AllocFailStrategy::RETURN_NULL) #define FREE_RESOURCE_ARRAY(type, old, size)\ resource_free_bytes((char*)(old), (size) * sizeof(type)) @@ -646,28 +662,40 @@ #define NEW_RESOURCE_OBJ(type)\ NEW_RESOURCE_ARRAY(type, 1) +#define NEW_RESOURCE_OBJ_RETURN_NULL(type)\ + NEW_RESOURCE_ARRAY_RETURN_NULL(type, 1) + +#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)\ + (type*) AllocateHeap(size * sizeof(type), memflags, pc, allocfail) + +#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\ + (type*) (AllocateHeap((size) * sizeof(type), memflags, pc)) + #define NEW_C_HEAP_ARRAY(type, size, memflags)\ (type*) (AllocateHeap((size) * sizeof(type), memflags)) +#define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, memflags, pc)\ + NEW_C_HEAP_ARRAY3(type, size, memflags, pc, AllocFailStrategy::RETURN_NULL) + +#define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\ + NEW_C_HEAP_ARRAY3(type, size, memflags, (address)0, AllocFailStrategy::RETURN_NULL) + #define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\ (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags)) +#define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\ + (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL)) + #define FREE_C_HEAP_ARRAY(type, old, memflags) \ FreeHeap((char*)(old), memflags) -#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\ - (type*) (AllocateHeap((size) * sizeof(type), memflags, pc)) - -#define REALLOC_C_HEAP_ARRAY2(type, old, size, memflags, pc)\ - (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, pc)) - -#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail) \ - (type*) AllocateHeap(size * sizeof(type), memflags, pc, allocfail) - // allocate type in heap without calling ctor #define NEW_C_HEAP_OBJ(type, memflags)\ NEW_C_HEAP_ARRAY(type, 1, memflags) +#define NEW_C_HEAP_OBJ_RETURN_NULL(type, memflags)\ + NEW_C_HEAP_ARRAY_RETURN_NULL(type, 1, memflags) + // deallocate obj of type in heap without calling dtor #define FREE_C_HEAP_OBJ(objname, memflags)\ FreeHeap((char*)objname, memflags); @@ -712,13 +740,21 @@ // is set so that we always use malloc except for Solaris where we set the // limit to get mapped memory. 
template <class E, MEMFLAGS F> -class ArrayAllocator : StackObj { +class ArrayAllocator VALUE_OBJ_CLASS_SPEC { char* _addr; bool _use_malloc; size_t _size; + bool _free_in_destructor; public: - ArrayAllocator() : _addr(NULL), _use_malloc(false), _size(0) { } - ~ArrayAllocator() { free(); } + ArrayAllocator(bool free_in_destructor = true) : + _addr(NULL), _use_malloc(false), _size(0), _free_in_destructor(free_in_destructor) { } + + ~ArrayAllocator() { + if (_free_in_destructor) { + free(); + } + } + E* allocate(size_t length); void free(); };
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/allocation.inline.hpp
--- a/src/share/vm/memory/allocation.inline.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/memory/allocation.inline.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -146,10 +146,7 @@ vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (reserve)"); } - bool success = os::commit_memory(_addr, _size, false /* executable */); - if (!success) { - vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (commit)"); - } + os::commit_memory_or_exit(_addr, _size, !ExecMem, "Allocator (commit)"); return (E*)_addr; }
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/cardTableModRefBS.cpp
--- a/src/share/vm/memory/cardTableModRefBS.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/memory/cardTableModRefBS.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -110,11 +110,8 @@ jbyte* guard_card = &_byte_map[_guard_index]; uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size); _guard_region = MemRegion((HeapWord*)guard_page, _page_size); - if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) { - // Do better than this for Merlin - vm_exit_out_of_memory(_page_size, OOM_MMAP_ERROR, "card table last card"); - } - + os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size, + !ExecMem, "card table last card"); *guard_card = last_card; _lowest_non_clean =
@@ -312,12 +309,9 @@ MemRegion(cur_committed.end(), new_end_for_commit); assert(!new_committed.is_empty(), "Region should not be empty here"); - if (!os::commit_memory((char*)new_committed.start(), - new_committed.byte_size(), _page_size)) { - // Do better than this for Merlin - vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR, - "card table expansion"); - } + os::commit_memory_or_exit((char*)new_committed.start(), + new_committed.byte_size(), _page_size, + !ExecMem, "card table expansion"); // Use new_end_aligned (as opposed to new_end_for_commit) because // the cur_committed region may include the guard region. } else if (new_end_aligned < cur_committed.end()) {
@@ -418,7 +412,7 @@ } // Touch the last card of the covered region to show that it // is committed (or SEGV). - debug_only(*byte_for(_covered[ind].last());) + debug_only((void) (*byte_for(_covered[ind].last()));) debug_only(verify_guard();) }
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/defNewGeneration.cpp
--- a/src/share/vm/memory/defNewGeneration.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/memory/defNewGeneration.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,10 @@ #include "precompiled.hpp" #include "gc_implementation/shared/collectorCounters.hpp" #include "gc_implementation/shared/gcPolicyCounters.hpp" +#include "gc_implementation/shared/gcHeapSummary.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" +#include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/spaceDecorator.hpp" #include "memory/defNewGeneration.inline.hpp" #include "memory/gcLocker.inline.hpp" @@ -223,6 +227,8 @@ _next_gen = NULL; _tenuring_threshold = MaxTenuringThreshold; _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize; + + _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer(); } void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size, @@ -444,11 +450,6 @@ } } -void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) { - // $$$ This may be wrong in case of "scavenge failure"? - eden()->object_iterate(cl); -} - void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) { assert(false, "NYI -- are you sure you want to call this?"); } @@ -558,12 +559,18 @@ size_t size, bool is_tlab) { assert(full || size > 0, "otherwise we don't want to collect"); + GenCollectedHeap* gch = GenCollectedHeap::heap(); + + _gc_timer->register_gc_start(os::elapsed_counter()); + DefNewTracer gc_tracer; + gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start()); + _next_gen = gch->next_gen(this); assert(_next_gen != NULL, "This must be the youngest gen, and not the only gen"); - // If the next generation is too full to accomodate promotion + // If the next generation is too full to accommodate promotion // from this generation, pass on collection; let the next generation // do it. if (!collection_attempt_is_safe()) { @@ -577,10 +584,12 @@ init_assuming_no_promotion_failure(); - TraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty); + GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL); // Capture heap used before collection (for printing). size_t gch_prev_used = gch->used(); + gch->trace_heap_before_gc(&gc_tracer); + SpecializationStats::clear(); // These can be shared for all code paths @@ -631,9 +640,12 @@ FastKeepAliveClosure keep_alive(this, &scan_weak_ref); ReferenceProcessor* rp = ref_processor(); rp->setup_policy(clear_all_soft_refs); + const ReferenceProcessorStats& stats = rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers, - NULL); - if (!promotion_failed()) { + NULL, _gc_timer); + gc_tracer.report_gc_reference_stats(stats); + + if (!_promotion_failed) { // Swap the survivor spaces. eden()->clear(SpaceDecorator::Mangle); from()->clear(SpaceDecorator::Mangle); @@ -680,6 +692,7 @@ // Inform the next generation that a promotion failure occurred. _next_gen->promotion_failure_occurred(); + gc_tracer.report_promotion_failed(_promotion_failed_info); // Reset the PromotionFailureALot counters. NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();) @@ -689,11 +702,18 @@ to()->set_concurrent_iteration_safe_limit(to()->top()); SpecializationStats::print(); - // We need to use a monotonically non-deccreasing time in ms + // We need to use a monotonically non-decreasing time in ms // or we will see time-warp warnings and os::javaTimeMillis() // does not guarantee monotonicity. 
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; update_time_of_last_gc(now); + + gch->trace_heap_after_gc(&gc_tracer); + gc_tracer.report_tenuring_threshold(tenuring_threshold()); + + _gc_timer->register_gc_end(os::elapsed_counter()); + + gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions()); } class RemoveForwardPointerClosure: public ObjectClosure { @@ -705,6 +725,7 @@ void DefNewGeneration::init_assuming_no_promotion_failure() { _promotion_failed = false; + _promotion_failed_info.reset(); from()->set_next_compaction_space(NULL); } @@ -726,7 +747,7 @@ } void DefNewGeneration::preserve_mark(oop obj, markOop m) { - assert(promotion_failed() && m->must_be_preserved_for_promotion_failure(obj), + assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj), "Oversaving!"); _objs_with_preserved_marks.push(obj); _preserved_marks_of_objs.push(m); @@ -744,6 +765,7 @@ old->size()); } _promotion_failed = true; + _promotion_failed_info.register_copy_failure(old->size()); preserve_mark_if_necessary(old, old->mark()); // forward to self old->forward_to(old); @@ -962,6 +984,10 @@ from()->set_top_for_allocations(); } +void DefNewGeneration::ref_processor_init() { + Generation::ref_processor_init(); +} + void DefNewGeneration::update_counters() { if (UsePerfData) { diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/defNewGeneration.hpp --- a/src/share/vm/memory/defNewGeneration.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/memory/defNewGeneration.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,12 +28,14 @@ #include "gc_implementation/shared/ageTable.hpp" #include "gc_implementation/shared/cSpaceCounters.hpp" #include "gc_implementation/shared/generationCounters.hpp" +#include "gc_implementation/shared/copyFailedInfo.hpp" #include "memory/generation.inline.hpp" #include "utilities/stack.hpp" class EdenSpace; class ContiguousSpace; class ScanClosure; +class STWGCTimer; // DefNewGeneration is a young generation containing eden, from- and // to-space. @@ -46,15 +48,17 @@ uint _tenuring_threshold; // Tenuring threshold for next collection. ageTable _age_table; // Size of object to pretenure in words; command line provides bytes - size_t _pretenure_size_threshold_words; + size_t _pretenure_size_threshold_words; ageTable* age_table() { return &_age_table; } + // Initialize state to optimistically assume no promotion failure will // happen. void init_assuming_no_promotion_failure(); // True iff a promotion has failed in the current collection. bool _promotion_failed; bool promotion_failed() { return _promotion_failed; } + PromotionFailedInfo _promotion_failed_info; // Handling promotion failure. A young generation collection // can fail if a live object cannot be copied out of its @@ -132,6 +136,8 @@ ContiguousSpace* _from_space; ContiguousSpace* _to_space; + STWGCTimer* _gc_timer; + enum SomeProtectedConstants { // Generations are GenGrain-aligned and have size that are multiples of // GenGrain. 
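The DefNewGeneration hunks above wire a stop-the-world timer and a GC tracer into the young collection: both are opened on entry to collect(), reference-processing statistics are reported mid-collection, and both are closed out at the end. A minimal sketch of that bracketing order, using stub types rather than the real STWGCTimer/DefNewTracer API:

    #include <cstdio>

    // Stub stand-ins; the real types live in gcTimer.hpp and gcTrace.hpp.
    struct GcTimer  { void register_gc_start() {} void register_gc_end() {} };
    struct GcTracer {
        void report_gc_start()           { std::puts("report_gc_start"); }
        void report_gc_reference_stats() { std::puts("report_gc_reference_stats"); }
        void report_gc_end()             { std::puts("report_gc_end"); }
    };

    // The ordering a collection is expected to follow.
    void collect(GcTimer& timer, GcTracer& tracer) {
        timer.register_gc_start();
        tracer.report_gc_start();
        // ... copy live objects, process discovered references ...
        tracer.report_gc_reference_stats();
        timer.register_gc_end();
        tracer.report_gc_end();
    }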
@@ -203,6 +209,8 @@ DefNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level, const char* policy="Copy"); + virtual void ref_processor_init(); + virtual Generation::Name kind() { return Generation::DefNew; } // Accessing spaces @@ -244,7 +252,6 @@ // Iteration void object_iterate(ObjectClosure* blk); - void object_iterate_since_last_GC(ObjectClosure* cl); void younger_refs_iterate(OopsInGenClosure* cl); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/filemap.cpp --- a/src/share/vm/memory/filemap.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/memory/filemap.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -549,3 +549,13 @@ return false; } + +void FileMapInfo::print_shared_spaces() { + gclog_or_tty->print_cr("Shared Spaces:"); + for (int i = 0; i < MetaspaceShared::n_regions; i++) { + struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i]; + gclog_or_tty->print(" %s " INTPTR_FORMAT "-" INTPTR_FORMAT, + shared_region_name[i], + si->_base, si->_base + si->_used); + } +} diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/filemap.hpp --- a/src/share/vm/memory/filemap.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/memory/filemap.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -149,6 +149,7 @@ // Return true if given address is in the mapped shared space. bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false); + void print_shared_spaces() NOT_CDS_RETURN; }; #endif // SHARE_VM_MEMORY_FILEMAP_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/genCollectedHeap.cpp --- a/src/share/vm/memory/genCollectedHeap.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/memory/genCollectedHeap.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ #include "classfile/vmSymbols.hpp" #include "code/icBuffer.hpp" #include "gc_implementation/shared/collectorCounters.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_implementation/shared/vmGCOperations.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "memory/filemap.hpp" @@ -41,7 +42,6 @@ #include "memory/space.hpp" #include "oops/oop.inline.hpp" #include "oops/oop.inline2.hpp" -#include "runtime/aprofiler.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/fprofiler.hpp" #include "runtime/handles.hpp" @@ -388,7 +388,7 @@ const char* gc_cause_prefix = complete ? "Full GC" : "GC"; gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - TraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, gclog_or_tty); + GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL); gc_prologue(complete); increment_total_collections(complete); @@ -417,10 +417,11 @@ // The full_collections increment was missed above. increment_total_full_collections(); } - pre_full_gc_dump(); // do any pre full gc dumps + pre_full_gc_dump(NULL); // do any pre full gc dumps } // Timer for individual generations. 
Last argument is false: no CR - TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty); + // FIXME: We should try to start the timing earlier to cover more of the GC pause + GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL); TraceCollectorStats tcs(_gens[i]->counters()); TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause()); @@ -534,7 +535,8 @@ complete = complete || (max_level_collected == n_gens() - 1); if (complete) { // We did a "major" collection - post_full_gc_dump(); // do any post full gc dumps + // FIXME: See comment at pre_full_gc_dump call + post_full_gc_dump(NULL); // do any post full gc dumps } if (PrintGCDetails) { @@ -870,12 +872,6 @@ } } -void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { - for (int i = 0; i < _n_gens; i++) { - _gens[i]->object_iterate_since_last_GC(cl); - } -} - Space* GenCollectedHeap::space_containing(const void* addr) const { for (int i = 0; i < _n_gens; i++) { Space* res = _gens[i]->space_containing(addr); @@ -1183,8 +1179,6 @@ CollectedHeap::accumulate_statistics_all_tlabs(); ensure_parsability(true); // retire TLABs - // Call allocation profiler - AllocationProfiler::iterate_since_last_gc(); // Walk generations GenGCPrologueClosure blk(full); generation_iterate(&blk, false); // not old-to-young. diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/genCollectedHeap.hpp --- a/src/share/vm/memory/genCollectedHeap.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/memory/genCollectedHeap.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -222,7 +222,6 @@ void oop_iterate(MemRegion mr, ExtendedOopClosure* cl); void object_iterate(ObjectClosure* cl); void safe_object_iterate(ObjectClosure* cl); - void object_iterate_since_last_GC(ObjectClosure* cl); Space* space_containing(const void* addr) const; // A CollectedHeap is divided into a dense sequence of "blocks"; that is, diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/genMarkSweep.cpp --- a/src/share/vm/memory/genMarkSweep.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/memory/genMarkSweep.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,10 @@ #include "classfile/vmSymbols.hpp" #include "code/codeCache.hpp" #include "code/icBuffer.hpp" +#include "gc_implementation/shared/gcHeapSummary.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "memory/genCollectedHeap.hpp" #include "memory/genMarkSweep.hpp" @@ -65,7 +69,9 @@ _ref_processor = rp; rp->setup_policy(clear_all_softrefs); - TraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty); + GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL); + + gch->trace_heap_before_gc(_gc_tracer); // When collecting the permanent generation Method*s may be moving, // so we either have to flush all bcp data or convert it into bci. @@ -155,6 +161,8 @@ // does not guarantee monotonicity. 
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; gch->update_time_of_last_gc(now); + + gch->trace_heap_after_gc(_gc_tracer); } void GenMarkSweep::allocate_stacks() { @@ -192,7 +200,7 @@ void GenMarkSweep::mark_sweep_phase1(int level, bool clear_all_softrefs) { // Recursively traverse all live objects and mark them - TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty); + GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer); trace(" 1"); GenCollectedHeap* gch = GenCollectedHeap::heap(); @@ -219,8 +227,10 @@ // Process reference objects found during marking { ref_processor()->setup_policy(clear_all_softrefs); - ref_processor()->process_discovered_references( - &is_alive, &keep_alive, &follow_stack_closure, NULL); + const ReferenceProcessorStats& stats = + ref_processor()->process_discovered_references( + &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer); + gc_tracer()->report_gc_reference_stats(stats); } // This is the point where the entire marking should have completed. @@ -240,6 +250,8 @@ // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); + + gc_tracer()->report_object_count_after_gc(&is_alive); } @@ -259,7 +271,7 @@ GenCollectedHeap* gch = GenCollectedHeap::heap(); - TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty); + GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer); trace("2"); gch->prepare_for_compaction(); @@ -276,7 +288,7 @@ GenCollectedHeap* gch = GenCollectedHeap::heap(); // Adjust the pointers to reflect the new locations - TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty); + GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer); trace("3"); // Need new claim bits for the pointer adjustment tracing. @@ -331,7 +343,7 @@ // to use a higher index (saved from phase2) when verifying perm_gen. GenCollectedHeap* gch = GenCollectedHeap::heap(); - TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty); + GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer); trace("4"); GenCompactClosure blk; diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/generation.cpp --- a/src/share/vm/memory/generation.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/memory/generation.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,8 @@ */ #include "precompiled.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/spaceDecorator.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "memory/allocation.inline.hpp" @@ -624,12 +626,26 @@ bool clear_all_soft_refs, size_t size, bool is_tlab) { + GenCollectedHeap* gch = GenCollectedHeap::heap(); + SpecializationStats::clear(); // Temporarily expand the span of our ref processor, so // refs discovery is over the entire heap, not just this generation ReferenceProcessorSpanMutator - x(ref_processor(), GenCollectedHeap::heap()->reserved_region()); + x(ref_processor(), gch->reserved_region()); + + STWGCTimer* gc_timer = GenMarkSweep::gc_timer(); + gc_timer->register_gc_start(os::elapsed_counter()); + + SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer(); + gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start()); + GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs); + + gc_timer->register_gc_end(os::elapsed_counter()); + + gc_tracer->report_gc_end(os::elapsed_counter(), gc_timer->time_partitions()); + SpecializationStats::print(); } @@ -795,16 +811,6 @@ blk->do_space(_the_space); } -void OneContigSpaceCardGeneration::object_iterate_since_last_GC(ObjectClosure* blk) { - // Deal with delayed initialization of _the_space, - // and lack of initialization of _last_gc. - if (_last_gc.space() == NULL) { - assert(the_space() != NULL, "shouldn't be NULL"); - _last_gc = the_space()->bottom_mark(); - } - the_space()->object_iterate_from(_last_gc, blk); -} - void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) { blk->set_generation(this); younger_refs_in_space_iterate(_the_space, blk); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/generation.hpp --- a/src/share/vm/memory/generation.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/memory/generation.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -551,12 +551,6 @@ // the heap. This defaults to object_iterate() unless overridden. virtual void safe_object_iterate(ObjectClosure* cl); - // Iterate over all objects allocated in the generation since the last - // collection, calling "cl.do_object" on each. The generation must have - // been initialized properly to support this function, or else this call - // will fail. - virtual void object_iterate_since_last_GC(ObjectClosure* cl) = 0; - // Apply "cl->do_oop" to (the address of) all and only all the ref fields // in the current generation that contain pointers to objects in younger // generations. Objects allocated since the last "save_marks" call are @@ -724,7 +718,6 @@ // Iteration void object_iterate(ObjectClosure* blk); void space_iterate(SpaceClosure* blk, bool usedOnly = false); - void object_iterate_since_last_GC(ObjectClosure* cl); void younger_refs_iterate(OopsInGenClosure* blk); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/heapInspection.cpp --- a/src/share/vm/memory/heapInspection.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/memory/heapInspection.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -95,7 +95,7 @@ } elt = elt->next(); } - elt = new KlassInfoEntry(k, list()); + elt = new (std::nothrow) KlassInfoEntry(k, list()); // We may be out of space to allocate the new entry. 
if (elt != NULL) { set_list(elt);
@@ -127,13 +127,15 @@ _table->lookup(k); } -KlassInfoTable::KlassInfoTable(int size, HeapWord* ref, - bool need_class_stats) { +KlassInfoTable::KlassInfoTable(bool need_class_stats) { + _size_of_instances_in_words = 0; _size = 0; - _ref = ref; - _buckets = NEW_C_HEAP_ARRAY(KlassInfoBucket, size, mtInternal); + _ref = (HeapWord*) Universe::boolArrayKlassObj(); + _buckets = + (KlassInfoBucket*) AllocateHeap(sizeof(KlassInfoBucket) * _num_buckets, + mtInternal, 0, AllocFailStrategy::RETURN_NULL); if (_buckets != NULL) { - _size = size; + _size = _num_buckets; for (int index = 0; index < _size; index++) { _buckets[index].initialize(); }
@@ -155,7 +157,6 @@ } uint KlassInfoTable::hash(const Klass* p) { - assert(p->is_metadata(), "all klasses are metadata"); return (uint)(((uintptr_t)p - (uintptr_t)_ref) >> 2); }
@@ -179,6 +180,7 @@ if (elt != NULL) { elt->set_count(elt->count() + 1); elt->set_words(elt->words() + obj->size()); + _size_of_instances_in_words += obj->size(); return true; } else { return false;
@@ -192,14 +194,18 @@ } } +size_t KlassInfoTable::size_of_instances_in_words() const { + return _size_of_instances_in_words; +} + int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) { return (*e1)->compare(*e1,*e2); } -KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit, const char* title, int estimatedCount) : +KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit, const char* title) : _cit(cit), _title(title) { - _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(estimatedCount,true); + _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(_histo_initial_size, true); } KlassInfoHisto::~KlassInfoHisto() {
@@ -444,25 +450,37 @@ private: KlassInfoTable* _cit; size_t _missed_count; + BoolObjectClosure* _filter; public: - RecordInstanceClosure(KlassInfoTable* cit) : - _cit(cit), _missed_count(0) {} + RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) : + _cit(cit), _missed_count(0), _filter(filter) {} void do_object(oop obj) { - if (!_cit->record_instance(obj)) { - _missed_count++; + if (should_visit(obj)) { + if (!_cit->record_instance(obj)) { + _missed_count++; + } } } size_t missed_count() { return _missed_count; } + + private: + bool should_visit(oop obj) { + return _filter == NULL || _filter->do_object_b(obj); + } }; -void HeapInspection::heap_inspection(outputStream* st, bool need_prologue) { +size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter) { ResourceMark rm; - // Get some random number for ref (the hash key) - HeapWord* ref = (HeapWord*) Universe::boolArrayKlassObj(); - CollectedHeap* heap = Universe::heap(); - bool is_shared_heap = false; + + RecordInstanceClosure ric(cit, filter); + Universe::heap()->object_iterate(&ric); + return ric.missed_count(); +} + +void HeapInspection::heap_inspection(outputStream* st) { + ResourceMark rm; if (_print_help) { for (int c=0; cobject_iterate(&ric); - - // Report if certain classes are not counted because of - // running out of C-heap for the histogram. - size_t missed_count = ric.missed_count(); + size_t missed_count = populate_table(&cit); if (missed_count != 0) { st->print_cr("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT " total instances in data below", missed_count); } + // Sort and print klass instance info const char *title = "\n" " num #instances #bytes class name\n" "----------------------------------------------"; - KlassInfoHisto histo(&cit, title, KlassInfoHisto::histo_initial_size); + KlassInfoHisto histo(&cit, title); HistoClosure hc(&histo); + cit.iterate(&hc); + histo.sort(); histo.print_histo_on(st, _print_class_stats, _csv_format, _columns); } else { st->print_cr("WARNING: Ran out of C-heap; histogram not generated"); } st->flush(); - - if (need_prologue && is_shared_heap) { - SharedHeap* sh = (SharedHeap*)heap; - sh->gc_epilogue(false /* !full */); // release all acquired locks, etc. - } } class FindInstanceClosure : public ObjectClosure {
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/heapInspection.hpp
--- a/src/share/vm/memory/heapInspection.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/memory/heapInspection.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -203,7 +203,7 @@ const char* name() const; }; -class KlassInfoClosure: public StackObj { +class KlassInfoClosure : public StackObj { public: // Called for each KlassInfoEntry. virtual void do_cinfo(KlassInfoEntry* cie) = 0;
@@ -224,6 +224,8 @@ class KlassInfoTable: public StackObj { private: int _size; + static const int _num_buckets = 20011; + size_t _size_of_instances_in_words; // An aligned reference address (typically the least // address in the perm gen) used for hashing klass
@@ -242,21 +244,19 @@ }; public: - // Table size - enum { - cit_size = 20011 - }; - KlassInfoTable(int size, HeapWord* ref, bool need_class_stats); + KlassInfoTable(bool need_class_stats); ~KlassInfoTable(); bool record_instance(const oop obj); void iterate(KlassInfoClosure* cic); bool allocation_failed() { return _buckets == NULL; } + size_t size_of_instances_in_words() const; friend class KlassInfoHisto; }; class KlassInfoHisto : public StackObj { private: + static const int _histo_initial_size = 1000; KlassInfoTable *_cit; GrowableArray<KlassInfoEntry*>* _elements; GrowableArray<KlassInfoEntry*>* elements() const { return _elements; }
@@ -334,11 +334,7 @@ } public: - enum { - histo_initial_size = 1000 - }; - KlassInfoHisto(KlassInfoTable* cit, const char* title, - int estimatedCount); + KlassInfoHisto(KlassInfoTable* cit, const char* title); ~KlassInfoHisto(); void add(KlassInfoEntry* cie); void print_histo_on(outputStream* st, bool print_class_stats, bool csv_format, const char *columns);
@@ -347,6 +343,11 @@ #endif // INCLUDE_SERVICES +// These declarations are needed since the declarations of KlassInfoTable and +// KlassInfoClosure are guarded by #if INCLUDE_SERVICES +class KlassInfoTable; +class KlassInfoClosure; + class HeapInspection : public StackObj { bool _csv_format; // "comma separated values" format for spreadsheet. bool _print_help;
@@ -357,8 +358,11 @@ bool print_class_stats, const char *columns) : _csv_format(csv_format), _print_help(print_help), _print_class_stats(print_class_stats), _columns(columns) {} - void heap_inspection(outputStream* st, bool need_prologue) NOT_SERVICES_RETURN; + void heap_inspection(outputStream* st) NOT_SERVICES_RETURN; + size_t populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL) NOT_SERVICES_RETURN; static void find_instances_at_safepoint(Klass* k, GrowableArray<oop>* result) NOT_SERVICES_RETURN; + private: + void iterate_over_heap(KlassInfoTable* cit, BoolObjectClosure* filter = NULL); }; #endif // SHARE_VM_MEMORY_HEAPINSPECTION_HPP
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/metaspace.cpp
--- a/src/share/vm/memory/metaspace.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/memory/metaspace.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -70,7 +70,7 @@ SpecializedChunk = 128, ClassSmallChunk = 256, SmallChunk = 512, - ClassMediumChunk = 1 * K, + ClassMediumChunk = 4 * K, MediumChunk = 8 * K, HumongousChunkGranularity = 8 };
@@ -580,7 +580,6 @@ // Number of small chunks to allocate to a manager // If class space manager, small chunks are unlimited static uint const _small_chunk_limit; - bool has_small_chunk_limit() { return !vs_list()->is_class(); } // Sum of all space in allocated chunks size_t _allocated_blocks_words;
@@ -1298,13 +1297,18 @@ bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) { - size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes(); // If the user wants a limit, impose one. - size_t max_metaspace_size_bytes = MaxMetaspaceSize; - size_t metaspace_size_bytes = MetaspaceSize; - if (!FLAG_IS_DEFAULT(MaxMetaspaceSize) && - MetaspaceAux::reserved_in_bytes() >= MaxMetaspaceSize) { - return false; + // The reason for someone using this flag is to limit reserved space. So + // for non-class virtual space, compare against virtual spaces that are reserved. + // For class virtual space, we only compare against the committed space, not + // reserved space, because this is a larger space prereserved for compressed + // class pointers. + if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) { + size_t real_allocated = Metaspace::space_list()->virtual_space_total() + + MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType); + if (real_allocated >= MaxMetaspaceSize) { + return false; + } } // Class virtual space should always be expanded. Call GC for the other
@@ -1318,11 +1322,12 @@ } - // If the capacity is below the minimum capacity, allow the // expansion. Also set the high-water-mark (capacity_until_GC) // to that minimum capacity so that a GC will not be induced // until that minimum capacity is exceeded. + size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes(); + size_t metaspace_size_bytes = MetaspaceSize; if (committed_capacity_bytes < metaspace_size_bytes || capacity_until_GC() == 0) { set_capacity_until_GC(metaspace_size_bytes);
@@ -1556,19 +1561,7 @@ // ChunkManager methods -// Verification of _free_chunks_total and _free_chunks_count does not -// work with the CMS collector because its use of additional locks -// complicate the mutex deadlock detection but it can still be useful -// for detecting errors in the chunk accounting with other collectors.
- size_t ChunkManager::free_chunks_total() { -#ifdef ASSERT - if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) { - MutexLockerEx cl(SpaceManager::expand_lock(), - Mutex::_no_safepoint_check_flag); - slow_locked_verify_free_chunks_total(); - } -#endif return _free_chunks_total; } @@ -1866,13 +1859,11 @@ Metachunk* chunk = chunks_in_use(index); // Count the free space in all the chunk but not the // current chunk from which allocations are still being done. - if (chunk != NULL) { - Metachunk* prev = chunk; - while (chunk != NULL && chunk != current_chunk()) { + while (chunk != NULL) { + if (chunk != current_chunk()) { result += chunk->free_word_size(); - prev = chunk; - chunk = chunk->next(); } + chunk = chunk->next(); } return result; } @@ -1961,8 +1952,7 @@ // chunks will be allocated. size_t chunk_word_size; if (chunks_in_use(MediumIndex) == NULL && - (!has_small_chunk_limit() || - sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit)) { + sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) { chunk_word_size = (size_t) small_chunk_size(); if (word_size + Metachunk::overhead() > small_chunk_size()) { chunk_word_size = medium_chunk_size(); @@ -2608,14 +2598,14 @@ "->" SIZE_FORMAT "(" SIZE_FORMAT ")", prev_metadata_used, - allocated_capacity_bytes(), + allocated_used_bytes(), reserved_in_bytes()); } else { gclog_or_tty->print(" " SIZE_FORMAT "K" "->" SIZE_FORMAT "K" "(" SIZE_FORMAT "K)", prev_metadata_used / K, - allocated_capacity_bytes() / K, + allocated_used_bytes() / K, reserved_in_bytes()/ K); } @@ -2671,10 +2661,10 @@ // Print total fragmentation for class and data metaspaces separately void MetaspaceAux::print_waste(outputStream* out) { - size_t specialized_waste = 0, small_waste = 0, medium_waste = 0, large_waste = 0; - size_t specialized_count = 0, small_count = 0, medium_count = 0, large_count = 0; - size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0, cls_large_waste = 0; - size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_large_count = 0; + size_t specialized_waste = 0, small_waste = 0, medium_waste = 0; + size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0; + size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0; + size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0; ClassLoaderDataGraphMetaspaceIterator iter; while (iter.repeat()) { @@ -2686,8 +2676,7 @@ small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex); medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex); medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex); - large_waste += msp->vsm()->sum_waste_in_chunks_in_use(HumongousIndex); - large_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex); + humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex); cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex); @@ -2695,20 +2684,23 @@ cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex); cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex); cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex); - cls_large_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(HumongousIndex); - cls_large_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex); + 
cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex); } } out->print_cr("Total fragmentation waste (words) doesn't count free space"); out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " SIZE_FORMAT " small(s) " SIZE_FORMAT ", " - SIZE_FORMAT " medium(s) " SIZE_FORMAT, + SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " + "large count " SIZE_FORMAT, specialized_count, specialized_waste, small_count, - small_waste, medium_count, medium_waste); + small_waste, medium_count, medium_waste, humongous_count); out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " - SIZE_FORMAT " small(s) " SIZE_FORMAT, + SIZE_FORMAT " small(s) " SIZE_FORMAT ", " + SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " + "large count " SIZE_FORMAT, cls_specialized_count, cls_specialized_waste, - cls_small_count, cls_small_waste); + cls_small_count, cls_small_waste, + cls_medium_count, cls_medium_waste, cls_humongous_count); } // Dump global metaspace things from the end of ClassLoaderDataGraph @@ -3049,18 +3041,24 @@ if (Verbose && TraceMetadataChunkAllocation) { gclog_or_tty->print_cr("Metaspace allocation failed for size " SIZE_FORMAT, word_size); - if (loader_data->metaspace_or_null() != NULL) loader_data->metaspace_or_null()->dump(gclog_or_tty); + if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty); MetaspaceAux::dump(gclog_or_tty); } // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support - report_java_out_of_memory("Metadata space"); + const char* space_string = (mdtype == ClassType) ? "Class Metadata space" : + "Metadata space"; + report_java_out_of_memory(space_string); if (JvmtiExport::should_post_resource_exhausted()) { JvmtiExport::post_resource_exhausted( JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, - "Metadata space"); + space_string); } - THROW_OOP_0(Universe::out_of_memory_error_perm_gen()); + if (mdtype == ClassType) { + THROW_OOP_0(Universe::out_of_memory_error_class_metaspace()); + } else { + THROW_OOP_0(Universe::out_of_memory_error_metaspace()); + } } } return Metablock::initialize(result, word_size); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/metaspace.hpp --- a/src/share/vm/memory/metaspace.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/memory/metaspace.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -193,7 +193,10 @@ }; class MetaspaceAux : AllStatic { + static size_t free_chunks_total(Metaspace::MetadataType mdtype); + static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype); + public: // Statistics for class space and data space in metaspace. // These methods iterate over the classloader data graph @@ -205,10 +208,6 @@ // Iterates over the virtual space list. static size_t reserved_in_bytes(Metaspace::MetadataType mdtype); - static size_t free_chunks_total(Metaspace::MetadataType mdtype); - static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype); - - public: // Running sum of space in all Metachunks that has been // allocated to a Metaspace. This is used instead of // iterating over all the classloaders. One for each diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/metaspaceShared.cpp --- a/src/share/vm/memory/metaspaceShared.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/memory/metaspaceShared.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -826,35 +826,15 @@ bool reading() const { return true; } }; - -// Save bounds of shared spaces mapped in. 
-static char* _ro_base = NULL; -static char* _rw_base = NULL; -static char* _md_base = NULL; -static char* _mc_base = NULL; - // Return true if given address is in the mapped shared space. bool MetaspaceShared::is_in_shared_space(const void* p) { - if (_ro_base == NULL || _rw_base == NULL) { - return false; - } else { - return ((p >= _ro_base && p < (_ro_base + SharedReadOnlySize)) || - (p >= _rw_base && p < (_rw_base + SharedReadWriteSize))); - } + return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_space(p); } void MetaspaceShared::print_shared_spaces() { - gclog_or_tty->print_cr("Shared Spaces:"); - gclog_or_tty->print(" read-only " INTPTR_FORMAT "-" INTPTR_FORMAT, - _ro_base, _ro_base + SharedReadOnlySize); - gclog_or_tty->print(" read-write " INTPTR_FORMAT "-" INTPTR_FORMAT, - _rw_base, _rw_base + SharedReadWriteSize); - gclog_or_tty->cr(); - gclog_or_tty->print(" misc-data " INTPTR_FORMAT "-" INTPTR_FORMAT, - _md_base, _md_base + SharedMiscDataSize); - gclog_or_tty->print(" misc-code " INTPTR_FORMAT "-" INTPTR_FORMAT, - _mc_base, _mc_base + SharedMiscCodeSize); - gclog_or_tty->cr(); + if (UseSharedSpaces) { + FileMapInfo::current_info()->print_shared_spaces(); + } } @@ -874,6 +854,11 @@ assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces"); + char* _ro_base = NULL; + char* _rw_base = NULL; + char* _md_base = NULL; + char* _mc_base = NULL; + // Map each shared region if ((_ro_base = mapinfo->map_region(ro)) != NULL && (_rw_base = mapinfo->map_region(rw)) != NULL && diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/oopFactory.hpp --- a/src/share/vm/memory/oopFactory.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/memory/oopFactory.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "classfile/symbolTable.hpp" #include "classfile/systemDictionary.hpp" +#include "memory/referenceType.hpp" #include "memory/universe.hpp" #include "oops/objArrayKlass.hpp" #include "oops/oop.hpp" diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/referenceProcessor.cpp --- a/src/share/vm/memory/referenceProcessor.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/memory/referenceProcessor.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,8 @@ #include "precompiled.hpp" #include "classfile/javaClasses.hpp" #include "classfile/systemDictionary.hpp" +#include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_interface/collectedHeap.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "memory/referencePolicy.hpp" @@ -180,11 +182,20 @@ // past clock value. 
} -void ReferenceProcessor::process_discovered_references( +size_t ReferenceProcessor::total_count(DiscoveredList lists[]) { + size_t total = 0; + for (uint i = 0; i < _max_num_q; ++i) { + total += lists[i].length(); + } + return total; +} + +ReferenceProcessorStats ReferenceProcessor::process_discovered_references( BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc, - AbstractRefProcTaskExecutor* task_executor) { + AbstractRefProcTaskExecutor* task_executor, + GCTimer* gc_timer) { NOT_PRODUCT(verify_ok_to_handle_reflists()); assert(!enqueuing_is_done(), "If here enqueuing should not be complete"); @@ -202,34 +213,43 @@ _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock(); bool trace_time = PrintGCDetails && PrintReferenceGC; + // Soft references + size_t soft_count = 0; { - TraceTime tt("SoftReference", trace_time, false, gclog_or_tty); - process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true, - is_alive, keep_alive, complete_gc, task_executor); + GCTraceTime tt("SoftReference", trace_time, false, gc_timer); + soft_count = + process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true, + is_alive, keep_alive, complete_gc, task_executor); } update_soft_ref_master_clock(); // Weak references + size_t weak_count = 0; { - TraceTime tt("WeakReference", trace_time, false, gclog_or_tty); - process_discovered_reflist(_discoveredWeakRefs, NULL, true, - is_alive, keep_alive, complete_gc, task_executor); + GCTraceTime tt("WeakReference", trace_time, false, gc_timer); + weak_count = + process_discovered_reflist(_discoveredWeakRefs, NULL, true, + is_alive, keep_alive, complete_gc, task_executor); } // Final references + size_t final_count = 0; { - TraceTime tt("FinalReference", trace_time, false, gclog_or_tty); - process_discovered_reflist(_discoveredFinalRefs, NULL, false, - is_alive, keep_alive, complete_gc, task_executor); + GCTraceTime tt("FinalReference", trace_time, false, gc_timer); + final_count = + process_discovered_reflist(_discoveredFinalRefs, NULL, false, + is_alive, keep_alive, complete_gc, task_executor); } // Phantom references + size_t phantom_count = 0; { - TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty); - process_discovered_reflist(_discoveredPhantomRefs, NULL, false, - is_alive, keep_alive, complete_gc, task_executor); + GCTraceTime tt("PhantomReference", trace_time, false, gc_timer); + phantom_count = + process_discovered_reflist(_discoveredPhantomRefs, NULL, false, + is_alive, keep_alive, complete_gc, task_executor); } // Weak global JNI references. It would make more sense (semantically) to @@ -238,12 +258,14 @@ // thus use JNI weak references to circumvent the phantom references and // resurrect a "post-mortem" object. 
{ - TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty); + GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer); if (task_executor != NULL) { task_executor->set_single_threaded_mode(); } process_phaseJNI(is_alive, keep_alive, complete_gc); } + + return ReferenceProcessorStats(soft_count, weak_count, final_count, phantom_count); } #ifndef PRODUCT @@ -878,7 +900,7 @@ balance_queues(_discoveredPhantomRefs); } -void +size_t ReferenceProcessor::process_discovered_reflist( DiscoveredList refs_lists[], ReferencePolicy* policy, @@ -901,12 +923,11 @@ must_balance) { balance_queues(refs_lists); } + + size_t total_list_count = total_count(refs_lists); + if (PrintReferenceGC && PrintGCDetails) { - size_t total = 0; - for (uint i = 0; i < _max_num_q; ++i) { - total += refs_lists[i].length(); - } - gclog_or_tty->print(", %u refs", total); + gclog_or_tty->print(", %u refs", total_list_count); } // Phase 1 (soft refs only): @@ -951,6 +972,8 @@ is_alive, keep_alive, complete_gc); } } + + return total_list_count; } void ReferenceProcessor::clean_up_discovered_references() { @@ -1266,14 +1289,15 @@ BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc, - YieldClosure* yield) { + YieldClosure* yield, + GCTimer* gc_timer) { NOT_PRODUCT(verify_ok_to_handle_reflists()); // Soft references { - TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC, - false, gclog_or_tty); + GCTraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC, + false, gc_timer); for (uint i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; @@ -1285,8 +1309,8 @@ // Weak references { - TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC, - false, gclog_or_tty); + GCTraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC, + false, gc_timer); for (uint i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; @@ -1298,8 +1322,8 @@ // Final references { - TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC, - false, gclog_or_tty); + GCTraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC, + false, gc_timer); for (uint i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; @@ -1311,8 +1335,8 @@ // Phantom references { - TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC, - false, gclog_or_tty); + GCTraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC, + false, gc_timer); for (uint i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/referenceProcessor.hpp --- a/src/share/vm/memory/referenceProcessor.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/memory/referenceProcessor.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,8 +26,12 @@ #define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP #include "memory/referencePolicy.hpp" +#include "memory/referenceProcessorStats.hpp" +#include "memory/referenceType.hpp" #include "oops/instanceRefKlass.hpp" +class GCTimer; + // ReferenceProcessor class encapsulates the per-"collector" processing // of java.lang.Reference objects for GC. 
The interface is useful for supporting // a generational abstraction, in particular when there are multiple @@ -204,6 +208,10 @@ }; class ReferenceProcessor : public CHeapObj { + + private: + size_t total_count(DiscoveredList lists[]); + protected: // Compatibility with pre-4965777 JDK's static bool _pending_list_uses_discovered_field; @@ -282,13 +290,13 @@ } // Process references with a certain reachability level. - void process_discovered_reflist(DiscoveredList refs_lists[], - ReferencePolicy* policy, - bool clear_referent, - BoolObjectClosure* is_alive, - OopClosure* keep_alive, - VoidClosure* complete_gc, - AbstractRefProcTaskExecutor* task_executor); + size_t process_discovered_reflist(DiscoveredList refs_lists[], + ReferencePolicy* policy, + bool clear_referent, + BoolObjectClosure* is_alive, + OopClosure* keep_alive, + VoidClosure* complete_gc, + AbstractRefProcTaskExecutor* task_executor); void process_phaseJNI(BoolObjectClosure* is_alive, OopClosure* keep_alive, @@ -349,7 +357,8 @@ void preclean_discovered_references(BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc, - YieldClosure* yield); + YieldClosure* yield, + GCTimer* gc_timer); // Delete entries in the discovered lists that have // either a null referent or are not active. Such @@ -500,12 +509,13 @@ bool discover_reference(oop obj, ReferenceType rt); // Process references found during GC (called by the garbage collector) - void process_discovered_references(BoolObjectClosure* is_alive, - OopClosure* keep_alive, - VoidClosure* complete_gc, - AbstractRefProcTaskExecutor* task_executor); + ReferenceProcessorStats + process_discovered_references(BoolObjectClosure* is_alive, + OopClosure* keep_alive, + VoidClosure* complete_gc, + AbstractRefProcTaskExecutor* task_executor, + GCTimer *gc_timer); - public: // Enqueue references at end of GC (called by the garbage collector) bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/referenceProcessorStats.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/memory/referenceProcessorStats.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_REFERENCEPROCESSORSTATS_HPP
+#define SHARE_VM_MEMORY_REFERENCEPROCESSORSTATS_HPP
+
+#include "utilities/globalDefinitions.hpp"
+
+class ReferenceProcessor;
+
+// ReferenceProcessorStats contains statistics about how many references
+// have been traversed when processing references during garbage collection.
+class ReferenceProcessorStats {
+  size_t _soft_count;
+  size_t _weak_count;
+  size_t _final_count;
+  size_t _phantom_count;
+
+ public:
+  ReferenceProcessorStats() :
+    _soft_count(0),
+    _weak_count(0),
+    _final_count(0),
+    _phantom_count(0) {}
+
+  ReferenceProcessorStats(size_t soft_count,
+                          size_t weak_count,
+                          size_t final_count,
+                          size_t phantom_count) :
+    _soft_count(soft_count),
+    _weak_count(weak_count),
+    _final_count(final_count),
+    _phantom_count(phantom_count)
+  {}
+
+  size_t soft_count() const {
+    return _soft_count;
+  }
+
+  size_t weak_count() const {
+    return _weak_count;
+  }
+
+  size_t final_count() const {
+    return _final_count;
+  }
+
+  size_t phantom_count() const {
+    return _phantom_count;
+  }
+};
+#endif
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/referenceType.hpp
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/memory/referenceType.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_REFERENCETYPE_HPP
+#define SHARE_VM_MEMORY_REFERENCETYPE_HPP
+
+#include "utilities/debug.hpp"
+
+// ReferenceType is used to distinguish between java/lang/ref/Reference subclasses
+
+enum ReferenceType {
+  REF_NONE,      // Regular class
+  REF_OTHER,     // Subclass of java/lang/ref/Reference, but not subclass of one of the classes below
+  REF_SOFT,      // Subclass of java/lang/ref/SoftReference
+  REF_WEAK,      // Subclass of java/lang/ref/WeakReference
+  REF_FINAL,     // Subclass of java/lang/ref/FinalReference
+  REF_PHANTOM    // Subclass of java/lang/ref/PhantomReference
+};
+
+#endif // SHARE_VM_MEMORY_REFERENCETYPE_HPP
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/sharedHeap.cpp
--- a/src/share/vm/memory/sharedHeap.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/memory/sharedHeap.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -47,7 +47,6 @@ SH_PS_SystemDictionary_oops_do, SH_PS_ClassLoaderDataGraph_oops_do, SH_PS_jvmti_oops_do, - SH_PS_StringTable_oops_do, SH_PS_CodeCache_oops_do, // Leave this one last. SH_PS_NumElements @@ -127,6 +126,8 @@ { if (_active) { outer->change_strong_roots_parity(); + // Zero the claimed high water mark in the StringTable + StringTable::clear_parallel_claimed_index(); } } @@ -154,14 +155,16 @@ // Global (strong) JNI handles if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do)) JNIHandles::oops_do(roots); + // All threads execute this; the individual threads are task groups. CLDToOopClosure roots_from_clds(roots); CLDToOopClosure* roots_from_clds_p = (is_scavenging ? NULL : &roots_from_clds); - if (ParallelGCThreads > 0) { - Threads::possibly_parallel_oops_do(roots, roots_from_clds_p ,code_roots); + if (CollectedHeap::use_parallel_gc_threads()) { + Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, code_roots); } else { Threads::oops_do(roots, roots_from_clds_p, code_roots); } + if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do)) ObjectSynchronizer::oops_do(roots); if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do)) @@ -189,8 +192,12 @@ } } - if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) { - if (so & SO_Strings) { + // All threads execute the following. A specific chunk of buckets + // from the StringTable are the individual tasks. + if (so & SO_Strings) { + if (CollectedHeap::use_parallel_gc_threads()) { + StringTable::possibly_parallel_oops_do(roots); + } else { StringTable::oops_do(roots); } } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/sharedHeap.hpp --- a/src/share/vm/memory/sharedHeap.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/memory/sharedHeap.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -166,11 +166,6 @@ // Same as above, restricted to a memory region. virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl) = 0; - // Iterate over all objects allocated since the last collection, calling - // "cl->do_object" on each. The heap must have been initialized properly - // to support this function, or else this call will fail. - virtual void object_iterate_since_last_GC(ObjectClosure* cl) = 0; - // Iterate over all spaces in use in the heap, in an undefined order. 
virtual void space_iterate(SpaceClosure* cl) = 0; diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/universe.cpp --- a/src/share/vm/memory/universe.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/memory/universe.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -52,7 +52,6 @@ #include "oops/oop.inline.hpp" #include "oops/typeArrayKlass.hpp" #include "prims/jvmtiRedefineClassesTrace.hpp" -#include "runtime/aprofiler.hpp" #include "runtime/arguments.hpp" #include "runtime/deoptimization.hpp" #include "runtime/fprofiler.hpp" @@ -108,9 +107,11 @@ oop Universe::_the_min_jint_string = NULL; LatestMethodOopCache* Universe::_finalizer_register_cache = NULL; LatestMethodOopCache* Universe::_loader_addClass_cache = NULL; +LatestMethodOopCache* Universe::_pd_implies_cache = NULL; ActiveMethodOopsCache* Universe::_reflect_invoke_cache = NULL; oop Universe::_out_of_memory_error_java_heap = NULL; -oop Universe::_out_of_memory_error_perm_gen = NULL; +oop Universe::_out_of_memory_error_metaspace = NULL; +oop Universe::_out_of_memory_error_class_metaspace = NULL; oop Universe::_out_of_memory_error_array_size = NULL; oop Universe::_out_of_memory_error_gc_overhead_limit = NULL; objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL; @@ -179,7 +180,8 @@ f->do_oop((oop*)&_the_null_string); f->do_oop((oop*)&_the_min_jint_string); f->do_oop((oop*)&_out_of_memory_error_java_heap); - f->do_oop((oop*)&_out_of_memory_error_perm_gen); + f->do_oop((oop*)&_out_of_memory_error_metaspace); + f->do_oop((oop*)&_out_of_memory_error_class_metaspace); f->do_oop((oop*)&_out_of_memory_error_array_size); f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit); f->do_oop((oop*)&_preallocated_out_of_memory_error_array); @@ -224,6 +226,7 @@ _finalizer_register_cache->serialize(f); _loader_addClass_cache->serialize(f); _reflect_invoke_cache->serialize(f); + _pd_implies_cache->serialize(f); } void Universe::check_alignment(uintx size, uintx alignment, const char* name) { @@ -529,7 +532,9 @@ if (vt) vt->initialize_vtable(false, CHECK); if (ko->oop_is_instance()) { InstanceKlass* ik = (InstanceKlass*)ko; - for (KlassHandle s_h(THREAD, ik->subklass()); s_h() != NULL; s_h = (THREAD, s_h()->next_sibling())) { + for (KlassHandle s_h(THREAD, ik->subklass()); + s_h() != NULL; + s_h = KlassHandle(THREAD, s_h()->next_sibling())) { reinitialize_vtable_of(s_h, CHECK); } } @@ -559,7 +564,8 @@ // a potential loop which could happen if an out of memory occurs when attempting // to allocate the backtrace. return ((throwable() != Universe::_out_of_memory_error_java_heap) && - (throwable() != Universe::_out_of_memory_error_perm_gen) && + (throwable() != Universe::_out_of_memory_error_metaspace) && + (throwable() != Universe::_out_of_memory_error_class_metaspace) && (throwable() != Universe::_out_of_memory_error_array_size) && (throwable() != Universe::_out_of_memory_error_gc_overhead_limit)); } @@ -645,6 +651,7 @@ // Metaspace::initialize_shared_spaces() tries to populate them. 
Universe::_finalizer_register_cache = new LatestMethodOopCache(); Universe::_loader_addClass_cache = new LatestMethodOopCache(); + Universe::_pd_implies_cache = new LatestMethodOopCache(); Universe::_reflect_invoke_cache = new ActiveMethodOopsCache(); if (UseSharedSpaces) { @@ -819,12 +826,14 @@ // keep the Universe::narrow_oop_base() set in Universe::reserve_heap() Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes); if (verbose) { - tty->print(", Compressed Oops with base: "PTR_FORMAT, Universe::narrow_oop_base()); + tty->print(", %s: "PTR_FORMAT, + narrow_oop_mode_to_string(HeapBasedNarrowOop), + Universe::narrow_oop_base()); } } else { Universe::set_narrow_oop_base(0); if (verbose) { - tty->print(", zero based Compressed Oops"); + tty->print(", %s", narrow_oop_mode_to_string(ZeroBasedNarrowOop)); } #ifdef _WIN64 if (!Universe::narrow_oop_use_implicit_null_checks()) { @@ -839,7 +848,7 @@ } else { Universe::set_narrow_oop_shift(0); if (verbose) { - tty->print(", 32-bits Oops"); + tty->print(", %s", narrow_oop_mode_to_string(UnscaledNarrowOop)); } } } @@ -946,6 +955,33 @@ } +const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) { + switch (mode) { + case UnscaledNarrowOop: + return "32-bits Oops"; + case ZeroBasedNarrowOop: + return "zero based Compressed Oops"; + case HeapBasedNarrowOop: + return "Compressed Oops with base"; + } + + ShouldNotReachHere(); + return ""; +} + + +Universe::NARROW_OOP_MODE Universe::narrow_oop_mode() { + if (narrow_oop_base() != 0) { + return HeapBasedNarrowOop; + } + + if (narrow_oop_shift() != 0) { + return ZeroBasedNarrowOop; + } + + return UnscaledNarrowOop; +} + void universe2_init() { EXCEPTION_MARK; @@ -980,7 +1016,8 @@ k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_OutOfMemoryError(), true, CHECK_false); k_h = instanceKlassHandle(THREAD, k); Universe::_out_of_memory_error_java_heap = k_h->allocate_instance(CHECK_false); - Universe::_out_of_memory_error_perm_gen = k_h->allocate_instance(CHECK_false); + Universe::_out_of_memory_error_metaspace = k_h->allocate_instance(CHECK_false); + Universe::_out_of_memory_error_class_metaspace = k_h->allocate_instance(CHECK_false); Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false); Universe::_out_of_memory_error_gc_overhead_limit = k_h->allocate_instance(CHECK_false); @@ -1013,7 +1050,9 @@ java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg()); msg = java_lang_String::create_from_str("Metadata space", CHECK_false); - java_lang_Throwable::set_message(Universe::_out_of_memory_error_perm_gen, msg()); + java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg()); + msg = java_lang_String::create_from_str("Class Metadata space", CHECK_false); + java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg()); msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false); java_lang_Throwable::set_message(Universe::_out_of_memory_error_array_size, msg()); @@ -1079,6 +1118,23 @@ Universe::_loader_addClass_cache->init( SystemDictionary::ClassLoader_klass(), m, CHECK_false); + // Setup method for checking protection domain + InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->link_class(CHECK_false); + m = InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())-> + find_method(vmSymbols::impliesCreateAccessControlContext_name(), + vmSymbols::void_boolean_signature()); + // Allow NULL which should only happen with bootstrapping. 
+  if (m != NULL) {
+    if (m->is_static()) {
+      // NoSuchMethodException doesn't actually work because it tries to run the
+      // function before java_lang_Class is linked. Print an error and fail initialization.
+      tty->print_cr("ProtectionDomain.impliesCreateAccessControlContext() has the wrong linkage");
+      return false; // initialization failed
+    }
+    Universe::_pd_implies_cache->init(
+      SystemDictionary::ProtectionDomain_klass(), m, CHECK_false);
+  }
+
   // The following initializes converter functions for serialization in
   // JVM.cpp. If we clean up the StrictMath code above we may want to find
   // a better solution for this as well.
@@ -1096,6 +1152,7 @@
   // Initialize performance counters for metaspaces
   MetaspaceCounters::initialize_performance_counters();
+  MemoryService::add_metaspace_memory_pools();
 
   GC_locker::unlock();  // allow gc after bootstrapping
@@ -1496,6 +1553,7 @@
 Method* LatestMethodOopCache::get_Method() {
+  if (klass() == NULL) return NULL;
   InstanceKlass* ik = InstanceKlass::cast(klass());
   Method* m = ik->method_with_idnum(method_idnum());
   assert(m != NULL, "sanity check");
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/memory/universe.hpp
--- a/src/share/vm/memory/universe.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/memory/universe.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -176,11 +176,14 @@
   static oop _the_min_jint_string;                         // A cache of "-2147483648" as a Java string
   static LatestMethodOopCache* _finalizer_register_cache;  // static method for registering finalizable objects
   static LatestMethodOopCache* _loader_addClass_cache;     // method for registering loaded classes in class loader vector
+  static LatestMethodOopCache* _pd_implies_cache;          // method for checking protection domain attributes
   static ActiveMethodOopsCache* _reflect_invoke_cache;     // method for security checks
-  static oop _out_of_memory_error_java_heap;          // preallocated error object (no backtrace)
-  static oop _out_of_memory_error_perm_gen;           // preallocated error object (no backtrace)
-  static oop _out_of_memory_error_array_size;         // preallocated error object (no backtrace)
-  static oop _out_of_memory_error_gc_overhead_limit;  // preallocated error object (no backtrace)
+  // preallocated error objects (no backtrace)
+  static oop _out_of_memory_error_java_heap;
+  static oop _out_of_memory_error_metaspace;
+  static oop _out_of_memory_error_class_metaspace;
+  static oop _out_of_memory_error_array_size;
+  static oop _out_of_memory_error_gc_overhead_limit;
 
   static Array<int>* _the_empty_int_array;    // Canonicalized int array
   static Array<u2>* _the_empty_short_array;   // Canonicalized short array
@@ -253,19 +256,6 @@
     return m;
   }
 
-  // Narrow Oop encoding mode:
-  //    0 - Use 32-bits oops without encoding when
-  //        NarrowOopHeapBaseMin + heap_size < 4Gb
-  //    1 - Use zero based compressed oops with encoding when
-  //        NarrowOopHeapBaseMin + heap_size < 32Gb
-  //    2 - Use compressed oops with heap base + encoding.
- enum NARROW_OOP_MODE { - UnscaledNarrowOop = 0, - ZeroBasedNarrowOop = 1, - HeapBasedNarrowOop = 2 - }; - static char* preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode); - static char* preferred_metaspace_base(size_t heap_size, NARROW_OOP_MODE mode); static void set_narrow_oop_base(address base) { assert(UseCompressedOops, "no compressed oops?"); _narrow_oop._base = base; @@ -346,7 +336,10 @@ static oop the_min_jint_string() { return _the_min_jint_string; } static Method* finalizer_register_method() { return _finalizer_register_cache->get_Method(); } static Method* loader_addClass_method() { return _loader_addClass_cache->get_Method(); } + + static Method* protection_domain_implies_method() { return _pd_implies_cache->get_Method(); } static ActiveMethodOopsCache* reflect_invoke_cache() { return _reflect_invoke_cache; } + static oop null_ptr_exception_instance() { return _null_ptr_exception_instance; } static oop arithmetic_exception_instance() { return _arithmetic_exception_instance; } static oop virtual_machine_error_instance() { return _virtual_machine_error_instance; } @@ -361,7 +354,8 @@ // may or may not have a backtrace. If error has a backtrace then the stack trace is already // filled in. static oop out_of_memory_error_java_heap() { return gen_out_of_memory_error(_out_of_memory_error_java_heap); } - static oop out_of_memory_error_perm_gen() { return gen_out_of_memory_error(_out_of_memory_error_perm_gen); } + static oop out_of_memory_error_metaspace() { return gen_out_of_memory_error(_out_of_memory_error_metaspace); } + static oop out_of_memory_error_class_metaspace() { return gen_out_of_memory_error(_out_of_memory_error_class_metaspace); } static oop out_of_memory_error_array_size() { return gen_out_of_memory_error(_out_of_memory_error_array_size); } static oop out_of_memory_error_gc_overhead_limit() { return gen_out_of_memory_error(_out_of_memory_error_gc_overhead_limit); } @@ -380,6 +374,21 @@ static CollectedHeap* heap() { return _collectedHeap; } // For UseCompressedOops + // Narrow Oop encoding mode: + // 0 - Use 32-bits oops without encoding when + // NarrowOopHeapBaseMin + heap_size < 4Gb + // 1 - Use zero based compressed oops with encoding when + // NarrowOopHeapBaseMin + heap_size < 32Gb + // 2 - Use compressed oops with heap base + encoding. + enum NARROW_OOP_MODE { + UnscaledNarrowOop = 0, + ZeroBasedNarrowOop = 1, + HeapBasedNarrowOop = 2 + }; + static NARROW_OOP_MODE narrow_oop_mode(); + static const char* narrow_oop_mode_to_string(NARROW_OOP_MODE mode); + static char* preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode); + static char* preferred_metaspace_base(size_t heap_size, NARROW_OOP_MODE mode); static address narrow_oop_base() { return _narrow_oop._base; } static bool is_narrow_oop_base(void* addr) { return (narrow_oop_base() == (address)addr); } static int narrow_oop_shift() { return _narrow_oop._shift; } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/oops/arrayKlass.cpp --- a/src/share/vm/oops/arrayKlass.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/oops/arrayKlass.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -71,7 +71,6 @@ } ArrayKlass::ArrayKlass(Symbol* name) { - set_alloc_size(0); set_name(name); set_super(Universe::is_bootstrapping() ? 
(Klass*)NULL : SystemDictionary::Object_klass()); @@ -161,12 +160,6 @@ } } - -void ArrayKlass::with_array_klasses_do(void f(Klass* k)) { - array_klasses_do(f); -} - - // GC support void ArrayKlass::oops_do(OopClosure* cl) { @@ -221,8 +214,8 @@ // Verification -void ArrayKlass::verify_on(outputStream* st) { - Klass::verify_on(st); +void ArrayKlass::verify_on(outputStream* st, bool check_dictionary) { + Klass::verify_on(st, check_dictionary); if (component_mirror() != NULL) { guarantee(component_mirror()->klass() != NULL, "should have a class"); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/oops/arrayKlass.hpp --- a/src/share/vm/oops/arrayKlass.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/oops/arrayKlass.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -39,7 +39,6 @@ Klass* volatile _higher_dimension; // Refers the (n+1)'th-dimensional array (if present). Klass* volatile _lower_dimension; // Refers the (n-1)'th-dimensional array (if present). int _vtable_len; // size of vtable for this klass - juint _alloc_size; // allocation profiling support oop _component_mirror; // component type, as a java/lang/Class protected: @@ -65,10 +64,6 @@ void set_lower_dimension(Klass* k) { _lower_dimension = k; } Klass** adr_lower_dimension() { return (Klass**)&this->_lower_dimension;} - // Allocation profiling support - juint alloc_size() const { return _alloc_size; } - void set_alloc_size(juint n) { _alloc_size = n; } - // offset of first element, including any padding for the sake of alignment int array_header_in_bytes() const { return layout_helper_header_size(layout_helper()); } int log2_element_size() const { return layout_helper_log2_element_size(layout_helper()); } @@ -126,7 +121,6 @@ // Iterators void array_klasses_do(void f(Klass* k)); void array_klasses_do(void f(Klass* k, TRAPS), TRAPS); - void with_array_klasses_do(void f(Klass* k)); // GC support virtual void oops_do(OopClosure* cl); @@ -152,7 +146,7 @@ void oop_print_on(oop obj, outputStream* st); // Verification - void verify_on(outputStream* st); + void verify_on(outputStream* st, bool check_dictionary); void oop_verify_on(oop obj, outputStream* st); }; diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/oops/compiledICHolder.cpp --- a/src/share/vm/oops/compiledICHolder.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/oops/compiledICHolder.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -48,8 +48,6 @@ // Verification void CompiledICHolder::verify_on(outputStream* st) { - guarantee(holder_method()->is_metadata(), "should be in metaspace"); guarantee(holder_method()->is_method(), "should be method"); - guarantee(holder_klass()->is_metadata(), "should be in metaspace"); guarantee(holder_klass()->is_klass(), "should be klass"); } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/oops/constMethod.cpp --- a/src/share/vm/oops/constMethod.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/oops/constMethod.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -440,7 +440,6 @@ void ConstMethod::verify_on(outputStream* st) { guarantee(is_constMethod(), "object must be constMethod"); - guarantee(is_metadata(), err_msg("Should be metadata " PTR_FORMAT, this)); // Verification can occur during oop construction before the method or // other fields have been initialized. 
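The verify_on() changes in the hunks above thread a check_dictionary flag through the Klass hierarchy so that verification can skip the SystemDictionary-presence check when the dictionary itself is what is being verified. A minimal sketch of the resulting calling convention; the surrounding function and its arguments are hypothetical:

  // Sketch only: normal verification checks dictionary presence by default,
  // while dictionary-driven verification passes false to avoid recursion.
  void verify_some_klass(Klass* k, bool verifying_dictionary) {
    if (verifying_dictionary) {
      k->verify(false);   // expands to verify_on(tty, false)
    } else {
      k->verify();        // check_dictionary defaults to true
    }
  }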
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/oops/constantPool.cpp
--- a/src/share/vm/oops/constantPool.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/oops/constantPool.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -2095,12 +2095,10 @@
     CPSlot entry = slot_at(i);
     if (tag.is_klass()) {
       if (entry.is_resolved()) {
-        guarantee(entry.get_klass()->is_metadata(), "should be metadata");
         guarantee(entry.get_klass()->is_klass(), "should be klass");
       }
     } else if (tag.is_unresolved_klass()) {
       if (entry.is_resolved()) {
-        guarantee(entry.get_klass()->is_metadata(), "should be metadata");
         guarantee(entry.get_klass()->is_klass(), "should be klass");
       }
     } else if (tag.is_symbol()) {
@@ -2112,13 +2110,11 @@
   if (cache() != NULL) {
     // Note: cache() can be NULL before a class is completely setup or
     // in temporary constant pools used during constant pool merging
-    guarantee(cache()->is_metadata(), "should be metadata");
     guarantee(cache()->is_constantPoolCache(), "should be constant pool cache");
   }
   if (pool_holder() != NULL) {
     // Note: pool_holder() can be NULL in temporary constant pools
     // used during constant pool merging
-    guarantee(pool_holder()->is_metadata(), "should be metadata");
     guarantee(pool_holder()->is_klass(), "should be klass");
   }
 }
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/oops/generateOopMap.cpp
--- a/src/share/vm/oops/generateOopMap.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/oops/generateOopMap.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -642,11 +642,21 @@
 //
 // CellType handling methods
 //
 
+// Allocate memory; on failure report an error, which is later surfaced
+// to Java code as a LinkageError.
+#define ALLOC_RESOURCE_ARRAY(var, type, count) \
+  var = NEW_RESOURCE_ARRAY_RETURN_NULL(type, count);                      \
+  if (var == NULL) {                                                      \
+    report_error("Cannot reserve enough memory to analyze this method"); \
+    return;                                                               \
+  }
+
+
 void GenerateOopMap::init_state() {
   _state_len     = _max_locals + _max_stack + _max_monitors;
-  _state         = NEW_RESOURCE_ARRAY(CellTypeState, _state_len);
+  ALLOC_RESOURCE_ARRAY(_state, CellTypeState, _state_len);
   memset(_state, 0, _state_len * sizeof(CellTypeState));
-  _state_vec_buf = NEW_RESOURCE_ARRAY(char, MAX3(_max_locals, _max_stack, _max_monitors) + 1/*for null terminator char */);
+  int count = MAX3(_max_locals, _max_stack, _max_monitors) + 1/*for null terminator char */;
+  ALLOC_RESOURCE_ARRAY(_state_vec_buf, char, count);
 }
 
 void GenerateOopMap::make_context_uninitialized() {
@@ -905,7 +915,7 @@
   // But cumbersome since we don't know the stack heights yet.  (Nor the
   // monitor stack heights...)
 
-  _basic_blocks = NEW_RESOURCE_ARRAY(BasicBlock, _bb_count);
+  ALLOC_RESOURCE_ARRAY(_basic_blocks, BasicBlock, _bb_count);
 
   // Make a pass through the bytecodes.  Count the number of monitorenters.
   // This can be used as an upper bound on the monitor stack depth in programs
@@ -976,8 +986,8 @@
     return;
   }
 
-  CellTypeState *basicBlockState =
-      NEW_RESOURCE_ARRAY(CellTypeState, bbNo * _state_len);
+  CellTypeState *basicBlockState;
+  ALLOC_RESOURCE_ARRAY(basicBlockState, CellTypeState, bbNo * _state_len);
   memset(basicBlockState, 0, bbNo * _state_len * sizeof(CellTypeState));
 
   // Make a pass over the basicblocks and assign their state vectors.
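The generateOopMap.cpp hunks above swap NEW_RESOURCE_ARRAY, which aborts the VM on resource-area exhaustion, for NEW_RESOURCE_ARRAY_RETURN_NULL plus an explicit check, so that analysis of a pathologically large method fails gracefully. The same pattern written out without the macro, as a sketch (the buffer name is illustrative):

  // Sketch only: allocate from the resource area, but degrade gracefully
  // instead of aborting the VM when the allocation cannot be satisfied.
  CellTypeState* buf = NEW_RESOURCE_ARRAY_RETURN_NULL(CellTypeState, _state_len);
  if (buf == NULL) {
    // report_error() records the failure; per the comment in the patch,
    // the caller later surfaces it to Java code as a LinkageError.
    report_error("Cannot reserve enough memory to analyze this method");
    return;
  }
  memset(buf, 0, _state_len * sizeof(CellTypeState));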
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/oops/instanceKlass.cpp
--- a/src/share/vm/oops/instanceKlass.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/oops/instanceKlass.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -1321,12 +1321,6 @@
   ArrayKlass::cast(array_klasses())->array_klasses_do(f);
 }
 
-
-void InstanceKlass::with_array_klasses_do(void f(Klass* k)) {
-  f(this);
-  array_klasses_do(f);
-}
-
 #ifdef ASSERT
 static int linear_search(Array<Method*>* methods, Symbol* name, Symbol* signature) {
   int len = methods->length();
@@ -3088,27 +3082,26 @@
   virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
 };
 
-void InstanceKlass::verify_on(outputStream* st) {
-  Klass::verify_on(st);
-  Thread *thread = Thread::current();
-
+void InstanceKlass::verify_on(outputStream* st, bool check_dictionary) {
 #ifndef PRODUCT
-  // Avoid redundant verifies
+  // Avoid redundant verifies; this really should be done in product builds too.
   if (_verify_count == Universe::verify_count()) return;
   _verify_count = Universe::verify_count();
 #endif
+
+  // Verify Klass
+  Klass::verify_on(st, check_dictionary);
+
+  // Verify that klass is present in SystemDictionary if not already
+  // verifying the SystemDictionary.
+  if (is_loaded() && !is_anonymous() && check_dictionary) {
     Symbol* h_name = name();
     SystemDictionary::verify_obj_klass_present(h_name, class_loader_data());
   }
 
-  // Verify static fields
-  VerifyFieldClosure blk;
-
   // Verify vtables
   if (is_linked()) {
-    ResourceMark rm(thread);
+    ResourceMark rm;
     // $$$ This used to be done only for m/s collections. Doing it
     // always seemed a valid generalization.  (DLD -- 6/00)
     vtable()->verify(st);
@@ -3116,7 +3109,6 @@
 
   // Verify first subklass
   if (subklass_oop() != NULL) {
-    guarantee(subklass_oop()->is_metadata(), "should be in metaspace");
     guarantee(subklass_oop()->is_klass(), "should be klass");
   }
 
@@ -3128,7 +3120,6 @@
       fatal(err_msg("subclass points to itself " PTR_FORMAT, sib));
     }
 
-    guarantee(sib->is_metadata(), "should be in metaspace");
     guarantee(sib->is_klass(), "should be klass");
     guarantee(sib->super() == super, "siblings should have same superklass");
   }
@@ -3164,7 +3155,6 @@
   if (methods() != NULL) {
     Array<Method*>* methods = this->methods();
     for (int j = 0; j < methods->length(); j++) {
-      guarantee(methods->at(j)->is_metadata(), "should be in metaspace");
       guarantee(methods->at(j)->is_method(), "non-method in methods array");
     }
     for (int j = 0; j < methods->length() - 1; j++) {
@@ -3202,16 +3192,13 @@
 
   // Verify other fields
   if (array_klasses() != NULL) {
-    guarantee(array_klasses()->is_metadata(), "should be in metaspace");
     guarantee(array_klasses()->is_klass(), "should be klass");
   }
   if (constants() != NULL) {
-    guarantee(constants()->is_metadata(), "should be in metaspace");
     guarantee(constants()->is_constantPool(), "should be constant pool");
   }
   const Klass* host = host_klass();
   if (host != NULL) {
-    guarantee(host->is_metadata(), "should be in metaspace");
     guarantee(host->is_klass(), "should be klass");
   }
 }
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/oops/instanceKlass.hpp
--- a/src/share/vm/oops/instanceKlass.hpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/oops/instanceKlass.hpp Tue Jul 16 12:20:08 2013 -0400
@@ -26,6 +26,7 @@
 #define SHARE_VM_OOPS_INSTANCEKLASS_HPP
 
 #include "classfile/classLoaderData.hpp"
+#include "memory/referenceType.hpp"
 #include "oops/annotations.hpp"
 #include "oops/constMethod.hpp"
 #include "oops/fieldInfo.hpp"
@@ -37,6 +38,7 @@
 #include "utilities/accessFlags.hpp"
 #include "utilities/bitMap.inline.hpp"
 #include "utilities/macros.hpp"
+#include "trace/traceMacros.hpp"
 
 // An InstanceKlass is the VM level representation of a Java class.
 // It contains all information needed for a class at execution runtime.
@@ -792,7 +794,6 @@
   void methods_do(void f(Method* method));
   void array_klasses_do(void f(Klass* k));
   void array_klasses_do(void f(Klass* k, TRAPS), TRAPS);
-  void with_array_klasses_do(void f(Klass* k));
   bool super_types_do(SuperTypeClosure* blk);
 
   // Casting from Klass*
@@ -872,10 +873,6 @@
     }
   }
 
-  // Allocation profiling support
-  juint alloc_size() const            { return _alloc_count * size_helper(); }
-  void set_alloc_size(juint n)        {}
-
   // Use this to return the size of an instance in heap words:
   int size_helper() const {
     return layout_helper_to_size_helper(layout_helper());
@@ -1048,7 +1045,7 @@
   const char* internal_name() const;
 
   // Verification
-  void verify_on(outputStream* st);
+  void verify_on(outputStream* st, bool check_dictionary);
   void oop_verify_on(oop obj, outputStream* st);
 };
 
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/oops/klass.cpp
--- a/src/share/vm/oops/klass.cpp Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/oops/klass.cpp Tue Jul 16 12:20:08 2013 -0400
@@ -37,6 +37,7 @@
 #include "oops/klass.inline.hpp"
 #include "oops/oop.inline2.hpp"
 #include "runtime/atomic.hpp"
+#include "trace/traceMacros.hpp"
 #include "utilities/stack.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
@@ -167,8 +168,7 @@
   set_subklass(NULL);
   set_next_sibling(NULL);
   set_next_link(NULL);
-  set_alloc_count(0);
-  TRACE_SET_KLASS_TRACE_ID(this, 0);
+  TRACE_INIT_ID(this);
 
   set_prototype_header(markOopDesc::prototype());
   set_biased_lock_revocation_count(0);
@@ -376,7 +376,6 @@
 }
 
 bool Klass::is_loader_alive(BoolObjectClosure* is_alive) {
-  assert(is_metadata(), "p is not meta-data");
   assert(ClassLoaderDataGraph::contains((address)this), "is in the metaspace");
 
 #ifdef ASSERT
@@ -543,12 +542,6 @@
   return NULL;
 }
 
-
-void Klass::with_array_klasses_do(void f(Klass* k)) {
-  f(this);
-}
-
-
 oop Klass::class_loader() const { return class_loader_data()->class_loader(); }
 
 const char* Klass::external_name() const {
@@ -647,27 +640,24 @@
 
 // Verification
 
-void Klass::verify_on(outputStream* st) {
-  guarantee(!Universe::heap()->is_in_reserved(this), "Shouldn't be");
-  guarantee(this->is_metadata(), "should be in metaspace");
+void Klass::verify_on(outputStream* st, bool check_dictionary) {
+  // Checking that this klass is actually in the CLD graph can be expensive,
+  // so it is worth doing only outside of product builds.
assert(ClassLoaderDataGraph::contains((address)this), "Should be"); guarantee(this->is_klass(),"should be klass"); if (super() != NULL) { - guarantee(super()->is_metadata(), "should be in metaspace"); guarantee(super()->is_klass(), "should be klass"); } if (secondary_super_cache() != NULL) { Klass* ko = secondary_super_cache(); - guarantee(ko->is_metadata(), "should be in metaspace"); guarantee(ko->is_klass(), "should be klass"); } for ( uint i = 0; i < primary_super_limit(); i++ ) { Klass* ko = _primary_supers[i]; if (ko != NULL) { - guarantee(ko->is_metadata(), "should be in metaspace"); guarantee(ko->is_klass(), "should be klass"); } } @@ -679,7 +669,6 @@ void Klass::oop_verify_on(oop obj, outputStream* st) { guarantee(obj->is_oop(), "should be oop"); - guarantee(obj->klass()->is_metadata(), "should not be in Java heap"); guarantee(obj->klass()->is_klass(), "klass field is not a klass"); } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/oops/klass.hpp --- a/src/share/vm/oops/klass.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/oops/klass.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -79,7 +79,6 @@ // [last_biased_lock_bulk_revocation_time] (64 bits) // [prototype_header] // [biased_lock_revocation_count] -// [alloc_count ] // [_modified_oops] // [_accumulated_modified_oops] // [trace_id] @@ -171,8 +170,6 @@ markOop _prototype_header; // Used when biased locking is both enabled and disabled for this type jint _biased_lock_revocation_count; - juint _alloc_count; // allocation profiling support - TRACE_DEFINE_KLASS_TRACE_ID; // Remembered sets support for the oops in the klasses. @@ -290,11 +287,6 @@ void set_next_sibling(Klass* s); public: - // Allocation profiling support - juint alloc_count() const { return _alloc_count; } - void set_alloc_count(juint n) { _alloc_count = n; } - virtual juint alloc_size() const = 0; - virtual void set_alloc_size(juint n) = 0; // Compiler support static ByteSize super_offset() { return in_ByteSize(offset_of(Klass, _super)); } @@ -677,7 +669,6 @@ #endif // INCLUDE_ALL_GCS virtual void array_klasses_do(void f(Klass* k)) {} - virtual void with_array_klasses_do(void f(Klass* k)); // Return self, except for abstract classes with exactly 1 // implementor. Then return the 1 concrete implementation. 
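A note on the Klass::verify_on() hunk above: the class-loader-data-graph membership check is deliberately an assert rather than a guarantee, because assert() compiles away in product builds while guarantee() is always evaluated. In sketch form, using the same calls as the patch:

  // Debug-only: walking the CLD graph is expensive, so this check
  // disappears from product builds.
  assert(ClassLoaderDataGraph::contains((address)this), "Should be");
  // Always checked, in product builds too.
  guarantee(this->is_klass(), "should be klass");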
@@ -703,8 +694,8 @@ virtual const char* internal_name() const = 0; // Verification - virtual void verify_on(outputStream* st); - void verify() { verify_on(tty); } + virtual void verify_on(outputStream* st, bool check_dictionary); + void verify(bool check_dictionary = true) { verify_on(tty, check_dictionary); } #ifndef PRODUCT void verify_vtable_index(int index); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/oops/method.cpp --- a/src/share/vm/oops/method.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/oops/method.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1969,14 +1969,9 @@ void Method::verify_on(outputStream* st) { guarantee(is_method(), "object must be method"); - guarantee(is_metadata(), "should be metadata"); guarantee(constants()->is_constantPool(), "should be constant pool"); - guarantee(constants()->is_metadata(), "should be metadata"); guarantee(constMethod()->is_constMethod(), "should be ConstMethod*"); - guarantee(constMethod()->is_metadata(), "should be metadata"); MethodData* md = method_data(); guarantee(md == NULL || - md->is_metadata(), "should be metadata"); - guarantee(md == NULL || md->is_methodData(), "should be method data"); } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/oops/objArrayKlass.cpp --- a/src/share/vm/oops/objArrayKlass.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/oops/objArrayKlass.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -676,11 +676,9 @@ // Verification -void ObjArrayKlass::verify_on(outputStream* st) { - ArrayKlass::verify_on(st); - guarantee(element_klass()->is_metadata(), "should be in metaspace"); +void ObjArrayKlass::verify_on(outputStream* st, bool check_dictionary) { + ArrayKlass::verify_on(st, check_dictionary); guarantee(element_klass()->is_klass(), "should be klass"); - guarantee(bottom_klass()->is_metadata(), "should be in metaspace"); guarantee(bottom_klass()->is_klass(), "should be klass"); Klass* bk = bottom_klass(); guarantee(bk->oop_is_instance() || bk->oop_is_typeArray(), "invalid bottom klass"); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/oops/objArrayKlass.hpp --- a/src/share/vm/oops/objArrayKlass.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/oops/objArrayKlass.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -151,7 +151,7 @@ const char* internal_name() const; // Verification - void verify_on(outputStream* st); + void verify_on(outputStream* st, bool check_dictionary); void oop_verify_on(oop obj, outputStream* st); }; diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/oops/symbol.cpp --- a/src/share/vm/oops/symbol.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/oops/symbol.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -32,7 +32,9 @@ #include "memory/allocation.inline.hpp" #include "memory/resourceArea.hpp" -Symbol::Symbol(const u1* name, int length, int refcount) : _refcount(refcount), _length(length) { +Symbol::Symbol(const u1* name, int length, int refcount) { + _refcount = refcount; + _length = length; _identity_hash = os::random(); for (int i = 0; i < _length; i++) { byte_at_put(i, name[i]); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/oops/symbol.hpp --- a/src/share/vm/oops/symbol.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/oops/symbol.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -27,6 +27,7 @@ #include "utilities/utf8.hpp" #include "memory/allocation.hpp" +#include "runtime/atomic.hpp" // A Symbol is a canonicalized string. // All Symbols reside in global SymbolTable and are reference counted. @@ -101,14 +102,22 @@ // type without virtual functions. 
class ClassLoaderData; -class Symbol : public MetaspaceObj { +// We separate the fields in SymbolBase from Symbol::_body so that +// Symbol::size(int) can correctly calculate the space needed. +class SymbolBase : public MetaspaceObj { + public: + ATOMIC_SHORT_PAIR( + volatile short _refcount, // needs atomic operation + unsigned short _length // number of UTF8 characters in the symbol (does not need atomic op) + ); + int _identity_hash; +}; + +class Symbol : private SymbolBase { friend class VMStructs; friend class SymbolTable; friend class MoveSymbols; private: - volatile int _refcount; - int _identity_hash; - unsigned short _length; // number of UTF8 characters in the symbol jbyte _body[1]; enum { @@ -117,7 +126,7 @@ }; static int size(int length) { - size_t sz = heap_word_size(sizeof(Symbol) + (length > 0 ? length - 1 : 0)); + size_t sz = heap_word_size(sizeof(SymbolBase) + (length > 0 ? length : 0)); return align_object_size(sz); } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/opto/c2_globals.hpp --- a/src/share/vm/opto/c2_globals.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/opto/c2_globals.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -406,10 +406,10 @@ develop(intx, WarmCallMaxSize, 999999, \ "size of the largest inlinable method") \ \ - product(intx, MaxNodeLimit, 65000, \ + product(intx, MaxNodeLimit, 80000, \ "Maximum number of nodes") \ \ - product(intx, NodeLimitFudgeFactor, 1000, \ + product(intx, NodeLimitFudgeFactor, 2000, \ "Fudge Factor for certain optimizations") \ \ product(bool, UseJumpTables, true, \ diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/opto/chaitin.cpp --- a/src/share/vm/opto/chaitin.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/opto/chaitin.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -435,6 +435,9 @@ // Insert un-coalesced copies. Visit all Phis. Where inputs to a Phi do // not match the Phi itself, insert a copy. coalesce.insert_copies(_matcher); + if (C->failing()) { + return; + } } // After aggressive coalesce, attempt a first cut at coloring. diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/opto/coalesce.cpp --- a/src/share/vm/opto/coalesce.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/opto/coalesce.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -240,6 +240,8 @@ _unique = C->unique(); for( uint i=0; i<_phc._cfg._num_blocks; i++ ) { + C->check_node_count(NodeLimitFudgeFactor, "out of nodes in coalesce"); + if (C->failing()) return; Block *b = _phc._cfg._blocks[i]; uint cnt = b->num_preds(); // Number of inputs to the Phi diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/opto/compile.cpp --- a/src/share/vm/opto/compile.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/opto/compile.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,6 +63,7 @@ #include "runtime/signature.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/timer.hpp" +#include "trace/tracing.hpp" #include "utilities/copy.hpp" #ifdef TARGET_ARCH_MODEL_x86_32 # include "adfiles/ad_x86_32.hpp" @@ -786,7 +787,7 @@ if (failing()) return; - print_method("Before RemoveUseless", 3); + print_method(PHASE_BEFORE_REMOVEUSELESS, 3); // Remove clutter produced by parsing. if (!failing()) { @@ -1801,9 +1802,9 @@ { ResourceMark rm; - print_method("Before StringOpts", 3); + print_method(PHASE_BEFORE_STRINGOPTS, 3); PhaseStringOpts pso(initial_gvn(), for_igvn()); - print_method("After StringOpts", 3); + print_method(PHASE_AFTER_STRINGOPTS, 3); } // now inline anything that we skipped the first time around @@ -1958,7 +1959,7 @@ NOT_PRODUCT( verify_graph_edges(); ) - print_method("After Parsing"); + print_method(PHASE_AFTER_PARSING); { // Iterative Global Value Numbering, including ideal transforms @@ -1969,7 +1970,7 @@ igvn.optimize(); } - print_method("Iter GVN 1", 2); + print_method(PHASE_ITER_GVN1, 2); if (failing()) return; @@ -1978,7 +1979,7 @@ inline_incrementally(igvn); } - print_method("Incremental Inline", 2); + print_method(PHASE_INCREMENTAL_INLINE, 2); if (failing()) return; @@ -1987,7 +1988,7 @@ // Inline valueOf() methods now. inline_boxing_calls(igvn); - print_method("Incremental Boxing Inline", 2); + print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2); if (failing()) return; } @@ -2002,7 +2003,7 @@ // Cleanup graph (remove dead nodes). TracePhase t2("idealLoop", &_t_idealLoop, true); PhaseIdealLoop ideal_loop( igvn, false, true ); - if (major_progress()) print_method("PhaseIdealLoop before EA", 2); + if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2); if (failing()) return; } ConnectionGraph::do_analysis(this, &igvn); @@ -2011,7 +2012,7 @@ // Optimize out fields loads from scalar replaceable allocations. 
igvn.optimize(); - print_method("Iter GVN after EA", 2); + print_method(PHASE_ITER_GVN_AFTER_EA, 2); if (failing()) return; @@ -2022,7 +2023,7 @@ igvn.set_delay_transform(false); igvn.optimize(); - print_method("Iter GVN after eliminating allocations and locks", 2); + print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2); if (failing()) return; } @@ -2038,7 +2039,7 @@ TracePhase t2("idealLoop", &_t_idealLoop, true); PhaseIdealLoop ideal_loop( igvn, true ); loop_opts_cnt--; - if (major_progress()) print_method("PhaseIdealLoop 1", 2); + if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2); if (failing()) return; } // Loop opts pass if partial peeling occurred in previous pass @@ -2046,7 +2047,7 @@ TracePhase t3("idealLoop", &_t_idealLoop, true); PhaseIdealLoop ideal_loop( igvn, false ); loop_opts_cnt--; - if (major_progress()) print_method("PhaseIdealLoop 2", 2); + if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2); if (failing()) return; } // Loop opts pass for loop-unrolling before CCP @@ -2054,7 +2055,7 @@ TracePhase t4("idealLoop", &_t_idealLoop, true); PhaseIdealLoop ideal_loop( igvn, false ); loop_opts_cnt--; - if (major_progress()) print_method("PhaseIdealLoop 3", 2); + if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2); } if (!failing()) { // Verify that last round of loop opts produced a valid graph @@ -2071,7 +2072,7 @@ TracePhase t2("ccp", &_t_ccp, true); ccp.do_transform(); } - print_method("PhaseCPP 1", 2); + print_method(PHASE_CPP1, 2); assert( true, "Break here to ccp.dump_old2new_map()"); @@ -2082,7 +2083,7 @@ igvn.optimize(); } - print_method("Iter GVN 2", 2); + print_method(PHASE_ITER_GVN2, 2); if (failing()) return; @@ -2095,7 +2096,7 @@ assert( cnt++ < 40, "infinite cycle in loop optimization" ); PhaseIdealLoop ideal_loop( igvn, true); loop_opts_cnt--; - if (major_progress()) print_method("PhaseIdealLoop iterations", 2); + if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2); if (failing()) return; } } @@ -2128,7 +2129,7 @@ } } - print_method("Optimize finished", 2); + print_method(PHASE_OPTIMIZE_FINISHED, 2); } @@ -2176,7 +2177,7 @@ cfg.GlobalCodeMotion(m,unique(),proj_list); if (failing()) return; - print_method("Global code motion", 2); + print_method(PHASE_GLOBAL_CODE_MOTION, 2); NOT_PRODUCT( verify_graph_edges(); ) @@ -2229,7 +2230,7 @@ Output(); } - print_method("Final Code"); + print_method(PHASE_FINAL_CODE); // He's dead, Jim. _cfg = (PhaseCFG*)0xdeadbeef; @@ -3316,8 +3317,16 @@ // Record the first failure reason. _failure_reason = reason; } + + EventCompilerFailure event; + if (event.should_commit()) { + event.set_compileID(Compile::compile_id()); + event.set_failure(reason); + event.commit(); + } + if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) { - C->print_method(_failure_reason); + C->print_method(PHASE_FAILURE); } _root = NULL; // flush the graph, too } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/opto/compile.hpp --- a/src/share/vm/opto/compile.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/opto/compile.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -36,10 +36,12 @@ #include "libadt/vectset.hpp" #include "memory/resourceArea.hpp" #include "opto/idealGraphPrinter.hpp" +#include "opto/phasetype.hpp" #include "opto/phase.hpp" #include "opto/regmask.hpp" #include "runtime/deoptimization.hpp" #include "runtime/vmThread.hpp" +#include "trace/tracing.hpp" class Block; class Bundle; @@ -322,6 +324,7 @@ IdealGraphPrinter* _printer; #endif + // Node management uint _unique; // Counter for unique Node indices VectorSet _dead_node_list; // Set of dead nodes @@ -573,17 +576,43 @@ bool has_method_handle_invokes() const { return _has_method_handle_invokes; } void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; } + jlong _latest_stage_start_counter; + void begin_method() { #ifndef PRODUCT if (_printer) _printer->begin_method(this); #endif + C->_latest_stage_start_counter = os::elapsed_counter(); } - void print_method(const char * name, int level = 1) { + + void print_method(CompilerPhaseType cpt, int level = 1) { + EventCompilerPhase event(UNTIMED); + if (event.should_commit()) { + event.set_starttime(C->_latest_stage_start_counter); + event.set_endtime(os::elapsed_counter()); + event.set_phase((u1) cpt); + event.set_compileID(C->_compile_id); + event.set_phaseLevel(level); + event.commit(); + } + + #ifndef PRODUCT - if (_printer) _printer->print_method(this, name, level); + if (_printer) _printer->print_method(this, CompilerPhaseTypeHelper::to_string(cpt), level); #endif + C->_latest_stage_start_counter = os::elapsed_counter(); } - void end_method() { + + void end_method(int level = 1) { + EventCompilerPhase event(UNTIMED); + if (event.should_commit()) { + event.set_starttime(C->_latest_stage_start_counter); + event.set_endtime(os::elapsed_counter()); + event.set_phase((u1) PHASE_END); + event.set_compileID(C->_compile_id); + event.set_phaseLevel(level); + event.commit(); + } #ifndef PRODUCT if (_printer) _printer->end_method(); #endif diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/opto/escape.cpp --- a/src/share/vm/opto/escape.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/opto/escape.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -277,7 +277,7 @@ // scalar replaceable objects. 
split_unique_types(alloc_worklist); if (C->failing()) return false; - C->print_method("After Escape Analysis", 2); + C->print_method(PHASE_AFTER_EA, 2); #ifdef ASSERT } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) { @@ -933,6 +933,7 @@ (call->as_CallLeaf()->_name != NULL && (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 || strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 || + strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 || strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 || strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 || strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 || diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/opto/graphKit.cpp --- a/src/share/vm/opto/graphKit.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/opto/graphKit.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -3332,9 +3332,14 @@ if (ptr == NULL) { // reduce dumb test in callers return NULL; } - ptr = ptr->uncast(); // strip a raw-to-oop cast - if (ptr == NULL) return NULL; - + if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast + ptr = ptr->in(1); + if (ptr == NULL) return NULL; + } + // Return NULL for allocations with several casts: + // j.l.reflect.Array.newInstance(jobject, jint) + // Object.clone() + // to keep more precise type from last cast. if (ptr->is_Proj()) { Node* allo = ptr->in(0); if (allo != NULL && allo->is_Allocate()) { diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/opto/library_call.cpp --- a/src/share/vm/opto/library_call.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/opto/library_call.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,6 +38,7 @@ #include "opto/subnode.hpp" #include "prims/nativeLookup.hpp" #include "runtime/sharedRuntime.hpp" +#include "trace/traceMacros.hpp" class LibraryIntrinsic : public InlineCallGenerator { // Extend the set of intrinsics known to the runtime: @@ -290,6 +291,9 @@ Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting); Node* get_key_start_from_aescrypt_object(Node* aescrypt_object); bool inline_encodeISOArray(); + bool inline_updateCRC32(); + bool inline_updateBytesCRC32(); + bool inline_updateByteBufferCRC32(); }; @@ -487,6 +491,12 @@ is_predicted = true; break; + case vmIntrinsics::_updateCRC32: + case vmIntrinsics::_updateBytesCRC32: + case vmIntrinsics::_updateByteBufferCRC32: + if (!UseCRC32Intrinsics) return NULL; + break; + default: assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility"); assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?"); @@ -806,6 +816,13 @@ case vmIntrinsics::_encodeISOArray: return inline_encodeISOArray(); + case vmIntrinsics::_updateCRC32: + return inline_updateCRC32(); + case vmIntrinsics::_updateBytesCRC32: + return inline_updateBytesCRC32(); + case vmIntrinsics::_updateByteBufferCRC32: + return inline_updateByteBufferCRC32(); + default: // If you get here, it may be that someone has added a new intrinsic // to the list in vmSymbols.hpp without implementing it here. 
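The library_call.cpp hunks above wire up the new java.util.zip.CRC32 intrinsics behind the UseCRC32Intrinsics flag. For reference, a scalar sketch of the computation the updateBytesCRC32 intrinsic accelerates (the classic bit-reflected CRC-32 with polynomial 0xEDB88320); this function is illustrative, not the stub the VM actually generates:

  // Sketch only: byte-at-a-time CRC-32, equivalent in result to the
  // hardware-assisted stub selected when UseCRC32Intrinsics is on.
  static juint crc32_update_bytes(juint crc, const jubyte* buf, int len) {
    crc = ~crc;                                  // state is kept inverted
    for (int i = 0; i < len; i++) {
      crc ^= buf[i];
      for (int bit = 0; bit < 8; bit++) {
        crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0u);
      }
    }
    return ~crc;
  }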
@@ -883,7 +900,7 @@ IfNode* iff = create_and_map_if(control(), test, true_prob, COUNT_UNKNOWN); - Node* if_slow = _gvn.transform( new (C) IfTrueNode(iff) ); + Node* if_slow = _gvn.transform(new (C) IfTrueNode(iff)); if (if_slow == top()) { // The slow branch is never taken. No need to build this guard. return NULL; @@ -892,7 +909,7 @@ if (region != NULL) region->add_req(if_slow); - Node* if_fast = _gvn.transform( new (C) IfFalseNode(iff) ); + Node* if_fast = _gvn.transform(new (C) IfFalseNode(iff)); set_control(if_fast); return if_slow; @@ -911,8 +928,8 @@ return NULL; // already stopped if (_gvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint] return NULL; // index is already adequately typed - Node* cmp_lt = _gvn.transform( new (C) CmpINode(index, intcon(0)) ); - Node* bol_lt = _gvn.transform( new (C) BoolNode(cmp_lt, BoolTest::lt) ); + Node* cmp_lt = _gvn.transform(new (C) CmpINode(index, intcon(0))); + Node* bol_lt = _gvn.transform(new (C) BoolNode(cmp_lt, BoolTest::lt)); Node* is_neg = generate_guard(bol_lt, region, PROB_MIN); if (is_neg != NULL && pos_index != NULL) { // Emulate effect of Parse::adjust_map_after_if. @@ -929,9 +946,9 @@ return NULL; // already stopped if (_gvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint] return NULL; // index is already adequately typed - Node* cmp_le = _gvn.transform( new (C) CmpINode(index, intcon(0)) ); + Node* cmp_le = _gvn.transform(new (C) CmpINode(index, intcon(0))); BoolTest::mask le_or_eq = (never_negative ? BoolTest::eq : BoolTest::le); - Node* bol_le = _gvn.transform( new (C) BoolNode(cmp_le, le_or_eq) ); + Node* bol_le = _gvn.transform(new (C) BoolNode(cmp_le, le_or_eq)); Node* is_notp = generate_guard(bol_le, NULL, PROB_MIN); if (is_notp != NULL && pos_index != NULL) { // Emulate effect of Parse::adjust_map_after_if. 
@@ -967,9 +984,9 @@ return NULL; // common case of whole-array copy Node* last = subseq_length; if (!zero_offset) // last += offset - last = _gvn.transform( new (C) AddINode(last, offset)); - Node* cmp_lt = _gvn.transform( new (C) CmpUNode(array_length, last) ); - Node* bol_lt = _gvn.transform( new (C) BoolNode(cmp_lt, BoolTest::lt) ); + last = _gvn.transform(new (C) AddINode(last, offset)); + Node* cmp_lt = _gvn.transform(new (C) CmpUNode(array_length, last)); + Node* bol_lt = _gvn.transform(new (C) BoolNode(cmp_lt, BoolTest::lt)); Node* is_over = generate_guard(bol_lt, region, PROB_MIN); return is_over; } @@ -1150,8 +1167,8 @@ Node* argument_cnt = load_String_length(no_ctrl, argument); // Check for receiver count != argument count - Node* cmp = _gvn.transform( new(C) CmpINode(receiver_cnt, argument_cnt) ); - Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::ne) ); + Node* cmp = _gvn.transform(new(C) CmpINode(receiver_cnt, argument_cnt)); + Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::ne)); Node* if_ne = generate_slow_guard(bol, NULL); if (if_ne != NULL) { phi->init_req(4, intcon(0)); @@ -1257,7 +1274,7 @@ Node* sourceOffset = load_String_offset(no_ctrl, string_object); Node* sourceCount = load_String_length(no_ctrl, string_object); - Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array, true)) ); + Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array, true))); jint target_length = target_array->length(); const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin)); const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot); @@ -1364,8 +1381,8 @@ Node* substr_cnt = load_String_length(no_ctrl, arg); // Check for substr count > string count - Node* cmp = _gvn.transform( new(C) CmpINode(substr_cnt, source_cnt) ); - Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::gt) ); + Node* cmp = _gvn.transform(new(C) CmpINode(substr_cnt, source_cnt)); + Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::gt)); Node* if_gt = generate_slow_guard(bol, NULL); if (if_gt != NULL) { result_phi->init_req(2, intcon(-1)); @@ -1374,8 +1391,8 @@ if (!stopped()) { // Check for substr count == 0 - cmp = _gvn.transform( new(C) CmpINode(substr_cnt, intcon(0)) ); - bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::eq) ); + cmp = _gvn.transform(new(C) CmpINode(substr_cnt, intcon(0))); + bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::eq)); Node* if_zero = generate_slow_guard(bol, NULL); if (if_zero != NULL) { result_phi->init_req(3, intcon(0)); @@ -1551,7 +1568,7 @@ // Check PI/4 : abs(arg) Node *cmp = _gvn.transform(new (C) CmpDNode(pi4,abs)); // Check: If PI/4 < abs(arg) then go slow - Node *bol = _gvn.transform( new (C) BoolNode( cmp, BoolTest::lt ) ); + Node *bol = _gvn.transform(new (C) BoolNode( cmp, BoolTest::lt )); // Branch either way IfNode *iff = create_and_xform_if(control(),bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); set_control(opt_iff(r,iff)); @@ -1616,8 +1633,8 @@ // to the runtime to properly handle corner cases IfNode* iff = create_and_xform_if(control(), bolisnum, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); - Node* if_slow = _gvn.transform( new (C) IfFalseNode(iff) ); - Node* if_fast = _gvn.transform( new (C) IfTrueNode(iff) ); + Node* if_slow = _gvn.transform(new (C) IfFalseNode(iff)); + Node* if_fast = _gvn.transform(new (C) IfTrueNode(iff)); if (!if_slow->is_top()) { RegionNode* 
result_region = new (C) RegionNode(3); @@ -1703,42 +1720,42 @@ // Check x:0 Node *cmp = _gvn.transform(new (C) CmpDNode(x, zeronode)); // Check: If (x<=0) then go complex path - Node *bol1 = _gvn.transform( new (C) BoolNode( cmp, BoolTest::le ) ); + Node *bol1 = _gvn.transform(new (C) BoolNode( cmp, BoolTest::le )); // Branch either way IfNode *if1 = create_and_xform_if(control(),bol1, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN); // Fast path taken; set region slot 3 - Node *fast_taken = _gvn.transform( new (C) IfFalseNode(if1) ); + Node *fast_taken = _gvn.transform(new (C) IfFalseNode(if1)); r->init_req(3,fast_taken); // Capture fast-control // Fast path not-taken, i.e. slow path - Node *complex_path = _gvn.transform( new (C) IfTrueNode(if1) ); + Node *complex_path = _gvn.transform(new (C) IfTrueNode(if1)); // Set fast path result - Node *fast_result = _gvn.transform( new (C) PowDNode(C, control(), x, y) ); + Node *fast_result = _gvn.transform(new (C) PowDNode(C, control(), x, y)); phi->init_req(3, fast_result); // Complex path // Build the second if node (if y is long) // Node for (long)y - Node *longy = _gvn.transform( new (C) ConvD2LNode(y)); + Node *longy = _gvn.transform(new (C) ConvD2LNode(y)); // Node for (double)((long) y) - Node *doublelongy= _gvn.transform( new (C) ConvL2DNode(longy)); + Node *doublelongy= _gvn.transform(new (C) ConvL2DNode(longy)); // Check (double)((long) y) : y Node *cmplongy= _gvn.transform(new (C) CmpDNode(doublelongy, y)); // Check if (y isn't long) then go to slow path - Node *bol2 = _gvn.transform( new (C) BoolNode( cmplongy, BoolTest::ne ) ); + Node *bol2 = _gvn.transform(new (C) BoolNode( cmplongy, BoolTest::ne )); // Branch either way IfNode *if2 = create_and_xform_if(complex_path,bol2, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN); - Node* ylong_path = _gvn.transform( new (C) IfFalseNode(if2)); - - Node *slow_path = _gvn.transform( new (C) IfTrueNode(if2) ); + Node* ylong_path = _gvn.transform(new (C) IfFalseNode(if2)); + + Node *slow_path = _gvn.transform(new (C) IfTrueNode(if2)); // Calculate DPow(abs(x), y)*(1 & (long)y) // Node for constant 1 Node *conone = longcon(1); // 1& (long)y - Node *signnode= _gvn.transform( new (C) AndLNode(conone, longy) ); + Node *signnode= _gvn.transform(new (C) AndLNode(conone, longy)); // A huge number is always even. Detect a huge number by checking // if y + 1 == y and set integer to be tested for parity to 0. 
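The next hunk continues this parity discussion. As a plain-Java sketch of the sign correction being assembled here (signedPow is a hypothetical name, and y is assumed to already be an exact integer, since the non-integer case takes the slow path):

    final class PowSketch {
        // pow for x <= 0: |x|^y is negated iff the integer y is odd.
        static double signedPow(double x, double y) {
            double r = Math.pow(Math.abs(x), y);
            // (long) y saturates to Long.MAX_VALUE (which is odd) for huge y,
            // so detect "huge" via y + 1 == y: any double that large is an
            // even integer, and its parity is forced to 0.
            long parity = (y + 1 == y) ? 0 : (((long) y) & 1);
            return (parity == 1) ? -r : r;
        }
    }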
@@ -1746,9 +1763,9 @@ // (long)9.223372036854776E18 = max_jlong // (double)(long)9.223372036854776E18 = 9.223372036854776E18 // max_jlong is odd but 9.223372036854776E18 is even - Node* yplus1 = _gvn.transform( new (C) AddDNode(y, makecon(TypeD::make(1)))); + Node* yplus1 = _gvn.transform(new (C) AddDNode(y, makecon(TypeD::make(1)))); Node *cmpyplus1= _gvn.transform(new (C) CmpDNode(yplus1, y)); - Node *bolyplus1 = _gvn.transform( new (C) BoolNode( cmpyplus1, BoolTest::eq ) ); + Node *bolyplus1 = _gvn.transform(new (C) BoolNode( cmpyplus1, BoolTest::eq )); Node* correctedsign = NULL; if (ConditionalMoveLimit != 0) { correctedsign = _gvn.transform( CMoveNode::make(C, NULL, bolyplus1, signnode, longcon(0), TypeLong::LONG)); @@ -1756,8 +1773,8 @@ IfNode *ifyplus1 = create_and_xform_if(ylong_path,bolyplus1, PROB_FAIR, COUNT_UNKNOWN); RegionNode *r = new (C) RegionNode(3); Node *phi = new (C) PhiNode(r, TypeLong::LONG); - r->init_req(1, _gvn.transform( new (C) IfFalseNode(ifyplus1))); - r->init_req(2, _gvn.transform( new (C) IfTrueNode(ifyplus1))); + r->init_req(1, _gvn.transform(new (C) IfFalseNode(ifyplus1))); + r->init_req(2, _gvn.transform(new (C) IfTrueNode(ifyplus1))); phi->init_req(1, signnode); phi->init_req(2, longcon(0)); correctedsign = _gvn.transform(phi); @@ -1770,11 +1787,11 @@ // Check (1&(long)y)==0? Node *cmpeq1 = _gvn.transform(new (C) CmpLNode(correctedsign, conzero)); // Check if (1&(long)y)!=0?, if so the result is negative - Node *bol3 = _gvn.transform( new (C) BoolNode( cmpeq1, BoolTest::ne ) ); + Node *bol3 = _gvn.transform(new (C) BoolNode( cmpeq1, BoolTest::ne )); // abs(x) - Node *absx=_gvn.transform( new (C) AbsDNode(x)); + Node *absx=_gvn.transform(new (C) AbsDNode(x)); // abs(x)^y - Node *absxpowy = _gvn.transform( new (C) PowDNode(C, control(), absx, y) ); + Node *absxpowy = _gvn.transform(new (C) PowDNode(C, control(), absx, y)); // -abs(x)^y Node *negabsxpowy = _gvn.transform(new (C) NegDNode (absxpowy)); // (1&(long)y)==1?-DPow(abs(x), y):DPow(abs(x), y) @@ -1785,8 +1802,8 @@ IfNode *ifyeven = create_and_xform_if(ylong_path,bol3, PROB_FAIR, COUNT_UNKNOWN); RegionNode *r = new (C) RegionNode(3); Node *phi = new (C) PhiNode(r, Type::DOUBLE); - r->init_req(1, _gvn.transform( new (C) IfFalseNode(ifyeven))); - r->init_req(2, _gvn.transform( new (C) IfTrueNode(ifyeven))); + r->init_req(1, _gvn.transform(new (C) IfFalseNode(ifyeven))); + r->init_req(2, _gvn.transform(new (C) IfTrueNode(ifyeven))); phi->init_req(1, absxpowy); phi->init_req(2, negabsxpowy); signresult = _gvn.transform(phi); @@ -1919,7 +1936,7 @@ int cmp_op = Op_CmpI; Node* xkey = xvalue; Node* ykey = yvalue; - Node* ideal_cmpxy = _gvn.transform( new(C) CmpINode(xkey, ykey) ); + Node* ideal_cmpxy = _gvn.transform(new(C) CmpINode(xkey, ykey)); if (ideal_cmpxy->is_Cmp()) { // E.g., if we have CmpI(length - offset, count), // it might idealize to CmpI(length, count + offset) @@ -2012,7 +2029,7 @@ default: if (cmpxy == NULL) cmpxy = ideal_cmpxy; - best_bol = _gvn.transform( new(C) BoolNode(cmpxy, BoolTest::lt) ); + best_bol = _gvn.transform(new(C) BoolNode(cmpxy, BoolTest::lt)); // and fall through: case BoolTest::lt: // x < y case BoolTest::le: // x <= y @@ -2072,7 +2089,7 @@ return Type::AnyPtr; } else if (base_type == TypePtr::NULL_PTR) { // Since this is a NULL+long form, we have to switch to a rawptr. 
- base = _gvn.transform( new (C) CastX2PNode(offset) ); + base = _gvn.transform(new (C) CastX2PNode(offset)); offset = MakeConX(0); return Type::RawPtr; } else if (base_type->base() == Type::RawPtr) { @@ -2466,7 +2483,7 @@ case T_ADDRESS: // Repackage the long as a pointer. val = ConvL2X(val); - val = _gvn.transform( new (C) CastX2PNode(val) ); + val = _gvn.transform(new (C) CastX2PNode(val)); break; } @@ -2774,7 +2791,7 @@ // SCMemProjNodes represent the memory state of a LoadStore. Their // main role is to prevent LoadStore nodes from being optimized away // when their results aren't used. - Node* proj = _gvn.transform( new (C) SCMemProjNode(load_store)); + Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store)); set_memory(proj, alias_idx); // Add the trailing membar surrounding the access @@ -3009,8 +3026,8 @@ Node* rec_thr = argument(0); Node* tls_ptr = NULL; Node* cur_thr = generate_current_thread(tls_ptr); - Node* cmp_thr = _gvn.transform( new (C) CmpPNode(cur_thr, rec_thr) ); - Node* bol_thr = _gvn.transform( new (C) BoolNode(cmp_thr, BoolTest::ne) ); + Node* cmp_thr = _gvn.transform(new (C) CmpPNode(cur_thr, rec_thr)); + Node* bol_thr = _gvn.transform(new (C) BoolNode(cmp_thr, BoolTest::ne)); generate_slow_guard(bol_thr, slow_region); @@ -3021,36 +3038,36 @@ // Set the control input on the field _interrupted read to prevent it floating up. Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT); - Node* cmp_bit = _gvn.transform( new (C) CmpINode(int_bit, intcon(0)) ); - Node* bol_bit = _gvn.transform( new (C) BoolNode(cmp_bit, BoolTest::ne) ); + Node* cmp_bit = _gvn.transform(new (C) CmpINode(int_bit, intcon(0))); + Node* bol_bit = _gvn.transform(new (C) BoolNode(cmp_bit, BoolTest::ne)); IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN); // First fast path: if (!TLS._interrupted) return false; - Node* false_bit = _gvn.transform( new (C) IfFalseNode(iff_bit) ); + Node* false_bit = _gvn.transform(new (C) IfFalseNode(iff_bit)); result_rgn->init_req(no_int_result_path, false_bit); result_val->init_req(no_int_result_path, intcon(0)); // drop through to next case - set_control( _gvn.transform(new (C) IfTrueNode(iff_bit)) ); + set_control( _gvn.transform(new (C) IfTrueNode(iff_bit))); // (c) Or, if interrupt bit is set and clear_int is false, use 2nd fast path. Node* clr_arg = argument(1); - Node* cmp_arg = _gvn.transform( new (C) CmpINode(clr_arg, intcon(0)) ); - Node* bol_arg = _gvn.transform( new (C) BoolNode(cmp_arg, BoolTest::ne) ); + Node* cmp_arg = _gvn.transform(new (C) CmpINode(clr_arg, intcon(0))); + Node* bol_arg = _gvn.transform(new (C) BoolNode(cmp_arg, BoolTest::ne)); IfNode* iff_arg = create_and_map_if(control(), bol_arg, PROB_FAIR, COUNT_UNKNOWN); // Second fast path: ... else if (!clear_int) return true; - Node* false_arg = _gvn.transform( new (C) IfFalseNode(iff_arg) ); + Node* false_arg = _gvn.transform(new (C) IfFalseNode(iff_arg)); result_rgn->init_req(no_clear_result_path, false_arg); result_val->init_req(no_clear_result_path, intcon(1)); // drop through to next case - set_control( _gvn.transform(new (C) IfTrueNode(iff_arg)) ); + set_control( _gvn.transform(new (C) IfTrueNode(iff_arg))); // (d) Otherwise, go to the slow path. slow_region->add_req(control()); - set_control( _gvn.transform(slow_region) ); + set_control( _gvn.transform(slow_region)); if (stopped()) { // There is no slow path. 
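The control flow assembled above for Thread.isInterrupted corresponds roughly to the following Java; interruptedBit() and slowPath() are hypothetical stand-ins for the TLS._interrupted load and the runtime call:

    final class IsInterruptedSketch {
        static boolean isInterrupted(Thread receiver, boolean clearInt) {
            if (receiver != Thread.currentThread()) return slowPath(receiver, clearInt); // (a) not the current thread
            if (!interruptedBit(receiver)) return false; // (b) first fast path: bit clear
            if (!clearInt) return true;                  // (c) second fast path: no clearing requested
            return slowPath(receiver, clearInt);         // (d) must clear the bit in the VM
        }
        // Placeholder implementations so the sketch compiles.
        static boolean interruptedBit(Thread t) { return t.isInterrupted(); }
        static boolean slowPath(Thread t, boolean clearInt) {
            return (clearInt && t == Thread.currentThread()) ? Thread.interrupted() : t.isInterrupted();
        }
    }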
@@ -3106,7 +3123,7 @@ if (region == NULL) never_see_null = true; Node* p = basic_plus_adr(mirror, offset); const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL; - Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type) ); + Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type)); Node* null_ctl = top(); kls = null_check_oop(kls, &null_ctl, never_see_null); if (region != NULL) { @@ -3128,9 +3145,9 @@ Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT); Node* mask = intcon(modifier_mask); Node* bits = intcon(modifier_bits); - Node* mbit = _gvn.transform( new (C) AndINode(mods, mask) ); - Node* cmp = _gvn.transform( new (C) CmpINode(mbit, bits) ); - Node* bol = _gvn.transform( new (C) BoolNode(cmp, BoolTest::ne) ); + Node* mbit = _gvn.transform(new (C) AndINode(mods, mask)); + Node* cmp = _gvn.transform(new (C) CmpINode(mbit, bits)); + Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne)); return generate_fair_guard(bol, region); } Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) { @@ -3281,7 +3298,7 @@ phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror()))); // If we fall through, it's a plain class. Get its _super. p = basic_plus_adr(kls, in_bytes(Klass::super_offset())); - kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL) ); + kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL)); null_ctl = top(); kls = null_check_oop(kls, &null_ctl); if (null_ctl != top()) { @@ -3394,8 +3411,8 @@ set_control(region->in(_prim_0_path)); // go back to first null check if (!stopped()) { // Since superc is primitive, make a guard for the superc==subc case. - Node* cmp_eq = _gvn.transform( new (C) CmpPNode(args[0], args[1]) ); - Node* bol_eq = _gvn.transform( new (C) BoolNode(cmp_eq, BoolTest::eq) ); + Node* cmp_eq = _gvn.transform(new (C) CmpPNode(args[0], args[1])); + Node* bol_eq = _gvn.transform(new (C) BoolNode(cmp_eq, BoolTest::eq)); generate_guard(bol_eq, region, PROB_FAIR); if (region->req() == PATH_LIMIT+1) { // A guard was added. If the added guard is taken, superc==subc. @@ -3460,11 +3477,11 @@ ? ((jint)Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift) : Klass::_lh_neutral_value); - Node* cmp = _gvn.transform( new(C) CmpINode(layout_val, intcon(nval)) ); + Node* cmp = _gvn.transform(new(C) CmpINode(layout_val, intcon(nval))); BoolTest::mask btest = BoolTest::lt; // correct for testing is_[obj]array // invert the test if we are looking for a non-array if (not_array) btest = BoolTest(btest).negate(); - Node* bol = _gvn.transform( new(C) BoolNode(cmp, btest) ); + Node* bol = _gvn.transform(new(C) BoolNode(cmp, btest)); return generate_fair_guard(bol, region); } @@ -3524,7 +3541,7 @@ // Return the combined state. 
set_i_o( _gvn.transform(result_io) ); - set_all_memory( _gvn.transform(result_mem) ); + set_all_memory( _gvn.transform(result_mem)); C->set_has_split_ifs(true); // Has chance for split-if optimization set_result(result_reg, result_val); @@ -3677,8 +3694,8 @@ const TypePtr* native_call_addr = TypeMetadataPtr::make(method); Node* native_call = makecon(native_call_addr); - Node* chk_native = _gvn.transform( new(C) CmpPNode(target_call, native_call) ); - Node* test_native = _gvn.transform( new(C) BoolNode(chk_native, BoolTest::ne) ); + Node* chk_native = _gvn.transform(new(C) CmpPNode(target_call, native_call)); + Node* test_native = _gvn.transform(new(C) BoolNode(chk_native, BoolTest::ne)); return generate_slow_guard(test_native, slow_region); } @@ -3799,10 +3816,10 @@ // Test the header to see if it is unlocked. Node *lock_mask = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place); - Node *lmasked_header = _gvn.transform( new (C) AndXNode(header, lock_mask) ); + Node *lmasked_header = _gvn.transform(new (C) AndXNode(header, lock_mask)); Node *unlocked_val = _gvn.MakeConX(markOopDesc::unlocked_value); - Node *chk_unlocked = _gvn.transform( new (C) CmpXNode( lmasked_header, unlocked_val)); - Node *test_unlocked = _gvn.transform( new (C) BoolNode( chk_unlocked, BoolTest::ne) ); + Node *chk_unlocked = _gvn.transform(new (C) CmpXNode( lmasked_header, unlocked_val)); + Node *test_unlocked = _gvn.transform(new (C) BoolNode( chk_unlocked, BoolTest::ne)); generate_slow_guard(test_unlocked, slow_region); @@ -3812,17 +3829,17 @@ // vm: see markOop.hpp. Node *hash_mask = _gvn.intcon(markOopDesc::hash_mask); Node *hash_shift = _gvn.intcon(markOopDesc::hash_shift); - Node *hshifted_header= _gvn.transform( new (C) URShiftXNode(header, hash_shift) ); + Node *hshifted_header= _gvn.transform(new (C) URShiftXNode(header, hash_shift)); // This hack lets the hash bits live anywhere in the mark object now, as long // as the shift drops the relevant bits into the low 32 bits. Note that // Java spec says that HashCode is an int so there's no point in capturing // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build). hshifted_header = ConvX2I(hshifted_header); - Node *hash_val = _gvn.transform( new (C) AndINode(hshifted_header, hash_mask) ); + Node *hash_val = _gvn.transform(new (C) AndINode(hshifted_header, hash_mask)); Node *no_hash_val = _gvn.intcon(markOopDesc::no_hash); - Node *chk_assigned = _gvn.transform( new (C) CmpINode( hash_val, no_hash_val)); - Node *test_assigned = _gvn.transform( new (C) BoolNode( chk_assigned, BoolTest::eq) ); + Node *chk_assigned = _gvn.transform(new (C) CmpINode( hash_val, no_hash_val)); + Node *test_assigned = _gvn.transform(new (C) BoolNode( chk_assigned, BoolTest::eq)); generate_slow_guard(test_assigned, slow_region); @@ -3853,7 +3870,7 @@ // Return the combined state. 
set_i_o( _gvn.transform(result_io) ); - set_all_memory( _gvn.transform(result_mem) ); + set_all_memory( _gvn.transform(result_mem)); set_result(result_reg, result_val); return true; @@ -3981,7 +3998,7 @@ Node *opt_isnan = _gvn.transform(ifisnan); assert( opt_isnan->is_If(), "Expect an IfNode"); IfNode *opt_ifisnan = (IfNode*)opt_isnan; - Node *iftrue = _gvn.transform( new (C) IfTrueNode(opt_ifisnan) ); + Node *iftrue = _gvn.transform(new (C) IfTrueNode(opt_ifisnan)); set_control(iftrue); @@ -4022,7 +4039,7 @@ Node *opt_isnan = _gvn.transform(ifisnan); assert( opt_isnan->is_If(), "Expect an IfNode"); IfNode *opt_ifisnan = (IfNode*)opt_isnan; - Node *iftrue = _gvn.transform( new (C) IfTrueNode(opt_ifisnan) ); + Node *iftrue = _gvn.transform(new (C) IfTrueNode(opt_ifisnan)); set_control(iftrue); @@ -4151,8 +4168,8 @@ // Compute the length also, if needed: Node* countx = size; - countx = _gvn.transform( new (C) SubXNode(countx, MakeConX(base_off)) ); - countx = _gvn.transform( new (C) URShiftXNode(countx, intcon(LogBytesPerLong) )); + countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(base_off))); + countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong) )); const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM; bool disjoint_bases = true; @@ -4356,9 +4373,9 @@ } // Return the combined state. - set_control( _gvn.transform(result_reg) ); - set_i_o( _gvn.transform(result_i_o) ); - set_all_memory( _gvn.transform(result_mem) ); + set_control( _gvn.transform(result_reg)); + set_i_o( _gvn.transform(result_i_o)); + set_all_memory( _gvn.transform(result_mem)); } // original reexecute is set back here set_result(_gvn.transform(result_val)); @@ -4683,8 +4700,8 @@ // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length]. Node* dest_size = alloc->in(AllocateNode::AllocSize); Node* dest_length = alloc->in(AllocateNode::ALength); - Node* dest_tail = _gvn.transform( new(C) AddINode(dest_offset, - copy_length) ); + Node* dest_tail = _gvn.transform(new(C) AddINode(dest_offset, + copy_length)); // If there is a head section that needs zeroing, do it now. if (find_int_con(dest_offset, -1) != 0) { @@ -4700,8 +4717,8 @@ // the copy to a more hardware-friendly word size of 64 bits. Node* tail_ctl = NULL; if (!stopped() && !dest_tail->eqv_uncast(dest_length)) { - Node* cmp_lt = _gvn.transform( new(C) CmpINode(dest_tail, dest_length) ); - Node* bol_lt = _gvn.transform( new(C) BoolNode(cmp_lt, BoolTest::lt) ); + Node* cmp_lt = _gvn.transform(new(C) CmpINode(dest_tail, dest_length)); + Node* bol_lt = _gvn.transform(new(C) BoolNode(cmp_lt, BoolTest::lt)); tail_ctl = generate_slow_guard(bol_lt, NULL); assert(tail_ctl != NULL || !stopped(), "must be an outcome"); } @@ -4744,7 +4761,7 @@ dest_size); done_ctl->init_req(2, control()); done_mem->init_req(2, memory(adr_type)); - set_control( _gvn.transform(done_ctl) ); + set_control( _gvn.transform(done_ctl)); set_memory( _gvn.transform(done_mem), adr_type ); } } @@ -4831,18 +4848,18 @@ // Clean up after the checked call. // The returned value is either 0 or -1^K, // where K = number of partially transferred array elements. - Node* cmp = _gvn.transform( new(C) CmpINode(checked_value, intcon(0)) ); - Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::eq) ); + Node* cmp = _gvn.transform(new(C) CmpINode(checked_value, intcon(0))); + Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::eq)); IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN); // If it is 0, we are done, so transfer to the end. 
- Node* checks_done = _gvn.transform( new(C) IfTrueNode(iff) ); + Node* checks_done = _gvn.transform(new(C) IfTrueNode(iff)); result_region->init_req(checked_path, checks_done); result_i_o ->init_req(checked_path, checked_i_o); result_memory->init_req(checked_path, checked_mem); // If it is not zero, merge into the slow call. - set_control( _gvn.transform( new(C) IfFalseNode(iff) )); + set_control( _gvn.transform(new(C) IfFalseNode(iff) )); RegionNode* slow_reg2 = new(C) RegionNode(3); PhiNode* slow_i_o2 = new(C) PhiNode(slow_reg2, Type::ABIO); PhiNode* slow_mem2 = new(C) PhiNode(slow_reg2, Type::MEMORY, adr_type); @@ -4865,16 +4882,16 @@ } else { // We must continue the copy exactly where it failed, or else // another thread might see the wrong number of writes to dest. - Node* checked_offset = _gvn.transform( new(C) XorINode(checked_value, intcon(-1)) ); + Node* checked_offset = _gvn.transform(new(C) XorINode(checked_value, intcon(-1))); Node* slow_offset = new(C) PhiNode(slow_reg2, TypeInt::INT); slow_offset->init_req(1, intcon(0)); slow_offset->init_req(2, checked_offset); slow_offset = _gvn.transform(slow_offset); // Adjust the arguments by the conditionally incoming offset. - Node* src_off_plus = _gvn.transform( new(C) AddINode(src_offset, slow_offset) ); - Node* dest_off_plus = _gvn.transform( new(C) AddINode(dest_offset, slow_offset) ); - Node* length_minus = _gvn.transform( new(C) SubINode(copy_length, slow_offset) ); + Node* src_off_plus = _gvn.transform(new(C) AddINode(src_offset, slow_offset)); + Node* dest_off_plus = _gvn.transform(new(C) AddINode(dest_offset, slow_offset)); + Node* length_minus = _gvn.transform(new(C) SubINode(copy_length, slow_offset)); // Tweak the node variables to adjust the code produced below: src_offset = src_off_plus; @@ -4913,7 +4930,7 @@ } // Finished; return the combined state. 
- set_control( _gvn.transform(result_region) ); + set_control( _gvn.transform(result_region)); set_i_o( _gvn.transform(result_i_o) ); set_memory( _gvn.transform(result_memory), adr_type ); @@ -5095,10 +5112,10 @@ int end_round = (-1 << scale) & (BytesPerLong - 1); Node* end = ConvI2X(slice_len); if (scale != 0) - end = _gvn.transform( new(C) LShiftXNode(end, intcon(scale) )); + end = _gvn.transform(new(C) LShiftXNode(end, intcon(scale) )); end_base += end_round; - end = _gvn.transform( new(C) AddXNode(end, MakeConX(end_base)) ); - end = _gvn.transform( new(C) AndXNode(end, MakeConX(~end_round)) ); + end = _gvn.transform(new(C) AddXNode(end, MakeConX(end_base))); + end = _gvn.transform(new(C) AndXNode(end, MakeConX(~end_round))); mem = ClearArrayNode::clear_memory(control(), mem, dest, start_con, end, &_gvn); } else if (start_con < 0 && dest_size != top()) { @@ -5107,8 +5124,8 @@ Node* start = slice_idx; start = ConvI2X(start); if (scale != 0) - start = _gvn.transform( new(C) LShiftXNode( start, intcon(scale) )); - start = _gvn.transform( new(C) AddXNode(start, MakeConX(abase)) ); + start = _gvn.transform(new(C) LShiftXNode( start, intcon(scale) )); + start = _gvn.transform(new(C) AddXNode(start, MakeConX(abase))); if ((bump_bit | clear_low) != 0) { int to_clear = (bump_bit | clear_low); // Align up mod 8, then store a jint zero unconditionally @@ -5119,14 +5136,14 @@ assert((abase & to_clear) == 0, "array base must be long-aligned"); } else { // Bump 'start' up to (or past) the next jint boundary: - start = _gvn.transform( new(C) AddXNode(start, MakeConX(bump_bit)) ); + start = _gvn.transform(new(C) AddXNode(start, MakeConX(bump_bit))); assert((abase & clear_low) == 0, "array base must be int-aligned"); } // Round bumped 'start' down to jlong boundary in body of array. - start = _gvn.transform( new(C) AndXNode(start, MakeConX(~to_clear)) ); + start = _gvn.transform(new(C) AndXNode(start, MakeConX(~to_clear))); if (bump_bit != 0) { // Store a zero to the immediately preceding jint: - Node* x1 = _gvn.transform( new(C) AddXNode(start, MakeConX(-bump_bit)) ); + Node* x1 = _gvn.transform(new(C) AddXNode(start, MakeConX(-bump_bit))); Node* p1 = basic_plus_adr(dest, x1); mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT); mem = _gvn.transform(mem); @@ -5193,8 +5210,8 @@ Node* sptr = basic_plus_adr(src, src_off); Node* dptr = basic_plus_adr(dest, dest_off); Node* countx = dest_size; - countx = _gvn.transform( new (C) SubXNode(countx, MakeConX(dest_off)) ); - countx = _gvn.transform( new (C) URShiftXNode(countx, intcon(LogBytesPerLong)) ); + countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(dest_off))); + countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong))); bool disjoint_bases = true; // since alloc != NULL generate_unchecked_arraycopy(adr_type, T_LONG, disjoint_bases, @@ -5359,6 +5376,117 @@ return true; } +/** + * Calculate CRC32 for byte. 
+ * int java.util.zip.CRC32.update(int crc, int b) + */ +bool LibraryCallKit::inline_updateCRC32() { + assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support"); + assert(callee()->signature()->size() == 2, "update has 2 parameters"); + // no receiver since it is a static method + Node* crc = argument(0); // type: int + Node* b = argument(1); // type: int + + /* + * int c = ~ crc; + * b = timesXtoThe32[(b ^ c) & 0xFF]; + * b = b ^ (c >>> 8); + * crc = ~b; + */ + + Node* M1 = intcon(-1); + crc = _gvn.transform(new (C) XorINode(crc, M1)); + Node* result = _gvn.transform(new (C) XorINode(crc, b)); + result = _gvn.transform(new (C) AndINode(result, intcon(0xFF))); + + Node* base = makecon(TypeRawPtr::make(StubRoutines::crc_table_addr())); + Node* offset = _gvn.transform(new (C) LShiftINode(result, intcon(0x2))); + Node* adr = basic_plus_adr(top(), base, ConvI2X(offset)); + result = make_load(control(), adr, TypeInt::INT, T_INT); + + crc = _gvn.transform(new (C) URShiftINode(crc, intcon(8))); + result = _gvn.transform(new (C) XorINode(crc, result)); + result = _gvn.transform(new (C) XorINode(result, M1)); + set_result(result); + return true; +} + +/** + * Calculate CRC32 for byte[] array. + * int java.util.zip.CRC32.updateBytes(int crc, byte[] buf, int off, int len) + */ +bool LibraryCallKit::inline_updateBytesCRC32() { + assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support"); + assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters"); + // no receiver since it is a static method + Node* crc = argument(0); // type: int + Node* src = argument(1); // type: oop + Node* offset = argument(2); // type: int + Node* length = argument(3); // type: int + + const Type* src_type = src->Value(&_gvn); + const TypeAryPtr* top_src = src_type->isa_aryptr(); + if (top_src == NULL || top_src->klass() == NULL) { + // failed array check + return false; + } + + // Figure out the size and type of the elements we will be copying. + BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type(); + if (src_elem != T_BYTE) { + return false; + } + + // 'src_start' points to src array + scaled offset + Node* src_start = array_element_address(src, offset, src_elem); + + // We assume that the range check is done by the caller. + // TODO: generate range check (offset+length < src.length) in debug VM. + + // Call the stub. + address stubAddr = StubRoutines::updateBytesCRC32(); + const char *stubName = "updateBytesCRC32"; + + Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(), + stubAddr, stubName, TypePtr::BOTTOM, + crc, src_start, length); + Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms)); + set_result(result); + return true; +} + +/** + * Calculate CRC32 for ByteBuffer.
+ * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len) + */ +bool LibraryCallKit::inline_updateByteBufferCRC32() { + assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support"); + assert(callee()->signature()->size() == 5, "updateByteBuffer has 4 parameters and one is long"); + // no receiver since it is a static method + Node* crc = argument(0); // type: int + Node* src = argument(1); // type: long + Node* offset = argument(3); // type: int + Node* length = argument(4); // type: int + + src = ConvL2X(src); // adjust Java long to machine word + Node* base = _gvn.transform(new (C) CastX2PNode(src)); + offset = ConvI2X(offset); + + // 'src_start' points to src array + scaled offset + Node* src_start = basic_plus_adr(top(), base, offset); + + // Call the stub. + address stubAddr = StubRoutines::updateBytesCRC32(); + const char *stubName = "updateBytesCRC32"; + + Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(), + stubAddr, stubName, TypePtr::BOTTOM, + crc, src_start, length); + Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms)); + set_result(result); + return true; +} + //----------------------------inline_reference_get---------------------------- // public T java.lang.ref.Reference.get(); bool LibraryCallKit::inline_reference_get() { diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/opto/loopnode.cpp --- a/src/share/vm/opto/loopnode.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/opto/loopnode.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -440,7 +440,7 @@ // ---- SUCCESS! Found A Trip-Counted Loop! ----- // assert(x->Opcode() == Op_Loop, "regular loops only"); - C->print_method("Before CountedLoop", 3); + C->print_method(PHASE_BEFORE_CLOOPS, 3); Node *hook = new (C) Node(6); @@ -791,7 +791,7 @@ } #endif - C->print_method("After CountedLoop", 3); + C->print_method(PHASE_AFTER_CLOOPS, 3); return true; } @@ -2164,7 +2164,7 @@ // Split shared headers and insert loop landing pads. // Do not bother doing this on the Root loop of course. if( !_verify_me && !_verify_only && _ltree_root->_child ) { - C->print_method("Before beautify loops", 3); + C->print_method(PHASE_BEFORE_BEAUTIFY_LOOPS, 3); if( _ltree_root->_child->beautify_loops( this ) ) { // Re-build loop tree! _ltree_root->_child = NULL; @@ -2178,7 +2178,7 @@ // Reset loop nesting depth _ltree_root->set_nest( 0 ); - C->print_method("After beautify loops", 3); + C->print_method(PHASE_AFTER_BEAUTIFY_LOOPS, 3); } } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/opto/matcher.cpp --- a/src/share/vm/opto/matcher.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/opto/matcher.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -317,7 +317,7 @@ find_shared( C->root() ); find_shared( C->top() ); - C->print_method("Before Matching"); + C->print_method(PHASE_BEFORE_MATCHING); // Create new ideal node ConP #NULL even if it does exist in old space // to avoid false sharing if the corresponding mach node is not used. @@ -985,6 +985,8 @@ mstack.push(n, Visit, NULL, -1); // set NULL as parent to indicate root while (mstack.is_nonempty()) { + C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions"); + if (C->failing()) return NULL; n = mstack.node(); // Leave node on stack Node_State nstate = mstack.state(); if (nstate == Visit) { @@ -1848,7 +1850,7 @@ for( uint i=0; kid != NULL && i<2; kid = s->_kids[1], i++ ) { // binary tree int newrule; - if( i == 0 ) + if( i == 0) newrule = kid->_rule[_leftOp[rule]]; else newrule = kid->_rule[_rightOp[rule]]; diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/opto/memnode.cpp --- a/src/share/vm/opto/memnode.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/opto/memnode.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -2930,7 +2930,9 @@ Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) { if (remove_dead_region(phase, can_reshape)) return this; // Don't bother trying to transform a dead node - if (in(0) && in(0)->is_top()) return NULL; + if (in(0) && in(0)->is_top()) { + return NULL; + } // Eliminate volatile MemBars for scalar replaced objects. if (can_reshape && req() == (Precedent+1)) { @@ -2939,6 +2941,22 @@ if ((opc == Op_MemBarAcquire || opc == Op_MemBarVolatile)) { // Volatile field loads and stores. Node* my_mem = in(MemBarNode::Precedent); + // The MemBarAcquire may keep an unused LoadNode alive through the Precedent edge + if ((my_mem != NULL) && (opc == Op_MemBarAcquire) && (my_mem->outcnt() == 1)) { + // If the Precedent is a DecodeN and its input (a Load) is used in more than one place, + // replace this Precedent (DecodeN) with the Load instead. + if ((my_mem->Opcode() == Op_DecodeN) && (my_mem->in(1)->outcnt() > 1)) { + Node* load_node = my_mem->in(1); + set_req(MemBarNode::Precedent, load_node); + phase->is_IterGVN()->_worklist.push(my_mem); + my_mem = load_node; + } else { + assert(my_mem->unique_out() == this, "sanity"); + del_req(Precedent); + phase->is_IterGVN()->_worklist.push(my_mem); // remove dead node later + my_mem = NULL; + } + } if (my_mem != NULL && my_mem->is_Mem()) { const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr(); // Check for scalar replaced object reference. @@ -4384,7 +4402,7 @@ } } #else // !ASSERT -#define verify_memory_slice(m,i,n) (0) // PRODUCT version is no-op +#define verify_memory_slice(m,i,n) (void)(0) // PRODUCT version is no-op #endif diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/opto/phasetype.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/opto/phasetype.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_OPTO_PHASETYPE_HPP +#define SHARE_VM_OPTO_PHASETYPE_HPP + +enum CompilerPhaseType { + PHASE_BEFORE_STRINGOPTS, + PHASE_AFTER_STRINGOPTS, + PHASE_BEFORE_REMOVEUSELESS, + PHASE_AFTER_PARSING, + PHASE_ITER_GVN1, + PHASE_PHASEIDEAL_BEFORE_EA, + PHASE_ITER_GVN_AFTER_EA, + PHASE_ITER_GVN_AFTER_ELIMINATION, + PHASE_PHASEIDEALLOOP1, + PHASE_PHASEIDEALLOOP2, + PHASE_PHASEIDEALLOOP3, + PHASE_CPP1, + PHASE_ITER_GVN2, + PHASE_PHASEIDEALLOOP_ITERATIONS, + PHASE_OPTIMIZE_FINISHED, + PHASE_GLOBAL_CODE_MOTION, + PHASE_FINAL_CODE, + PHASE_AFTER_EA, + PHASE_BEFORE_CLOOPS, + PHASE_AFTER_CLOOPS, + PHASE_BEFORE_BEAUTIFY_LOOPS, + PHASE_AFTER_BEAUTIFY_LOOPS, + PHASE_BEFORE_MATCHING, + PHASE_INCREMENTAL_INLINE, + PHASE_INCREMENTAL_BOXING_INLINE, + PHASE_END, + PHASE_FAILURE, + + PHASE_NUM_TYPES +}; + +class CompilerPhaseTypeHelper { + public: + static const char* to_string(CompilerPhaseType cpt) { + switch (cpt) { + case PHASE_BEFORE_STRINGOPTS: return "Before StringOpts"; + case PHASE_AFTER_STRINGOPTS: return "After StringOpts"; + case PHASE_BEFORE_REMOVEUSELESS: return "Before RemoveUseless"; + case PHASE_AFTER_PARSING: return "After Parsing"; + case PHASE_ITER_GVN1: return "Iter GVN 1"; + case PHASE_PHASEIDEAL_BEFORE_EA: return "PhaseIdealLoop before EA"; + case PHASE_ITER_GVN_AFTER_EA: return "Iter GVN after EA"; + case PHASE_ITER_GVN_AFTER_ELIMINATION: return "Iter GVN after eliminating allocations and locks"; + case PHASE_PHASEIDEALLOOP1: return "PhaseIdealLoop 1"; + case PHASE_PHASEIDEALLOOP2: return "PhaseIdealLoop 2"; + case PHASE_PHASEIDEALLOOP3: return "PhaseIdealLoop 3"; + case PHASE_CPP1: return "PhaseCPP 1"; + case PHASE_ITER_GVN2: return "Iter GVN 2"; + case PHASE_PHASEIDEALLOOP_ITERATIONS: return "PhaseIdealLoop iterations"; + case PHASE_OPTIMIZE_FINISHED: return "Optimize finished"; + case PHASE_GLOBAL_CODE_MOTION: return "Global code motion"; + case PHASE_FINAL_CODE: return "Final Code"; + case PHASE_AFTER_EA: return "After Escape Analysis"; + case PHASE_BEFORE_CLOOPS: return "Before CountedLoop"; + case PHASE_AFTER_CLOOPS: return "After CountedLoop"; + case PHASE_BEFORE_BEAUTIFY_LOOPS: return "Before beautify loops"; + case PHASE_AFTER_BEAUTIFY_LOOPS: return "After beautify loops"; + case PHASE_BEFORE_MATCHING: return "Before Matching"; + case PHASE_INCREMENTAL_INLINE: return "Incremental Inline"; + case PHASE_INCREMENTAL_BOXING_INLINE: return "Incremental Boxing Inline"; + case PHASE_END: return "End"; + case PHASE_FAILURE: return "Failure"; + default: + ShouldNotReachHere(); + return NULL; + } + } +}; + +#endif //SHARE_VM_OPTO_PHASETYPE_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/opto/runtime.cpp --- a/src/share/vm/opto/runtime.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/opto/runtime.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -829,6 +829,28 @@ return TypeFunc::make(domain, range); } +/** + * int updateBytesCRC32(int crc, byte* b, int len) + */ +const TypeFunc* OptoRuntime::updateBytesCRC32_Type() { + // create input type (domain) + int num_args = 3; + int argcnt = num_args; + const Type** fields = TypeTuple::fields(argcnt); + int argp = TypeFunc::Parms; + fields[argp++] = TypeInt::INT; // crc + fields[argp++] = TypePtr::NOTNULL; // src + fields[argp++] = TypeInt::INT; // len + assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); + const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); + + // result type needed + fields = TypeTuple::fields(1); + fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result + const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); + return TypeFunc::make(domain, range); +} + // for cipherBlockChaining calls of aescrypt encrypt/decrypt, four pointers and a length, returning void const TypeFunc* OptoRuntime::cipherBlockChaining_aescrypt_Type() { // create input type (domain) diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/opto/runtime.hpp --- a/src/share/vm/opto/runtime.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/opto/runtime.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -284,6 +284,8 @@ static const TypeFunc* aescrypt_block_Type(); static const TypeFunc* cipherBlockChaining_aescrypt_Type(); + static const TypeFunc* updateBytesCRC32_Type(); + // leaf on stack replacement interpreter accessor types static const TypeFunc* osr_end_Type(); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/precompiled/precompiled.hpp --- a/src/share/vm/precompiled/precompiled.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/precompiled/precompiled.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -26,7 +26,6 @@ // or if the user passes USE_PRECOMPILED_HEADER=0 to the makefiles. #ifndef DONT_USE_PRECOMPILED_HEADER - # include "asm/assembler.hpp" # include "asm/assembler.inline.hpp" # include "asm/codeBuffer.hpp" diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/prims/forte.cpp --- a/src/share/vm/prims/forte.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/prims/forte.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -619,7 +619,7 @@ void* null_argument_3); #pragma weak collector_func_load #define collector_func_load(x0,x1,x2,x3,x4,x5,x6) \ - ( collector_func_load ? collector_func_load(x0,x1,x2,x3,x4,x5,x6),0 : 0 ) + ( collector_func_load ? 
collector_func_load(x0,x1,x2,x3,x4,x5,x6),(void)0 : (void)0 ) #endif // __APPLE__ #endif // !_WINDOWS diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/prims/jni.cpp --- a/src/share/vm/prims/jni.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/prims/jni.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -74,7 +74,6 @@ #include "runtime/vm_operations.hpp" #include "services/runtimeService.hpp" #include "trace/tracing.hpp" -#include "trace/traceEventTypes.hpp" #include "utilities/defaultStream.hpp" #include "utilities/dtrace.hpp" #include "utilities/events.hpp" @@ -880,7 +879,7 @@ env, capacity); #endif /* USDT2 */ //%note jni_11 - if (capacity < 0 && capacity > MAX_REASONABLE_LOCAL_CAPACITY) { + if (capacity < 0 || capacity > MAX_REASONABLE_LOCAL_CAPACITY) { #ifndef USDT2 DTRACE_PROBE1(hotspot_jni, PushLocalFrame__return, JNI_ERR); #else /* USDT2 */ @@ -5014,6 +5013,7 @@ #ifndef PRODUCT +#include "gc_implementation/shared/gcTimer.hpp" #include "gc_interface/collectedHeap.hpp" #if INCLUDE_ALL_GCS #include "gc_implementation/g1/heapRegionRemSet.hpp" @@ -5031,6 +5031,7 @@ if (ExecuteInternalVMTests) { tty->print_cr("Running internal VM tests"); run_unit_test(GlobalDefinitions::test_globals()); + run_unit_test(GCTimerAllTest::all()); run_unit_test(arrayOopDesc::test_max_array_length()); run_unit_test(CollectedHeap::test_is_in()); run_unit_test(QuickSort::test_quick_sort()); @@ -5096,7 +5097,7 @@ // function used to determine this will always return false. Atomic::xchg // does not have this problem. if (Atomic::xchg(1, &vm_created) == 1) { - return JNI_ERR; // already created, or create attempt in progress + return JNI_EEXIST; // already created, or create attempt in progress } if (Atomic::xchg(0, &safe_to_recreate_vm) == 0) { return JNI_ERR; // someone tried and failed and retry not allowed. @@ -5131,13 +5132,27 @@ JvmtiExport::post_thread_start(thread); } - EVENT_BEGIN(TraceEventThreadStart, event); - EVENT_COMMIT(event, - EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj()))); + EventThreadStart event; + if (event.should_commit()) { + event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj())); + event.commit(); + } + +#ifndef PRODUCT + #ifndef TARGET_OS_FAMILY_windows + #define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) f() + #endif // Check if we should compile all classes on bootclasspath - NOT_PRODUCT(if (CompileTheWorld) ClassLoader::compile_the_world();) - NOT_PRODUCT(if (ReplayCompiles) ciReplay::replay(thread);) + if (CompileTheWorld) ClassLoader::compile_the_world(); + if (ReplayCompiles) ciReplay::replay(thread); + + // Some platforms (like Win*) need a wrapper around these test + // functions in order to properly handle error conditions. + CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(test_error_handler); + CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(execute_internal_vm_tests); +#endif + // Since this is not a JVM_ENTRY we have to set the thread state manually before leaving. 
ThreadStateTransition::transition_and_fence(thread, _thread_in_vm, _thread_in_native); } else { @@ -5154,8 +5169,6 @@ OrderAccess::release_store(&vm_created, 0); } - NOT_PRODUCT(test_error_handler(ErrorHandlerTest)); - NOT_PRODUCT(execute_internal_vm_tests()); return result; } @@ -5334,9 +5347,11 @@ JvmtiExport::post_thread_start(thread); } - EVENT_BEGIN(TraceEventThreadStart, event); - EVENT_COMMIT(event, - EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj()))); + EventThreadStart event; + if (event.should_commit()) { + event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj())); + event.commit(); + } *(JNIEnv**)penv = thread->jni_environment(); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/prims/jvm.cpp --- a/src/share/vm/prims/jvm.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/prims/jvm.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -59,6 +59,7 @@ #include "services/attachListener.hpp" #include "services/management.hpp" #include "services/threadService.hpp" +#include "trace/tracing.hpp" #include "utilities/copy.hpp" #include "utilities/defaultStream.hpp" #include "utilities/dtrace.hpp" @@ -1120,26 +1121,56 @@ JVM_END -// Obsolete since 1.2 (Class.setProtectionDomain removed), although -// still defined in core libraries as of 1.5. -JVM_ENTRY(void, JVM_SetProtectionDomain(JNIEnv *env, jclass cls, jobject protection_domain)) - JVMWrapper("JVM_SetProtectionDomain"); - if (JNIHandles::resolve(cls) == NULL) { - THROW(vmSymbols::java_lang_NullPointerException()); - } - if (!java_lang_Class::is_primitive(JNIHandles::resolve(cls))) { - // Call is ignored for primitive types - Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve(cls)); - - // cls won't be an array, as this called only from ClassLoader.defineClass - if (k->oop_is_instance()) { - oop pd = JNIHandles::resolve(protection_domain); - assert(pd == NULL || pd->is_oop(), "just checking"); - java_lang_Class::set_protection_domain(k->java_mirror(), pd); +static bool is_authorized(Handle context, instanceKlassHandle klass, TRAPS) { + // If there is a security manager and protection domain, check the access + // in the protection domain, otherwise it is authorized. + if (java_lang_System::has_security_manager()) { + + // For bootstrapping, if pd implies method isn't in the JDK, allow + // this context to revert to older behavior. + // In this case the isAuthorized field in AccessControlContext is also not + // present. + if (Universe::protection_domain_implies_method() == NULL) { + return true; + } + + // Whitelist certain access control contexts + if (java_security_AccessControlContext::is_authorized(context)) { + return true; + } + + oop prot = klass->protection_domain(); + if (prot != NULL) { + // Call pd.implies(new SecurityPermission("createAccessControlContext")) + // in the new wrapper. + methodHandle m(THREAD, Universe::protection_domain_implies_method()); + Handle h_prot(THREAD, prot); + JavaValue result(T_BOOLEAN); + JavaCallArguments args(h_prot); + JavaCalls::call(&result, m, &args, CHECK_false); + return (result.get_jboolean() != 0); } } -JVM_END - + return true; +} + +// Create an AccessControlContext with a protection domain with null codesource +// and null permissions - which gives no permissions. 
+oop create_dummy_access_control_context(TRAPS) { + InstanceKlass* pd_klass = InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass()); + // new ProtectionDomain(null,null); + oop null_protection_domain = pd_klass->allocate_instance(CHECK_NULL); + Handle null_pd(THREAD, null_protection_domain); + + // new ProtectionDomain[] {pd}; + objArrayOop context = oopFactory::new_objArray(pd_klass, 1, CHECK_NULL); + context->obj_at_put(0, null_pd()); + + // new AccessControlContext(new ProtectionDomain[] {pd}) + objArrayHandle h_context(THREAD, context); + oop result = java_security_AccessControlContext::create(h_context, false, Handle(), CHECK_NULL); + return result; +} JVM_ENTRY(jobject, JVM_DoPrivileged(JNIEnv *env, jclass cls, jobject action, jobject context, jboolean wrapException)) JVMWrapper("JVM_DoPrivileged"); @@ -1148,8 +1179,29 @@ THROW_MSG_0(vmSymbols::java_lang_NullPointerException(), "Null action"); } - // Stack allocated list of privileged stack elements - PrivilegedElement pi; + // Compute the frame initiating the do privileged operation and setup the privileged stack + vframeStream vfst(thread); + vfst.security_get_caller_frame(1); + + if (vfst.at_end()) { + THROW_MSG_0(vmSymbols::java_lang_InternalError(), "no caller?"); + } + + Method* method = vfst.method(); + instanceKlassHandle klass (THREAD, method->method_holder()); + + // Check the supplied context; downgrade to a no-permission context if it is not authorized + Handle h_context; + if (context != NULL) { + h_context = Handle(THREAD, JNIHandles::resolve(context)); + bool authorized = is_authorized(h_context, klass, CHECK_NULL); + if (!authorized) { + // Create an unprivileged access control object and call its run function + // instead. + oop noprivs = create_dummy_access_control_context(CHECK_NULL); + h_context = Handle(THREAD, noprivs); + } + } // Check that action object understands "Object run()" Handle object (THREAD, JNIHandles::resolve(action)); @@ -1163,12 +1215,10 @@ THROW_MSG_0(vmSymbols::java_lang_InternalError(), "No run method"); } - // Compute the frame initiating the do privileged operation and setup the privileged stack - vframeStream vfst(thread); - vfst.security_get_caller_frame(1); - + // Stack allocated list of privileged stack elements + PrivilegedElement pi; if (!vfst.at_end()) { - pi.initialize(&vfst, JNIHandles::resolve(context), thread->privileged_stack_top(), CHECK_NULL); + pi.initialize(&vfst, h_context(), thread->privileged_stack_top(), CHECK_NULL); thread->set_privileged_stack_top(&pi); } @@ -2999,6 +3049,8 @@ millis); #endif /* USDT2 */ + EventThreadSleep event; + if (millis == 0) { // When ConvertSleepToYield is on, this matches the classic VM implementation of // JVM_Sleep. Critical for similar threading behaviour (Win32) @@ -3019,6 +3071,10 @@ // An asynchronous exception (e.g., ThreadDeathException) could have been thrown on // us while we were sleeping. We do not overwrite those.
if (!HAS_PENDING_EXCEPTION) { + if (event.should_commit()) { + event.set_time(millis); + event.commit(); + } #ifndef USDT2 HS_DTRACE_PROBE1(hotspot, thread__sleep__end,1); #else /* USDT2 */ @@ -3032,6 +3088,10 @@ } thread->osthread()->set_state(old_state); } + if (event.should_commit()) { + event.set_time(millis); + event.commit(); + } #ifndef USDT2 HS_DTRACE_PROBE1(hotspot, thread__sleep__end,0); #else /* USDT2 */ @@ -3230,24 +3290,10 @@ JVM_END -// Utility object for collecting method holders walking down the stack -class KlassLink: public ResourceObj { - public: - KlassHandle klass; - KlassLink* next; - - KlassLink(KlassHandle k) { klass = k; next = NULL; } -}; - - JVM_ENTRY(jobjectArray, JVM_GetClassContext(JNIEnv *env)) JVMWrapper("JVM_GetClassContext"); ResourceMark rm(THREAD); JvmtiVMObjectAllocEventCollector oam; - // Collect linked list of (handles to) method holders - KlassLink* first = NULL; - KlassLink* last = NULL; - int depth = 0; vframeStream vfst(thread); if (SystemDictionary::reflect_CallerSensitive_klass() != NULL) { @@ -3261,32 +3307,23 @@ } // Collect method holders + GrowableArray<KlassHandle>* klass_array = new GrowableArray<KlassHandle>(); for (; !vfst.at_end(); vfst.security_next()) { Method* m = vfst.method(); // Native frames are not returned if (!m->is_ignored_by_security_stack_walk() && !m->is_native()) { Klass* holder = m->method_holder(); assert(holder->is_klass(), "just checking"); - depth++; - KlassLink* l = new KlassLink(KlassHandle(thread, holder)); - if (first == NULL) { - first = last = l; - } else { - last->next = l; - last = l; - } + klass_array->append(holder); } } // Create result array of type [Ljava/lang/Class; - objArrayOop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), depth, CHECK_NULL); + objArrayOop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), klass_array->length(), CHECK_NULL); // Fill in mirrors corresponding to method holders - int index = 0; - while (first != NULL) { - result->obj_at_put(index++, first->klass()->java_mirror()); - first = first->next; + for (int i = 0; i < klass_array->length(); i++) { + result->obj_at_put(i, klass_array->at(i)->java_mirror()); } - assert(index == depth, "just checking"); return (jobjectArray) JNIHandles::make_local(env, result); JVM_END diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/prims/jvm.h --- a/src/share/vm/prims/jvm.h Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/prims/jvm.h Tue Jul 16 12:20:08 2013 -0400 @@ -471,9 +471,6 @@ JNIEXPORT jobject JNICALL JVM_GetProtectionDomain(JNIEnv *env, jclass cls); -JNIEXPORT void JNICALL -JVM_SetProtectionDomain(JNIEnv *env, jclass cls, jobject protection_domain); - JNIEXPORT jboolean JNICALL JVM_IsArrayClass(JNIEnv *env, jclass cls); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/prims/jvmti.xml --- a/src/share/vm/prims/jvmti.xml Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/prims/jvmti.xml Tue Jul 16 12:20:08 2013 -0400 @@ -1897,7 +1897,7 @@ - + jvmtiMonitorStackDepthInfo diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/prims/jvmtiGen.java --- a/src/share/vm/prims/jvmtiGen.java Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/prims/jvmtiGen.java Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,6 @@ import org.xml.sax.SAXParseException; import org.w3c.dom.Document; import org.w3c.dom.DOMException; - // For write operation import javax.xml.transform.Transformer; import javax.xml.transform.TransformerException; @@ -129,6 +128,7 @@ factory.setNamespaceAware(true); factory.setValidating(true); + factory.setXIncludeAware(true); try { File datafile = new File(inFileName); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/prims/jvmtiImpl.cpp --- a/src/share/vm/prims/jvmtiImpl.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/prims/jvmtiImpl.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -360,19 +360,14 @@ case CLEAR_BREAKPOINT: _breakpoints->clear_at_safepoint(*_bp); break; - case CLEAR_ALL_BREAKPOINT: - _breakpoints->clearall_at_safepoint(); - break; default: assert(false, "Unknown operation"); } } void VM_ChangeBreakpoints::oops_do(OopClosure* f) { - // This operation keeps breakpoints alive - if (_breakpoints != NULL) { - _breakpoints->oops_do(f); - } + // The JvmtiBreakpoints in _breakpoints will be visited via + // JvmtiExport::oops_do. if (_bp != NULL) { _bp->oops_do(f); } @@ -433,23 +428,13 @@ } } -void JvmtiBreakpoints::clearall_at_safepoint() { - assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); - - int len = _bps.length(); - for (int i=0; i<len; i++) { [...] diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/prims/methodHandles.cpp --- a/src/share/vm/prims/methodHandles.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/prims/methodHandles.cpp Tue Jul 16 12:20:08 2013 -0400 + if (reference_klass != NULL && reference_klass->oop_is_objArray()) { + reference_klass = ObjArrayKlass::cast(reference_klass)->bottom_klass(); + } + + // Reflection::verify_class_access can only handle instance classes. + if (reference_klass != NULL && reference_klass->oop_is_instance()) { // Emulate LinkResolver::check_klass_accessability. Klass* caller = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(caller_jh)); if (!Reflection::verify_class_access(caller, diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/prims/unsafe.cpp --- a/src/share/vm/prims/unsafe.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/prims/unsafe.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,7 @@ #include "runtime/reflection.hpp" #include "runtime/synchronizer.hpp" #include "services/threadService.hpp" +#include "trace/tracing.hpp" #include "utilities/copy.hpp" #include "utilities/dtrace.hpp" @@ -1204,6 +1205,7 @@ UNSAFE_ENTRY(void, Unsafe_Park(JNIEnv *env, jobject unsafe, jboolean isAbsolute, jlong time)) UnsafeWrapper("Unsafe_Park"); + EventThreadPark event; #ifndef USDT2 HS_DTRACE_PROBE3(hotspot, thread__park__begin, thread->parker(), (int) isAbsolute, time); #else /* USDT2 */ @@ -1218,6 +1220,13 @@ HOTSPOT_THREAD_PARK_END( (uintptr_t) thread->parker()); #endif /* USDT2 */ + if (event.should_commit()) { + oop obj = thread->current_park_blocker(); + event.set_klass(obj ? obj->klass() : NULL); + event.set_timeout(time); + event.set_address(obj ?
(TYPE_ADDRESS) (uintptr_t) obj : 0); + event.commit(); + } UNSAFE_END UNSAFE_ENTRY(void, Unsafe_Unpark(JNIEnv *env, jobject unsafe, jobject jthread)) diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/prims/whitebox.cpp --- a/src/share/vm/prims/whitebox.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/prims/whitebox.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -159,7 +159,7 @@ WB_ENTRY(void, WB_NMTCommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size)) - os::commit_memory((char *)(uintptr_t)addr, size); + os::commit_memory((char *)(uintptr_t)addr, size, !ExecMem); MemTracker::record_virtual_memory_type((address)(uintptr_t)addr, mtTest); WB_END diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/aprofiler.cpp --- a/src/share/vm/runtime/aprofiler.cpp Tue Jul 16 10:55:48 2013 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,143 +0,0 @@ -/* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#include "precompiled.hpp" -#include "classfile/systemDictionary.hpp" -#include "gc_interface/collectedHeap.inline.hpp" -#include "memory/resourceArea.hpp" -#include "memory/space.hpp" -#include "oops/oop.inline.hpp" -#include "oops/oop.inline2.hpp" -#include "runtime/aprofiler.hpp" - - -bool AllocationProfiler::_active = false; -GrowableArray<Klass*>* AllocationProfiler::_print_array = NULL; - - -class AllocProfClosure : public ObjectClosure { - public: - void do_object(oop obj) { - Klass* k = obj->klass(); - k->set_alloc_count(k->alloc_count() + 1); - k->set_alloc_size(k->alloc_size() + obj->size()); - } -}; - - -void AllocationProfiler::iterate_since_last_gc() { - if (is_active()) { - AllocProfClosure blk; - GenCollectedHeap* heap = GenCollectedHeap::heap(); - heap->object_iterate_since_last_GC(&blk); - } -} - - -void AllocationProfiler::engage() { - _active = true; -} - - -void AllocationProfiler::disengage() { - _active = false; -} - - -void AllocationProfiler::add_class_to_array(Klass* k) { - _print_array->append(k); -} - - -void AllocationProfiler::add_classes_to_array(Klass* k) { - // Iterate over klass and all array klasses for klass - k->with_array_klasses_do(&AllocationProfiler::add_class_to_array); -} - - -int AllocationProfiler::compare_classes(Klass** k1, Klass** k2) { - // Sort by total allocation size - return (*k2)->alloc_size() - (*k1)->alloc_size(); -} - - -int AllocationProfiler::average(size_t alloc_size, int alloc_count) { - return (int) ((double) (alloc_size * BytesPerWord) / MAX2(alloc_count, 1) + 0.5); -} - - -void AllocationProfiler::sort_and_print_array(size_t cutoff) { - _print_array->sort(&AllocationProfiler::compare_classes); - tty->print_cr("________________Size" - "__Instances" - "__Average" - "__Class________________"); - size_t total_alloc_size = 0; - int total_alloc_count = 0; - for (int index = 0; index < _print_array->length(); index++) { - Klass* k = _print_array->at(index); - size_t alloc_size = k->alloc_size(); - if (alloc_size > cutoff) { - int alloc_count = k->alloc_count(); -#ifdef PRODUCT - const char* name = k->external_name(); -#else - const char* name = k->internal_name(); -#endif - tty->print_cr("%20u %10u %8u %s", - alloc_size * BytesPerWord, - alloc_count, - average(alloc_size, alloc_count), - name); - total_alloc_size += alloc_size; - total_alloc_count += alloc_count; - } - k->set_alloc_count(0); - k->set_alloc_size(0); - } - tty->print_cr("%20u %10u %8u --total--", - total_alloc_size * BytesPerWord, - total_alloc_count, - average(total_alloc_size, total_alloc_count)); - tty->cr(); -} - - -void AllocationProfiler::print(size_t cutoff) { - ResourceMark rm; - assert(!is_active(), "AllocationProfiler cannot be active while printing profile"); - - tty->cr(); - tty->print_cr("Allocation profile (sizes in bytes, cutoff = " SIZE_FORMAT " bytes):", cutoff * BytesPerWord); - tty->cr(); - - // Print regular instance klasses and basic type array klasses - _print_array = new GrowableArray<Klass*>(SystemDictionary::number_of_classes()*2); - SystemDictionary::classes_do(&add_classes_to_array); - Universe::basic_type_classes_do(&add_classes_to_array); - sort_and_print_array(cutoff); - - // This used to print metadata in the permgen but since there isn't a permgen - // anymore, it is not yet implemented.
-} diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/aprofiler.hpp --- a/src/share/vm/runtime/aprofiler.hpp Tue Jul 16 10:55:48 2013 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,71 +0,0 @@ -/* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_VM_RUNTIME_APROFILER_HPP -#define SHARE_VM_RUNTIME_APROFILER_HPP - -#include "memory/allocation.hpp" -#include "memory/universe.hpp" -#include "oops/klass.hpp" -#include "utilities/top.hpp" - -// A simple allocation profiler for Java. The profiler collects and prints -// the number and total size of instances allocated per class, including -// array classes. -// -// The profiler is currently global for all threads. It can be changed to a -// per threads profiler by keeping a more elaborate data structure and calling -// iterate_since_last_scavenge at thread switches. - - -class AllocationProfiler: AllStatic { - friend class GenCollectedHeap; - friend class G1CollectedHeap; - friend class MarkSweep; - private: - static bool _active; // tells whether profiler is active - static GrowableArray<Klass*>* _print_array; // temporary array for printing - - // Utility printing functions - static void add_class_to_array(Klass* k); - static void add_classes_to_array(Klass* k); - static int compare_classes(Klass** k1, Klass** k2); - static int average(size_t alloc_size, int alloc_count); - static void sort_and_print_array(size_t cutoff); - - // Call for collecting allocation information. Called at scavenge, mark-sweep and disengage.
- static void iterate_since_last_gc(); - - public: - // Start profiler - static void engage(); - // Stop profiler - static void disengage(); - // Tells whether profiler is active - static bool is_active() { return _active; } - // Print profile - static void print(size_t cutoff); // Cutoff in total allocation size (in words) -}; - -#endif // SHARE_VM_RUNTIME_APROFILER_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/arguments.cpp --- a/src/share/vm/runtime/arguments.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/arguments.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -68,7 +68,6 @@ SystemProperty* Arguments::_system_properties = NULL; const char* Arguments::_gc_log_filename = NULL; bool Arguments::_has_profile = false; -bool Arguments::_has_alloc_profile = false; uintx Arguments::_min_heap_size = 0; Arguments::Mode Arguments::_mode = _mixed; bool Arguments::_java_compiler = false; @@ -261,6 +260,9 @@ { "PrintRevisitStats", JDK_Version::jdk(8), JDK_Version::jdk(9) }, { "UseVectoredExceptions", JDK_Version::jdk(8), JDK_Version::jdk(9) }, { "UseSplitVerifier", JDK_Version::jdk(8), JDK_Version::jdk(9) }, + { "UseISM", JDK_Version::jdk(8), JDK_Version::jdk(9) }, + { "UsePermISM", JDK_Version::jdk(8), JDK_Version::jdk(9) }, + { "UseMPSS", JDK_Version::jdk(8), JDK_Version::jdk(9) }, #ifdef PRODUCT { "DesiredMethodLimit", JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) }, @@ -849,7 +851,7 @@ arg_len = equal_sign - argname; } - Flag* found_flag = Flag::find_flag((char*)argname, arg_len, true); + Flag* found_flag = Flag::find_flag((const char*)argname, arg_len, true); if (found_flag != NULL) { char locked_message_buf[BUFLEN]; found_flag->get_locked_message(locked_message_buf, BUFLEN); @@ -870,6 +872,14 @@ } else { jio_fprintf(defaultStream::error_stream(), "Unrecognized VM option '%s'\n", argname); + Flag* fuzzy_matched = Flag::fuzzy_match((const char*)argname, arg_len, true); + if (fuzzy_matched != NULL) { + jio_fprintf(defaultStream::error_stream(), + "Did you mean '%s%s%s'?\n", + (fuzzy_matched->is_bool()) ? "(+/-)" : "", + fuzzy_matched->name, + (fuzzy_matched->is_bool()) ? "" : "="); + } } // allow for commandline "commenting out" options like -XX:#+Verbose @@ -1566,6 +1576,17 @@ return result; } +void Arguments::set_heap_base_min_address() { + if (FLAG_IS_DEFAULT(HeapBaseMinAddress) && UseG1GC && HeapBaseMinAddress < 1*G) { + // By default HeapBaseMinAddress is 2G on all platforms except Solaris x86. + // G1 currently needs a lot of C-heap, so on Solaris we have to give G1 + // some extra space for the C-heap compared to other collectors. + // Use FLAG_SET_DEFAULT here rather than FLAG_SET_ERGO to make sure that + // code that checks for default values work correctly. + FLAG_SET_DEFAULT(HeapBaseMinAddress, 1*G); + } +} + void Arguments::set_heap_size() { if (!FLAG_IS_DEFAULT(DefaultMaxRAMFraction)) { // Deprecated flag @@ -1836,8 +1857,13 @@ "please refer to the release notes for the combinations " "allowed\n"); status = false; + } else if (ReservedCodeCacheSize > 2*G) { + // Code cache size larger than MAXINT is not supported. + jio_fprintf(defaultStream::error_stream(), + "Invalid ReservedCodeCacheSize=%dM. Must be at most %uM.\n", ReservedCodeCacheSize/M, + (2*G)/M); + status = false; } - return status; } @@ -1885,21 +1911,6 @@ // Note: Needs platform-dependent factoring. 
bool status = true; -#if ( (defined(COMPILER2) && defined(SPARC))) - // NOTE: The call to VM_Version_init depends on the fact that VM_Version_init - // on sparc doesn't require generation of a stub as is the case on, e.g., - // x86. Normally, VM_Version_init must be called from init_globals in - // init.cpp, which is called by the initial java thread *after* arguments - // have been parsed. VM_Version_init gets called twice on sparc. - extern void VM_Version_init(); - VM_Version_init(); - if (!VM_Version::has_v9()) { - jio_fprintf(defaultStream::error_stream(), - "V8 Machine detected, Server requires V9\n"); - status = false; - } -#endif /* COMPILER2 && SPARC */ - // Allow both -XX:-UseStackBanging and -XX:-UseBoundThreads in non-product // builds so the cost of stack banging can be measured. #if (defined(PRODUCT) && defined(SOLARIS)) @@ -1982,23 +1993,6 @@ status = status && check_gc_consistency(); status = status && check_stack_pages(); - if (_has_alloc_profile) { - if (UseParallelGC || UseParallelOldGC) { - jio_fprintf(defaultStream::error_stream(), - "error: invalid argument combination.\n" - "Allocation profiling (-Xaprof) cannot be used together with " - "Parallel GC (-XX:+UseParallelGC or -XX:+UseParallelOldGC).\n"); - status = false; - } - if (UseConcMarkSweepGC) { - jio_fprintf(defaultStream::error_stream(), - "error: invalid argument combination.\n" - "Allocation profiling (-Xaprof) cannot be used together with " - "the CMS collector (-XX:+UseConcMarkSweepGC).\n"); - status = false; - } - } - if (CMSIncrementalMode) { if (!UseConcMarkSweepGC) { jio_fprintf(defaultStream::error_stream(), @@ -2217,13 +2211,31 @@ status = false; } - if (ReservedCodeCacheSize < InitialCodeCacheSize) { + // Check lower bounds of the code cache + // Template Interpreter code is approximately 3X larger in debug builds. + uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace; + if (InitialCodeCacheSize < (uintx)os::vm_page_size()) { jio_fprintf(defaultStream::error_stream(), - "Invalid ReservedCodeCacheSize: %dK. Should be greater than InitialCodeCacheSize=%dK\n", + "Invalid InitialCodeCacheSize=%dK. Must be at least %dK.\n", InitialCodeCacheSize/K, + os::vm_page_size()/K); + status = false; + } else if (ReservedCodeCacheSize < InitialCodeCacheSize) { + jio_fprintf(defaultStream::error_stream(), + "Invalid ReservedCodeCacheSize: %dK. Must be at least InitialCodeCacheSize=%dK.\n", ReservedCodeCacheSize/K, InitialCodeCacheSize/K); status = false; + } else if (ReservedCodeCacheSize < min_code_cache_size) { + jio_fprintf(defaultStream::error_stream(), + "Invalid ReservedCodeCacheSize=%dK. Must be at least %uK.\n", ReservedCodeCacheSize/K, + min_code_cache_size/K); + status = false; + } else if (ReservedCodeCacheSize > 2*G) { + // Code cache size larger than MAXINT is not supported. + jio_fprintf(defaultStream::error_stream(), + "Invalid ReservedCodeCacheSize=%dM. 
Must be at most %uM.\n", ReservedCodeCacheSize/M, + (2*G)/M); + status = false; } - return status; } @@ -2622,10 +2634,20 @@ // -Xoss } else if (match_option(option, "-Xoss", &tail)) { // HotSpot does not have separate native and Java stacks, ignore silently for compatibility - // -Xmaxjitcodesize + } else if (match_option(option, "-XX:CodeCacheExpansionSize=", &tail)) { + julong long_CodeCacheExpansionSize = 0; + ArgsRange errcode = parse_memory_size(tail, &long_CodeCacheExpansionSize, os::vm_page_size()); + if (errcode != arg_in_range) { + jio_fprintf(defaultStream::error_stream(), + "Invalid argument: %s. Must be at least %luK.\n", option->optionString, + os::vm_page_size()/K); + return JNI_EINVAL; + } + FLAG_SET_CMDLINE(uintx, CodeCacheExpansionSize, (uintx)long_CodeCacheExpansionSize); } else if (match_option(option, "-Xmaxjitcodesize", &tail) || match_option(option, "-XX:ReservedCodeCacheSize=", &tail)) { julong long_ReservedCodeCacheSize = 0; + ArgsRange errcode = parse_memory_size(tail, &long_ReservedCodeCacheSize, 1); if (errcode != arg_in_range) { jio_fprintf(defaultStream::error_stream(), @@ -2673,9 +2695,6 @@ "Flat profiling is not supported in this VM.\n"); return JNI_ERR; #endif // INCLUDE_FPROF - // -Xaprof - } else if (match_option(option, "-Xaprof", &tail)) { - _has_alloc_profile = true; // -Xconcurrentio } else if (match_option(option, "-Xconcurrentio", &tail)) { FLAG_SET_CMDLINE(bool, UseLWPSynchronization, true); @@ -2930,13 +2949,6 @@ FLAG_SET_CMDLINE(bool, UseTLAB, true); } else if (match_option(option, "-XX:-UseTLE", &tail)) { FLAG_SET_CMDLINE(bool, UseTLAB, false); -SOLARIS_ONLY( - } else if (match_option(option, "-XX:+UsePermISM", &tail)) { - warning("-XX:+UsePermISM is obsolete."); - FLAG_SET_CMDLINE(bool, UseISM, true); - } else if (match_option(option, "-XX:-UsePermISM", &tail)) { - FLAG_SET_CMDLINE(bool, UseISM, false); -) } else if (match_option(option, "-XX:+DisplayVMOutputToStderr", &tail)) { FLAG_SET_CMDLINE(bool, DisplayVMOutputToStdout, false); FLAG_SET_CMDLINE(bool, DisplayVMOutputToStderr, true); @@ -3109,8 +3121,6 @@ // Note that large pages are enabled/disabled for both the // Java heap and the code cache. FLAG_SET_DEFAULT(UseLargePages, false); - SOLARIS_ONLY(FLAG_SET_DEFAULT(UseMPSS, false)); - SOLARIS_ONLY(FLAG_SET_DEFAULT(UseISM, false)); } // Tiered compilation is undefined with C1. @@ -3525,6 +3535,8 @@ } } + set_heap_base_min_address(); + // Set heap size based on available physical memory set_heap_size(); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/arguments.hpp --- a/src/share/vm/runtime/arguments.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/arguments.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -262,7 +262,6 @@ // Option flags static bool _has_profile; - static bool _has_alloc_profile; static const char* _gc_log_filename; static uintx _min_heap_size; @@ -315,6 +314,8 @@ // limits the given memory size by the maximum amount of memory this process is // currently allowed to allocate or reserve. 
static julong limit_by_allocatable_memory(julong size); + // Setup HeapBaseMinAddress + static void set_heap_base_min_address(); // Setup heap size static void set_heap_size(); // Based on automatic selection criteria, should the @@ -462,9 +463,8 @@ // -Xloggc:, if not specified will be NULL static const char* gc_log_filename() { return _gc_log_filename; } - // -Xprof/-Xaprof + // -Xprof static bool has_profile() { return _has_profile; } - static bool has_alloc_profile() { return _has_alloc_profile; } // -Xms, -Xmx static uintx min_heap_size() { return _min_heap_size; } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/atomic.cpp --- a/src/share/vm/runtime/atomic.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/atomic.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -80,3 +80,32 @@ } return old; } + +void Atomic::inc(volatile short* dest) { + // Most platforms do not support atomic increment on a 2-byte value. However, + // if the value occupies the most significant 16 bits of an aligned 32-bit + // word, then we can do this with an atomic add of 0x10000 to the 32-bit word. + // + // The least significant parts of this 32-bit word will never be affected, even + // in case of overflow/underflow. + // + // Use the ATOMIC_SHORT_PAIR macro to get the desired alignment. +#ifdef VM_LITTLE_ENDIAN + assert((intx(dest) & 0x03) == 0x02, "wrong alignment"); + (void)Atomic::add(0x10000, (volatile int*)(dest-1)); +#else + assert((intx(dest) & 0x03) == 0x00, "wrong alignment"); + (void)Atomic::add(0x10000, (volatile int*)(dest)); +#endif +} + +void Atomic::dec(volatile short* dest) { +#ifdef VM_LITTLE_ENDIAN + assert((intx(dest) & 0x03) == 0x02, "wrong alignment"); + (void)Atomic::add(-0x10000, (volatile int*)(dest-1)); +#else + assert((intx(dest) & 0x03) == 0x00, "wrong alignment"); + (void)Atomic::add(-0x10000, (volatile int*)(dest)); +#endif +} + diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/atomic.hpp --- a/src/share/vm/runtime/atomic.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/atomic.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -64,11 +64,13 @@ // Atomically increment location inline static void inc (volatile jint* dest); + static void inc (volatile jshort* dest); inline static void inc_ptr(volatile intptr_t* dest); inline static void inc_ptr(volatile void* dest); // Atomically decrement a location inline static void dec (volatile jint* dest); + static void dec (volatile jshort* dest); inline static void dec_ptr(volatile intptr_t* dest); inline static void dec_ptr(volatile void* dest); @@ -95,4 +97,24 @@ inline static void* cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value); }; +// To use Atomic::inc(jshort* dest) and Atomic::dec(jshort* dest), the address must be specially +// aligned, such that (*dest) occupies the upper 16 bits of an aligned 32-bit word. The best way to +// achieve is to place your short value next to another short value, which doesn't need atomic ops. 
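The added comment block continues below with the macro's own ATOMIC_SHORT_PAIR usage example. For intuition, here is a minimal standalone sketch of the same 16-bit-increment trick, assuming a little-endian machine and the GCC/Clang __sync_fetch_and_add builtin; CounterPair and inc_refcount are invented names for illustration, not HotSpot code:

    #include <cstdint>
    #include <cstdio>

    // A 2-byte counter sharing one aligned 32-bit word with a plain field,
    // mirroring the ATOMIC_SHORT_PAIR layout: on little-endian the atomic
    // half is declared second so it occupies the upper 16 bits of the word.
    struct alignas(4) CounterPair {
      volatile int16_t length;    // low half: never touched by the atomic op
      volatile int16_t refcount;  // high half: updated via the 32-bit add
    };
    static_assert(sizeof(CounterPair) == 4, "pair must fill one 32-bit word");

    // Adding 0x10000 to the whole word increments only the upper 16 bits;
    // a carry out of bit 31 is discarded, so 'length' cannot be corrupted.
    void inc_refcount(CounterPair* p) {
      __sync_fetch_and_add(reinterpret_cast<volatile int32_t*>(p), 0x10000);
    }

    int main() {
      CounterPair pair = {7, 0};
      inc_refcount(&pair);
      inc_refcount(&pair);
      std::printf("length=%d refcount=%d\n", (int)pair.length, (int)pair.refcount);
    }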
+// +// Example +// ATOMIC_SHORT_PAIR( +// volatile short _refcount, // needs atomic operation +// unsigned short _length // number of UTF8 characters in the symbol (does not need atomic op) +// ); + +#ifdef VM_LITTLE_ENDIAN +#define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl) \ + non_atomic_decl; \ + atomic_decl +#else +#define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl) \ + atomic_decl ; \ + non_atomic_decl +#endif + #endif // SHARE_VM_RUNTIME_ATOMIC_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/frame.cpp --- a/src/share/vm/runtime/frame.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/frame.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -387,7 +387,6 @@ Method* frame::interpreter_frame_method() const { assert(is_interpreted_frame(), "interpreted frame expected"); Method* m = *interpreter_frame_method_addr(); - assert(m->is_metadata(), "bad Method* in interpreter frame"); assert(m->is_method(), "not a Method*"); return m; } @@ -713,7 +712,8 @@ Method* m = ((nmethod *)_cb)->method(); if (m != NULL) { m->name_and_sig_as_C_string(buf, buflen); - st->print("J %s", buf); + st->print("J %s @ " PTR_FORMAT " [" PTR_FORMAT "+" SIZE_FORMAT "]", + buf, _pc, _cb->code_begin(), _pc - _cb->code_begin()); } else { st->print("J " PTR_FORMAT, pc()); } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/frame.hpp --- a/src/share/vm/runtime/frame.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/frame.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -134,6 +134,7 @@ bool is_interpreted_frame() const; bool is_java_frame() const; bool is_entry_frame() const; // Java frame called from C? + bool is_stub_frame() const; bool is_ignored_frame() const; bool is_native_frame() const; bool is_runtime_frame() const; diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/frame.inline.hpp --- a/src/share/vm/runtime/frame.inline.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/frame.inline.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -79,6 +79,10 @@ return StubRoutines::returns_to_call_stub(pc()); } +inline bool frame::is_stub_frame() const { + return StubRoutines::is_stub_code(pc()) || (_cb != NULL && _cb->is_adapter_blob()); +} + inline bool frame::is_first_frame() const { return is_entry_frame() && entry_frame_is_first(); } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/globals.cpp --- a/src/share/vm/runtime/globals.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/globals.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -73,12 +73,6 @@ strcmp(kind, "{C2 diagnostic}") == 0 || strcmp(kind, "{ARCH diagnostic}") == 0 || strcmp(kind, "{Shark diagnostic}") == 0) { - if (strcmp(name, "EnableInvokeDynamic") == 0 && UnlockExperimentalVMOptions && !UnlockDiagnosticVMOptions) { - // transitional logic to allow tests to run until they are changed - static int warned; - if (++warned == 1) warning("Use -XX:+UnlockDiagnosticVMOptions before EnableInvokeDynamic flag"); - return true; - } return UnlockDiagnosticVMOptions; } else if (strcmp(kind, "{experimental}") == 0 || strcmp(kind, "{C2 experimental}") == 0 || @@ -282,14 +276,14 @@ Flag* Flag::flags = flagTable; size_t Flag::numFlags = (sizeof(flagTable) / sizeof(Flag)); -inline bool str_equal(const char* s, char* q, size_t len) { +inline bool str_equal(const char* s, const char* q, size_t len) { // s is null terminated, q is not! if (strlen(s) != (unsigned int) len) return false; return strncmp(s, q, len) == 0; } // Search the flag table for a named flag -Flag* Flag::find_flag(char* name, size_t length, bool allow_locked) { +Flag* Flag::find_flag(const char* name, size_t length, bool allow_locked) { for (Flag* current = &flagTable[0]; current->name != NULL; current++) { if (str_equal(current->name, name, length)) { // Found a matching entry. Report locked flags only if allowed. 
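The next globals.cpp hunk adds Flag::fuzzy_match, whose helper str_similar scores candidate flag names with Dice's coefficient: twice the number of shared character bigrams, normalized by the combined string lengths. arguments.cpp (earlier in this patch) only prints a "Did you mean ...?" suggestion when the best score reaches the 0.7 VMOptionsFuzzyMatchSimilarity threshold. A self-contained rendering of the same scoring loop, for reference (dice_similarity is an invented name mirroring str_similar):

    #include <cstdio>
    #include <cstring>

    // Dice's coefficient over character bigrams; as in str_similar below,
    // each bigram of s1 matches at most one bigram of s2 (the 'break').
    static float dice_similarity(const char* s1, const char* s2) {
      int len1 = (int) strlen(s1);
      int len2 = (int) strlen(s2);
      int hit = 0;
      for (int i = 0; i < len1 - 1; ++i) {
        for (int j = 0; j < len2 - 1; ++j) {
          if (s1[i] == s2[j] && s1[i + 1] == s2[j + 1]) {
            ++hit;
            break;
          }
        }
      }
      return 2.0f * (float) hit / (float) (len1 + len2);
    }

    int main() {
      // A mistyped -XX option compared against the real flag name.
      float score = dice_similarity("MaxPermSze", "MaxPermSize");
      std::printf("similarity = %.2f (suggest when >= 0.7)\n", score);
    }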
@@ -307,6 +301,52 @@ return NULL; } +// Compute string similarity based on Dice's coefficient +static float str_similar(const char* str1, const char* str2, size_t len2) { + int len1 = (int) strlen(str1); + int total = len1 + (int) len2; + + int hit = 0; + + for (int i = 0; i < len1 -1; ++i) { + for (int j = 0; j < (int) len2 -1; ++j) { + if ((str1[i] == str2[j]) && (str1[i+1] == str2[j+1])) { + ++hit; + break; + } + } + } + + return 2.0f * (float) hit / (float) total; +} + +Flag* Flag::fuzzy_match(const char* name, size_t length, bool allow_locked) { + float VMOptionsFuzzyMatchSimilarity = 0.7f; + Flag* match = NULL; + float score; + float max_score = -1; + + for (Flag* current = &flagTable[0]; current->name != NULL; current++) { + score = str_similar(current->name, name, length); + if (score > max_score) { + max_score = score; + match = current; + } + } + + if (!(match->is_unlocked() || match->is_unlocker())) { + if (!allow_locked) { + return NULL; + } + } + + if (max_score < VMOptionsFuzzyMatchSimilarity) { + return NULL; + } + + return match; +} + // Returns the address of the index'th element static Flag* address_of_flag(CommandLineFlagWithType flag) { assert((size_t)flag < Flag::numFlags, "bad command line flag index"); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/globals.hpp --- a/src/share/vm/runtime/globals.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/globals.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -175,6 +175,7 @@ define_pd_global(intx, ReservedCodeCacheSize, 32*M); define_pd_global(intx, CodeCacheExpansionSize, 32*K); define_pd_global(intx, CodeCacheMinBlockLength, 1); +define_pd_global(intx, CodeCacheMinimumUseSpace, 200*K); define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(4*M)); define_pd_global(bool, NeverActAsServerClassMachine, true); define_pd_global(uint64_t,MaxRAM, 1ULL*G); @@ -220,7 +221,8 @@ // number of flags static size_t numFlags; - static Flag* find_flag(char* name, size_t length, bool allow_locked = false); + static Flag* find_flag(const char* name, size_t length, bool allow_locked = false); + static Flag* fuzzy_match(const char* name, size_t length, bool allow_locked = false); bool is_bool() const { return strcmp(type, "bool") == 0; } bool get_bool() const { return *((bool*) addr); } @@ -644,6 +646,9 @@ product(bool, UseAESIntrinsics, false, \ "use intrinsics for AES versions of crypto") \ \ + product(bool, UseCRC32Intrinsics, false, \ + "use intrinsics for java.util.zip.CRC32") \ + \ develop(bool, TraceCallFixup, false, \ "traces all call fixups") \ \ @@ -2311,6 +2316,10 @@ "Print diagnostic message when GC is stalled" \ "by JNI critical section") \ \ + experimental(double, ObjectCountCutOffPercent, 0.5, \ + "The percentage of the used heap that the instances of a class " \ + "must occupy for the class to generate a trace event.") \ + \ /* GC log rotation setting */ \ \ product(bool, UseGCLogFileRotation, false, \ @@ -3156,6 +3165,9 @@ product_pd(uintx, InitialCodeCacheSize, \ "Initial code cache size (in bytes)") \ \ + develop_pd(uintx, CodeCacheMinimumUseSpace, \ + "Minimum code cache size (in bytes) required to start VM.") \ + \ product_pd(uintx, ReservedCodeCacheSize, \ "Reserved code cache size (in bytes) - maximum code cache size") \ \ @@ -3668,6 +3680,9 @@ develop(bool, VerifyGenericSignatures, false, \ "Abort VM on erroneous or inconsistent generic signatures") \ \ + product(bool, ParseGenericDefaults, false, \ + "Parse generic signatures for default method handling") \ + \ product(bool, UseVMInterruptibleIO, false, \ 
"(Unstable, Solaris-specific) Thread interrupt before or with " \ "EINTR for I/O operations results in OS_INTRPT. The default value"\ @@ -3688,7 +3703,13 @@ experimental(uintx, ArrayAllocatorMallocLimit, \ SOLARIS_ONLY(64*K) NOT_SOLARIS(max_uintx), \ "Allocation less than this value will be allocated " \ - "using malloc. Larger allocations will use mmap.") + "using malloc. Larger allocations will use mmap.") \ + \ + product(bool, EnableTracing, false, \ + "Enable event-based tracing") \ + product(bool, UseLockedTracing, false, \ + "Use locked-tracing when doing event-based tracing") + /* * Macros for factoring of globals diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/handles.hpp --- a/src/share/vm/runtime/handles.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/handles.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -227,7 +227,7 @@ HandleArea* _prev; // link to outer (older) area public: // Constructor - HandleArea(HandleArea* prev) { + HandleArea(HandleArea* prev) : Arena(Chunk::tiny_size) { debug_only(_handle_mark_nesting = 0); debug_only(_no_handle_mark_nesting = 0); _prev = prev; diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/java.cpp --- a/src/share/vm/runtime/java.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/java.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,7 +42,6 @@ #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" #include "prims/jvmtiExport.hpp" -#include "runtime/aprofiler.hpp" #include "runtime/arguments.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/compilationPolicy.hpp" @@ -60,7 +59,6 @@ #include "services/memReporter.hpp" #include "services/memTracker.hpp" #include "trace/tracing.hpp" -#include "trace/traceEventTypes.hpp" #include "utilities/dtrace.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/histogram.hpp" @@ -510,16 +508,6 @@ } } - - if (Arguments::has_alloc_profile()) { - HandleMark hm; - // Do one last collection to enumerate all the objects - // allocated since the last one. - Universe::heap()->collect(GCCause::_allocation_profiler); - AllocationProfiler::disengage(); - AllocationProfiler::print(0); - } - if (PrintBytecodeHistogram) { BytecodeHistogram::print(); } @@ -528,9 +516,12 @@ JvmtiExport::post_thread_end(thread); } - EVENT_BEGIN(TraceEventThreadEnd, event); - EVENT_COMMIT(event, - EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj()))); + + EventThreadEnd event; + if (event.should_commit()) { + event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj())); + event.commit(); + } // Always call even when there are not JVMTI environments yet, since environments // may be attached late and JVMTI must track phases of VM execution diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/mutexLocker.cpp --- a/src/share/vm/runtime/mutexLocker.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/mutexLocker.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -270,13 +270,12 @@ def(MethodCompileQueue_lock , Monitor, nonleaf+4, true ); def(Debug2_lock , Mutex , nonleaf+4, true ); def(Debug3_lock , Mutex , nonleaf+4, true ); - def(ProfileVM_lock , Monitor, nonleaf+4, false); // used for profiling of the VMThread + def(ProfileVM_lock , Monitor, special, false); // used for profiling of the VMThread def(CompileThread_lock , Monitor, nonleaf+5, false ); - def(JfrQuery_lock , Monitor, nonleaf, true); // JFR locks, keep these in consecutive order - def(JfrMsg_lock , Monitor, nonleaf+2, true); - def(JfrBuffer_lock , Mutex, nonleaf+3, true); - def(JfrStream_lock , Mutex, nonleaf+4, true); + def(JfrMsg_lock , Monitor, leaf, true); + def(JfrBuffer_lock , Mutex, nonleaf+1, true); + def(JfrStream_lock , Mutex, nonleaf+2, true); def(PeriodicTask_lock , Monitor, nonleaf+5, true); } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/objectMonitor.cpp --- a/src/share/vm/runtime/objectMonitor.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/objectMonitor.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -36,7 +36,10 @@ #include "runtime/stubRoutines.hpp" #include "runtime/thread.inline.hpp" #include "services/threadService.hpp" +#include "trace/tracing.hpp" +#include "trace/traceMacros.hpp" #include "utilities/dtrace.hpp" +#include "utilities/macros.hpp" #include "utilities/preserveException.hpp" #ifdef TARGET_OS_FAMILY_linux # include "os_linux.inline.hpp" @@ -371,6 +374,8 @@ // Ensure the object-monitor relationship remains stable while there's contention. Atomic::inc_ptr(&_count); + EventJavaMonitorEnter event; + { // Change java thread status to indicate blocked on monitor enter. JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this); @@ -402,7 +407,7 @@ // _recursions = 0 ; _succ = NULL ; - exit (Self) ; + exit (false, Self) ; jt->java_suspend_self(); } @@ -435,6 +440,14 @@ if (JvmtiExport::should_post_monitor_contended_entered()) { JvmtiExport::post_monitor_contended_entered(jt, this); } + + if (event.should_commit()) { + event.set_klass(((oop)this->object())->klass()); + event.set_previousOwner((TYPE_JAVALANGTHREAD)_previous_owner_tid); + event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr())); + event.commit(); + } + if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) { ObjectMonitor::_sync_ContendedLockAttempts->inc() ; } @@ -917,7 +930,7 @@ // Both impinge on OS scalability. Given that, at most one thread parked on // a monitor will use a timer. 
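The exit() hunks just below thread a not_suspended flag through ObjectMonitor::exit so the thread-id bookkeeping is skipped when exiting on behalf of a suspended thread, while the enter path above wraps contention in an EventJavaMonitorEnter that is constructed before blocking and committed only after the lock is won. A minimal sketch of that construct-early/commit-late pattern, with EventStub standing in for the generated event class (all names here are invented for illustration):

    #include <cstdio>

    // Stand-in for a generated JFR-style event: construction samples the
    // start time; commit() writes the record only if the type is enabled.
    class EventStub {
      long _start;
      long _klass_id;
      static long ticks() { static long t = 0; return ++t; }  // toy clock
     public:
      EventStub() : _start(ticks()), _klass_id(0) {}
      bool should_commit() const { return true; }  // toy enablement check
      void set_klass(long id) { _klass_id = id; }
      void commit() {
        std::printf("monitor-enter: klass=%ld ticks=[%ld,%ld]\n",
                    _klass_id, _start, ticks());
      }
    };

    // Shape of the contended-enter path: create the event before parking,
    // fill fields and commit only once the monitor is actually owned.
    void contended_enter(long klass_id) {
      EventStub event;
      // ... spin/park until ownership is acquired ...
      if (event.should_commit()) {
        event.set_klass(klass_id);
        event.commit();
      }
    }

    int main() { contended_enter(42); }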
-void ATTR ObjectMonitor::exit(TRAPS) { +void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) { Thread * Self = THREAD ; if (THREAD != _owner) { if (THREAD->is_lock_owned((address) _owner)) { @@ -954,6 +967,14 @@ _Responsible = NULL ; } +#if INCLUDE_TRACE + // get the owner's thread id for the MonitorEnter event + // if it is enabled and the thread isn't suspended + if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) { + _previous_owner_tid = SharedRuntime::get_java_tid(Self); + } +#endif + for (;;) { assert (THREAD == _owner, "invariant") ; @@ -1343,7 +1364,7 @@ guarantee(Self == _owner, "complete_exit not owner"); intptr_t save = _recursions; // record the old recursion count _recursions = 0; // set the recursion level to be 0 - exit (Self) ; // exit the monitor + exit (true, Self) ; // exit the monitor guarantee (_owner != Self, "invariant"); return save; } @@ -1397,6 +1418,20 @@ for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ; return v ; } + +// helper method for posting a monitor wait event +void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event, + jlong notifier_tid, + jlong timeout, + bool timedout) { + event->set_klass(((oop)this->object())->klass()); + event->set_timeout((TYPE_ULONG)timeout); + event->set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr())); + event->set_notifier((TYPE_OSTHREAD)notifier_tid); + event->set_timedOut((TYPE_BOOLEAN)timedout); + event->commit(); +} + // ----------------------------------------------------------------------------- // Wait/Notify/NotifyAll // @@ -1412,6 +1447,8 @@ // Throw IMSX or IEX. CHECK_OWNER(); + EventJavaMonitorWait event; + // check for a pending interrupt if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) { // post monitor waited event. Note that this is past-tense, we are done waiting. @@ -1420,10 +1457,14 @@ // wait was not timed out due to thread interrupt. 
JvmtiExport::post_monitor_waited(jt, this, false); } + if (event.should_commit()) { + post_monitor_wait_event(&event, 0, millis, false); + } TEVENT (Wait - Throw IEX) ; THROW(vmSymbols::java_lang_InterruptedException()); return ; } + TEVENT (Wait) ; assert (Self->_Stalled == 0, "invariant") ; @@ -1455,7 +1496,7 @@ intptr_t save = _recursions; // record the old recursion count _waiters++; // increment the number of waiters _recursions = 0; // set the recursion level to be 1 - exit (Self) ; // exit the monitor + exit (true, Self) ; // exit the monitor guarantee (_owner != Self, "invariant") ; // As soon as the ObjectMonitor's ownership is dropped in the exit() @@ -1555,6 +1596,11 @@ if (JvmtiExport::should_post_monitor_waited()) { JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT); } + + if (event.should_commit()) { + post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT); + } + OrderAccess::fence() ; assert (Self->_Stalled != 0, "invariant") ; @@ -1634,6 +1680,8 @@ iterator->TState = ObjectWaiter::TS_ENTER ; } iterator->_notified = 1 ; + Thread * Self = THREAD; + iterator->_notifier_tid = Self->osthread()->thread_id(); ObjectWaiter * List = _EntryList ; if (List != NULL) { @@ -1758,6 +1806,8 @@ guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ; guarantee (iterator->_notified == 0, "invariant") ; iterator->_notified = 1 ; + Thread * Self = THREAD; + iterator->_notifier_tid = Self->osthread()->thread_id(); if (Policy != 4) { iterator->TState = ObjectWaiter::TS_ENTER ; } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/objectMonitor.hpp --- a/src/share/vm/runtime/objectMonitor.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/objectMonitor.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,6 @@ #include "runtime/park.hpp" #include "runtime/perfData.hpp" - // ObjectWaiter serves as a "proxy" or surrogate thread. // TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific // ParkEvent instead. Beware, however, that the JVMTI code @@ -43,6 +42,7 @@ ObjectWaiter * volatile _next; ObjectWaiter * volatile _prev; Thread* _thread; + jlong _notifier_tid; ParkEvent * _event; volatile int _notified ; volatile TStates TState ; @@ -55,6 +55,9 @@ void wait_reenter_end(ObjectMonitor *mon); }; +// forward declaration to avoid include tracing.hpp +class EventJavaMonitorWait; + // WARNING: // This is a very sensitive and fragile class. DO NOT make any // change unless you are fully aware of the underlying semantics. 
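The objectMonitor.hpp hunks here add a _notifier_tid slot to ObjectWaiter (stamped by the notify/notifyAll hunks above) and a _previous_owner_tid to the monitor itself, so the JavaMonitorWait event can report which thread performed the wakeup. A schematic of that handoff, assuming the notifier still holds the monitor when it writes the id so a plain store suffices (ObjectWaiterStub and both functions are invented names, not HotSpot code):

    #include <cstdio>

    struct ObjectWaiterStub {
      long notifier_tid;  // written by the notifying thread
      bool notified;
    };

    // Runs in the notifier while it still owns the monitor; the waiter
    // cannot resume until it re-enters, so no extra synchronization is
    // needed for this store.
    void notify_one(ObjectWaiterStub* waiter, long self_tid) {
      waiter->notified = true;
      waiter->notifier_tid = self_tid;
    }

    // Runs in the waiter after it wakes and re-acquires the monitor; the
    // recorded id becomes the event's "notifier" field.
    void after_wait(const ObjectWaiterStub* waiter) {
      if (waiter->notified) {
        std::printf("JavaMonitorWait: woken by thread %ld\n",
                    waiter->notifier_tid);
      }
    }

    int main() {
      ObjectWaiterStub w = {0, false};
      notify_one(&w, 7);
      after_wait(&w);
    }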
@@ -151,6 +154,7 @@ _SpinFreq = 0 ; _SpinClock = 0 ; OwnerIsThread = 0 ; + _previous_owner_tid = 0; } ~ObjectMonitor() { @@ -192,7 +196,7 @@ bool try_enter (TRAPS) ; void enter(TRAPS); - void exit(TRAPS); + void exit(bool not_suspended, TRAPS); void wait(jlong millis, bool interruptable, TRAPS); void notify(TRAPS); void notifyAll(TRAPS); @@ -218,6 +222,10 @@ void ctAsserts () ; void ExitEpilog (Thread * Self, ObjectWaiter * Wakee) ; bool ExitSuspendEquivalent (JavaThread * Self) ; + void post_monitor_wait_event(EventJavaMonitorWait * event, + jlong notifier_tid, + jlong timeout, + bool timedout); private: friend class ObjectSynchronizer; @@ -240,6 +248,7 @@ protected: // protected for jvmtiRawMonitor void * volatile _owner; // pointer to owning thread OR BasicLock + volatile jlong _previous_owner_tid; // thread id of the previous owner of the monitor volatile intptr_t _recursions; // recursion count, 0 for first entry private: int OwnerIsThread ; // _owner is (Thread *) vs SP/BasicLock diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/os.cpp --- a/src/share/vm/runtime/os.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/os.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -265,8 +265,7 @@ VMThread::execute(&op1); Universe::print_heap_at_SIGBREAK(); if (PrintClassHistogram) { - VM_GC_HeapInspection op1(gclog_or_tty, true /* force full GC before heap inspection */, - true /* need_prologue */); + VM_GC_HeapInspection op1(gclog_or_tty, true /* force full GC before heap inspection */); VMThread::execute(&op1); } if (JvmtiExport::should_post_data_dump()) { @@ -648,10 +647,13 @@ #ifndef ASSERT NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1)); NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size)); + MemTracker::Tracker tkr = MemTracker::get_realloc_tracker(); void* ptr = ::realloc(memblock, size); if (ptr != NULL) { - MemTracker::record_realloc((address)memblock, (address)ptr, size, memflags, + tkr.record((address)memblock, (address)ptr, size, memflags, caller == 0 ? 
CALLER_PC : caller); + } else { + tkr.discard(); } return ptr; #else @@ -1444,15 +1446,20 @@ return (int) i; } +void os::SuspendedThreadTask::run() { + assert(Threads_lock->owned_by_self() || (_thread == VMThread::vm_thread()), "must have threads lock to call this"); + internal_do_task(); + _done = true; +} + bool os::create_stack_guard_pages(char* addr, size_t bytes) { return os::pd_create_stack_guard_pages(addr, bytes); } - char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) { char* result = pd_reserve_memory(bytes, addr, alignment_hint); if (result != NULL) { - MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); + MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC); } return result; @@ -1462,7 +1469,7 @@ MEMFLAGS flags) { char* result = pd_reserve_memory(bytes, addr, alignment_hint); if (result != NULL) { - MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); + MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC); MemTracker::record_virtual_memory_type((address)result, flags); } @@ -1472,7 +1479,7 @@ char* os::attempt_reserve_memory_at(size_t bytes, char* addr) { char* result = pd_attempt_reserve_memory_at(bytes, addr); if (result != NULL) { - MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); + MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC); } return result; } @@ -1499,18 +1506,36 @@ return res; } +void os::commit_memory_or_exit(char* addr, size_t bytes, bool executable, + const char* mesg) { + pd_commit_memory_or_exit(addr, bytes, executable, mesg); + MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC); +} + +void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint, + bool executable, const char* mesg) { + os::pd_commit_memory_or_exit(addr, size, alignment_hint, executable, mesg); + MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC); +} + bool os::uncommit_memory(char* addr, size_t bytes) { + MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker(); bool res = pd_uncommit_memory(addr, bytes); if (res) { - MemTracker::record_virtual_memory_uncommit((address)addr, bytes); + tkr.record((address)addr, bytes); + } else { + tkr.discard(); } return res; } bool os::release_memory(char* addr, size_t bytes) { + MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); bool res = pd_release_memory(addr, bytes); if (res) { - MemTracker::record_virtual_memory_release((address)addr, bytes); + tkr.record((address)addr, bytes); + } else { + tkr.discard(); } return res; } @@ -1521,8 +1546,7 @@ bool allow_exec) { char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec); if (result != NULL) { - MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); - MemTracker::record_virtual_memory_commit((address)result, bytes, CALLER_PC); + MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, mtNone, CALLER_PC); } return result; } @@ -1535,10 +1559,12 @@ } bool os::unmap_memory(char *addr, size_t bytes) { + MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); bool result = pd_unmap_memory(addr, bytes); if (result) { - MemTracker::record_virtual_memory_uncommit((address)addr, bytes); - MemTracker::record_virtual_memory_release((address)addr, bytes); + tkr.record((address)addr, bytes); + } else { + tkr.discard(); } 
return result; } @@ -1551,3 +1577,19 @@ pd_realign_memory(addr, bytes, alignment_hint); } +#ifndef TARGET_OS_FAMILY_windows +/* try to switch state from state "from" to state "to" + * returns the state set after the method is complete + */ +os::SuspendResume::State os::SuspendResume::switch_state(os::SuspendResume::State from, + os::SuspendResume::State to) +{ + os::SuspendResume::State result = + (os::SuspendResume::State) Atomic::cmpxchg((jint) to, (jint *) &_state, (jint) from); + if (result == from) { + // success + return to; + } + return result; +} +#endif diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/os.hpp --- a/src/share/vm/runtime/os.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/os.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -78,6 +78,10 @@ CriticalPriority = 11 // Critical thread priority }; +// Executable parameter flag for os::commit_memory() and +// os::commit_memory_or_exit(). +const bool ExecMem = true; + // Typedef for structured exception handling support typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread); @@ -104,9 +108,16 @@ static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr); static void pd_split_reserved_memory(char *base, size_t size, size_t split, bool realloc); - static bool pd_commit_memory(char* addr, size_t bytes, bool executable = false); + static bool pd_commit_memory(char* addr, size_t bytes, bool executable); static bool pd_commit_memory(char* addr, size_t size, size_t alignment_hint, - bool executable = false); + bool executable); + // Same as pd_commit_memory() that either succeeds or calls + // vm_exit_out_of_memory() with the specified mesg. + static void pd_commit_memory_or_exit(char* addr, size_t bytes, + bool executable, const char* mesg); + static void pd_commit_memory_or_exit(char* addr, size_t size, + size_t alignment_hint, + bool executable, const char* mesg); static bool pd_uncommit_memory(char* addr, size_t bytes); static bool pd_release_memory(char* addr, size_t bytes); @@ -261,9 +272,16 @@ static char* attempt_reserve_memory_at(size_t bytes, char* addr); static void split_reserved_memory(char *base, size_t size, size_t split, bool realloc); - static bool commit_memory(char* addr, size_t bytes, bool executable = false); + static bool commit_memory(char* addr, size_t bytes, bool executable); static bool commit_memory(char* addr, size_t size, size_t alignment_hint, - bool executable = false); + bool executable); + // Same as commit_memory() that either succeeds or calls + // vm_exit_out_of_memory() with the specified mesg. + static void commit_memory_or_exit(char* addr, size_t bytes, + bool executable, const char* mesg); + static void commit_memory_or_exit(char* addr, size_t size, + size_t alignment_hint, + bool executable, const char* mesg); static bool uncommit_memory(char* addr, size_t bytes); static bool release_memory(char* addr, size_t bytes); @@ -489,16 +507,16 @@ // Symbol lookup, find nearest function name; basically it implements // dladdr() for all platforms. Name of the nearest function is copied - // to buf. Distance from its base address is returned as offset. + // to buf. Distance from its base address is optionally returned as offset. // If function name is not found, buf[0] is set to '\0' and offset is - // set to -1. + // set to -1 (if offset is non-NULL). static bool dll_address_to_function_name(address addr, char* buf, int buflen, int* offset); // Locate DLL/DSO. 
On success, full path of the library is copied to - // buf, and offset is set to be the distance between addr and the - // library's base address. On failure, buf[0] is set to '\0' and - // offset is set to -1. + // buf, and offset is optionally set to be the distance between addr + // and the library's base address. On failure, buf[0] is set to '\0' + // and offset is set to -1 (if offset is non-NULL). static bool dll_address_to_library_name(address addr, char* buf, int buflen, int* offset); @@ -781,6 +799,104 @@ // ResumeThread call) static void pause(); + class SuspendedThreadTaskContext { + public: + SuspendedThreadTaskContext(Thread* thread, void *ucontext) : _thread(thread), _ucontext(ucontext) {} + Thread* thread() const { return _thread; } + void* ucontext() const { return _ucontext; } + private: + Thread* _thread; + void* _ucontext; + }; + + class SuspendedThreadTask { + public: + SuspendedThreadTask(Thread* thread) : _thread(thread), _done(false) {} + virtual ~SuspendedThreadTask() {} + void run(); + bool is_done() { return _done; } + virtual void do_task(const SuspendedThreadTaskContext& context) = 0; + protected: + private: + void internal_do_task(); + Thread* _thread; + bool _done; + }; + +#ifndef TARGET_OS_FAMILY_windows + // Suspend/resume support + // Protocol: + // + // a thread starts in SR_RUNNING + // + // SR_RUNNING can go to + // * SR_SUSPEND_REQUEST when the WatcherThread wants to suspend it + // SR_SUSPEND_REQUEST can go to + // * SR_RUNNING if WatcherThread decides it waited for SR_SUSPENDED too long (timeout) + // * SR_SUSPENDED if the stopped thread receives the signal and switches state + // SR_SUSPENDED can go to + // * SR_WAKEUP_REQUEST when the WatcherThread has done the work and wants to resume + // SR_WAKEUP_REQUEST can go to + // * SR_RUNNING when the stopped thread receives the signal + // * SR_WAKEUP_REQUEST on timeout (resend the signal and try again) + class SuspendResume { + public: + enum State { + SR_RUNNING, + SR_SUSPEND_REQUEST, + SR_SUSPENDED, + SR_WAKEUP_REQUEST + }; + + private: + volatile State _state; + + private: + /* try to switch state from state "from" to state "to" + * returns the state set after the method is complete + */ + State switch_state(State from, State to); + + public: + SuspendResume() : _state(SR_RUNNING) { } + + State state() const { return _state; } + + State request_suspend() { + return switch_state(SR_RUNNING, SR_SUSPEND_REQUEST); + } + + State cancel_suspend() { + return switch_state(SR_SUSPEND_REQUEST, SR_RUNNING); + } + + State suspended() { + return switch_state(SR_SUSPEND_REQUEST, SR_SUSPENDED); + } + + State request_wakeup() { + return switch_state(SR_SUSPENDED, SR_WAKEUP_REQUEST); + } + + State running() { + return switch_state(SR_WAKEUP_REQUEST, SR_RUNNING); + } + + bool is_running() const { + return _state == SR_RUNNING; + } + + bool is_suspend_request() const { + return _state == SR_SUSPEND_REQUEST; + } + + bool is_suspended() const { + return _state == SR_SUSPENDED; + } + }; +#endif + + protected: static long _rand_seed; // seed for random number generator static int _processor_count; // number of processors @@ -799,8 +915,6 @@ // of the global SpinPause() with C linkage. // It'd also be eligible for inlining on many platforms. 
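The deletion just below drops the hand-declared extern "C" SafeFetch32/SafeFetchN entry points; the stubRoutines hunks further down replace them with runtime-generated stubs whose fault and continuation PCs are published so the VM signal handler can resume a faulting fetch. For intuition only, here is a POSIX sigsetjmp model of the SafeFetch contract (SafeFetch32_demo and its bad test address are invented; the real stubs need no per-call signal-handler setup):

    #include <csetjmp>
    #include <csignal>
    #include <cstdio>

    // Try the load; if it faults, bounce out of the SIGSEGV handler and
    // return errValue. HotSpot instead checks whether the faulting PC is
    // _safefetch32_fault_pc and resumes at the recorded continuation PC.
    static sigjmp_buf safefetch_env;
    static void segv_handler(int) { siglongjmp(safefetch_env, 1); }

    int SafeFetch32_demo(int* adr, int errValue) {
      struct sigaction sa, old;
      sa.sa_handler = segv_handler;
      sigemptyset(&sa.sa_mask);
      sa.sa_flags = 0;
      sigaction(SIGSEGV, &sa, &old);
      volatile int result = errValue;   // volatile: survives the longjmp
      if (sigsetjmp(safefetch_env, 1) == 0) {
        result = *adr;                  // may fault on a bad address
      }
      sigaction(SIGSEGV, &old, 0);
      return result;
    }

    int main() {
      int x = 17;
      std::printf("%d\n", SafeFetch32_demo(&x, -1));        // prints 17
      std::printf("%d\n", SafeFetch32_demo((int*)32, -1));  // -1 on most systems
    }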
-extern "C" int SpinPause () ; -extern "C" int SafeFetch32 (int * adr, int errValue) ; -extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t errValue) ; +extern "C" int SpinPause(); #endif // SHARE_VM_RUNTIME_OS_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/perfData.cpp --- a/src/share/vm/runtime/perfData.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/perfData.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -323,6 +323,10 @@ } } +PerfData* PerfDataManager::find_by_name(const char* name) { + return _all->find_by_name(name); +} + PerfDataList* PerfDataManager::all() { MutexLocker ml(PerfDataManager_lock); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/perfData.hpp --- a/src/share/vm/runtime/perfData.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/perfData.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -693,6 +693,9 @@ // the given name. static bool exists(const char* name) { return _all->contains(name); } + // method to search for a instrumentation object by name + static PerfData* find_by_name(const char* name); + // method to map a CounterNS enumeration to a namespace string static const char* ns_to_string(CounterNS ns) { return _name_spaces[ns]; diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/reflection.cpp --- a/src/share/vm/runtime/reflection.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/reflection.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -458,7 +458,7 @@ // doesn't have a classloader. if ((current_class == NULL) || (current_class == new_class) || - (InstanceKlass::cast(new_class)->is_public()) || + (new_class->is_public()) || is_same_class_package(current_class, new_class)) { return true; } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/sharedRuntime.cpp --- a/src/share/vm/runtime/sharedRuntime.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/sharedRuntime.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -813,8 +813,11 @@ // 3. Implict null exception in nmethod if (!cb->is_nmethod()) { - guarantee(cb->is_adapter_blob() || cb->is_method_handles_adapter_blob(), - "exception happened outside interpreter, nmethods and vtable stubs (1)"); + bool is_in_blob = cb->is_adapter_blob() || cb->is_method_handles_adapter_blob(); + if (!is_in_blob) { + cb->print(); + fatal(err_msg("exception happened outside interpreter, nmethods and vtable stubs at pc " INTPTR_FORMAT, pc)); + } Events::log_exception(thread, "NullPointerException in code blob at " INTPTR_FORMAT, pc); // There is no handler here, so we will simply unwind. return StubRoutines::throw_NullPointerException_at_call_entry(); @@ -2731,7 +2734,7 @@ // ResourceObject, so do not put any ResourceMarks in here. char *s = sig->as_C_string(); int len = (int)strlen(s); - *s++; len--; // Skip opening paren + s++; len--; // Skip opening paren char *t = s+len; while( *(--t) != ')' ) ; // Find close paren diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/stubRoutines.cpp --- a/src/share/vm/runtime/stubRoutines.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/stubRoutines.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -125,6 +125,9 @@ address StubRoutines::_cipherBlockChaining_encryptAESCrypt = NULL; address StubRoutines::_cipherBlockChaining_decryptAESCrypt = NULL; +address StubRoutines::_updateBytesCRC32 = NULL; +address StubRoutines::_crc_table_adr = NULL; + double (* StubRoutines::_intrinsic_log )(double) = NULL; double (* StubRoutines::_intrinsic_log10 )(double) = NULL; double (* StubRoutines::_intrinsic_exp )(double) = NULL; @@ -133,6 +136,13 @@ double (* StubRoutines::_intrinsic_cos )(double) = NULL; double (* StubRoutines::_intrinsic_tan )(double) = NULL; +address StubRoutines::_safefetch32_entry = NULL; +address StubRoutines::_safefetch32_fault_pc = NULL; +address StubRoutines::_safefetch32_continuation_pc = NULL; +address StubRoutines::_safefetchN_entry = NULL; +address StubRoutines::_safefetchN_fault_pc = NULL; +address StubRoutines::_safefetchN_continuation_pc = NULL; + // Initialization // // Note: to break cycle with universe initialization, stubs are generated in two phases. diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/stubRoutines.hpp --- a/src/share/vm/runtime/stubRoutines.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/stubRoutines.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -204,6 +204,9 @@ static address _cipherBlockChaining_encryptAESCrypt; static address _cipherBlockChaining_decryptAESCrypt; + static address _updateBytesCRC32; + static address _crc_table_adr; + // These are versions of the java.lang.Math methods which perform // the same operations as the intrinsic version. They are used for // constant folding in the compiler to ensure equivalence. If the @@ -218,11 +221,21 @@ static double (*_intrinsic_cos)(double); static double (*_intrinsic_tan)(double); + // Safefetch stubs. 
+ static address _safefetch32_entry; + static address _safefetch32_fault_pc; + static address _safefetch32_continuation_pc; + static address _safefetchN_entry; + static address _safefetchN_fault_pc; + static address _safefetchN_continuation_pc; + public: // Initialization/Testing static void initialize1(); // must happen before universe::genesis static void initialize2(); // must happen after universe::genesis + static bool is_stub_code(address addr) { return contains(addr); } + static bool contains(address addr) { return (_code1 != NULL && _code1->blob_contains(addr)) || @@ -340,6 +353,9 @@ static address cipherBlockChaining_encryptAESCrypt() { return _cipherBlockChaining_encryptAESCrypt; } static address cipherBlockChaining_decryptAESCrypt() { return _cipherBlockChaining_decryptAESCrypt; } + static address updateBytesCRC32() { return _updateBytesCRC32; } + static address crc_table_addr() { return _crc_table_adr; } + static address select_fill_function(BasicType t, bool aligned, const char* &name); static address zero_aligned_words() { return _zero_aligned_words; } @@ -374,6 +390,34 @@ } // + // Safefetch stub support + // + + typedef int (*SafeFetch32Stub)(int* adr, int errValue); + typedef intptr_t (*SafeFetchNStub) (intptr_t* adr, intptr_t errValue); + + static SafeFetch32Stub SafeFetch32_stub() { return CAST_TO_FN_PTR(SafeFetch32Stub, _safefetch32_entry); } + static SafeFetchNStub SafeFetchN_stub() { return CAST_TO_FN_PTR(SafeFetchNStub, _safefetchN_entry); } + + static bool is_safefetch_fault(address pc) { + return pc != NULL && + (pc == _safefetch32_fault_pc || + pc == _safefetchN_fault_pc); + } + + static address continuation_for_safefetch_fault(address pc) { + assert(_safefetch32_continuation_pc != NULL && + _safefetchN_continuation_pc != NULL, + "not initialized"); + + if (pc == _safefetch32_fault_pc) return _safefetch32_continuation_pc; + if (pc == _safefetchN_fault_pc) return _safefetchN_continuation_pc; + + ShouldNotReachHere(); + return NULL; + } + + // // Default versions of the above arraycopy functions for platforms which do // not have specialized versions // @@ -392,4 +436,15 @@ static void arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count); }; +// Safefetch allows to load a value from a location that's not known +// to be valid. If the load causes a fault, the error value is returned. +inline int SafeFetch32(int* adr, int errValue) { + assert(StubRoutines::SafeFetch32_stub(), "stub not yet generated"); + return StubRoutines::SafeFetch32_stub()(adr, errValue); +} +inline intptr_t SafeFetchN(intptr_t* adr, intptr_t errValue) { + assert(StubRoutines::SafeFetchN_stub(), "stub not yet generated"); + return StubRoutines::SafeFetchN_stub()(adr, errValue); +} + #endif // SHARE_VM_RUNTIME_STUBROUTINES_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/sweeper.cpp --- a/src/share/vm/runtime/sweeper.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/sweeper.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,7 @@ #include "runtime/os.hpp" #include "runtime/sweeper.hpp" #include "runtime/vm_operations.hpp" +#include "trace/tracing.hpp" #include "utilities/events.hpp" #include "utilities/xmlstream.hpp" @@ -130,6 +131,9 @@ long NMethodSweeper::_traversals = 0; // No. of stack traversals performed nmethod* NMethodSweeper::_current = NULL; // Current nmethod int NMethodSweeper::_seen = 0 ; // No. of nmethods we have currently processed in current pass of CodeCache +int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep +int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep +int NMethodSweeper::_marked_count = 0; // Nof. nmethods marked for reclaim in current sweep volatile int NMethodSweeper::_invocations = 0; // No. of invocations left until we are completed with this pass volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress. @@ -143,6 +147,15 @@ int NMethodSweeper::_dead_compile_ids = 0; long NMethodSweeper::_last_flush_traversal_id = 0; +int NMethodSweeper::_number_of_flushes = 0; // Total of full traversals caused by full cache +int NMethodSweeper::_total_nof_methods_reclaimed = 0; +jlong NMethodSweeper::_total_time_sweeping = 0; +jlong NMethodSweeper::_total_time_this_sweep = 0; +jlong NMethodSweeper::_peak_sweep_time = 0; +jlong NMethodSweeper::_peak_sweep_fraction_time = 0; +jlong NMethodSweeper::_total_disconnect_time = 0; +jlong NMethodSweeper::_peak_disconnect_time = 0; + class MarkActivationClosure: public CodeBlobClosure { public: virtual void do_code_blob(CodeBlob* cb) { @@ -176,6 +189,8 @@ _invocations = NmethodSweepFraction; _current = CodeCache::first_nmethod(); _traversals += 1; + _total_time_this_sweep = 0; + if (PrintMethodFlushing) { tty->print_cr("### Sweep: stack traversal %d", _traversals); } @@ -229,12 +244,13 @@ } void NMethodSweeper::sweep_code_cache() { -#ifdef ASSERT - jlong sweep_start; - if (PrintMethodFlushing) { - sweep_start = os::javaTimeMillis(); - } -#endif + + jlong sweep_start_counter = os::elapsed_counter(); + + _flushed_count = 0; + _zombified_count = 0; + _marked_count = 0; + if (PrintMethodFlushing && Verbose) { tty->print_cr("### Sweep at %d out of %d. 
Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations); } @@ -302,14 +318,34 @@ } } + jlong sweep_end_counter = os::elapsed_counter(); + jlong sweep_time = sweep_end_counter - sweep_start_counter; + _total_time_sweeping += sweep_time; + _total_time_this_sweep += sweep_time; + _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time); + _total_nof_methods_reclaimed += _flushed_count; + + EventSweepCodeCache event(UNTIMED); + if (event.should_commit()) { + event.set_starttime(sweep_start_counter); + event.set_endtime(sweep_end_counter); + event.set_sweepIndex(_traversals); + event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1); + event.set_sweptCount(todo); + event.set_flushedCount(_flushed_count); + event.set_markedCount(_marked_count); + event.set_zombifiedCount(_zombified_count); + event.commit(); + } + #ifdef ASSERT if(PrintMethodFlushing) { - jlong sweep_end = os::javaTimeMillis(); - tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start); + tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, (jlong)sweep_time); } #endif if (_invocations == 1) { + _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep); log_sweep("finished"); } @@ -388,12 +424,14 @@ tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm); } release_nmethod(nm); + _flushed_count++; } else { if (PrintMethodFlushing && Verbose) { tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm); } nm->mark_for_reclamation(); _resweep = true; + _marked_count++; SWEEP(nm); } } else if (nm->is_not_entrant()) { @@ -405,6 +443,7 @@ } nm->make_zombie(); _resweep = true; + _zombified_count++; SWEEP(nm); } else { // Still alive, clean up its inline caches @@ -420,13 +459,16 @@ // Unloaded code, just make it a zombie if (PrintMethodFlushing && Verbose) tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm); + if (nm->is_osr_method()) { SWEEP(nm); // No inline caches will ever point to osr methods, so we can just remove it release_nmethod(nm); + _flushed_count++; } else { nm->make_zombie(); _resweep = true; + _zombified_count++; SWEEP(nm); } } else { @@ -484,7 +526,7 @@ // If there was a race in detecting full code cache, only run // one vm op for it or keep the compiler shut off - debug_only(jlong start = os::javaTimeMillis();) + jlong disconnect_start_counter = os::elapsed_counter(); // Traverse the code cache trying to dump the oldest nmethods int curr_max_comp_id = CompileBroker::get_compilation_id(); @@ -541,13 +583,28 @@ _last_full_flush_time = os::javaTimeMillis(); } + jlong disconnect_end_counter = os::elapsed_counter(); + jlong disconnect_time = disconnect_end_counter - disconnect_start_counter; + _total_disconnect_time += disconnect_time; + _peak_disconnect_time = MAX2(disconnect_time, _peak_disconnect_time); + + EventCleanCodeCache event(UNTIMED); + if (event.should_commit()) { + event.set_starttime(disconnect_start_counter); + event.set_endtime(disconnect_end_counter); + event.set_disconnectedCount(disconnected); + event.set_madeNonEntrantCount(made_not_entrant); + event.commit(); + } + _number_of_flushes++; + // After two more traversals the sweeper will get rid of unrestored nmethods _last_flush_traversal_id = _traversals; _resweep = true; #ifdef ASSERT - jlong end = os::javaTimeMillis(); + if(PrintMethodFlushing && Verbose) { - tty->print_cr("### sweeper: unload 
time: " INT64_FORMAT, end-start); + tty->print_cr("### sweeper: unload time: " INT64_FORMAT, (jlong)disconnect_time); } #endif } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/sweeper.hpp --- a/src/share/vm/runtime/sweeper.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/sweeper.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,9 +31,12 @@ // class NMethodSweeper : public AllStatic { - static long _traversals; // Stack traversal count - static nmethod* _current; // Current nmethod - static int _seen; // Nof. nmethod we have currently processed in current pass of CodeCache + static long _traversals; // Stack scan count, also sweep ID. + static nmethod* _current; // Current nmethod + static int _seen; // Nof. nmethod we have currently processed in current pass of CodeCache + static int _flushed_count; // Nof. nmethods flushed in current sweep + static int _zombified_count; // Nof. nmethods made zombie in current sweep + static int _marked_count; // Nof. nmethods marked for reclaim in current sweep static volatile int _invocations; // No. of invocations left until we are completed with this pass static volatile int _sweep_started; // Flag to control conc sweeper @@ -53,6 +56,16 @@ static int _highest_marked; // highest compile id dumped at last emergency unloading static int _dead_compile_ids; // number of compile ids that where not in the cache last flush + // Stat counters + static int _number_of_flushes; // Total of full traversals caused by full cache + static int _total_nof_methods_reclaimed; // Accumulated nof methods flushed + static jlong _total_time_sweeping; // Accumulated time sweeping + static jlong _total_time_this_sweep; // Total time this sweep + static jlong _peak_sweep_time; // Peak time for a full sweep + static jlong _peak_sweep_fraction_time; // Peak time sweeping one fraction + static jlong _total_disconnect_time; // Total time cleaning code mem + static jlong _peak_disconnect_time; // Peak time cleaning code mem + static void process_nmethod(nmethod *nm); static void release_nmethod(nmethod* nm); @@ -60,7 +73,14 @@ static bool sweep_in_progress(); public: - static long traversal_count() { return _traversals; } + static long traversal_count() { return _traversals; } + static int number_of_flushes() { return _number_of_flushes; } + static int total_nof_methods_reclaimed() { return _total_nof_methods_reclaimed; } + static jlong total_time_sweeping() { return _total_time_sweeping; } + static jlong peak_sweep_time() { return _peak_sweep_time; } + static jlong peak_sweep_fraction_time() { return _peak_sweep_fraction_time; } + static jlong total_disconnect_time() { return _total_disconnect_time; } + static jlong peak_disconnect_time() { return _peak_disconnect_time; } #ifdef ASSERT // Keep track of sweeper activity in the ring buffer diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/synchronizer.cpp --- a/src/share/vm/runtime/synchronizer.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/synchronizer.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -213,7 +213,7 @@ } } - ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ; + ObjectSynchronizer::inflate(THREAD, object)->exit (true, THREAD) ; } // 
----------------------------------------------------------------------------- @@ -343,7 +343,7 @@ // If this thread has locked the object, exit the monitor. Note: can't use // monitor->check(CHECK); must exit even if an exception is pending. if (monitor->check(THREAD)) { - monitor->exit(THREAD); + monitor->exit(true, THREAD); } } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/task.cpp --- a/src/share/vm/runtime/task.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/task.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -114,9 +114,11 @@ disenroll(); } +/* enroll could be called from a JavaThread, so we have to check for + * safepoint when taking the lock to avoid deadlocking */ void PeriodicTask::enroll() { MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ? - NULL : PeriodicTask_lock, Mutex::_no_safepoint_check_flag); + NULL : PeriodicTask_lock); if (_num_tasks == PeriodicTask::max_tasks) { fatal("Overflow in PeriodicTask table"); @@ -131,9 +133,11 @@ } } +/* disenroll could be called from a JavaThread, so we have to check for + * safepoint when taking the lock to avoid deadlocking */ void PeriodicTask::disenroll() { MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ? - NULL : PeriodicTask_lock, Mutex::_no_safepoint_check_flag); + NULL : PeriodicTask_lock); int index; for(index = 0; index < _num_tasks && _tasks[index] != this; index++) diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/thread.cpp --- a/src/share/vm/runtime/thread.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/thread.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -45,7 +45,6 @@ #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" #include "prims/privilegedStack.hpp" -#include "runtime/aprofiler.hpp" #include "runtime/arguments.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/deoptimization.hpp" @@ -77,7 +76,8 @@ #include "services/management.hpp" #include "services/memTracker.hpp" #include "services/threadService.hpp" -#include "trace/traceEventTypes.hpp" +#include "trace/tracing.hpp" +#include "trace/traceMacros.hpp" #include "utilities/defaultStream.hpp" #include "utilities/dtrace.hpp" #include "utilities/events.hpp" @@ -219,7 +219,7 @@ set_osthread(NULL); set_resource_area(new (mtThread)ResourceArea()); set_handle_area(new (mtThread) HandleArea(NULL)); - set_metadata_handles(new (ResourceObj::C_HEAP, mtClass) GrowableArray(300, true)); + set_metadata_handles(new (ResourceObj::C_HEAP, mtClass) GrowableArray(30, true)); set_active_handles(NULL); set_free_handle_block(NULL); set_last_handle_mark(NULL); @@ -238,7 +238,6 @@ CHECK_UNHANDLED_OOPS_ONLY(_gc_locked_out_count = 0;) _jvmti_env_iteration_count = 0; set_allocated_bytes(0); - set_trace_buffer(NULL); _vm_operation_started_count = 0; _vm_operation_completed_count = 0; _current_pending_monitor = NULL; @@ -1659,9 +1658,11 @@ JvmtiExport::post_thread_start(this); } - EVENT_BEGIN(TraceEventThreadStart, event); - EVENT_COMMIT(event, - EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(this->threadObj()))); + EventThreadStart event; + if (event.should_commit()) { + event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj())); + event.commit(); + } // We call another function to do the 
rest so we are sure that the stack addresses used // from there will be lower than the stack base just computed @@ -1791,9 +1792,11 @@ // Called before the java thread exit since we want to read info // from java_lang_Thread object - EVENT_BEGIN(TraceEventThreadEnd, event); - EVENT_COMMIT(event, - EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(this->threadObj()))); + EventThreadEnd event; + if (event.should_commit()) { + event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj())); + event.commit(); + } // Call after last event on thread EVENT_THREAD_EXIT(this); @@ -3648,8 +3651,8 @@ // Notify JVMTI agents that VM initialization is complete - nop if no agents. JvmtiExport::post_vm_initialized(); - if (!TRACE_START()) { - vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION)); + if (TRACE_START() != JNI_OK) { + vm_exit_during_initialization("Failed to start tracing backend."); } if (CleanChunkPoolAsync) { @@ -3673,7 +3676,6 @@ } if (Arguments::has_profile()) FlatProfiler::engage(main_thread, true); - if (Arguments::has_alloc_profile()) AllocationProfiler::engage(); if (MemProfiling) MemProfiler::engage(); StatSampler::engage(); if (CheckJNICalls) JniPeriodicChecker::engage(); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/thread.hpp --- a/src/share/vm/runtime/thread.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/thread.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -47,7 +47,8 @@ #include "services/memRecorder.hpp" #endif // INCLUDE_NMT -#include "trace/tracing.hpp" +#include "trace/traceBackend.hpp" +#include "trace/traceMacros.hpp" #include "utilities/exceptions.hpp" #include "utilities/top.hpp" #if INCLUDE_ALL_GCS @@ -258,7 +259,7 @@ jlong _allocated_bytes; // Cumulative number of bytes allocated on // the Java heap - TRACE_BUFFER _trace_buffer; // Thread-local buffer for tracing + TRACE_DATA _trace_data; // Thread-local data for tracing int _vm_operation_started_count; // VM_Operation support int _vm_operation_completed_count; // VM_Operation support @@ -449,8 +450,7 @@ return allocated_bytes; } - TRACE_BUFFER trace_buffer() { return _trace_buffer; } - void set_trace_buffer(TRACE_BUFFER buf) { _trace_buffer = buf; } + TRACE_DATA* trace_data() { return &_trace_data; } // VM operation support int vm_operation_ticket() { return ++_vm_operation_started_count; } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/timer.cpp --- a/src/share/vm/runtime/timer.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/timer.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
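The thread start/end changes above replace the old EVENT_BEGIN/EVENT_COMMIT macro triples with typed event objects from the new tracing backend. Two idioms appear in this patch; a condensed sketch of both, using event types and counters taken from the surrounding hunks:

// Self-timed: the event times itself between construction and commit().
EventThreadStart event;
if (event.should_commit()) {   // cheap guard; false when tracing is off
  event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
  event.commit();
}

// Explicitly timed: construct with UNTIMED and supply the counters yourself,
// as sweep_code_cache() does earlier in this patch.
EventSweepCodeCache sweep_event(UNTIMED);
if (sweep_event.should_commit()) {
  sweep_event.set_starttime(sweep_start_counter);
  sweep_event.set_endtime(sweep_end_counter);
  sweep_event.commit();
}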
* * This code is free software; you can redistribute it and/or modify it @@ -39,6 +39,11 @@ # include "os_bsd.inline.hpp" #endif +double TimeHelper::counter_to_seconds(jlong counter) { + double count = (double) counter; + double freq = (double) os::elapsed_frequency(); + return count/freq; +} void elapsedTimer::add(elapsedTimer t) { _counter += t._counter; @@ -59,9 +64,7 @@ } double elapsedTimer::seconds() const { - double count = (double) _counter; - double freq = (double) os::elapsed_frequency(); - return count/freq; + return TimeHelper::counter_to_seconds(_counter); } jlong elapsedTimer::milliseconds() const { @@ -90,9 +93,7 @@ double TimeStamp::seconds() const { assert(is_updated(), "must not be clear"); jlong new_count = os::elapsed_counter(); - double count = (double) new_count - _counter; - double freq = (double) os::elapsed_frequency(); - return count/freq; + return TimeHelper::counter_to_seconds(new_count - _counter); } jlong TimeStamp::milliseconds() const { @@ -110,19 +111,15 @@ } TraceTime::TraceTime(const char* title, - bool doit, - bool print_cr, - outputStream* logfile) { + bool doit) { _active = doit; _verbose = true; - _print_cr = print_cr; - _logfile = (logfile != NULL) ? logfile : tty; if (_active) { _accum = NULL; - _logfile->stamp(PrintGCTimeStamps); - _logfile->print("[%s", title); - _logfile->flush(); + tty->stamp(PrintGCTimeStamps); + tty->print("[%s", title); + tty->flush(); _t.start(); } } @@ -130,17 +127,14 @@ TraceTime::TraceTime(const char* title, elapsedTimer* accumulator, bool doit, - bool verbose, - outputStream* logfile) { + bool verbose) { _active = doit; _verbose = verbose; - _print_cr = true; - _logfile = (logfile != NULL) ? logfile : tty; if (_active) { if (_verbose) { - _logfile->stamp(PrintGCTimeStamps); - _logfile->print("[%s", title); - _logfile->flush(); + tty->stamp(PrintGCTimeStamps); + tty->print("[%s", title); + tty->flush(); } _accum = accumulator; _t.start(); @@ -152,12 +146,8 @@ _t.stop(); if (_accum!=NULL) _accum->add(_t); if (_verbose) { - if (_print_cr) { - _logfile->print_cr(", %3.7f secs]", _t.seconds()); - } else { - _logfile->print(", %3.7f secs]", _t.seconds()); - } - _logfile->flush(); + tty->print_cr(", %3.7f secs]", _t.seconds()); + tty->flush(); } } } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/timer.hpp --- a/src/share/vm/runtime/timer.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/timer.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
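TimeHelper::counter_to_seconds() now centralizes the tick-to-seconds conversion that elapsedTimer::seconds() and TimeStamp::seconds() previously each spelled out: elapsed ticks divided by os::elapsed_frequency(), which reports ticks per second. A worked example, under an assumed frequency of 10 MHz:

// freq    = 10,000,000 ticks per second (assumed for illustration)
// counter = 25,000,000 elapsed ticks
// counter_to_seconds(25000000) = 25000000.0 / 10000000.0 = 2.5 seconds
double secs = TimeHelper::counter_to_seconds(25000000);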
* * This code is free software; you can redistribute it and/or modify it @@ -82,21 +82,16 @@ private: bool _active; // do timing bool _verbose; // report every timing - bool _print_cr; // add a CR to the end of the timer report elapsedTimer _t; // timer elapsedTimer* _accum; // accumulator - outputStream* _logfile; // output log file public: - // Constuctors + // Constructors TraceTime(const char* title, - bool doit = true, - bool print_cr = true, - outputStream *logfile = NULL); + bool doit = true); TraceTime(const char* title, elapsedTimer* accumulator, bool doit = true, - bool verbose = false, - outputStream *logfile = NULL ); + bool verbose = false); ~TraceTime(); // Accessors @@ -125,4 +120,9 @@ ~TraceCPUTime(); }; +class TimeHelper { + public: + static double counter_to_seconds(jlong counter); +}; + #endif // SHARE_VM_RUNTIME_TIMER_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/virtualspace.cpp --- a/src/share/vm/runtime/virtualspace.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/virtualspace.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -533,11 +533,13 @@ lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region"); if (!os::commit_memory(lower_high(), lower_needs, _executable)) { - debug_only(warning("os::commit_memory failed")); + debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT + ", lower_needs=" SIZE_FORMAT ", %d) failed", + lower_high(), lower_needs, _executable);) return false; } else { _lower_high += lower_needs; - } + } } if (middle_needs > 0) { assert(lower_high_boundary() <= middle_high() && @@ -545,7 +547,10 @@ "must not expand beyond region"); if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(), _executable)) { - debug_only(warning("os::commit_memory failed")); + debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT + ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT + ", %d) failed", middle_high(), middle_needs, + middle_alignment(), _executable);) return false; } _middle_high += middle_needs; @@ -555,7 +560,9 @@ upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region"); if (!os::commit_memory(upper_high(), upper_needs, _executable)) { - debug_only(warning("os::commit_memory failed")); + debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT + ", upper_needs=" SIZE_FORMAT ", %d) failed", + upper_high(), upper_needs, _executable);) return false; } else { _upper_high += upper_needs; diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/vmStructs.cpp --- a/src/share/vm/runtime/vmStructs.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/vmStructs.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -60,6 +60,7 @@ #include "memory/generationSpec.hpp" #include "memory/heap.hpp" #include "memory/metablock.hpp" +#include "memory/referenceType.hpp" #include "memory/space.hpp" #include "memory/tenuredGeneration.hpp" #include "memory/universe.hpp" @@ -262,7 +263,7 @@ unchecked_c2_static_field) \ \ /******************************************************************/ \ - /* OopDesc and Klass hierarchies (NOTE: MethodData* incomplete) */ \ + /* OopDesc and Klass hierarchies (NOTE: MethodData* incomplete) */ \ /******************************************************************/ \ \ volatile_nonstatic_field(oopDesc, _mark, markOop) \ @@ -273,21 +274,20 @@ volatile_nonstatic_field(ArrayKlass, _higher_dimension, Klass*) \ volatile_nonstatic_field(ArrayKlass, _lower_dimension, Klass*) \ nonstatic_field(ArrayKlass, _vtable_len, int) \ - nonstatic_field(ArrayKlass, _alloc_size, juint) \ 
nonstatic_field(ArrayKlass, _component_mirror, oop) \ - nonstatic_field(CompiledICHolder, _holder_method, Method*) \ + nonstatic_field(CompiledICHolder, _holder_method, Method*) \ nonstatic_field(CompiledICHolder, _holder_klass, Klass*) \ nonstatic_field(ConstantPool, _tags, Array*) \ - nonstatic_field(ConstantPool, _cache, ConstantPoolCache*) \ + nonstatic_field(ConstantPool, _cache, ConstantPoolCache*) \ nonstatic_field(ConstantPool, _pool_holder, InstanceKlass*) \ nonstatic_field(ConstantPool, _operands, Array*) \ nonstatic_field(ConstantPool, _length, int) \ nonstatic_field(ConstantPool, _resolved_references, jobject) \ nonstatic_field(ConstantPool, _reference_map, Array*) \ nonstatic_field(ConstantPoolCache, _length, int) \ - nonstatic_field(ConstantPoolCache, _constant_pool, ConstantPool*) \ + nonstatic_field(ConstantPoolCache, _constant_pool, ConstantPool*) \ nonstatic_field(InstanceKlass, _array_klasses, Klass*) \ - nonstatic_field(InstanceKlass, _methods, Array*) \ + nonstatic_field(InstanceKlass, _methods, Array*) \ nonstatic_field(InstanceKlass, _local_interfaces, Array*) \ nonstatic_field(InstanceKlass, _transitive_interfaces, Array*) \ nonstatic_field(InstanceKlass, _fields, Array*) \ @@ -335,9 +335,8 @@ nonstatic_field(Klass, _access_flags, AccessFlags) \ nonstatic_field(Klass, _subklass, Klass*) \ nonstatic_field(Klass, _next_sibling, Klass*) \ - nonstatic_field(Klass, _alloc_count, juint) \ nonstatic_field(MethodData, _size, int) \ - nonstatic_field(MethodData, _method, Method*) \ + nonstatic_field(MethodData, _method, Method*) \ nonstatic_field(MethodData, _data_size, int) \ nonstatic_field(MethodData, _data[0], intptr_t) \ nonstatic_field(MethodData, _nof_decompiles, uint) \ @@ -378,7 +377,7 @@ nonstatic_field(ConstMethod, _size_of_parameters, u2) \ nonstatic_field(ObjArrayKlass, _element_klass, Klass*) \ nonstatic_field(ObjArrayKlass, _bottom_klass, Klass*) \ - volatile_nonstatic_field(Symbol, _refcount, int) \ + volatile_nonstatic_field(Symbol, _refcount, short) \ nonstatic_field(Symbol, _identity_hash, int) \ nonstatic_field(Symbol, _length, unsigned short) \ unchecked_nonstatic_field(Symbol, _body, sizeof(jbyte)) /* NOTE: no type */ \ @@ -436,10 +435,6 @@ static_field(Universe, _main_thread_group, oop) \ static_field(Universe, _system_thread_group, oop) \ static_field(Universe, _the_empty_class_klass_array, objArrayOop) \ - static_field(Universe, _out_of_memory_error_java_heap, oop) \ - static_field(Universe, _out_of_memory_error_perm_gen, oop) \ - static_field(Universe, _out_of_memory_error_array_size, oop) \ - static_field(Universe, _out_of_memory_error_gc_overhead_limit, oop) \ static_field(Universe, _null_ptr_exception_instance, oop) \ static_field(Universe, _arithmetic_exception_instance, oop) \ static_field(Universe, _vm_exception, oop) \ diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/vmThread.cpp --- a/src/share/vm/runtime/vmThread.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/vmThread.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -35,6 +35,7 @@ #include "runtime/vmThread.hpp" #include "runtime/vm_operations.hpp" #include "services/runtimeService.hpp" +#include "trace/tracing.hpp" #include "utilities/dtrace.hpp" #include "utilities/events.hpp" #include "utilities/xmlstream.hpp" @@ -365,7 +366,23 @@ (char *) op->name(), strlen(op->name()), op->evaluation_mode()); #endif /* USDT2 */ + + EventExecuteVMOperation event; + op->evaluate(); + + if (event.should_commit()) { + bool is_concurrent = op->evaluate_concurrently(); + 
event.set_operation(op->type()); + event.set_safepoint(op->evaluate_at_safepoint()); + event.set_blocking(!is_concurrent); + // Only write caller thread information for non-concurrent vm operations. + // For concurrent vm operations, the thread id is set to 0 indicating thread is unknown. + // This is because the caller thread could have exited already. + event.set_caller(is_concurrent ? 0 : op->calling_thread()->osthread()->thread_id()); + event.commit(); + } + #ifndef USDT2 HS_DTRACE_PROBE3(hotspot, vmops__end, op->name(), strlen(op->name()), op->evaluation_mode()); @@ -601,7 +618,7 @@ { VMOperationQueue_lock->lock_without_safepoint_check(); bool ok = _vm_queue->add(op); - op->set_timestamp(os::javaTimeMillis()); + op->set_timestamp(os::javaTimeMillis()); VMOperationQueue_lock->notify(); VMOperationQueue_lock->unlock(); // VM_Operation got skipped diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/vm_operations.cpp --- a/src/share/vm/runtime/vm_operations.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/vm_operations.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,6 +37,7 @@ #include "runtime/thread.inline.hpp" #include "runtime/vm_operations.hpp" #include "services/threadService.hpp" +#include "trace/tracing.hpp" #define VM_OP_NAME_INITIALIZE(name) #name, @@ -62,19 +63,21 @@ } } +const char* VM_Operation::mode_to_string(Mode mode) { + switch(mode) { + case _safepoint : return "safepoint"; + case _no_safepoint : return "no safepoint"; + case _concurrent : return "concurrent"; + case _async_safepoint: return "async safepoint"; + default : return "unknown"; + } +} // Called by fatal error handler. void VM_Operation::print_on_error(outputStream* st) const { st->print("VM_Operation (" PTR_FORMAT "): ", this); st->print("%s", name()); - const char* mode; - switch(evaluation_mode()) { - case _safepoint : mode = "safepoint"; break; - case _no_safepoint : mode = "no safepoint"; break; - case _concurrent : mode = "concurrent"; break; - case _async_safepoint: mode = "async safepoint"; break; - default : mode = "unknown"; break; - } + const char* mode = mode_to_string(evaluation_mode()); st->print(", mode: %s", mode); if (calling_thread()) { diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/runtime/vm_operations.hpp --- a/src/share/vm/runtime/vm_operations.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/runtime/vm_operations.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
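Two small points about the hunks above. First, EventExecuteVMOperation records the caller thread id only for non-concurrent operations, because a concurrent requester may already have exited by the time the event commits. Second, mode_to_string() hoists the switch out of print_on_error() so any caller can map a Mode to text. A sketch of such a reuse (the print site is hypothetical):

// Same mapping the fatal-error printer uses, now callable anywhere:
const char* mode = VM_Operation::mode_to_string(op->evaluation_mode());
tty->print_cr("VM operation %s runs as: %s", op->name(), mode);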
* * This code is free software; you can redistribute it and/or modify it @@ -178,6 +178,8 @@ evaluation_mode() == _async_safepoint; } + static const char* mode_to_string(Mode mode); + // Debugging void print_on_error(outputStream* st) const; const char* name() const { return _names[type()]; } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/services/attachListener.cpp --- a/src/share/vm/services/attachListener.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/services/attachListener.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -227,7 +227,7 @@ } live_objects_only = strcmp(arg0, "-live") == 0; } - VM_GC_HeapInspection heapop(out, live_objects_only /* request full gc */, true /* need_prologue */); + VM_GC_HeapInspection heapop(out, live_objects_only /* request full gc */); VMThread::execute(&heapop); return JNI_OK; } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/services/diagnosticArgument.cpp --- a/src/share/vm/services/diagnosticArgument.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/services/diagnosticArgument.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "runtime/thread.hpp" #include "services/diagnosticArgument.hpp" @@ -86,9 +87,18 @@ template <> void DCmdArgument::parse_value(const char* str, size_t len, TRAPS) { - if (str == NULL || sscanf(str, JLONG_FORMAT, &_value) != 1) { - THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), - "Integer parsing error in diagnostic command arguments\n"); + int scanned = -1; + if (str == NULL + || sscanf(str, JLONG_FORMAT"%n", &_value, &scanned) != 1 + || (size_t)scanned != len) + { + ResourceMark rm; + + char* buf = NEW_RESOURCE_ARRAY(char, len + 1); + strncpy(buf, str, len); + buf[len] = '\0'; + Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_IllegalArgumentException(), + "Integer parsing error in command argument '%s'. Could not parse: %s.", _name, buf); } } @@ -96,7 +106,7 @@ if (has_default()) { this->parse_value(_default_string, strlen(_default_string), THREAD); if (HAS_PENDING_EXCEPTION) { - fatal("Default string must be parsable"); + fatal("Default string must be parseable"); } } else { set_value(0); @@ -116,8 +126,13 @@ } else if (len == strlen("false") && strncasecmp(str, "false", len) == 0) { set_value(false); } else { - THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), - "Boolean parsing error in diagnostic command arguments"); + ResourceMark rm; + + char* buf = NEW_RESOURCE_ARRAY(char, len + 1); + strncpy(buf, str, len); + buf[len] = '\0'; + Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_IllegalArgumentException(), + "Boolean parsing error in command argument '%s'. 
Could not parse: %s.", _name, buf); } } } @@ -168,7 +183,7 @@ size_t len, TRAPS) { if (str == NULL) { THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), - "Integer parsing error nanotime value: syntax error"); + "Integer parsing error in nanotime value: syntax error, value is null"); } int argc = sscanf(str, JLONG_FORMAT, &_value._time); @@ -232,7 +247,7 @@ } else { _value._time = 0; _value._nanotime = 0; - strcmp(_value._unit, "ns"); + strcpy(_value._unit, "ns"); } } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/services/diagnosticCommand.cpp --- a/src/share/vm/services/diagnosticCommand.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/services/diagnosticCommand.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -320,8 +320,7 @@ void ClassHistogramDCmd::execute(DCmdSource source, TRAPS) { VM_GC_HeapInspection heapop(output(), - !_all.value() /* request full gc if false */, - true /* need_prologue */); + !_all.value() /* request full gc if false */); VMThread::execute(&heapop); } @@ -361,8 +360,7 @@ } VM_GC_HeapInspection heapop(output(), - true, /* request_full_gc */ - true /* need_prologue */); + true /* request_full_gc */); heapop.set_csv_format(_csv.value()); heapop.set_print_help(_help.value()); heapop.set_print_class_stats(true); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/services/management.cpp --- a/src/share/vm/services/management.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/services/management.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -894,12 +894,6 @@ } } - // In our current implementation, we make sure that all non-heap - // pools have defined init and max sizes. Heap pools do not matter, - // as we never use total_init and total_max for them. - assert(heap || !has_undefined_init_size, "Undefined init size"); - assert(heap || !has_undefined_max_size, "Undefined max size"); - MemoryUsage usage((heap ? InitialHeapSize : total_init), total_used, total_committed, diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/services/memBaseline.cpp --- a/src/share/vm/services/memBaseline.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/services/memBaseline.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -41,6 +41,7 @@ {mtOther, "Other"}, {mtSymbol, "Symbol"}, {mtNMT, "Memory Tracking"}, + {mtTracing, "Tracing"}, {mtChunk, "Pooled Free Chunks"}, {mtClassShared,"Shared spaces for classes"}, {mtTest, "Test"}, @@ -129,7 +130,7 @@ if (malloc_ptr->is_arena_record()) { // see if arena memory record present MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next(); - if (next_malloc_ptr->is_arena_memory_record()) { + if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) { assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr), "Arena records do not match"); size = next_malloc_ptr->size(); @@ -485,7 +486,7 @@ const MemPointerRecord* mp1 = (const MemPointerRecord*)p1; const MemPointerRecord* mp2 = (const MemPointerRecord*)p2; int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr()); - assert(delta != 0, "dup pointer"); + assert(p1 == p2 || delta != 0, "dup pointer"); return delta; } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/services/memPtr.hpp --- a/src/share/vm/services/memPtr.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/services/memPtr.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
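The rewritten DCmdArgument<jlong>::parse_value() above leans on sscanf's %n specifier: %n stores the number of characters consumed so far, so requiring scanned == len rejects input with trailing garbage that a bare sscanf would silently accept. A self-contained sketch of the idiom (plain C++, hypothetical helper name):

#include <cstdio>

// Accepts "123"; rejects "123abc", "abc", and "" (scanned stays -1).
static bool parse_long_strict(const char* str, size_t len, long long* out) {
  int scanned = -1;
  return str != NULL
      && sscanf(str, "%lld%n", out, &scanned) == 1
      && (size_t) scanned == len;
}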
* * This code is free software; you can redistribute it and/or modify it @@ -457,9 +457,8 @@ public: SeqMemPointerRecord(): _seq(0){ } - SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size) - : MemPointerRecord(addr, flags, size) { - _seq = SequenceGenerator::next(); + SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size, jint seq) + : MemPointerRecord(addr, flags, size), _seq(seq) { } SeqMemPointerRecord(const SeqMemPointerRecord& copy_from) @@ -488,8 +487,8 @@ SeqMemPointerRecordEx(): _seq(0) { } SeqMemPointerRecordEx(address addr, MEMFLAGS flags, size_t size, - address pc): MemPointerRecordEx(addr, flags, size, pc) { - _seq = SequenceGenerator::next(); + jint seq, address pc): + MemPointerRecordEx(addr, flags, size, pc), _seq(seq) { } SeqMemPointerRecordEx(const SeqMemPointerRecordEx& copy_from) diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/services/memRecorder.cpp --- a/src/share/vm/services/memRecorder.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/services/memRecorder.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -69,10 +69,11 @@ if (_pointer_records != NULL) { // recode itself + address pc = CURRENT_PC; record((address)this, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder), - sizeof(MemRecorder), CALLER_PC); + sizeof(MemRecorder), SequenceGenerator::next(), pc); record((address)_pointer_records, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder), - _pointer_records->instance_size(),CURRENT_PC); + _pointer_records->instance_size(), SequenceGenerator::next(), pc); } } @@ -116,7 +117,8 @@ } } -bool MemRecorder::record(address p, MEMFLAGS flags, size_t size, address pc) { +bool MemRecorder::record(address p, MEMFLAGS flags, size_t size, jint seq, address pc) { + assert(seq > 0, "No sequence number"); #ifdef ASSERT if (MemPointerRecord::is_virtual_memory_record(flags)) { assert((flags & MemPointerRecord::tag_masks) != 0, "bad virtual memory record"); @@ -133,11 +135,11 @@ #endif if (MemTracker::track_callsite()) { - SeqMemPointerRecordEx ap(p, flags, size, pc); + SeqMemPointerRecordEx ap(p, flags, size, seq, pc); debug_only(check_dup_seq(ap.seq());) return _pointer_records->append(&ap); } else { - SeqMemPointerRecord ap(p, flags, size); + SeqMemPointerRecord ap(p, flags, size, seq); debug_only(check_dup_seq(ap.seq());) return _pointer_records->append(&ap); } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/services/memRecorder.hpp --- a/src/share/vm/services/memRecorder.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/services/memRecorder.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
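The SeqMemPointerRecord changes above move sequence-number allocation from the record constructor to the caller, and MemRecorder::record() now takes the number as a parameter. The point is ordering: a "release"-style operation can reserve its slot in the record stream before the operation runs, so a racing allocation of the same address always sorts after it. A sketch of the intended call order, using names from this patch (the release call is illustrative):

// Reserve the position in the record stream first ...
jint seq = SequenceGenerator::next();
// ... then run the operation; another thread may immediately reuse addr,
// but its allocation record will carry a later sequence number.
if (os::release_memory((char*) addr, size)) {
  recorder->record(addr, MemPointerRecord::virtual_memory_release_tag(),
                   size, seq, CALLER_PC);
}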
* * This code is free software; you can redistribute it and/or modify it @@ -220,7 +220,7 @@ ~MemRecorder(); // record a memory operation - bool record(address addr, MEMFLAGS flags, size_t size, address caller_pc = 0); + bool record(address addr, MEMFLAGS flags, size_t size, jint seq, address caller_pc = 0); // linked list support inline void set_next(MemRecorder* rec) { diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/services/memReporter.cpp --- a/src/share/vm/services/memReporter.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/services/memReporter.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -188,30 +188,51 @@ (MallocCallsitePointer*)prev_malloc_itr.current(); while (cur_malloc_callsite != NULL || prev_malloc_callsite != NULL) { - if (prev_malloc_callsite == NULL || - cur_malloc_callsite->addr() < prev_malloc_callsite->addr()) { + if (prev_malloc_callsite == NULL) { + assert(cur_malloc_callsite != NULL, "sanity check"); + // this is a new callsite _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(), amount_in_current_scale(cur_malloc_callsite->amount()), cur_malloc_callsite->count(), diff_in_current_scale(cur_malloc_callsite->amount(), 0), diff(cur_malloc_callsite->count(), 0)); cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next(); - } else if (prev_malloc_callsite == NULL || - cur_malloc_callsite->addr() > prev_malloc_callsite->addr()) { - _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(), - amount_in_current_scale(prev_malloc_callsite->amount()), - prev_malloc_callsite->count(), + } else if (cur_malloc_callsite == NULL) { + assert(prev_malloc_callsite != NULL, "Sanity check"); + // this callsite is already gone + _outputer.diff_malloc_callsite(prev_malloc_callsite->addr(), + 0, 0, diff_in_current_scale(0, prev_malloc_callsite->amount()), diff(0, prev_malloc_callsite->count())); prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next(); - } else { // the same callsite - _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(), - amount_in_current_scale(cur_malloc_callsite->amount()), - cur_malloc_callsite->count(), - diff_in_current_scale(cur_malloc_callsite->amount(), prev_malloc_callsite->amount()), - diff(cur_malloc_callsite->count(), prev_malloc_callsite->count())); - cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next(); - prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next(); + } else { + assert(cur_malloc_callsite != NULL, "Sanity check"); + assert(prev_malloc_callsite != NULL, "Sanity check"); + if (cur_malloc_callsite->addr() < prev_malloc_callsite->addr()) { + // this is a new callsite + _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(), + amount_in_current_scale(cur_malloc_callsite->amount()), + cur_malloc_callsite->count(), + diff_in_current_scale(cur_malloc_callsite->amount(), 0), + diff(cur_malloc_callsite->count(), 0)); + cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next(); + } else if (cur_malloc_callsite->addr() > prev_malloc_callsite->addr()) { + // this callsite is already gone + _outputer.diff_malloc_callsite(prev_malloc_callsite->addr(), + 0, 0, + diff_in_current_scale(0, prev_malloc_callsite->amount()), + diff(0, prev_malloc_callsite->count())); + prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next(); + } else { + // the same callsite + _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(), + amount_in_current_scale(cur_malloc_callsite->amount()), + cur_malloc_callsite->count(), + diff_in_current_scale(cur_malloc_callsite->amount(), 
prev_malloc_callsite->amount()), + diff(cur_malloc_callsite->count(), prev_malloc_callsite->count())); + cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next(); + prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next(); + } } } @@ -222,6 +243,7 @@ VMCallsitePointer* prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.current(); while (cur_vm_callsite != NULL || prev_vm_callsite != NULL) { if (prev_vm_callsite == NULL || cur_vm_callsite->addr() < prev_vm_callsite->addr()) { + // this is a new callsite _outputer.diff_virtual_memory_callsite(cur_vm_callsite->addr(), amount_in_current_scale(cur_vm_callsite->reserved_amount()), amount_in_current_scale(cur_vm_callsite->committed_amount()), @@ -229,9 +251,10 @@ diff_in_current_scale(cur_vm_callsite->committed_amount(), 0)); cur_vm_callsite = (VMCallsitePointer*)cur_vm_itr.next(); } else if (cur_vm_callsite == NULL || cur_vm_callsite->addr() > prev_vm_callsite->addr()) { + // this callsite is already gone _outputer.diff_virtual_memory_callsite(prev_vm_callsite->addr(), - amount_in_current_scale(prev_vm_callsite->reserved_amount()), - amount_in_current_scale(prev_vm_callsite->committed_amount()), + amount_in_current_scale(0), + amount_in_current_scale(0), diff_in_current_scale(0, prev_vm_callsite->reserved_amount()), diff_in_current_scale(0, prev_vm_callsite->committed_amount())); prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.next(); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/services/memTracker.cpp --- a/src/share/vm/services/memTracker.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/services/memTracker.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -69,6 +69,7 @@ volatile jint MemTracker::_pooled_recorder_count = 0; volatile unsigned long MemTracker::_processing_generation = 0; volatile bool MemTracker::_worker_thread_idle = false; +volatile jint MemTracker::_pending_op_count = 0; volatile bool MemTracker::_slowdown_calling_thread = false; debug_only(intx MemTracker::_main_thread_tid = 0;) NOT_PRODUCT(volatile jint MemTracker::_pending_recorder_count = 0;) @@ -337,92 +338,14 @@ Atomic::inc(&_pooled_recorder_count); } -/* - * This is the most important method in whole nmt implementation. - * - * Create a memory record. - * 1. When nmt is in single-threaded bootstrapping mode, no lock is needed as VM - * still in single thread mode. - * 2. For all threads other than JavaThread, ThreadCritical is needed - * to write to recorders to global recorder. - * 3. For JavaThreads that are not longer visible by safepoint, also - * need to take ThreadCritical and records are written to global - * recorders, since these threads are NOT walked by Threads.do_thread(). - * 4. JavaThreads that are running in native state, have to transition - * to VM state before writing to per-thread recorders. - * 5. JavaThreads that are running in VM state do not need any lock and - * records are written to per-thread recorders. - * 6. For a thread has yet to attach VM 'Thread', they need to take - * ThreadCritical to write to global recorder. - * - * Important note: - * NO LOCK should be taken inside ThreadCritical lock !!! 
- */ -void MemTracker::create_memory_record(address addr, MEMFLAGS flags, - size_t size, address pc, Thread* thread) { - assert(addr != NULL, "Sanity check"); - if (!shutdown_in_progress()) { - // single thread, we just write records direct to global recorder,' - // with any lock - if (_state == NMT_bootstrapping_single_thread) { - assert(_main_thread_tid == os::current_thread_id(), "wrong thread"); - thread = NULL; - } else { - if (thread == NULL) { - // don't use Thread::current(), since it is possible that - // the calling thread has yet to attach to VM 'Thread', - // which will result assertion failure - thread = ThreadLocalStorage::thread(); - } - } - - if (thread != NULL) { - // slow down all calling threads except NMT worker thread, so it - // can catch up. - if (_slowdown_calling_thread && thread != _worker_thread) { - os::yield_all(); - } - - if (thread->is_Java_thread() && ((JavaThread*)thread)->is_safepoint_visible()) { - JavaThread* java_thread = (JavaThread*)thread; - JavaThreadState state = java_thread->thread_state(); - if (SafepointSynchronize::safepoint_safe(java_thread, state)) { - // JavaThreads that are safepoint safe, can run through safepoint, - // so ThreadCritical is needed to ensure no threads at safepoint create - // new records while the records are being gathered and the sequence number is changing - ThreadCritical tc; - create_record_in_recorder(addr, flags, size, pc, java_thread); - } else { - create_record_in_recorder(addr, flags, size, pc, java_thread); - } - } else { - // other threads, such as worker and watcher threads, etc. need to - // take ThreadCritical to write to global recorder - ThreadCritical tc; - create_record_in_recorder(addr, flags, size, pc, NULL); - } - } else { - if (_state == NMT_bootstrapping_single_thread) { - // single thread, no lock needed - create_record_in_recorder(addr, flags, size, pc, NULL); - } else { - // for thread has yet to attach VM 'Thread', we can not use VM mutex. - // use native thread critical instead - ThreadCritical tc; - create_record_in_recorder(addr, flags, size, pc, NULL); - } - } - } -} - // write a record to proper recorder. No lock can be taken from this method // down. -void MemTracker::create_record_in_recorder(address addr, MEMFLAGS flags, - size_t size, address pc, JavaThread* thread) { +void MemTracker::write_tracking_record(address addr, MEMFLAGS flags, + size_t size, jint seq, address pc, JavaThread* thread) { MemRecorder* rc = get_thread_recorder(thread); if (rc != NULL) { - rc->record(addr, flags, size, pc); + rc->record(addr, flags, size, seq, pc); } } @@ -487,39 +410,43 @@ return; } } - _sync_point_skip_count = 0; { // This method is running at safepoint, with ThreadCritical lock, // it should guarantee that NMT is fully sync-ed. ThreadCritical tc; - SequenceGenerator::reset(); + // We can NOT execute NMT sync-point if there are pending tracking ops. 
+ if (_pending_op_count == 0) { + SequenceGenerator::reset(); + _sync_point_skip_count = 0; - // walk all JavaThreads to collect recorders - SyncThreadRecorderClosure stc; - Threads::threads_do(&stc); + // walk all JavaThreads to collect recorders + SyncThreadRecorderClosure stc; + Threads::threads_do(&stc); + + _thread_count = stc.get_thread_count(); + MemRecorder* pending_recorders = get_pending_recorders(); - _thread_count = stc.get_thread_count(); - MemRecorder* pending_recorders = get_pending_recorders(); + if (_global_recorder != NULL) { + _global_recorder->set_next(pending_recorders); + pending_recorders = _global_recorder; + _global_recorder = NULL; + } - if (_global_recorder != NULL) { - _global_recorder->set_next(pending_recorders); - pending_recorders = _global_recorder; - _global_recorder = NULL; + // see if NMT has too many outstanding recorder instances, it usually + // means that worker thread is lagging behind in processing them. + if (!AutoShutdownNMT) { + _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count); + } + + // check _worker_thread with lock to avoid racing condition + if (_worker_thread != NULL) { + _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes()); + } + assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point"); + } else { + _sync_point_skip_count ++; } - - // see if NMT has too many outstanding recorder instances, it usually - // means that worker thread is lagging behind in processing them. - if (!AutoShutdownNMT) { - _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count); - } - - // check _worker_thread with lock to avoid racing condition - if (_worker_thread != NULL) { - _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes()); - } - - assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point"); } } @@ -708,3 +635,243 @@ } #endif + +// Tracker Implementation + +/* + * Create a tracker. + * This is a fairly complicated constructor, as it has to make two important decisions: + * 1) Does it need to take the ThreadCritical lock to write the tracking record? + * 2) Does it need to pre-reserve a sequence number for the tracking record? + * + * The rules to determine if ThreadCritical is needed: + * 1. When nmt is in single-threaded bootstrapping mode, no lock is needed as the VM + * is still in single-thread mode. + * 2. For all threads other than JavaThread, ThreadCritical is needed + * to write records to the global recorder. + * 3. JavaThreads that are no longer visible to safepoints also + * need to take ThreadCritical, and their records are written to the global + * recorder, since these threads are NOT walked by Threads.do_thread(). + * 4. JavaThreads that are running in safepoint-safe states do not stop + * for safepoints, so the ThreadCritical lock should be taken to write + * memory records. + * 5. JavaThreads that are running in VM state do not need any lock and + * records are written to per-thread recorders. + * 6. A thread that has yet to attach a VM 'Thread' needs to take + * ThreadCritical to write to the global recorder. + * + * The memory operations that need pre-reserved sequence numbers: + * memory operations that "release" memory blocks and can fail + * need to pre-reserve a sequence number. They + * are realloc, uncommit and release.
+ * + * The reason for pre-reserving a sequence number is to prevent a race condition: + * Thread 1 Thread 2 + * <release> + * <allocate new memory> + * <write allocate tracking record> + * <write release tracking record> + * if Thread 2 happens to obtain the memory address Thread 1 just released, + * then NMT can mistakenly report the memory is free. + * + * Notably, free() does not need a pre-reserved sequence number, because the call + * does not fail, so we can always write the "release" record before the memory is actually + * freed. + * + * For realloc, uncommit and release, the following coding pattern should be used: + * + * MemTracker::Tracker tkr = MemTracker::get_realloc_tracker(); + * ptr = ::realloc(...); + * if (ptr == NULL) { + * tkr.record(...); + * } else { + * tkr.discard(); + * } + * + * MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker(); + * if (uncommit(...)) { + * tkr.record(...); + * } else { + * tkr.discard(); + * } + * + * MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); + * if (release(...)) { + * tkr.record(...); + * } else { + * tkr.discard(); + * } + * + * Since a pre-reserved sequence number is only good for the generation in which it is acquired, + * when there is a pending Tracker that reserved a sequence number, the NMT sync-point has + * to be skipped to prevent the generation from advancing. This is done by incrementing and decrementing + * MemTracker::_pending_op_count; when MemTracker::_pending_op_count > 0, the NMT sync-point is skipped. + * Not all pre-reservations of sequence numbers increment the pending op count. For JavaThreads + * that honor safepoints, a safepoint cannot occur during the memory operation, so the + * pre-reserved sequence number won't cross the generation boundary. + */ +MemTracker::Tracker::Tracker(MemoryOperation op, Thread* thr) { + _op = NoOp; + _seq = 0; + if (MemTracker::is_on()) { + _java_thread = NULL; + _op = op; + + // figure out if the ThreadCritical lock is needed to write this operation + // to MemTracker + if (MemTracker::is_single_threaded_bootstrap()) { + thr = NULL; + } else if (thr == NULL) { + // don't use Thread::current(), since it is possible that + // the calling thread has yet to attach to the VM 'Thread', + // which would result in an assertion failure + thr = ThreadLocalStorage::thread(); + } + + if (thr != NULL) { + // Check NMT load + MemTracker::check_NMT_load(thr); + + if (thr->is_Java_thread() && ((JavaThread*)thr)->is_safepoint_visible()) { + _java_thread = (JavaThread*)thr; + JavaThreadState state = _java_thread->thread_state(); + // JavaThreads that are safepoint safe can run through safepoints, + // so ThreadCritical is needed to ensure no threads at a safepoint create + // new records while the records are being gathered and the sequence number is changing + _need_thread_critical_lock = + SafepointSynchronize::safepoint_safe(_java_thread, state); + } else { + _need_thread_critical_lock = true; + } + } else { + _need_thread_critical_lock + = !MemTracker::is_single_threaded_bootstrap(); + } + + // see if we need to pre-reserve a sequence number for this operation + if (_op == Realloc || _op == Uncommit || _op == Release) { + if (_need_thread_critical_lock) { + ThreadCritical tc; + MemTracker::inc_pending_op_count(); + _seq = SequenceGenerator::next(); + } else { + // for the threads that honor safepoints, no safepoint can occur + // during the lifespan of the tracker, so we don't need to increase + // the pending op count.
+ _seq = SequenceGenerator::next(); + } + } + } +} + +void MemTracker::Tracker::discard() { + if (MemTracker::is_on() && _seq != 0) { + if (_need_thread_critical_lock) { + ThreadCritical tc; + MemTracker::dec_pending_op_count(); + } + _seq = 0; + } +} + + +void MemTracker::Tracker::record(address old_addr, address new_addr, size_t size, + MEMFLAGS flags, address pc) { + assert(old_addr != NULL && new_addr != NULL, "Sanity check"); + assert(_op == Realloc || _op == NoOp, "Wrong call"); + if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) { + assert(_seq > 0, "Need pre-reserve sequence number"); + if (_need_thread_critical_lock) { + ThreadCritical tc; + // free old address, use pre-reserved sequence number + MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(), + 0, _seq, pc, _java_thread); + MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(), + size, SequenceGenerator::next(), pc, _java_thread); + // decrement MemTracker pending_op_count + MemTracker::dec_pending_op_count(); + } else { + // free old address, use pre-reserved sequence number + MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(), + 0, _seq, pc, _java_thread); + MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(), + size, SequenceGenerator::next(), pc, _java_thread); + } + _seq = 0; + } +} + +void MemTracker::Tracker::record(address addr, size_t size, MEMFLAGS flags, address pc) { + // OOM already? + if (addr == NULL) return; + + if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) { + bool pre_reserved_seq = (_seq != 0); + address pc = CALLER_CALLER_PC; + MEMFLAGS orig_flags = flags; + + // or the tagging flags + switch(_op) { + case Malloc: + flags |= MemPointerRecord::malloc_tag(); + break; + case Free: + flags = MemPointerRecord::free_tag(); + break; + case Realloc: + fatal("Use the other Tracker::record()"); + break; + case Reserve: + case ReserveAndCommit: + flags |= MemPointerRecord::virtual_memory_reserve_tag(); + break; + case Commit: + flags = MemPointerRecord::virtual_memory_commit_tag(); + break; + case Type: + flags |= MemPointerRecord::virtual_memory_type_tag(); + break; + case Uncommit: + assert(pre_reserved_seq, "Need pre-reserve sequence number"); + flags = MemPointerRecord::virtual_memory_uncommit_tag(); + break; + case Release: + assert(pre_reserved_seq, "Need pre-reserve sequence number"); + flags = MemPointerRecord::virtual_memory_release_tag(); + break; + case ArenaSize: + // a bit of a hack here, add a small positive offset to the arena + // address for its size record, so the size record is sorted + // right after the arena record.
+ flags = MemPointerRecord::arena_size_tag(); + addr += sizeof(void*); + break; + case StackRelease: + flags = MemPointerRecord::virtual_memory_release_tag(); + break; + default: + ShouldNotReachHere(); + } + + // write memory tracking record + if (_need_thread_critical_lock) { + ThreadCritical tc; + if (_seq == 0) _seq = SequenceGenerator::next(); + MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread); + if (_op == ReserveAndCommit) { + MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(), + size, SequenceGenerator::next(), pc, _java_thread); + } + if (pre_reserved_seq) MemTracker::dec_pending_op_count(); + } else { + if (_seq == 0) _seq = SequenceGenerator::next(); + MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread); + if (_op == ReserveAndCommit) { + MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(), + size, SequenceGenerator::next(), pc, _java_thread); + } + } + _seq = 0; + } +} + diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/services/memTracker.hpp --- a/src/share/vm/services/memTracker.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/services/memTracker.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -54,6 +54,18 @@ NMT_sequence_overflow // overflow the sequence number }; + class Tracker { + public: + void discard() { } + + void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL) { } + void record(address old_addr, address new_addr, size_t size, + MEMFLAGS flags, address pc = NULL) { } + }; + + private: + static Tracker _tkr; + public: static inline void init_tracking_options(const char* option_line) { } @@ -68,19 +80,18 @@ static inline void record_malloc(address addr, size_t size, MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { } static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) { } - static inline void record_realloc(address old_addr, address new_addr, size_t size, - MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { } static inline void record_arena_size(address addr, size_t size) { } static inline void record_virtual_memory_reserve(address addr, size_t size, - address pc = 0, Thread* thread = NULL) { } + MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { } + static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size, + MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { } static inline void record_virtual_memory_commit(address addr, size_t size, address pc = 0, Thread* thread = NULL) { } - static inline void record_virtual_memory_uncommit(address addr, size_t size, - Thread* thread = NULL) { } - static inline void record_virtual_memory_release(address addr, size_t size, - Thread* thread = NULL) { } static inline void record_virtual_memory_type(address base, MEMFLAGS flags, Thread* thread = NULL) { } + static inline Tracker get_realloc_tracker() { return _tkr; } + static inline Tracker get_virtual_memory_uncommit_tracker() { return _tkr; } + static inline Tracker get_virtual_memory_release_tracker() { return _tkr; } static inline bool baseline() { return false; } static inline bool has_baseline() { return false; } @@ 
-165,6 +176,45 @@ }; public: + class Tracker : public StackObj { + friend class MemTracker; + public: + enum MemoryOperation { + NoOp, // no op + Malloc, // malloc + Realloc, // realloc + Free, // free + Reserve, // virtual memory reserve + Commit, // virtual memory commit + ReserveAndCommit, // virtual memory reserve and commit + StackAlloc = ReserveAndCommit, // allocate thread stack + Type, // assign virtual memory type + Uncommit, // virtual memory uncommit + Release, // virtual memory release + ArenaSize, // set arena size + StackRelease // release thread stack + }; + + + protected: + Tracker(MemoryOperation op, Thread* thr = NULL); + + public: + void discard(); + + void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL); + void record(address old_addr, address new_addr, size_t size, + MEMFLAGS flags, address pc = NULL); + + private: + bool _need_thread_critical_lock; + JavaThread* _java_thread; + MemoryOperation _op; // memory operation + jint _seq; // reserved sequence number + }; + + + public: // native memory tracking level enum NMTLevel { NMT_off, // native memory tracking is off @@ -276,109 +326,74 @@ // record a 'malloc' call static inline void record_malloc(address addr, size_t size, MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { - if (is_on() && NMT_CAN_TRACK(flags)) { - assert(size > 0, "Sanity check"); - create_memory_record(addr, (flags|MemPointerRecord::malloc_tag()), size, pc, thread); - } + Tracker tkr(Tracker::Malloc, thread); + tkr.record(addr, size, flags, pc); } // record a 'free' call static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) { - if (is_on() && NMT_CAN_TRACK(flags)) { - create_memory_record(addr, MemPointerRecord::free_tag(), 0, 0, thread); - } - } - // record a 'realloc' call - static inline void record_realloc(address old_addr, address new_addr, size_t size, - MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { - if (is_on() && NMT_CAN_TRACK(flags)) { - assert(size > 0, "Sanity check"); - record_free(old_addr, flags, thread); - record_malloc(new_addr, size, flags, pc, thread); - } + Tracker tkr(Tracker::Free, thread); + tkr.record(addr, 0, flags, DEBUG_CALLER_PC); } - // record arena memory size static inline void record_arena_size(address addr, size_t size) { - // we add a positive offset to arena address, so we can have arena memory record - // sorted after arena record - if (is_on() && !UseMallocOnly) { - assert(addr != NULL, "Sanity check"); - create_memory_record((addr + sizeof(void*)), MemPointerRecord::arena_size_tag(), size, - DEBUG_CALLER_PC, NULL); - } + Tracker tkr(Tracker::ArenaSize); + tkr.record(addr, size); } // record a virtual memory 'reserve' call static inline void record_virtual_memory_reserve(address addr, size_t size, - address pc = 0, Thread* thread = NULL) { - if (is_on()) { - assert(size > 0, "Sanity check"); - create_memory_record(addr, MemPointerRecord::virtual_memory_reserve_tag(), - size, pc, thread); - } + MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { + assert(size > 0, "Sanity check"); + Tracker tkr(Tracker::Reserve, thread); + tkr.record(addr, size, flags, pc); } static inline void record_thread_stack(address addr, size_t size, Thread* thr, address pc = 0) { - if (is_on()) { - assert(size > 0 && thr != NULL, "Sanity check"); - create_memory_record(addr, MemPointerRecord::virtual_memory_reserve_tag() | mtThreadStack, - size, pc, thr); - create_memory_record(addr, MemPointerRecord::virtual_memory_commit_tag() | mtThreadStack, - size, 
pc, thr);
-    }
+    Tracker tkr(Tracker::StackAlloc, thr);
+    tkr.record(addr, size, mtThreadStack, pc);
   }

   static inline void release_thread_stack(address addr, size_t size, Thread* thr) {
-    if (is_on()) {
-      assert(size > 0 && thr != NULL, "Sanity check");
-      assert(!thr->is_Java_thread(), "too early");
-      create_memory_record(addr, MemPointerRecord::virtual_memory_uncommit_tag() | mtThreadStack,
-        size, DEBUG_CALLER_PC, thr);
-      create_memory_record(addr, MemPointerRecord::virtual_memory_release_tag() | mtThreadStack,
-        size, DEBUG_CALLER_PC, thr);
-    }
+    Tracker tkr(Tracker::StackRelease, thr);
+    tkr.record(addr, size, mtThreadStack, DEBUG_CALLER_PC);
   }

   // record a virtual memory 'commit' call
   static inline void record_virtual_memory_commit(address addr, size_t size,
                             address pc, Thread* thread = NULL) {
-    if (is_on()) {
-      assert(size > 0, "Sanity check");
-      create_memory_record(addr, MemPointerRecord::virtual_memory_commit_tag(),
-        size, pc, thread);
-    }
+    Tracker tkr(Tracker::Commit, thread);
+    tkr.record(addr, size, mtNone, pc);
   }

-  // record a virtual memory 'uncommit' call
-  static inline void record_virtual_memory_uncommit(address addr, size_t size,
-                            Thread* thread = NULL) {
-    if (is_on()) {
-      assert(size > 0, "Sanity check");
-      create_memory_record(addr, MemPointerRecord::virtual_memory_uncommit_tag(),
-        size, DEBUG_CALLER_PC, thread);
-    }
+  static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size,
+    MEMFLAGS flags, address pc, Thread* thread = NULL) {
+    Tracker tkr(Tracker::ReserveAndCommit, thread);
+    tkr.record(addr, size, flags, pc);
   }

-  // record a virtual memory 'release' call
-  static inline void record_virtual_memory_release(address addr, size_t size,
-                            Thread* thread = NULL) {
-    if (is_on()) {
-      assert(size > 0, "Sanity check");
-      create_memory_record(addr, MemPointerRecord::virtual_memory_release_tag(),
-        size, DEBUG_CALLER_PC, thread);
-    }
-  }
   // record memory type on virtual memory base address
   static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
                             Thread* thread = NULL) {
-    if (is_on()) {
-      assert(base > 0, "wrong base address");
-      assert((flags & (~mt_masks)) == 0, "memory type only");
-      create_memory_record(base, (flags | MemPointerRecord::virtual_memory_type_tag()),
-        0, DEBUG_CALLER_PC, thread);
-    }
+    Tracker tkr(Tracker::Type);
+    tkr.record(base, 0, flags);
+  }
+
+  // Get memory trackers for memory operations that can result in race conditions.
+  // The memory tracker has to be obtained before the realloc, virtual memory
+  // uncommit or virtual memory release call; invoke the tracker.record() method
+  // if the operation succeeded, or tracker.discard() to abort the tracking.
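+  //
+  // A sketch of a typical call site (illustrative only; exact call-site
+  // signatures are outside this patch):
+  //
+  //   MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
+  //   void* ptr = ::realloc(memblock, size);
+  //   if (ptr != NULL) {
+  //     tkr.record((address)memblock, (address)ptr, size, memflags, pc);
+  //   } else {
+  //     tkr.discard();   // the old block is untouched, nothing to record
+  //   }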
+  static inline Tracker get_realloc_tracker() {
+    return Tracker(Tracker::Realloc);
+  }
+
+  static inline Tracker get_virtual_memory_uncommit_tracker() {
+    return Tracker(Tracker::Uncommit);
+  }
+
+  static inline Tracker get_virtual_memory_release_tracker() {
+    return Tracker(Tracker::Release);
+  }

@@ -444,6 +459,45 @@
   static MemRecorder* get_pending_recorders();
   static void delete_all_pending_recorders();

+  // write a memory tracking record into a recorder
+  static void write_tracking_record(address addr, MEMFLAGS type,
+    size_t size, jint seq, address pc, JavaThread* thread);
+
+  static bool is_single_threaded_bootstrap() {
+    return _state == NMT_bootstrapping_single_thread;
+  }
+
+  static void check_NMT_load(Thread* thr) {
+    assert(thr != NULL, "Sanity check");
+    if (_slowdown_calling_thread && thr != _worker_thread) {
+#ifdef _WINDOWS
+      // On Windows, os::NakedYield() does not work as well
+      // as os::yield_all()
+      os::yield_all();
+#else
+      // On Solaris, os::yield_all() depends on os::sleep(),
+      // which requires a JavaThread in _thread_in_vm state.
+      // Transitioning a thread to _thread_in_vm state can be dangerous
+      // if the caller holds a lock, as it may deadlock with Threads_lock.
+      // So use NakedYield instead.
+      //
+      // On Linux and BSD, the NakedYield() and yield_all() implementations
+      // are the same.
+      os::NakedYield();
+#endif
+    }
+  }
+
+  static void inc_pending_op_count() {
+    Atomic::inc(&_pending_op_count);
+  }
+
+  static void dec_pending_op_count() {
+    Atomic::dec(&_pending_op_count);
+    assert(_pending_op_count >= 0, "Sanity check");
+  }
+
+
 private:
  // retrieve a pooled memory record or create new one if there is not
  // one available
@@ -522,6 +576,12 @@
  // if NMT should slow down calling thread to allow
  // worker thread to catch up
  static volatile bool _slowdown_calling_thread;
+
+  // pending memory op count.
+  // Certain memory ops need to pre-reserve a sequence number before the
+  // memory operation can happen, to avoid a race condition.
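+  // A sync-point that observes a non-zero count simply skips advancing
+  // the generation for that cycle (descriptive note; the sync-point code
+  // itself is not shown in this hunk).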
+ // See MemTracker::Tracker for detail + static volatile jint _pending_op_count; }; #endif // !INCLUDE_NMT diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/services/memoryManager.cpp --- a/src/share/vm/services/memoryManager.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/services/memoryManager.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -61,6 +61,10 @@ return (MemoryManager*) new CodeCacheMemoryManager(); } +MemoryManager* MemoryManager::get_metaspace_memory_manager() { + return (MemoryManager*) new MetaspaceMemoryManager(); +} + GCMemoryManager* MemoryManager::get_copy_memory_manager() { return (GCMemoryManager*) new CopyMemoryManager(); } diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/services/memoryManager.hpp --- a/src/share/vm/services/memoryManager.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/services/memoryManager.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -56,6 +56,7 @@ enum Name { Abstract, CodeCache, + Metaspace, Copy, MarkSweepCompact, ParNew, @@ -88,6 +89,7 @@ // Static factory methods to get a memory manager of a specific type static MemoryManager* get_code_cache_memory_manager(); + static MemoryManager* get_metaspace_memory_manager(); static GCMemoryManager* get_copy_memory_manager(); static GCMemoryManager* get_msc_memory_manager(); static GCMemoryManager* get_parnew_memory_manager(); @@ -108,6 +110,14 @@ const char* name() { return "CodeCacheManager"; } }; +class MetaspaceMemoryManager : public MemoryManager { +public: + MetaspaceMemoryManager() : MemoryManager() {} + + MemoryManager::Name kind() { return MemoryManager::Metaspace; } + const char *name() { return "Metaspace Manager"; } +}; + class GCStatInfo : public ResourceObj { private: size_t _index; diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/services/memoryPool.cpp --- a/src/share/vm/services/memoryPool.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/services/memoryPool.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" +#include "memory/metaspace.hpp" #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" @@ -33,6 +34,7 @@ #include "services/memoryManager.hpp" #include "services/memoryPool.hpp" #include "utilities/macros.hpp" +#include "utilities/globalDefinitions.hpp" MemoryPool::MemoryPool(const char* name, PoolType type, @@ -256,3 +258,39 @@ return MemoryUsage(initial_size(), used, committed, maxSize); } + +MetaspacePool::MetaspacePool() : + MemoryPool("Metaspace", NonHeap, capacity_in_bytes(), calculate_max_size(), true, false) { } + +MemoryUsage MetaspacePool::get_memory_usage() { + size_t committed = align_size_down_(capacity_in_bytes(), os::vm_page_size()); + return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size()); +} + +size_t MetaspacePool::used_in_bytes() { + return MetaspaceAux::allocated_used_bytes(Metaspace::NonClassType); +} + +size_t MetaspacePool::capacity_in_bytes() const { + return MetaspaceAux::allocated_capacity_bytes(Metaspace::NonClassType); +} + +size_t MetaspacePool::calculate_max_size() const { + return FLAG_IS_CMDLINE(MaxMetaspaceSize) ? 
MaxMetaspaceSize : max_uintx; +} + +CompressedKlassSpacePool::CompressedKlassSpacePool() : + MemoryPool("Compressed Class Space", NonHeap, capacity_in_bytes(), ClassMetaspaceSize, true, false) { } + +size_t CompressedKlassSpacePool::used_in_bytes() { + return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType); +} + +size_t CompressedKlassSpacePool::capacity_in_bytes() const { + return MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType); +} + +MemoryUsage CompressedKlassSpacePool::get_memory_usage() { + size_t committed = align_size_down_(capacity_in_bytes(), os::vm_page_size()); + return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size()); +} diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/services/memoryPool.hpp --- a/src/share/vm/services/memoryPool.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/services/memoryPool.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -222,4 +222,21 @@ size_t used_in_bytes() { return _codeHeap->allocated_capacity(); } }; +class MetaspacePool : public MemoryPool { + size_t calculate_max_size() const; + size_t capacity_in_bytes() const; + public: + MetaspacePool(); + MemoryUsage get_memory_usage(); + size_t used_in_bytes(); +}; + +class CompressedKlassSpacePool : public MemoryPool { + size_t capacity_in_bytes() const; + public: + CompressedKlassSpacePool(); + MemoryUsage get_memory_usage(); + size_t used_in_bytes(); +}; + #endif // SHARE_VM_SERVICES_MEMORYPOOL_HPP diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/services/memoryService.cpp --- a/src/share/vm/services/memoryService.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/services/memoryService.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -35,6 +35,7 @@ #include "memory/memRegion.hpp" #include "memory/tenuredGeneration.hpp" #include "oops/oop.inline.hpp" +#include "runtime/globals.hpp" #include "runtime/javaCalls.hpp" #include "services/classLoadingService.hpp" #include "services/lowMemoryDetector.hpp" @@ -60,9 +61,11 @@ GrowableArray* MemoryService::_managers_list = new (ResourceObj::C_HEAP, mtInternal) GrowableArray(init_managers_list_size, true); -GCMemoryManager* MemoryService::_minor_gc_manager = NULL; -GCMemoryManager* MemoryService::_major_gc_manager = NULL; -MemoryPool* MemoryService::_code_heap_pool = NULL; +GCMemoryManager* MemoryService::_minor_gc_manager = NULL; +GCMemoryManager* MemoryService::_major_gc_manager = NULL; +MemoryPool* MemoryService::_code_heap_pool = NULL; +MemoryPool* MemoryService::_metaspace_pool = NULL; +MemoryPool* MemoryService::_compressed_class_pool = NULL; class GcThreadCountClosure: public ThreadClosure { private: @@ -399,6 +402,22 @@ _managers_list->append(mgr); } +void MemoryService::add_metaspace_memory_pools() { + MemoryManager* mgr = MemoryManager::get_metaspace_memory_manager(); + + _metaspace_pool = new MetaspacePool(); + mgr->add_pool(_metaspace_pool); + _pools_list->append(_metaspace_pool); + + if (UseCompressedKlassPointers) { + _compressed_class_pool = new CompressedKlassSpacePool(); + mgr->add_pool(_compressed_class_pool); + _pools_list->append(_compressed_class_pool); + } + + _managers_list->append(mgr); +} + MemoryManager* MemoryService::get_memory_manager(instanceHandle mh) { for (int i = 0; i < _managers_list->length(); i++) { MemoryManager* mgr = _managers_list->at(i); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/services/memoryService.hpp --- a/src/share/vm/services/memoryService.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/services/memoryService.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -73,6 +73,9 @@ // Code 
heap memory pool static MemoryPool* _code_heap_pool; + static MemoryPool* _metaspace_pool; + static MemoryPool* _compressed_class_pool; + static void add_generation_memory_pool(Generation* gen, MemoryManager* major_mgr, MemoryManager* minor_mgr); @@ -121,6 +124,7 @@ public: static void set_universe_heap(CollectedHeap* heap); static void add_code_heap_memory_pool(CodeHeap* heap); + static void add_metaspace_memory_pools(); static MemoryPool* get_memory_pool(instanceHandle pool); static MemoryManager* get_memory_manager(instanceHandle mgr); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/services/threadService.cpp --- a/src/share/vm/services/threadService.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/services/threadService.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -327,27 +327,30 @@ while (waitingToLockMonitor != NULL || waitingToLockBlocker != NULL) { cycle->add_thread(currentThread); if (waitingToLockMonitor != NULL) { - currentThread = Threads::owning_thread_from_monitor_owner( - (address)waitingToLockMonitor->owner(), - false /* no locking needed */); - if (currentThread == NULL) { - // This function is called at a safepoint so the JavaThread - // that owns waitingToLockMonitor should be findable, but - // if it is not findable, then the previous currentThread is - // blocked permanently. We record this as a deadlock. - num_deadlocks++; + address currentOwner = (address)waitingToLockMonitor->owner(); + if (currentOwner != NULL) { + currentThread = Threads::owning_thread_from_monitor_owner( + currentOwner, + false /* no locking needed */); + if (currentThread == NULL) { + // This function is called at a safepoint so the JavaThread + // that owns waitingToLockMonitor should be findable, but + // if it is not findable, then the previous currentThread is + // blocked permanently. We record this as a deadlock. + num_deadlocks++; - cycle->set_deadlock(true); + cycle->set_deadlock(true); - // add this cycle to the deadlocks list - if (deadlocks == NULL) { - deadlocks = cycle; - } else { - last->set_next(cycle); + // add this cycle to the deadlocks list + if (deadlocks == NULL) { + deadlocks = cycle; + } else { + last->set_next(cycle); + } + last = cycle; + cycle = new DeadlockCycle(); + break; } - last = cycle; - cycle = new DeadlockCycle(); - break; } } else { if (concurrent_locks) { diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/shark/sharkBuilder.cpp --- a/src/share/vm/shark/sharkBuilder.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/shark/sharkBuilder.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -471,7 +471,7 @@ Value* SharkBuilder::CreateInlineMetadata(Metadata* metadata, llvm::PointerType* type, const char* name) { assert(metadata != NULL, "inlined metadata must not be NULL"); - assert(metadata->is_metadata(), "sanity check"); + assert(metadata->is_metaspace_object(), "sanity check"); return CreateLoad( CreateIntToPtr( code_buffer_address(code_buffer()->inline_Metadata(metadata)), diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/trace/noTraceBackend.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/trace/noTraceBackend.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#ifndef SHARE_VM_TRACE_NOTRACEBACKEND_HPP +#define SHARE_VM_TRACE_NOTRACEBACKEND_HPP + +#include "prims/jni.h" + +typedef jlong TracingTime; +typedef jlong RelativeTracingTime; + +class NoTraceBackend { +public: + static TracingTime time() { + return 0; + } +}; + +class TraceThreadData { +public: + TraceThreadData() {} +}; + +typedef NoTraceBackend Tracing; + +#endif + + diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/trace/trace.dtd --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/trace/trace.dtd Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,86 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/trace/trace.xml --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/trace/trace.xml Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,367 @@ + + + + + +%xinclude; +]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/trace/traceBackend.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/trace/traceBackend.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ +#ifndef SHARE_VM_TRACE_TRACEBACKEND_HPP +#define SHARE_VM_TRACE_TRACEBACKEND_HPP + +#include "utilities/macros.hpp" + +#if INCLUDE_TRACE + +#include "runtime/globals.hpp" +#include "runtime/os.hpp" +#include "trace/traceTime.hpp" +#include "tracefiles/traceEventIds.hpp" + +class TraceBackend { +public: + static bool enabled(void) { + return EnableTracing; + } + + static bool is_event_enabled(TraceEventId id) { + return enabled(); + } + + static TracingTime time() { + return os::elapsed_counter(); + } + + static TracingTime time_adjustment(jlong time) { + return time; + } + + static void on_unloading_classes(void) { + } +}; + +class TraceThreadData { +public: + TraceThreadData() {} +}; + +typedef TraceBackend Tracing; + +#else /* INCLUDE_TRACE */ + +#include "trace/noTraceBackend.hpp" + +#endif /* INCLUDE_TRACE */ +#endif /* SHARE_VM_TRACE_TRACEBACKEND_HPP */ diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/trace/traceDataTypes.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/trace/traceDataTypes.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_TRACE_TRACEDATATYPES_HPP +#define SHARE_VM_TRACE_TRACEDATATYPES_HPP + +#include + +#include "utilities/globalDefinitions.hpp" + +enum { + CONTENT_TYPE_NONE = 0, + CONTENT_TYPE_BYTES = 1, + CONTENT_TYPE_EPOCHMILLIS = 2, + CONTENT_TYPE_MILLIS = 3, + CONTENT_TYPE_NANOS = 4, + CONTENT_TYPE_TICKS = 5, + CONTENT_TYPE_ADDRESS = 6, + + CONTENT_TYPE_OSTHREAD, + CONTENT_TYPE_JAVALANGTHREAD, + CONTENT_TYPE_STACKTRACE, + CONTENT_TYPE_CLASS, + CONTENT_TYPE_PERCENTAGE, + + JVM_CONTENT_TYPES_START = 30, + JVM_CONTENT_TYPES_END = 100 +}; + +enum ReservedEvent { + EVENT_PRODUCERS, + EVENT_CHECKPOINT, + EVENT_BUFFERLOST, + + NUM_RESERVED_EVENTS +}; + +typedef enum ReservedEvent ReservedEvent; + +typedef u8 classid; +typedef u8 stacktraceid; +typedef u8 methodid; +typedef u8 fieldid; + +#endif // SHARE_VM_TRACE_TRACEDATATYPES_HPP + diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/trace/traceEvent.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/trace/traceEvent.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_TRACE_TRACEEVENT_HPP
+#define SHARE_VM_TRACE_TRACEEVENT_HPP
+
+enum EventStartTime {
+  UNTIMED,
+  TIMED
+};
+
+#include "utilities/macros.hpp"
+
+#if INCLUDE_TRACE
+
+#include "trace/traceBackend.hpp"
+#include "trace/tracing.hpp"
+#include "tracefiles/traceEventIds.hpp"
+#include "tracefiles/traceTypes.hpp"
+
+template <typename T>
+class TraceEvent : public StackObj {
+ protected:
+  jlong _startTime;
+  jlong _endTime;
+
+ private:
+  bool _started;
+#ifdef ASSERT
+  bool _committed;
+  bool _cancelled;
+ protected:
+  bool _ignore_check;
+#endif
+
+ public:
+  TraceEvent(EventStartTime timing=TIMED) :
+    _startTime(0),
+    _endTime(0),
+    _started(false)
+#ifdef ASSERT
+    ,
+    _committed(false),
+    _cancelled(false),
+    _ignore_check(false)
+#endif
+  {
+    if (T::is_enabled()) {
+      _started = true;
+      if (timing == TIMED && !T::isInstant) {
+        static_cast<T*>(this)->set_starttime(Tracing::time());
+      }
+    }
+  }
+
+  static bool is_enabled() {
+    return Tracing::is_event_enabled(T::eventId);
+  }
+
+  bool should_commit() {
+    return _started;
+  }
+
+  void ignoreCheck() {
+    DEBUG_ONLY(_ignore_check = true);
+  }
+
+  void commit() {
+    if (!should_commit()) {
+      cancel();
+      return;
+    }
+    if (_endTime == 0) {
+      static_cast<T*>(this)->set_endtime(Tracing::time());
+    }
+    if (static_cast<T*>(this)->should_write()) {
+      static_cast<T*>(this)->writeEvent();
+    }
+    set_commited();
+  }
+
+  void set_starttime(jlong time) {
+    _startTime = time;
+  }
+
+  void set_endtime(jlong time) {
+    _endTime = time;
+  }
+
+  TraceEventId id() const {
+    return T::eventId;
+  }
+
+  bool is_instant() const {
+    return T::isInstant;
+  }
+
+  bool is_requestable() const {
+    return T::isRequestable;
+  }
+
+  bool has_thread() const {
+    return T::hasThread;
+  }
+
+  bool has_stacktrace() const {
+    return T::hasStackTrace;
+  }
+
+  void cancel() {
+    assert(!_committed && !_cancelled, "event was already committed/cancelled");
+    DEBUG_ONLY(_cancelled = true);
+  }
+
+  void set_commited() {
+    assert(!_committed, "event has already been committed");
+    DEBUG_ONLY(_committed = true);
+  }
+
+  ~TraceEvent() {
+    if (_started) {
+      assert(_ignore_check || _committed || _cancelled, "event was not committed/cancelled");
+    }
+  }
+};
+
+#endif /* INCLUDE_TRACE */
+
+#endif /* SHARE_VM_TRACE_TRACEEVENT_HPP */ diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/trace/traceEventClasses.xsl --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/trace/traceEventClasses.xsl Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,246 @@
+
+
+
+
+
+
+
+
+
+
+#ifndef TRACEFILES_TRACEEVENTCLASSES_HPP
+#define TRACEFILES_TRACEEVENTCLASSES_HPP
+
+// On purpose outside the INCLUDE_TRACE
+// Some parts of traceEvent.hpp are used outside of +// INCLUDE_TRACE + +#include "memory/resourceArea.hpp" +#include "tracefiles/traceTypes.hpp" +#include "trace/traceEvent.hpp" +#include "utilities/macros.hpp" + +#if INCLUDE_TRACE + + +#include "trace/traceStream.hpp" +#include "utilities/ostream.hpp" + + + + +#else + +class TraceEvent { +public: + TraceEvent() {} + void set_starttime(jlong time) const {} + void set_endtime(jlong time) const {} + bool should_commit() const { return false; } + void commit() const {} +}; + + + + +#endif + +#endif + + + +struct TraceStruct +{ +private: + +public: + + + void writeStruct(TraceStream& ts) { + + } +}; + + + + +struct TraceStruct +{ +public: + +}; + + + + + +{ + public: + + + + + +}; + + + + + + +{ + public: + static const bool hasThread = ; + static const bool hasStackTrace = ; + static const bool isInstant = ; + static const bool isRequestable = ; + static const TraceEventId eventId = ; + + private: + + + void writeEventContent(void) { + TraceStream ts(*tty); + ts.print(": ["); + + ts.print("]\n"); + } + + public: + + + bool should_write(void) { + return true; + } + + + + + void writeEvent(void) { + ResourceMark rm; + if (UseLockedTracing) { + ttyLocker lock; + writeEventContent(); + } else { + writeEventContent(); + } + } +}; + + + + + + + + + + + + + + + + + + + + + + + + + +#if INCLUDE_TRACE + +#else + +#endif + + + +#if INCLUDE_TRACE + +#else + +#endif + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ts.print(", "); + + + + + + + + + ts.print(", "); + + + + + diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/trace/traceEventIds.xsl --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/trace/traceEventIds.xsl Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,74 @@ + + + + + + + + + + +#ifndef TRACEFILES_JFREVENTIDS_HPP +#define TRACEFILES_JFREVENTIDS_HPP + +#include "utilities/macros.hpp" + +#if INCLUDE_TRACE + +#include "trace/traceDataTypes.hpp" + +/** + * Enum of the event types in the JVM + */ +enum TraceEventId { + _traceeventbase = (NUM_RESERVED_EVENTS-1), // Make sure we start at right index. + + // Events -> enum entry + + + + MaxTraceEventId +}; + +/** + * Struct types in the JVM + */ +enum TraceStructId { + + + + + + + MaxTraceStructId +}; + +typedef enum TraceEventId TraceEventId; +typedef enum TraceStructId TraceStructId; + +#endif +#endif + + + diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/trace/traceEventTypes.hpp --- a/src/share/vm/trace/traceEventTypes.hpp Tue Jul 16 10:55:48 2013 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,30 +0,0 @@ -/* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_VM_TRACE_TRACE_EVENT_TYPES_HPP -#define SHARE_VM_TRACE_TRACE_EVENT_TYPES_HPP - -/* Empty, just a placeholder for tracing events */ - -#endif diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/trace/traceMacros.hpp --- a/src/share/vm/trace/traceMacros.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/trace/traceMacros.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,22 +25,14 @@ #ifndef SHARE_VM_TRACE_TRACE_MACRO_HPP #define SHARE_VM_TRACE_TRACE_MACRO_HPP -#define EVENT_BEGIN(type, name) -#define EVENT_SET(name, field, value) -#define EVENT_COMMIT(name, ...) -#define EVENT_STARTED(name, time) -#define EVENT_ENDED(name, time) #define EVENT_THREAD_EXIT(thread) -#define TRACE_ENABLED 0 - #define TRACE_INIT_ID(k) -#define TRACE_BUFFER void* +#define TRACE_DATA TraceThreadData -#define TRACE_START() true -#define TRACE_INITIALIZE() 0 +#define TRACE_START() JNI_OK +#define TRACE_INITIALIZE() JNI_OK -#define TRACE_SET_KLASS_TRACE_ID(x1, x2) do { } while (0) #define TRACE_DEFINE_KLASS_METHODS typedef int ___IGNORED_hs_trace_type1 #define TRACE_DEFINE_KLASS_TRACE_ID typedef int ___IGNORED_hs_trace_type2 #define TRACE_DEFINE_OFFSET typedef int ___IGNORED_hs_trace_type3 diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/trace/traceStream.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/trace/traceStream.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_TRACE_TRACESTREAM_HPP +#define SHARE_VM_TRACE_TRACESTREAM_HPP + +#include "utilities/macros.hpp" + +#if INCLUDE_TRACE + +#include "oops/klass.hpp" +#include "oops/method.hpp" +#include "oops/symbol.hpp" +#include "utilities/ostream.hpp" + +class TraceStream : public StackObj { + private: + outputStream& _st; + + public: + TraceStream(outputStream& stream): _st(stream) {} + + void print_val(const char* label, u1 val) { + _st.print("%s = "UINT32_FORMAT, label, val); + } + + void print_val(const char* label, u2 val) { + _st.print("%s = "UINT32_FORMAT, label, val); + } + + void print_val(const char* label, s2 val) { + _st.print("%s = "INT32_FORMAT, label, val); + } + + void print_val(const char* label, u4 val) { + _st.print("%s = "UINT32_FORMAT, label, val); + } + + void print_val(const char* label, s4 val) { + _st.print("%s = "INT32_FORMAT, label, val); + } + + void print_val(const char* label, u8 val) { + _st.print("%s = "UINT64_FORMAT, label, val); + } + + void print_val(const char* label, s8 val) { + _st.print("%s = "INT64_FORMAT, label, val); + } + + void print_val(const char* label, bool val) { + _st.print("%s = %s", label, val ? "true" : "false"); + } + + void print_val(const char* label, float val) { + _st.print("%s = %f", label, val); + } + + void print_val(const char* label, double val) { + _st.print("%s = %f", label, val); + } + + // Caller is machine generated code located in traceEventClasses.hpp + // Event::writeEvent() (pseudocode) contains the + // necessary ResourceMark for the resource allocations below. + // See traceEventClasses.xsl for details. + void print_val(const char* label, const Klass* const val) { + const char* description = "NULL"; + if (val != NULL) { + Symbol* name = val->name(); + if (name != NULL) { + description = name->as_C_string(); + } + } + _st.print("%s = %s", label, description); + } + + // Caller is machine generated code located in traceEventClasses.hpp + // Event::writeEvent() (pseudocode) contains the + // necessary ResourceMark for the resource allocations below. + // See traceEventClasses.xsl for details. + void print_val(const char* label, const Method* const val) { + const char* description = "NULL"; + if (val != NULL) { + description = val->name_and_sig_as_C_string(); + } + _st.print("%s = %s", label, description); + } + + void print_val(const char* label, const char* val) { + _st.print("%s = '%s'", label, val); + } + + void print(const char* val) { + _st.print(val); + } +}; + +#endif /* INCLUDE_TRACE */ +#endif /* SHARE_VM_TRACE_TRACESTREAM_HPP */ diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/trace/traceTime.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/trace/traceTime.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_TRACE_TRACETIME_HPP +#define SHARE_VM_TRACE_TRACETIME_HPP + +#include "prims/jni.h" + +typedef jlong TracingTime; +typedef jlong RelativeTracingTime; + +#endif diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/trace/traceTypes.xsl --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/trace/traceTypes.xsl Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,72 @@ + + + + + + + + + + +#ifndef TRACEFILES_JFRTYPES_HPP +#define TRACEFILES_JFRTYPES_HPP + +#include "trace/traceDataTypes.hpp" +#include "utilities/globalDefinitions.hpp" +#include "oops/symbol.hpp" + +enum JVMContentType { + _not_a_content_type = (JVM_CONTENT_TYPES_START - 1), + + + + + NUM_JVM_CONTENT_TYPES +}; + + +enum JVMEventRelations { + JVM_REL_NOT_AVAILABLE = 0, + + + + + NUM_EVENT_RELATIONS +}; + +/** + * Create typedefs for the JRA types: + * typedef s8 TYPE_LONG; + * typedef s4 TYPE_INTEGER; + * typedef const char * TYPE_STRING; + * ... + */ + +typedef TYPE_; + + +#endif // JFRFILES_JFRTYPES_HPP + + + diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/trace/tracetypes.xml --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/trace/tracetypes.xml Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,368 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/trace/tracing.hpp --- a/src/share/vm/trace/tracing.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/trace/tracing.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #ifndef SHARE_VM_TRACE_TRACING_HPP #define SHARE_VM_TRACE_TRACING_HPP -#include "trace/traceMacros.hpp" +#include "tracefiles/traceEventClasses.hpp" +#include "tracefiles/traceEventIds.hpp" #endif diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/trace/xinclude.mod --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/trace/xinclude.mod Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,61 @@ + + + + + + + + + diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/trace/xsl_util.xsl --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/trace/xsl_util.xsl Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,78 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + " + + + + /* AUTOMATICALLY GENERATED FILE - DO NOT EDIT */ + + + + + + + + + + + + + + + + + + + + + + + + diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/utilities/bitMap.cpp --- a/src/share/vm/utilities/bitMap.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/utilities/bitMap.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -41,7 +41,7 @@ BitMap::BitMap(bm_word_t* map, idx_t size_in_bits) : - _map(map), _size(size_in_bits) + _map(map), _size(size_in_bits), _map_allocator(false) { assert(sizeof(bm_word_t) == BytesPerWord, "Implementation assumption."); assert(size_in_bits >= 0, "just checking"); @@ -49,7 +49,7 @@ BitMap::BitMap(idx_t size_in_bits, bool in_resource_area) : - _map(NULL), _size(0) + _map(NULL), _size(0), _map_allocator(false) { assert(sizeof(bm_word_t) == BytesPerWord, "Implementation assumption."); resize(size_in_bits, in_resource_area); @@ -65,8 +65,10 @@ if (in_resource_area) { _map = NEW_RESOURCE_ARRAY(bm_word_t, new_size_in_words); } else { - if (old_map != NULL) FREE_C_HEAP_ARRAY(bm_word_t, _map, mtInternal); - _map = NEW_C_HEAP_ARRAY(bm_word_t, new_size_in_words, mtInternal); + if (old_map != NULL) { + _map_allocator.free(); + } + _map = _map_allocator.allocate(new_size_in_words); } Copy::disjoint_words((HeapWord*)old_map, (HeapWord*) _map, MIN2(old_size_in_words, new_size_in_words)); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/utilities/bitMap.hpp --- a/src/share/vm/utilities/bitMap.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/utilities/bitMap.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -48,6 +48,7 @@ } RangeSizeHint; private: + ArrayAllocator _map_allocator; bm_word_t* _map; // First word in bitmap idx_t _size; // Size of bitmap (in bits) @@ -113,7 +114,7 @@ public: // Constructs a bitmap with no map, and size 0. - BitMap() : _map(NULL), _size(0) {} + BitMap() : _map(NULL), _size(0), _map_allocator(false) {} // Constructs a bitmap with the given map and size. BitMap(bm_word_t* map, idx_t size_in_bits); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/utilities/debug.cpp --- a/src/share/vm/utilities/debug.cpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/utilities/debug.cpp Tue Jul 16 12:20:08 2013 -0400 @@ -314,8 +314,8 @@ #ifndef PRODUCT #include -void test_error_handler(size_t test_num) -{ +void test_error_handler() { + uintx test_num = ErrorHandlerTest; if (test_num == 0) return; // If asserts are disabled, use the corresponding guarantee instead. @@ -327,6 +327,8 @@ const char* const eol = os::line_separator(); const char* const msg = "this message should be truncated during formatting"; + char * const dataPtr = NULL; // bad data pointer + const void (*funcPtr)(void) = (const void(*)()) 0xF; // bad function pointer // Keep this in sync with test/runtime/6888954/vmerrors.sh. 
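  // (Descriptive note, not part of the patch: cases 12 and 13 below replace
  //  the old "os::signal_raise(SIGSEGV)" case; since a store through a bad
  //  data pointer or a call through a bad function pointer is not guaranteed
  //  to crash on every platform, both cases fall through to the
  //  ShouldNotReachHere() after the switch.)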
switch (n) { @@ -348,11 +350,16 @@ case 9: ShouldNotCallThis(); case 10: ShouldNotReachHere(); case 11: Unimplemented(); - // This is last because it does not generate an hs_err* file on Windows. - case 12: os::signal_raise(SIGSEGV); + // There's no guarantee the bad data pointer will crash us + // so "break" out to the ShouldNotReachHere(). + case 12: *dataPtr = '\0'; break; + // There's no guarantee the bad function pointer will crash us + // so "break" out to the ShouldNotReachHere(). + case 13: (*funcPtr)(); break; - default: ShouldNotReachHere(); + default: tty->print_cr("ERROR: %d: unexpected test_num value.", n); } + ShouldNotReachHere(); } #endif // !PRODUCT diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/utilities/debug.hpp --- a/src/share/vm/utilities/debug.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/utilities/debug.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -243,7 +243,7 @@ void set_error_reported(); /* Test assert(), fatal(), guarantee(), etc. */ -NOT_PRODUCT(void test_error_handler(size_t test_num);) +NOT_PRODUCT(void test_error_handler();) void pd_ps(frame f); void pd_obfuscate_location(char *buf, size_t buflen); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/utilities/exceptions.hpp --- a/src/share/vm/utilities/exceptions.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/utilities/exceptions.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -194,15 +194,15 @@ #define HAS_PENDING_EXCEPTION (((ThreadShadow*)THREAD)->has_pending_exception()) #define CLEAR_PENDING_EXCEPTION (((ThreadShadow*)THREAD)->clear_pending_exception()) -#define CHECK THREAD); if (HAS_PENDING_EXCEPTION) return ; (0 -#define CHECK_(result) THREAD); if (HAS_PENDING_EXCEPTION) return result; (0 +#define CHECK THREAD); if (HAS_PENDING_EXCEPTION) return ; (void)(0 +#define CHECK_(result) THREAD); if (HAS_PENDING_EXCEPTION) return result; (void)(0 #define CHECK_0 CHECK_(0) #define CHECK_NH CHECK_(Handle()) #define CHECK_NULL CHECK_(NULL) #define CHECK_false CHECK_(false) -#define CHECK_AND_CLEAR THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return; } (0 -#define CHECK_AND_CLEAR_(result) THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return result; } (0 +#define CHECK_AND_CLEAR THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return; } (void)(0 +#define CHECK_AND_CLEAR_(result) THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return result; } (void)(0 #define CHECK_AND_CLEAR_0 CHECK_AND_CLEAR_(0) #define CHECK_AND_CLEAR_NH CHECK_AND_CLEAR_(Handle()) #define CHECK_AND_CLEAR_NULL CHECK_AND_CLEAR_(NULL) @@ -282,7 +282,7 @@ CLEAR_PENDING_EXCEPTION; \ ex->print(); \ ShouldNotReachHere(); \ - } (0 + } (void)(0 // ExceptionMark is a stack-allocated helper class for local exception handling. // It is used with the EXCEPTION_MARK macro. 
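 // (Editorial note on the "(void)(0" change above, not part of the patch:
 //  the CHECK macros splice into an argument list, so
 //    foo(arg, CHECK);
 //  expands to
 //    foo(arg, THREAD); if (HAS_PENDING_EXCEPTION) return; (void)(0);
 //  A bare trailing "(0);" statement triggers g++'s -Wunused-value;
 //  casting it to void does not, which is why each macro now ends in
 //  "(void)(0" rather than "(0".)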
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/utilities/globalDefinitions.hpp
--- a/src/share/vm/utilities/globalDefinitions.hpp	Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/utilities/globalDefinitions.hpp	Tue Jul 16 12:20:08 2013 -0400
@@ -763,18 +763,6 @@

 TosState as_TosState(BasicType type);


-// ReferenceType is used to distinguish between java/lang/ref/Reference subclasses
-
-enum ReferenceType {
- REF_NONE,    // Regular class
- REF_OTHER,   // Subclass of java/lang/ref/Reference, but not subclass of one of the classes below
- REF_SOFT,    // Subclass of java/lang/ref/SoftReference
- REF_WEAK,    // Subclass of java/lang/ref/WeakReference
- REF_FINAL,   // Subclass of java/lang/ref/FinalReference
- REF_PHANTOM  // Subclass of java/lang/ref/PhantomReference
-};
-
-
 // JavaThreadState keeps track of which part of the code a thread is executing in. This
 // information is needed by the safepoint code.
 //
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/utilities/macros.hpp
--- a/src/share/vm/utilities/macros.hpp	Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/utilities/macros.hpp	Tue Jul 16 12:20:08 2013 -0400
@@ -160,6 +160,10 @@
 #define NOT_NMT_RETURN_(code) { return code; }
 #endif // INCLUDE_NMT

+#ifndef INCLUDE_TRACE
+#define INCLUDE_TRACE 1
+#endif // INCLUDE_TRACE
+
 // COMPILER1 variant
 #ifdef COMPILER1
 #ifdef COMPILER2
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/utilities/taskqueue.hpp
--- a/src/share/vm/utilities/taskqueue.hpp	Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/utilities/taskqueue.hpp	Tue Jul 16 12:20:08 2013 -0400
@@ -340,8 +340,12 @@
   if (dirty_n_elems == N - 1) {
     // Actually means 0, so do the push.
     uint localBot = _bottom;
-    // g++ complains if the volatile result of the assignment is unused.
-    const_cast<E&>(_elems[localBot] = t);
+    // g++ complains if the volatile result of the assignment is
+    // unused, so we cast the volatile away. We cannot cast directly
+    // to void, because gcc treats that as not using the result of the
+    // assignment. However, casting to E& means that we trigger an
+    // unused-value warning. So, we cast the E& to void.
+    (void)const_cast<E&>(_elems[localBot] = t);
     OrderAccess::release_store(&_bottom, increment_index(localBot));
     TASKQUEUE_STATS_ONLY(stats.record_push());
     return true;
@@ -397,7 +401,12 @@
     return false;
   }

-  const_cast<E&>(t = _elems[oldAge.top()]);
+  // g++ complains if the volatile result of the assignment is
+  // unused, so we cast the volatile away. We cannot cast directly
+  // to void, because gcc treats that as not using the result of the
+  // assignment. However, casting to E& means that we trigger an
+  // unused-value warning. So, we cast the E& to void.
+  (void) const_cast<E&>(t = _elems[oldAge.top()]);
   Age newAge(oldAge);
   newAge.increment();
   Age resAge = _age.cmpxchg(newAge, oldAge);
@@ -640,8 +649,12 @@
   uint dirty_n_elems = dirty_size(localBot, top);
   assert(dirty_n_elems < N, "n_elems out of range.");
   if (dirty_n_elems < max_elems()) {
-    // g++ complains if the volatile result of the assignment is unused.
-    const_cast<E&>(_elems[localBot] = t);
+    // g++ complains if the volatile result of the assignment is
+    // unused, so we cast the volatile away. We cannot cast directly
+    // to void, because gcc treats that as not using the result of the
+    // assignment. However, casting to E& means that we trigger an
+    // unused-value warning. So, we cast the E& to void.
+    (void) const_cast<E&>(_elems[localBot] = t);
     OrderAccess::release_store(&_bottom, increment_index(localBot));
     TASKQUEUE_STATS_ONLY(stats.record_push());
     return true;
@@ -665,7 +678,12 @@
   // This is necessary to prevent any read below from being reordered
   // before the store just above.
   OrderAccess::fence();
-  const_cast<E&>(t = _elems[localBot]);
+  // g++ complains if the volatile result of the assignment is
+  // unused, so we cast the volatile away. We cannot cast directly
+  // to void, because gcc treats that as not using the result of the
+  // assignment. However, casting to E& means that we trigger an
+  // unused-value warning. So, we cast the E& to void.
+  (void) const_cast<E&>(t = _elems[localBot]);
   // This is a second read of "age"; the "size()" above is the first.
   // If there's still at least one element in the queue, based on the
   // "_bottom" and "age" we've read, then there can be no interference with
diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/utilities/vmError.cpp
--- a/src/share/vm/utilities/vmError.cpp	Tue Jul 16 10:55:48 2013 -0400
+++ b/src/share/vm/utilities/vmError.cpp	Tue Jul 16 12:20:08 2013 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -799,6 +799,14 @@
 VMError* volatile VMError::first_error = NULL;
 volatile jlong VMError::first_error_tid = -1;

+// An error could happen before tty is initialized or after it has been
+// destroyed. Here we use a very simple unbuffered fdStream for printing.
+// Only out.print_raw() and out.print_raw_cr() should be used, as other
+// printing methods need to allocate large buffer on stack. To format a
+// string, use jio_snprintf() with a static buffer or use staticBufferStream.
+fdStream VMError::out(defaultStream::output_fd());
+fdStream VMError::log; // error log used by VMError::report_and_die()
+
 /** Expand a pattern into a buffer starting at pos and open a file using constructed path */
 static int expand_and_open(const char* pattern, char* buf, size_t buflen, size_t pos) {
   int fd = -1;
@@ -853,13 +861,6 @@
   // Don't allocate large buffer on stack
   static char buffer[O_BUFLEN];

-  // An error could happen before tty is initialized or after it has been
-  // destroyed. Here we use a very simple unbuffered fdStream for printing.
-  // Only out.print_raw() and out.print_raw_cr() should be used, as other
-  // printing methods need to allocate large buffer on stack. To format a
-  // string, use jio_snprintf() with a static buffer or use staticBufferStream.
-  static fdStream out(defaultStream::output_fd());
-
   // How many errors occurred in error handler when reporting first_error.
   static int recursive_error_count;

@@ -868,7 +869,6 @@
   static bool out_done = false;         // done printing to standard out
   static bool log_done = false;         // done saving error log
   static bool transmit_report_done = false; // done error reporting
-  static fdStream log;                  // error log

   // disable NMT to avoid further exceptions
   MemTracker::shutdown(MemTracker::NMT_error_reporting);
@@ -908,10 +908,11 @@
   // This is not the first error, see if it happened in a different thread
   // or in the same thread during error reporting.
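   // (Descriptive note, not part of the patch: the hunk below switches the
   //  "[thread ... also had an error]" message from the shared static
   //  'buffer' to a small stack-local 'msgbuf', so a second failing thread
   //  no longer races with the first thread's use of the shared buffer.)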
if (first_error_tid != mytid) { - jio_snprintf(buffer, sizeof(buffer), + char msgbuf[64]; + jio_snprintf(msgbuf, sizeof(msgbuf), "[thread " INT64_FORMAT " also had an error]", mytid); - out.print_raw_cr(buffer); + out.print_raw_cr(msgbuf); // error reporting is not MT-safe, block current thread os::infinite_sleep(); diff -r 16b10327b00d -r 90d6c221d4e5 src/share/vm/utilities/vmError.hpp --- a/src/share/vm/utilities/vmError.hpp Tue Jul 16 10:55:48 2013 -0400 +++ b/src/share/vm/utilities/vmError.hpp Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -96,6 +96,9 @@ return (id != OOM_MALLOC_ERROR) && (id != OOM_MMAP_ERROR); } + static fdStream out; + static fdStream log; // error log used by VMError::report_and_die() + public: // Constructor for crashes diff -r 16b10327b00d -r 90d6c221d4e5 test/compiler/7088419/CRCTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/7088419/CRCTest.java Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + @test + @bug 7088419 + @run main CRCTest + @summary Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32 and java.util.zip.Adler32 + */ + +import java.nio.ByteBuffer; +import java.util.zip.CRC32; +import java.util.zip.Checksum; + +public class CRCTest { + + public static void main(String[] args) throws Exception { + + byte[] b = initializedBytes(4096 * 4096); + + { + CRC32 crc1 = new CRC32(); + CRC32 crc2 = new CRC32(); + CRC32 crc3 = new CRC32(); + CRC32 crc4 = new CRC32(); + + crc1.update(b, 0, b.length); + updateSerial(crc2, b, 0, b.length); + updateDirect(crc3, b, 0, b.length); + updateSerialSlow(crc4, b, 0, b.length); + + check(crc1, crc2); + check(crc3, crc4); + check(crc1, crc3); + + crc1.update(17); + crc2.update(17); + crc3.update(17); + crc4.update(17); + + crc1.update(b, 1, b.length-2); + updateSerial(crc2, b, 1, b.length-2); + updateDirect(crc3, b, 1, b.length-2); + updateSerialSlow(crc4, b, 1, b.length-2); + + check(crc1, crc2); + check(crc3, crc4); + check(crc1, crc3); + + report("finished huge crc", crc1, crc2, crc3, crc4); + + for (int i = 0; i < 256; i++) { + for (int j = 0; j < 256; j += 1) { + crc1.update(b, i, j); + updateSerial(crc2, b, i, j); + updateDirect(crc3, b, i, j); + updateSerialSlow(crc4, b, i, j); + + check(crc1, crc2); + check(crc3, crc4); + check(crc1, crc3); + + } + } + + report("finished small survey crc", crc1, crc2, crc3, crc4); + } + + } + + private static void report(String s, Checksum crc1, Checksum crc2, + Checksum crc3, Checksum crc4) { + System.out.println(s + ", crc1 = " + crc1.getValue() + + ", crc2 = " + crc2.getValue()+ + ", crc3 = " + crc3.getValue()+ + ", crc4 = " + crc4.getValue()); + } + + private static void check(Checksum crc1, Checksum crc2) throws Exception { + if (crc1.getValue() != crc2.getValue()) { + String s = "value 1 = " + crc1.getValue() + ", value 2 = " + crc2.getValue(); + System.err.println(s); + throw new Exception(s); + } + } + + private static byte[] initializedBytes(int M) { + byte[] bytes = new byte[M]; + for (int i = 0; i < bytes.length; i++) { + bytes[i] = (byte) i; + } + return bytes; + } + + private static void updateSerial(Checksum crc, byte[] b, int start, int length) { + for (int i = 0; i < length; i++) + crc.update(b[i+start]); + } + + private static void updateSerialSlow(Checksum crc, byte[] b, int start, int length) { + for (int i = 0; i < length; i++) + crc.update(b[i+start]); + crc.getValue(); + } + + private static void updateDirect(CRC32 crc3, byte[] b, int start, int length) { + ByteBuffer buf = ByteBuffer.allocateDirect(length); + buf.put(b, start, length); + buf.flip(); + crc3.update(buf); + } +} diff -r 16b10327b00d -r 90d6c221d4e5 test/compiler/8005956/PolynomialRoot.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/8005956/PolynomialRoot.java Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,783 @@ +//package com.polytechnik.utils; +/* + * (C) Vladislav Malyshkin 2010 + * This file is under GPL version 3. + * + */ + +/** Polynomial root. 
+ * @version $Id: PolynomialRoot.java,v 1.105 2012/08/18 00:00:05 mal Exp $ + * @author Vladislav Malyshkin mal@gromco.com + */ + +/** +* @test +* @bug 8005956 +* @summary C2: assert(!def_outside->member(r)) failed: Use of external LRG overlaps the same LRG defined in this block +* +* @run main/timeout=300 PolynomialRoot +*/ + +public class PolynomialRoot { + + +public static int findPolynomialRoots(final int n, + final double [] p, + final double [] re_root, + final double [] im_root) +{ + if(n==4) + { + return root4(p,re_root,im_root); + } + else if(n==3) + { + return root3(p,re_root,im_root); + } + else if(n==2) + { + return root2(p,re_root,im_root); + } + else if(n==1) + { + return root1(p,re_root,im_root); + } + else + { + throw new RuntimeException("n="+n+" is not supported yet"); + } +} + + + +static final double SQRT3=Math.sqrt(3.0),SQRT2=Math.sqrt(2.0); + + +private static final boolean PRINT_DEBUG=false; + +public static int root4(final double [] p,final double [] re_root,final double [] im_root) +{ + if(PRINT_DEBUG) System.err.println("=====================root4:p="+java.util.Arrays.toString(p)); + final double vs=p[4]; + if(PRINT_DEBUG) System.err.println("p[4]="+p[4]); + if(!(Math.abs(vs)>EPS)) + { + re_root[0]=re_root[1]=re_root[2]=re_root[3]= + im_root[0]=im_root[1]=im_root[2]=im_root[3]=Double.NaN; + return -1; + } + +/* zsolve_quartic.c - finds the complex roots of + * x^4 + a x^3 + b x^2 + c x + d = 0 + */ + final double a=p[3]/vs,b=p[2]/vs,c=p[1]/vs,d=p[0]/vs; + if(PRINT_DEBUG) System.err.println("input a="+a+" b="+b+" c="+c+" d="+d); + + + final double r4 = 1.0 / 4.0; + final double q2 = 1.0 / 2.0, q4 = 1.0 / 4.0, q8 = 1.0 / 8.0; + final double q1 = 3.0 / 8.0, q3 = 3.0 / 16.0; + final int mt; + + /* Deal easily with the cases where the quartic is degenerate. The + * ordering of solutions is done explicitly. 
*/ + if (0 == b && 0 == c) + { + if (0 == d) + { + re_root[0]=-a; + im_root[0]=im_root[1]=im_root[2]=im_root[3]=0; + re_root[1]=re_root[2]=re_root[3]=0; + return 4; + } + else if (0 == a) + { + if (d > 0) + { + final double sq4 = Math.sqrt(Math.sqrt(d)); + re_root[0]=sq4*SQRT2/2; + im_root[0]=re_root[0]; + re_root[1]=-re_root[0]; + im_root[1]=re_root[0]; + re_root[2]=-re_root[0]; + im_root[2]=-re_root[0]; + re_root[3]=re_root[0]; + im_root[3]=-re_root[0]; + if(PRINT_DEBUG) System.err.println("Path a=0 d>0"); + } + else + { + final double sq4 = Math.sqrt(Math.sqrt(-d)); + re_root[0]=sq4; + im_root[0]=0; + re_root[1]=0; + im_root[1]=sq4; + re_root[2]=0; + im_root[2]=-sq4; + re_root[3]=-sq4; + im_root[3]=0; + if(PRINT_DEBUG) System.err.println("Path a=0 d<0"); + } + return 4; + } + } + + if (0.0 == c && 0.0 == d) + { + root2(new double []{p[2],p[3],p[4]},re_root,im_root); + re_root[2]=im_root[2]=re_root[3]=im_root[3]=0; + return 4; + } + + if(PRINT_DEBUG) System.err.println("G Path c="+c+" d="+d); + final double [] u=new double[3]; + + if(PRINT_DEBUG) System.err.println("Generic Path"); + /* For non-degenerate solutions, proceed by constructing and + * solving the resolvent cubic */ + final double aa = a * a; + final double pp = b - q1 * aa; + final double qq = c - q2 * a * (b - q4 * aa); + final double rr = d - q4 * a * (c - q4 * a * (b - q3 * aa)); + final double rc = q2 * pp , rc3 = rc / 3; + final double sc = q4 * (q4 * pp * pp - rr); + final double tc = -(q8 * qq * q8 * qq); + if(PRINT_DEBUG) System.err.println("aa="+aa+" pp="+pp+" qq="+qq+" rr="+rr+" rc="+rc+" sc="+sc+" tc="+tc); + final boolean flag_realroots; + + /* This code solves the resolvent cubic in a convenient fashion + * for this implementation of the quartic. If there are three real + * roots, then they are placed directly into u[]. If two are + * complex, then the real root is put into u[0] and the real + * and imaginary part of the complex roots are placed into + * u[1] and u[2], respectively. 
+ */
+  {
+    final double qcub = (rc * rc - 3 * sc);
+    final double rcub = (rc*(2 * rc * rc - 9 * sc) + 27 * tc);
+
+    final double Q = qcub / 9;
+    final double R = rcub / 54;
+
+    final double Q3 = Q * Q * Q;
+    final double R2 = R * R;
+
+    final double CR2 = 729 * rcub * rcub;
+    final double CQ3 = 2916 * qcub * qcub * qcub;
+
+    if(PRINT_DEBUG) System.err.println("CR2="+CR2+" CQ3="+CQ3+" R="+R+" Q="+Q);
+
+    if (0 == R && 0 == Q)
+    {
+      flag_realroots=true;
+      u[0] = -rc3;
+      u[1] = -rc3;
+      u[2] = -rc3;
+    }
+    else if (CR2 == CQ3)
+    {
+      flag_realroots=true;
+      final double sqrtQ = Math.sqrt (Q);
+      if (R > 0)
+      {
+        u[0] = -2 * sqrtQ - rc3;
+        u[1] = sqrtQ - rc3;
+        u[2] = sqrtQ - rc3;
+      }
+      else
+      {
+        u[0] = -sqrtQ - rc3;
+        u[1] = -sqrtQ - rc3;
+        u[2] = 2 * sqrtQ - rc3;
+      }
+    }
+    else if (R2 < Q3)
+    {
+      flag_realroots=true;
+      final double ratio = (R >= 0?1:-1) * Math.sqrt (R2 / Q3);
+      final double theta = Math.acos (ratio);
+      final double norm = -2 * Math.sqrt (Q);
+
+      u[0] = norm * Math.cos (theta / 3) - rc3;
+      u[1] = norm * Math.cos ((theta + 2.0 * Math.PI) / 3) - rc3;
+      u[2] = norm * Math.cos ((theta - 2.0 * Math.PI) / 3) - rc3;
+    }
+    else
+    {
+      flag_realroots=false;
+      final double A = -(R >= 0?1:-1)*Math.pow(Math.abs(R)+Math.sqrt(R2-Q3),1.0/3.0);
+      final double B = Q / A;
+
+      u[0] = A + B - rc3;
+      u[1] = -0.5 * (A + B) - rc3;
+      u[2] = -(SQRT3*0.5) * Math.abs (A - B);
+    }
+    if(PRINT_DEBUG) System.err.println("u[0]="+u[0]+" u[1]="+u[1]+" u[2]="+u[2]+" qq="+qq+" disc="+((CR2 - CQ3) / 2125764.0));
+  }
+  /* End of solution to resolvent cubic */
+
+  /* Combine the square roots of the roots of the cubic
+   * resolvent appropriately. Also, calculate 'mt' which
+   * designates the nature of the roots:
+   * mt=1 : 4 real roots
+   * mt=2 : 0 real roots
+   * mt=3 : 2 real roots
+   */
+
+
+  final double w1_re,w1_im,w2_re,w2_im,w3_re,w3_im,mod_w1w2,mod_w1w2_squared;
+  if (flag_realroots)
+  {
+    mod_w1w2=-1;
+    mt = 2;
+    int jmin=0;
+    double vmin=Math.abs(u[jmin]);
+    for(int j=1;j<3;j++)
+    {
+      final double vx=Math.abs(u[j]);
+      if(vx<vmin)
+      {
+        vmin=vx;
+        jmin=j;
+      }
+    }
+    final double u1=u[(jmin+1)%3],u2=u[(jmin+2)%3];
+    mod_w1w2_squared=Math.abs(u1*u2);
+    if(u1>=0)
+    {
+      w1_re=Math.sqrt(u1);
+      w1_im=0;
+    }
+    else
+    {
+      w1_re=0;
+      w1_im=Math.sqrt(-u1);
+    }
+    if(u2>=0)
+    {
+      w2_re=Math.sqrt(u2);
+      w2_im=0;
+    }
+    else
+    {
+      w2_re=0;
+      w2_im=Math.sqrt(-u2);
+    }
+    if(PRINT_DEBUG) System.err.println("u1="+u1+" u2="+u2+" jmin="+jmin);
+  }
+  else
+  {
+    mt = 3;
+    final double w_mod2_sq=u[1]*u[1]+u[2]*u[2],w_mod2=Math.sqrt(w_mod2_sq),w_mod=Math.sqrt(w_mod2);
+    if(w_mod2_sq<=0)
+    {
+      w1_re=w1_im=0;
+    }
+    else
+    {
+      // calculate square root of a complex number (u[1],u[2])
+      // the result is in the (w1_re,w1_im)
+      final double absu1=Math.abs(u[1]),absu2=Math.abs(u[2]),w;
+      if(absu1>=absu2)
+      {
+        final double t=absu2/absu1;
+        w=Math.sqrt(absu1*0.5 * (1.0 + Math.sqrt(1.0 + t * t)));
+        if(PRINT_DEBUG) System.err.println(" Path1 ");
+      }
+      else
+      {
+        final double t=absu1/absu2;
+        w=Math.sqrt(absu2*0.5 * (t + Math.sqrt(1.0 + t * t)));
+        if(PRINT_DEBUG) System.err.println(" Path1a ");
+      }
+      if(u[1]>=0)
+      {
+        w1_re=w;
+        w1_im=u[2]/(2*w);
+        if(PRINT_DEBUG) System.err.println(" Path2 ");
+      }
+      else
+      {
+        final double vi = (u[2] >= 0) ? w : -w;
+        w1_re=u[2]/(2*vi);
+        w1_im=vi;
+        if(PRINT_DEBUG) System.err.println(" Path2a ");
+      }
+    }
+    final double absu0=Math.abs(u[0]);
+    if(w_mod2>=absu0)
+    {
+      mod_w1w2=w_mod2;
+      mod_w1w2_squared=w_mod2_sq;
+      w2_re=w1_re;
+      w2_im=-w1_im;
+    }
+    else
+    {
+      mod_w1w2=-1;
+      mod_w1w2_squared=w_mod2*absu0;
+      if(u[0]>=0)
+      {
+        w2_re=Math.sqrt(absu0);
+        w2_im=0;
+      }
+      else
+      {
+        w2_re=0;
+        w2_im=Math.sqrt(absu0);
+      }
+    }
+    if(PRINT_DEBUG) System.err.println("u[0]="+u[0]+"u[1]="+u[1]+" u[2]="+u[2]+" absu0="+absu0+" w_mod="+w_mod+" w_mod2="+w_mod2);
+  }
+
+  /* Solve the quadratic in order to obtain the roots
+   * to the quartic */
+  if(mod_w1w2>0)
+  {
+    // a shortcut to reduce rounding error
+    w3_re=qq/(-8)/mod_w1w2;
+    w3_im=0;
+  }
+  else if(mod_w1w2_squared>0)
+  {
+    // regular path
+    final double mqq8n=qq/(-8)/mod_w1w2_squared;
+    w3_re=mqq8n*(w1_re*w2_re-w1_im*w2_im);
+    w3_im=-mqq8n*(w1_re*w2_im+w2_re*w1_im);
+  }
+  else
+  {
+    // typically occurs when qq==0
+    w3_re=w3_im=0;
+  }
+
+  final double h = r4 * a;
+  if(PRINT_DEBUG) System.err.println("w1_re="+w1_re+" w1_im="+w1_im+" w2_re="+w2_re+" w2_im="+w2_im+" w3_re="+w3_re+" w3_im="+w3_im+" h="+h);
+
+  re_root[0]=w1_re+w2_re+w3_re-h;
+  im_root[0]=w1_im+w2_im+w3_im;
+  re_root[1]=-(w1_re+w2_re)+w3_re-h;
+  im_root[1]=-(w1_im+w2_im)+w3_im;
+  re_root[2]=w2_re-w1_re-w3_re-h;
+  im_root[2]=w2_im-w1_im-w3_im;
+  re_root[3]=w1_re-w2_re-w3_re-h;
+  im_root[3]=w1_im-w2_im-w3_im;
+
+  return 4;
+}
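Spelled out, the algebra root4 performs is the classical reduction. After normalizing by p[4] the quartic is \(x^4 + a x^3 + b x^2 + c x + d = 0\), and the substitution \(x = y - a/4\) (the h = a/4 subtracted from every root at the end) leaves the depressed form

\[
y^4 + p\,y^2 + q\,y + r = 0,\qquad
p = b - \tfrac{3}{8}a^2,\quad
q = c - \tfrac{1}{2}a\Bigl(b - \tfrac{1}{4}a^2\Bigr),\quad
r = d - \tfrac{1}{4}a\Bigl(c - \tfrac{1}{4}a\Bigl(b - \tfrac{3}{16}a^2\Bigr)\Bigr),
\]

which are exactly the code's pp, qq and rr. The resolvent cubic whose roots fill u[] is then

\[
u^3 + \frac{p}{2}\,u^2 + \frac{p^2 - 4r}{16}\,u - \frac{q^2}{64} = 0,
\]

matching rc, sc and tc (rc3 is rc/3). Up to the sign bookkeeping done in the mt branches above, the four quartic roots are the combinations \(\pm\sqrt{u_1}\pm\sqrt{u_2}\pm\sqrt{u_3} - a/4\) whose signs are consistent with q.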
+
+
+
+  static void setRandomP(final double [] p,final int n,java.util.Random r)
+  {
+    if(r.nextDouble()<0.1)
+    {
+      // integer coefficients
+      for(int j=0;j<=n;j++)
+      {
+        if(r.nextDouble()<0.9)
+        {
+          p[j]=r.nextInt(10);
+        }
+        else
+        {
+          p[j]=0;
+        }
+      }
+    }
+    else
+    {
+      // real coefficients
+      for(int j=0;j<=n;j++)
+      {
+        p[j]=-1+2.0*r.nextDouble();
+      }
+    }
+    if(Math.abs(p[n])<1e-2)
+    {
+      p[n]+=1-2.0*r.nextDouble();
+    }
+  }
+
+
+  static void checkValues(final double [] p,
+                          final int n,
+                          final double rex,
+                          final double imx,
+                          final double eps,
+                          final String txt)
+  {
+    double res=0,ims=0,sabs=0;
+    final double xabs=Math.abs(rex)+Math.abs(imx);
+    for(int k=n;k>=0;k--)
+    {
+      final double res1=(res*rex-ims*imx)+p[k];
+      final double ims1=(ims*rex+res*imx);
+      res=res1;
+      ims=ims1;
+      sabs+=xabs*sabs+p[k];
+    }
+    sabs=Math.abs(sabs);
+    if(false && sabs>1/eps?
+       (!(Math.abs(res/sabs)<=eps)||!(Math.abs(ims/sabs)<=eps))
+       :
+       (!(Math.abs(res)<=eps)||!(Math.abs(ims)<=eps)))
+    {
+      throw new RuntimeException(
+        getPolinomTXT(p)+"\n"+
+        "\t x.r="+rex+" x.i="+imx+"\n"+
+        "res/sabs="+(res/sabs)+" ims/sabs="+(ims/sabs)+
+        " sabs="+sabs+
+        "\nres="+res+" ims="+ims+" n="+n+" eps="+eps+" "+
+        " sabs>1/eps="+(sabs>1/eps)+
+        " f1="+(!(Math.abs(res/sabs)<=eps)||!(Math.abs(ims/sabs)<=eps))+
+        " f2="+(!(Math.abs(res)<=eps)||!(Math.abs(ims)<=eps))+
+        " "+txt);
+    }
+  }
+
+  static String getPolinomTXT(final double [] p)
+  {
+    final StringBuilder buf=new StringBuilder();
+    buf.append("order="+(p.length-1)+"\t");
+    for(int k=0;k<p.length;k++)
+    {
+      buf.append("p["+k+"]="+p[k]+";");
+    }
+    return buf.toString();
+  }
+
+  static String getRootsTXT(int nr,final double [] re,final double [] im)
+  {
+    final StringBuilder buf=new StringBuilder();
+    for(int k=0;k<nr;k++)
+    {
+      buf.append("x."+k+"("+re[k]+","+im[k]+")\n");
+    }
+    return buf.toString();
+  }
+
+  static void testRoots(final int n,
+                        final int n_tests,
+                        final java.util.Random rn,
+                        final double eps)
+  {
+    final double [] p=new double [n+1];
+    final double [] rex=new double [n],imx=new double [n];
+    for(int i=0;i<n_tests;i++)
+    {
+      for(int dg=n;dg-->-1;)
+      {
+        for(int dr=3;dr-->0;)
+        {
+          setRandomP(p,n,rn);
+          for(int j=0;j<=dg;j++)
+          {
+            p[j]=0;
+          }
+          if(dr==0)
+          {
+            p[0]=-1+2.0*rn.nextDouble();
+          }
+          else if(dr==1)
+          {
+            p[0]=p[1]=0;
+          }
+
+          findPolynomialRoots(n,p,rex,imx);
+
+          for(int j=0;j'?");
+        if (outputWithError.getExitValue() == 0) {
+            throw new RuntimeException("Not expected to get exit value 0");
+        }
+
+        // The last JAVA process should run successfully for the purpose of sanity check
+        pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+PrintGC",
+            "-version"
+        );
+        OutputAnalyzer outputWithNoError = new OutputAnalyzer(pb.start());
+        outputWithNoError.shouldNotContain("Did you mean '(+/-)PrintGC'?");
+        outputWithNoError.shouldHaveExitValue(0);
+    }
+}
+
diff -r 16b10327b00d -r 90d6c221d4e5 test/gc/metaspace/TestMetaspaceMemoryPool.java
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/metaspace/TestMetaspaceMemoryPool.java Tue Jul 16 12:20:08 2013 -0400
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.List;
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryManagerMXBean;
+import java.lang.management.MemoryPoolMXBean;
+import java.lang.management.MemoryUsage;
+
+import java.lang.management.RuntimeMXBean;
+import java.lang.management.ManagementFactory;
+
+/* @test TestMetaspaceMemoryPool
+ * @bug 8000754
+ * @summary Tests that a MemoryPoolMXBean is created for metaspace and that a
+ *          MemoryManagerMXBean is created.
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops TestMetaspaceMemoryPool
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:MaxMetaspaceSize=60m TestMetaspaceMemoryPool
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers TestMetaspaceMemoryPool
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:ClassMetaspaceSize=60m TestMetaspaceMemoryPool
+ */
+public class TestMetaspaceMemoryPool {
+    public static void main(String[] args) {
+        verifyThatMetaspaceMemoryManagerExists();
+        verifyMemoryPool(getMemoryPool("Metaspace"), isFlagDefined("MaxMetaspaceSize"));
+
+        if (runsOn64bit()) {
+            if (usesCompressedOops()) {
+                MemoryPoolMXBean cksPool = getMemoryPool("Compressed Class Space");
+                verifyMemoryPool(cksPool, true);
+            }
+        }
+    }
+
+    private static boolean runsOn64bit() {
+        return !System.getProperty("sun.arch.data.model").equals("32");
+    }
+
+    private static boolean usesCompressedOops() {
+        return isFlagDefined("+UseCompressedOops");
+    }
+
+    private static boolean isFlagDefined(String name) {
+        RuntimeMXBean runtimeMxBean = ManagementFactory.getRuntimeMXBean();
+        List<String> args = runtimeMxBean.getInputArguments();
+        for (String arg : args) {
+            if (arg.startsWith("-XX:" + name)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    private static void verifyThatMetaspaceMemoryManagerExists() {
+        List<MemoryManagerMXBean> managers = ManagementFactory.getMemoryManagerMXBeans();
+        for (MemoryManagerMXBean manager : managers) {
+            if (manager.getName().equals("Metaspace Manager")) {
+                return;
+            }
+        }
+
+        throw new RuntimeException("Expected to find a metaspace memory manager");
+    }
+
+    private static MemoryPoolMXBean getMemoryPool(String name) {
+        List<MemoryPoolMXBean> pools = ManagementFactory.getMemoryPoolMXBeans();
+        for (MemoryPoolMXBean pool : pools) {
+            if (pool.getName().equals(name)) {
+                return pool;
+            }
+        }
+
+        throw new RuntimeException("Expected to find a memory pool with name " + name);
+    }
+
+    private static void verifyMemoryPool(MemoryPoolMXBean pool, boolean isMaxDefined) {
MemoryUsage mu = pool.getUsage(); + assertDefined(mu.getInit(), "init"); + assertDefined(mu.getUsed(), "used"); + assertDefined(mu.getCommitted(), "committed"); + + if (isMaxDefined) { + assertDefined(mu.getMax(), "max"); + } else { + assertUndefined(mu.getMax(), "max"); + } + } + + private static void assertDefined(long value, String name) { + assertTrue(value != -1, "Expected " + name + " to be defined"); + } + + private static void assertUndefined(long value, String name) { + assertEquals(value, -1, "Expected " + name + " to be undefined"); + } + + private static void assertEquals(long actual, long expected, String msg) { + assertTrue(actual == expected, msg); + } + + private static void assertTrue(boolean condition, String msg) { + if (!condition) { + throw new RuntimeException(msg); + } + } +} diff -r 16b10327b00d -r 90d6c221d4e5 test/gc/parallelScavenge/AdaptiveGCBoundary.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/gc/parallelScavenge/AdaptiveGCBoundary.java Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/** + * @test AdaptiveGCBoundary + * @summary UseAdaptiveGCBoundary is broken + * @bug 8014546 + * @key gc + * @key regression + * @library /testlibrary + * @run main/othervm AdaptiveGCBoundary + * @author jon.masamitsu@oracle.com + */ + +import com.oracle.java.testlibrary.*; + +public class AdaptiveGCBoundary { + public static void main(String args[]) throws Exception { + + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-showversion", + "-XX:+UseParallelGC", + "-XX:+UseAdaptiveGCBoundary", + "-XX:+PrintCommandLineFlags", + SystemGCCaller.class.getName() + ); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + + output.shouldContain("+UseAdaptiveGCBoundary"); + + output.shouldNotContain("error"); + + output.shouldHaveExitValue(0); + } + static class SystemGCCaller { + public static void main(String [] args) { + System.gc(); + } + } +} diff -r 16b10327b00d -r 90d6c221d4e5 test/runtime/6888954/vmerrors.sh --- a/test/runtime/6888954/vmerrors.sh Tue Jul 16 10:55:48 2013 -0400 +++ b/test/runtime/6888954/vmerrors.sh Tue Jul 16 12:20:08 2013 -0400 @@ -1,5 +1,6 @@ # @test # @bug 6888954 +# @bug 8015884 # @summary exercise HotSpot error handling code # @author John Coomes # @run shell vmerrors.sh @@ -27,9 +28,24 @@ rc=0 assert_re='(assert|guarantee)[(](str|num).*failed: *' +# for bad_data_ptr_re: +# EXCEPTION_ACCESS_VIOLATION - Win-* +# SIGILL - MacOS X +# SIGSEGV - Linux-*, Solaris SPARC-*, Solaris X86-* +# +bad_data_ptr_re='(SIGILL|SIGSEGV|EXCEPTION_ACCESS_VIOLATION).* at pc=' +# +# for bad_func_ptr_re: +# EXCEPTION_ACCESS_VIOLATION - Win-* +# SIGBUS - Solaris SPARC-64 +# SIGSEGV - Linux-*, Solaris SPARC-32, Solaris X86-* +# +# Note: would like to use "pc=0x00*0f," in the pattern, but Solaris SPARC-* +# gets its signal at a PC in test_error_handler(). +# +bad_func_ptr_re='(SIGBUS|SIGSEGV|EXCEPTION_ACCESS_VIOLATION).* at pc=' guarantee_re='guarantee[(](str|num).*failed: *' fatal_re='fatal error: *' -signal_re='(SIGSEGV|EXCEPTION_ACCESS_VIOLATION).* at pc=' tail_1='.*expected null' tail_2='.*num=' @@ -39,8 +55,9 @@ "${fatal_re}${tail_1}" "${fatal_re}${tail_2}" \ "${fatal_re}.*truncated" "ChunkPool::allocate" \ "ShouldNotCall" "ShouldNotReachHere" \ - "Unimplemented" "$signal_re" - + "Unimplemented" "$bad_data_ptr_re" \ + "$bad_func_ptr_re" + do i2=$i [ $i -lt 10 ] && i2=0$i diff -r 16b10327b00d -r 90d6c221d4e5 test/runtime/7196045/Test7196045.java --- a/test/runtime/7196045/Test7196045.java Tue Jul 16 10:55:48 2013 -0400 +++ b/test/runtime/7196045/Test7196045.java Tue Jul 16 12:20:08 2013 -0400 @@ -26,7 +26,7 @@ * @test * @bug 7196045 * @summary Possible JVM deadlock in ThreadTimesClosure when using HotspotInternal non-public API. - * @run main/othervm Test7196045 + * @run main/othervm -XX:+UsePerfData Test7196045 */ import java.lang.management.ManagementFactory; diff -r 16b10327b00d -r 90d6c221d4e5 test/runtime/8001071/Test8001071.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/8001071/Test8001071.java Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sun.misc.Unsafe;
+import java.lang.reflect.Field;
+
+@SuppressWarnings("sunapi")
+public class Test8001071 {
+    public static Unsafe unsafe;
+
+    static {
+        try {
+            Field f = Unsafe.class.getDeclaredField("theUnsafe");
+            f.setAccessible(true);
+            unsafe = (Unsafe) f.get(null);
+        } catch ( Exception e ) {
+            e.printStackTrace();
+        }
+    }
+
+    public static void main(String args[]) {
+        unsafe.getObject(new Test8001071(), Short.MAX_VALUE);
+    }
+
+}
diff -r 16b10327b00d -r 90d6c221d4e5 test/runtime/8001071/Test8001071.sh
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/8001071/Test8001071.sh Tue Jul 16 12:20:08 2013 -0400
@@ -0,0 +1,63 @@
+#!/bin/sh
+
+# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+
+## @test
+## @bug 8001071
+## @summary Add simple range check into VM implementation of Unsafe access methods
+## @compile Test8001071.java
+## @run shell Test8001071.sh
+## @author filipp.zhinkin@oracle.com
+
+VERSION=`${TESTJAVA}/bin/java ${TESTVMOPTS} -version 2>&1`
+
+if [ -n "`echo $VERSION | grep debug`" -o -n "`echo $VERSION | grep jvmg`" ]; then
+    echo "Build type check passed"
+    echo "Continue testing"
+else
+    echo "Fastdebug build is required for this test"
+    exit 0
+fi
+
+${TESTJAVA}/bin/java -cp ${TESTCLASSES} ${TESTVMOPTS} Test8001071 2>&1
+
+HS_ERR_FILE=hs_err_pid*.log
+
+if [ ! -f $HS_ERR_FILE ]
+then
+    echo "hs_err_pid log file was not found"
+    echo "Test failed"
+    exit 1
+fi
+
+grep "assert(byte_offset < p_size) failed: Unsafe access: offset.*> object's size.*" $HS_ERR_FILE
+
+if [ "0" = "$?" ];
+then
+    echo "Range check assertion failed as expected"
+    echo "Test passed"
+    exit 0
+else
+    echo "Range check assertion did not fail"
+    echo "Test failed"
+    exit 1
+fi
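What the shell script greps for is a debug-build assertion on the access offset. A self-contained sketch of that kind of guard (stand-in types and names, not the real unsafe.cpp) shows the shape of the check the test expects a fastdebug VM to trip:

    // unsafe_guard.cpp -- illustrative range check; assumes a debug build (NDEBUG unset)
    #include <cassert>
    #include <cstdio>
    #include <cstddef>

    static void* index_oop_unsafe(char* obj_base, size_t byte_offset, size_t obj_size) {
      // The hs_err pattern above corresponds to a guard of this shape:
      //   assert(byte_offset < p_size) failed: Unsafe access: offset > object's size
      assert(byte_offset < obj_size && "Unsafe access: offset > object's size");
      return obj_base + byte_offset;
    }

    int main() {
      char obj[16];
      std::printf("%p\n", index_oop_unsafe(obj, 8, sizeof(obj)));     // in range: fine
      // index_oop_unsafe(obj, 32767, sizeof(obj));  // Short.MAX_VALUE, as in the Java
      //                                             // test above: would trip the assert
      return 0;
    }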
diff -r 16b10327b00d -r 90d6c221d4e5 test/runtime/CommandLine/CompilerConfigFileWarning.java
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/CommandLine/CompilerConfigFileWarning.java Tue Jul 16 12:20:08 2013 -0400
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 7167142
+ * @summary Warn if unused .hotspot_compiler file is present
+ * @library /testlibrary
+ */
+
+import java.io.PrintWriter;
+import com.oracle.java.testlibrary.*;
+
+public class CompilerConfigFileWarning {
+    public static void main(String[] args) throws Exception {
+        String vmVersion = System.getProperty("java.vm.version");
+        if (vmVersion.toLowerCase().contains("debug") || vmVersion.toLowerCase().contains("jvmg")) {
+            System.out.println("Skip on debug builds since we'll always read the file there");
+            return;
+        }
+
+        PrintWriter pw = new PrintWriter(".hotspot_compiler");
+        pw.println("aa");
+        pw.close();
+
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("warning: .hotspot_compiler file is present but has been ignored. Run with -XX:CompileCommandFile=.hotspot_compiler to load the file.");
+    }
+}
diff -r 16b10327b00d -r 90d6c221d4e5 test/runtime/CommandLine/ConfigFileWarning.java
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/CommandLine/ConfigFileWarning.java Tue Jul 16 12:20:08 2013 -0400
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 7167142 + * @summary Warn if unused .hotspot_rc file is present + * @library /testlibrary + */ + +import java.io.PrintWriter; +import com.oracle.java.testlibrary.*; + +public class ConfigFileWarning { + public static void main(String[] args) throws Exception { + String vmVersion = System.getProperty("java.vm.version"); + if (vmVersion.toLowerCase().contains("debug") || vmVersion.toLowerCase().contains("jvmg")) { + System.out.println("Skip on debug builds since we'll always read the file there"); + return; + } + + PrintWriter pw = new PrintWriter(".hotspotrc"); + pw.println("aa"); + pw.close(); + + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldContain("warning: .hotspotrc file is present but has been ignored. Run with -XX:Flags=.hotspotrc to load the file."); + } +} diff -r 16b10327b00d -r 90d6c221d4e5 test/runtime/SharedArchiveFile/CdsDifferentObjectAlignment.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/SharedArchiveFile/CdsDifferentObjectAlignment.java Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test CdsDifferentObjectAlignment + * @summary Testing CDS (class data sharing) using varying object alignment. + * Using different object alignment for each dump/load pair. + * This is a negative test; using object alignment for loading that + * is different from object alignment for creating a CDS file + * should fail when loading. 
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class CdsDifferentObjectAlignment {
+    public static void main(String[] args) throws Exception {
+        String nativeWordSize = System.getProperty("sun.arch.data.model");
+        if (!Platform.is64bit()) {
+            System.out.println("ObjectAlignmentInBytes for CDS is only " +
+                "supported on 64bit platforms; this platform is " +
+                nativeWordSize);
+            System.out.println("Skipping the test");
+        } else {
+            createAndLoadSharedArchive(16, 64);
+            createAndLoadSharedArchive(64, 32);
+        }
+    }
+
+
+    // Parameters are object alignment expressed in bytes
+    private static void
+    createAndLoadSharedArchive(int createAlignment, int loadAlignment)
+        throws Exception {
+        String createAlignmentArgument = "-XX:ObjectAlignmentInBytes=" +
+            createAlignment;
+        String loadAlignmentArgument = "-XX:ObjectAlignmentInBytes=" +
+            loadAlignment;
+
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedArchiveFile=./sample.jsa",
+            "-Xshare:dump",
+            createAlignmentArgument);
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Loading classes to share");
+        output.shouldHaveExitValue(0);
+
+        pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedArchiveFile=./sample.jsa",
+            "-Xshare:on",
+            loadAlignmentArgument,
+            "-version");
+
+        output = new OutputAnalyzer(pb.start());
+        String expectedErrorMsg =
+            String.format(
+            "The shared archive file's ObjectAlignmentInBytes of %d " +
+            "does not equal the current ObjectAlignmentInBytes of %d",
+            createAlignment,
+            loadAlignment);
+
+        output.shouldContain(expectedErrorMsg);
+        output.shouldHaveExitValue(1);
+    }
+}
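The negative test pins down the exact refusal message, which implies a load-time compatibility check of roughly this shape (validate_object_alignment is a made-up name, not the actual CDS code):

    // cds_alignment_check.cpp -- illustrative only
    #include <cstdio>

    static bool validate_object_alignment(int archived, int current) {
      if (archived != current) {
        std::fprintf(stderr,
            "The shared archive file's ObjectAlignmentInBytes of %d "
            "does not equal the current ObjectAlignmentInBytes of %d\n",
            archived, current);
        return false;  // the VM then refuses the archive; the test expects exit value 1
      }
      return true;
    }

    int main() {
      // Mirrors createAndLoadSharedArchive(16, 64): dump with 16, load with 64.
      return validate_object_alignment(16, 64) ? 0 : 1;
    }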
diff -r 16b10327b00d -r 90d6c221d4e5 test/runtime/SharedArchiveFile/CdsSameObjectAlignment.java
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/SharedArchiveFile/CdsSameObjectAlignment.java Tue Jul 16 12:20:08 2013 -0400
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test CdsSameObjectAlignment
+ * @summary Testing CDS (class data sharing) using varying object alignment.
+ *          Using same object alignment for each dump/load pair
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class CdsSameObjectAlignment {
+    public static void main(String[] args) throws Exception {
+        String nativeWordSize = System.getProperty("sun.arch.data.model");
+        if (!Platform.is64bit()) {
+            System.out.println("ObjectAlignmentInBytes for CDS is only " +
+                "supported on 64bit platforms; this platform is " +
+                nativeWordSize);
+            System.out.println("Skipping the test");
+        } else {
+            dumpAndLoadSharedArchive(8);
+            dumpAndLoadSharedArchive(16);
+            dumpAndLoadSharedArchive(32);
+            dumpAndLoadSharedArchive(64);
+        }
+    }
+
+    private static void
+    dumpAndLoadSharedArchive(int objectAlignmentInBytes) throws Exception {
+        String objectAlignmentArg = "-XX:ObjectAlignmentInBytes=" +
+            objectAlignmentInBytes;
+        System.out.println("dumpAndLoadSharedArchive(): objectAlignmentInBytes = " +
+            objectAlignmentInBytes);
+
+        // create shared archive
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedArchiveFile=./sample.jsa",
+            "-Xshare:dump",
+            objectAlignmentArg);
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Loading classes to share");
+        output.shouldHaveExitValue(0);
+
+
+        // run using the shared archive
+        pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedArchiveFile=./sample.jsa",
+            "-Xshare:on",
+            objectAlignmentArg,
+            "-version");
+
+        output = new OutputAnalyzer(pb.start());
+
+        try {
+            output.shouldContain("sharing");
+            output.shouldHaveExitValue(0);
+        } catch (RuntimeException e) {
+            // CDS uses absolute addresses for performance.
+            // It will try to reserve memory at a specific address;
+            // there is a chance such reservation will fail.
+            // If it does, it is NOT considered a failure of the feature,
+            // rather a possible expected outcome, though not likely.
+            output.shouldContain(
+                "Unable to reserve shared space at required address");
+            output.shouldHaveExitValue(1);
+        }
+    }
+}
diff -r 16b10327b00d -r 90d6c221d4e5 test/serviceability/threads/TestFalseDeadLock.java
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/serviceability/threads/TestFalseDeadLock.java Tue Jul 16 12:20:08 2013 -0400
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */ + +import java.lang.management.ManagementFactory; +import java.lang.management.ThreadMXBean; +import java.util.Random; + +/* + * @test + * @bug 8016304 + * @summary Make sure no deadlock is reported for this program which has no deadlocks. + * @run main/othervm TestFalseDeadLock + */ + +/* + * This test will not provoke the bug every time it is run since the bug is intermittent. + * The test has a fixed running time of 5 seconds. + */ + +public class TestFalseDeadLock { + private static ThreadMXBean bean; + private static volatile boolean running = true; + private static volatile boolean found = false; + + public static void main(String[] args) throws Exception { + bean = ManagementFactory.getThreadMXBean(); + Thread[] threads = new Thread[500]; + for (int i = 0; i < threads.length; i++) { + Test t = new Test(); + threads[i] = new Thread(t); + threads[i].start(); + } + try { + Thread.sleep(5000); + } catch (InterruptedException ex) { + } + running = false; + for (Thread t : threads) { + t.join(); + } + if (found) { + throw new Exception("Deadlock reported, but there is no deadlock."); + } + } + + public static class Test implements Runnable { + public void run() { + Random r = new Random(); + while (running) { + try { + synchronized (this) { + wait(r.nextInt(1000) + 1); + } + } catch (InterruptedException ex) { + } + recurse(2000); + } + if (bean.findDeadlockedThreads() != null) { + System.out.println("FOUND!"); + found = true; + } + } + + private void recurse(int i) { + if (!running) { + // It is important for the test to call println here + // since there are locks inside that path. + System.out.println("Hullo"); + } + else if (i > 0) { + recurse(i - 1); + } + } + } +} diff -r 16b10327b00d -r 90d6c221d4e5 test/testlibrary/com/oracle/java/testlibrary/Platform.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testlibrary/com/oracle/java/testlibrary/Platform.java Tue Jul 16 12:20:08 2013 -0400 @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package com.oracle.java.testlibrary; + +public class Platform { + private static final String osName = System.getProperty("os.name"); + private static final String dataModel = System.getProperty("sun.arch.data.model"); + private static final String vmVersion = System.getProperty("java.vm.version"); + + public static boolean is64bit() { + return dataModel.equals("64"); + } + + public static boolean isSolaris() { + return osName.toLowerCase().startsWith("sunos"); + } + + public static boolean isWindows() { + return osName.toLowerCase().startsWith("win"); + } + + public static boolean isOSX() { + return osName.toLowerCase().startsWith("mac"); + } + + public static boolean isLinux() { + return osName.toLowerCase().startsWith("linux"); + } + + public static String getOsName() { + return osName; + } + + public static boolean isDebugBuild() { + return vmVersion.toLowerCase().contains("debug"); + } + + public static String getVMVersion() { + return vmVersion; + } +} diff -r 16b10327b00d -r 90d6c221d4e5 test/testlibrary/com/oracle/java/testlibrary/ProcessTools.java --- a/test/testlibrary/com/oracle/java/testlibrary/ProcessTools.java Tue Jul 16 10:55:48 2013 -0400 +++ b/test/testlibrary/com/oracle/java/testlibrary/ProcessTools.java Tue Jul 16 12:20:08 2013 -0400 @@ -112,10 +112,8 @@ * @return String[] with platform specific arguments, empty if there are none */ public static String[] getPlatformSpecificVMArgs() { - String osName = System.getProperty("os.name"); - String dataModel = System.getProperty("sun.arch.data.model"); - if (osName.equals("SunOS") && dataModel.equals("64")) { + if (Platform.is64bit() && Platform.isSolaris()) { return new String[] { "-d64" }; }